repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
2013Commons/hue | refs/heads/master | desktop/core/ext-py/MySQL-python-1.2.3c1/MySQLdb/constants/ER.py | 118 | """MySQL ER Constants
These constants are error codes for the bulk of the error conditions
that may occur.
"""
HASHCHK = 1000
NISAMCHK = 1001
NO = 1002
YES = 1003
CANT_CREATE_FILE = 1004
CANT_CREATE_TABLE = 1005
CANT_CREATE_DB = 1006
DB_CREATE_EXISTS = 1007
DB_DROP_EXISTS = 1008
DB_DROP_DELETE = 1009
DB_DROP_RMDIR = 1010
CANT_DELETE_FILE = 1011
CANT_FIND_SYSTEM_REC = 1012
CANT_GET_STAT = 1013
CANT_GET_WD = 1014
CANT_LOCK = 1015
CANT_OPEN_FILE = 1016
FILE_NOT_FOUND = 1017
CANT_READ_DIR = 1018
CANT_SET_WD = 1019
CHECKREAD = 1020
DISK_FULL = 1021
DUP_KEY = 1022
ERROR_ON_CLOSE = 1023
ERROR_ON_READ = 1024
ERROR_ON_RENAME = 1025
ERROR_ON_WRITE = 1026
FILE_USED = 1027
FILSORT_ABORT = 1028
FORM_NOT_FOUND = 1029
GET_ERRNO = 1030
ILLEGAL_HA = 1031
KEY_NOT_FOUND = 1032
NOT_FORM_FILE = 1033
NOT_KEYFILE = 1034
OLD_KEYFILE = 1035
OPEN_AS_READONLY = 1036
OUTOFMEMORY = 1037
OUT_OF_SORTMEMORY = 1038
UNEXPECTED_EOF = 1039
CON_COUNT_ERROR = 1040
OUT_OF_RESOURCES = 1041
BAD_HOST_ERROR = 1042
HANDSHAKE_ERROR = 1043
DBACCESS_DENIED_ERROR = 1044
ACCESS_DENIED_ERROR = 1045
NO_DB_ERROR = 1046
UNKNOWN_COM_ERROR = 1047
BAD_NULL_ERROR = 1048
BAD_DB_ERROR = 1049
TABLE_EXISTS_ERROR = 1050
BAD_TABLE_ERROR = 1051
NON_UNIQ_ERROR = 1052
SERVER_SHUTDOWN = 1053
BAD_FIELD_ERROR = 1054
WRONG_FIELD_WITH_GROUP = 1055
WRONG_GROUP_FIELD = 1056
WRONG_SUM_SELECT = 1057
WRONG_VALUE_COUNT = 1058
TOO_LONG_IDENT = 1059
DUP_FIELDNAME = 1060
DUP_KEYNAME = 1061
DUP_ENTRY = 1062
WRONG_FIELD_SPEC = 1063
PARSE_ERROR = 1064
EMPTY_QUERY = 1065
NONUNIQ_TABLE = 1066
INVALID_DEFAULT = 1067
MULTIPLE_PRI_KEY = 1068
TOO_MANY_KEYS = 1069
TOO_MANY_KEY_PARTS = 1070
TOO_LONG_KEY = 1071
KEY_COLUMN_DOES_NOT_EXITS = 1072
BLOB_USED_AS_KEY = 1073
TOO_BIG_FIELDLENGTH = 1074
WRONG_AUTO_KEY = 1075
READY = 1076
NORMAL_SHUTDOWN = 1077
GOT_SIGNAL = 1078
SHUTDOWN_COMPLETE = 1079
FORCING_CLOSE = 1080
IPSOCK_ERROR = 1081
NO_SUCH_INDEX = 1082
WRONG_FIELD_TERMINATORS = 1083
BLOBS_AND_NO_TERMINATED = 1084
TEXTFILE_NOT_READABLE = 1085
FILE_EXISTS_ERROR = 1086
LOAD_INFO = 1087
ALTER_INFO = 1088
WRONG_SUB_KEY = 1089
CANT_REMOVE_ALL_FIELDS = 1090
CANT_DROP_FIELD_OR_KEY = 1091
INSERT_INFO = 1092
INSERT_TABLE_USED = 1093
NO_SUCH_THREAD = 1094
KILL_DENIED_ERROR = 1095
NO_TABLES_USED = 1096
TOO_BIG_SET = 1097
NO_UNIQUE_LOGFILE = 1098
TABLE_NOT_LOCKED_FOR_WRITE = 1099
TABLE_NOT_LOCKED = 1100
BLOB_CANT_HAVE_DEFAULT = 1101
WRONG_DB_NAME = 1102
WRONG_TABLE_NAME = 1103
TOO_BIG_SELECT = 1104
UNKNOWN_ERROR = 1105
UNKNOWN_PROCEDURE = 1106
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
WRONG_PARAMETERS_TO_PROCEDURE = 1108
UNKNOWN_TABLE = 1109
FIELD_SPECIFIED_TWICE = 1110
INVALID_GROUP_FUNC_USE = 1111
UNSUPPORTED_EXTENSION = 1112
TABLE_MUST_HAVE_COLUMNS = 1113
RECORD_FILE_FULL = 1114
UNKNOWN_CHARACTER_SET = 1115
TOO_MANY_TABLES = 1116
TOO_MANY_FIELDS = 1117
TOO_BIG_ROWSIZE = 1118
STACK_OVERRUN = 1119
WRONG_OUTER_JOIN = 1120
NULL_COLUMN_IN_INDEX = 1121
CANT_FIND_UDF = 1122
CANT_INITIALIZE_UDF = 1123
UDF_NO_PATHS = 1124
UDF_EXISTS = 1125
CANT_OPEN_LIBRARY = 1126
CANT_FIND_DL_ENTRY = 1127
FUNCTION_NOT_DEFINED = 1128
HOST_IS_BLOCKED = 1129
HOST_NOT_PRIVILEGED = 1130
PASSWORD_ANONYMOUS_USER = 1131
PASSWORD_NOT_ALLOWED = 1132
PASSWORD_NO_MATCH = 1133
UPDATE_INFO = 1134
CANT_CREATE_THREAD = 1135
WRONG_VALUE_COUNT_ON_ROW = 1136
CANT_REOPEN_TABLE = 1137
INVALID_USE_OF_NULL = 1138
REGEXP_ERROR = 1139
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
NONEXISTING_GRANT = 1141
TABLEACCESS_DENIED_ERROR = 1142
COLUMNACCESS_DENIED_ERROR = 1143
ILLEGAL_GRANT_FOR_TABLE = 1144
GRANT_WRONG_HOST_OR_USER = 1145
NO_SUCH_TABLE = 1146
NONEXISTING_TABLE_GRANT = 1147
NOT_ALLOWED_COMMAND = 1148
SYNTAX_ERROR = 1149
DELAYED_CANT_CHANGE_LOCK = 1150
TOO_MANY_DELAYED_THREADS = 1151
ABORTING_CONNECTION = 1152
NET_PACKET_TOO_LARGE = 1153
NET_READ_ERROR_FROM_PIPE = 1154
NET_FCNTL_ERROR = 1155
NET_PACKETS_OUT_OF_ORDER = 1156
NET_UNCOMPRESS_ERROR = 1157
NET_READ_ERROR = 1158
NET_READ_INTERRUPTED = 1159
NET_ERROR_ON_WRITE = 1160
NET_WRITE_INTERRUPTED = 1161
TOO_LONG_STRING = 1162
TABLE_CANT_HANDLE_BLOB = 1163
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
DELAYED_INSERT_TABLE_LOCKED = 1165
WRONG_COLUMN_NAME = 1166
WRONG_KEY_COLUMN = 1167
WRONG_MRG_TABLE = 1168
DUP_UNIQUE = 1169
BLOB_KEY_WITHOUT_LENGTH = 1170
PRIMARY_CANT_HAVE_NULL = 1171
TOO_MANY_ROWS = 1172
REQUIRES_PRIMARY_KEY = 1173
NO_RAID_COMPILED = 1174
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
KEY_DOES_NOT_EXITS = 1176
CHECK_NO_SUCH_TABLE = 1177
CHECK_NOT_IMPLEMENTED = 1178
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ERROR_DURING_COMMIT = 1180
ERROR_DURING_ROLLBACK = 1181
ERROR_DURING_FLUSH_LOGS = 1182
ERROR_DURING_CHECKPOINT = 1183
NEW_ABORTING_CONNECTION = 1184
DUMP_NOT_IMPLEMENTED = 1185
FLUSH_MASTER_BINLOG_CLOSED = 1186
INDEX_REBUILD = 1187
MASTER = 1188
MASTER_NET_READ = 1189
MASTER_NET_WRITE = 1190
FT_MATCHING_KEY_NOT_FOUND = 1191
LOCK_OR_ACTIVE_TRANSACTION = 1192
UNKNOWN_SYSTEM_VARIABLE = 1193
CRASHED_ON_USAGE = 1194
CRASHED_ON_REPAIR = 1195
WARNING_NOT_COMPLETE_ROLLBACK = 1196
TRANS_CACHE_FULL = 1197
SLAVE_MUST_STOP = 1198
SLAVE_NOT_RUNNING = 1199
BAD_SLAVE = 1200
MASTER_INFO = 1201
SLAVE_THREAD = 1202
TOO_MANY_USER_CONNECTIONS = 1203
SET_CONSTANTS_ONLY = 1204
LOCK_WAIT_TIMEOUT = 1205
LOCK_TABLE_FULL = 1206
READ_ONLY_TRANSACTION = 1207
DROP_DB_WITH_READ_LOCK = 1208
CREATE_DB_WITH_READ_LOCK = 1209
WRONG_ARGUMENTS = 1210
NO_PERMISSION_TO_CREATE_USER = 1211
UNION_TABLES_IN_DIFFERENT_DIR = 1212
LOCK_DEADLOCK = 1213
TABLE_CANT_HANDLE_FT = 1214
CANNOT_ADD_FOREIGN = 1215
NO_REFERENCED_ROW = 1216
ROW_IS_REFERENCED = 1217
CONNECT_TO_MASTER = 1218
QUERY_ON_MASTER = 1219
ERROR_WHEN_EXECUTING_COMMAND = 1220
WRONG_USAGE = 1221
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
CANT_UPDATE_WITH_READLOCK = 1223
MIXING_NOT_ALLOWED = 1224
DUP_ARGUMENT = 1225
USER_LIMIT_REACHED = 1226
SPECIFIC_ACCESS_DENIED_ERROR = 1227
LOCAL_VARIABLE = 1228
GLOBAL_VARIABLE = 1229
NO_DEFAULT = 1230
WRONG_VALUE_FOR_VAR = 1231
WRONG_TYPE_FOR_VAR = 1232
VAR_CANT_BE_READ = 1233
CANT_USE_OPTION_HERE = 1234
NOT_SUPPORTED_YET = 1235
MASTER_FATAL_ERROR_READING_BINLOG = 1236
SLAVE_IGNORED_TABLE = 1237
INCORRECT_GLOBAL_LOCAL_VAR = 1238
WRONG_FK_DEF = 1239
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
OPERAND_COLUMNS = 1241
SUBQUERY_NO_1_ROW = 1242
UNKNOWN_STMT_HANDLER = 1243
CORRUPT_HELP_DB = 1244
CYCLIC_REFERENCE = 1245
AUTO_CONVERT = 1246
ILLEGAL_REFERENCE = 1247
DERIVED_MUST_HAVE_ALIAS = 1248
SELECT_REDUCED = 1249
TABLENAME_NOT_ALLOWED_HERE = 1250
NOT_SUPPORTED_AUTH_MODE = 1251
SPATIAL_CANT_HAVE_NULL = 1252
COLLATION_CHARSET_MISMATCH = 1253
SLAVE_WAS_RUNNING = 1254
SLAVE_WAS_NOT_RUNNING = 1255
TOO_BIG_FOR_UNCOMPRESS = 1256
ZLIB_Z_MEM_ERROR = 1257
ZLIB_Z_BUF_ERROR = 1258
ZLIB_Z_DATA_ERROR = 1259
CUT_VALUE_GROUP_CONCAT = 1260
WARN_TOO_FEW_RECORDS = 1261
WARN_TOO_MANY_RECORDS = 1262
WARN_NULL_TO_NOTNULL = 1263
WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
WARN_USING_OTHER_HANDLER = 1266
CANT_AGGREGATE_2COLLATIONS = 1267
DROP_USER = 1268
REVOKE_GRANTS = 1269
CANT_AGGREGATE_3COLLATIONS = 1270
CANT_AGGREGATE_NCOLLATIONS = 1271
VARIABLE_IS_NOT_STRUCT = 1272
UNKNOWN_COLLATION = 1273
SLAVE_IGNORED_SSL_PARAMS = 1274
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
WARN_FIELD_RESOLVED = 1276
BAD_SLAVE_UNTIL_COND = 1277
MISSING_SKIP_SLAVE = 1278
UNTIL_COND_IGNORED = 1279
WRONG_NAME_FOR_INDEX = 1280
WRONG_NAME_FOR_CATALOG = 1281
WARN_QC_RESIZE = 1282
BAD_FT_COLUMN = 1283
UNKNOWN_KEY_CACHE = 1284
WARN_HOSTNAME_WONT_WORK = 1285
UNKNOWN_STORAGE_ENGINE = 1286
WARN_DEPRECATED_SYNTAX = 1287
NON_UPDATABLE_TABLE = 1288
FEATURE_DISABLED = 1289
OPTION_PREVENTS_STATEMENT = 1290
DUPLICATED_VALUE_IN_TYPE = 1291
TRUNCATED_WRONG_VALUE = 1292
TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
INVALID_ON_UPDATE = 1294
UNSUPPORTED_PS = 1295
GET_ERRMSG = 1296
GET_TEMPORARY_ERRMSG = 1297
UNKNOWN_TIME_ZONE = 1298
WARN_INVALID_TIMESTAMP = 1299
INVALID_CHARACTER_STRING = 1300
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
CONFLICTING_DECLARATIONS = 1302
SP_NO_RECURSIVE_CREATE = 1303
SP_ALREADY_EXISTS = 1304
SP_DOES_NOT_EXIST = 1305
SP_DROP_FAILED = 1306
SP_STORE_FAILED = 1307
SP_LILABEL_MISMATCH = 1308
SP_LABEL_REDEFINE = 1309
SP_LABEL_MISMATCH = 1310
SP_UNINIT_VAR = 1311
SP_BADSELECT = 1312
SP_BADRETURN = 1313
SP_BADSTATEMENT = 1314
UPDATE_LOG_DEPRECATED_IGNORED = 1315
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
QUERY_INTERRUPTED = 1317
SP_WRONG_NO_OF_ARGS = 1318
SP_COND_MISMATCH = 1319
SP_NORETURN = 1320
SP_NORETURNEND = 1321
SP_BAD_CURSOR_QUERY = 1322
SP_BAD_CURSOR_SELECT = 1323
SP_CURSOR_MISMATCH = 1324
SP_CURSOR_ALREADY_OPEN = 1325
SP_CURSOR_NOT_OPEN = 1326
SP_UNDECLARED_VAR = 1327
SP_WRONG_NO_OF_FETCH_ARGS = 1328
SP_FETCH_NO_DATA = 1329
SP_DUP_PARAM = 1330
SP_DUP_VAR = 1331
SP_DUP_COND = 1332
SP_DUP_CURS = 1333
SP_CANT_ALTER = 1334
SP_SUBSELECT_NYI = 1335
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
SP_VARCOND_AFTER_CURSHNDLR = 1337
SP_CURSOR_AFTER_HANDLER = 1338
SP_CASE_NOT_FOUND = 1339
FPARSER_TOO_BIG_FILE = 1340
FPARSER_BAD_HEADER = 1341
FPARSER_EOF_IN_COMMENT = 1342
FPARSER_ERROR_IN_PARAMETER = 1343
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
VIEW_NO_EXPLAIN = 1345
FRM_UNKNOWN_TYPE = 1346
WRONG_OBJECT = 1347
NONUPDATEABLE_COLUMN = 1348
VIEW_SELECT_DERIVED = 1349
VIEW_SELECT_CLAUSE = 1350
VIEW_SELECT_VARIABLE = 1351
VIEW_SELECT_TMPTABLE = 1352
VIEW_WRONG_LIST = 1353
WARN_VIEW_MERGE = 1354
WARN_VIEW_WITHOUT_KEY = 1355
VIEW_INVALID = 1356
SP_NO_DROP_SP = 1357
SP_GOTO_IN_HNDLR = 1358
TRG_ALREADY_EXISTS = 1359
TRG_DOES_NOT_EXIST = 1360
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
TRG_CANT_CHANGE_ROW = 1362
TRG_NO_SUCH_ROW_IN_TRG = 1363
NO_DEFAULT_FOR_FIELD = 1364
DIVISION_BY_ZERO = 1365
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ILLEGAL_VALUE_FOR_TYPE = 1367
VIEW_NONUPD_CHECK = 1368
VIEW_CHECK_FAILED = 1369
PROCACCESS_DENIED_ERROR = 1370
RELAY_LOG_FAIL = 1371
PASSWD_LENGTH = 1372
UNKNOWN_TARGET_BINLOG = 1373
IO_ERR_LOG_INDEX_READ = 1374
BINLOG_PURGE_PROHIBITED = 1375
FSEEK_FAIL = 1376
BINLOG_PURGE_FATAL_ERR = 1377
LOG_IN_USE = 1378
LOG_PURGE_UNKNOWN_ERR = 1379
RELAY_LOG_INIT = 1380
NO_BINARY_LOGGING = 1381
RESERVED_SYNTAX = 1382
WSAS_FAILED = 1383
DIFF_GROUPS_PROC = 1384
NO_GROUP_FOR_PROC = 1385
ORDER_WITH_PROC = 1386
LOGGING_PROHIBIT_CHANGING_OF = 1387
NO_FILE_MAPPING = 1388
WRONG_MAGIC = 1389
PS_MANY_PARAM = 1390
KEY_PART_0 = 1391
VIEW_CHECKSUM = 1392
VIEW_MULTIUPDATE = 1393
VIEW_NO_INSERT_FIELD_LIST = 1394
VIEW_DELETE_MERGE_VIEW = 1395
CANNOT_USER = 1396
XAER_NOTA = 1397
XAER_INVAL = 1398
XAER_RMFAIL = 1399
XAER_OUTSIDE = 1400
XAER_RMERR = 1401
XA_RBROLLBACK = 1402
NONEXISTING_PROC_GRANT = 1403
PROC_AUTO_GRANT_FAIL = 1404
PROC_AUTO_REVOKE_FAIL = 1405
DATA_TOO_LONG = 1406
SP_BAD_SQLSTATE = 1407
STARTUP = 1408
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
CANT_CREATE_USER_WITH_GRANT = 1410
WRONG_VALUE_FOR_TYPE = 1411
TABLE_DEF_CHANGED = 1412
SP_DUP_HANDLER = 1413
SP_NOT_VAR_ARG = 1414
SP_NO_RETSET = 1415
CANT_CREATE_GEOMETRY_OBJECT = 1416
FAILED_ROUTINE_BREAK_BINLOG = 1417
BINLOG_UNSAFE_ROUTINE = 1418
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
EXEC_STMT_WITH_OPEN_CURSOR = 1420
STMT_HAS_NO_OPEN_CURSOR = 1421
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
NO_DEFAULT_FOR_VIEW_FIELD = 1423
SP_NO_RECURSION = 1424
TOO_BIG_SCALE = 1425
TOO_BIG_PRECISION = 1426
M_BIGGER_THAN_D = 1427
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
FOREIGN_DATA_STRING_INVALID = 1433
CANT_CREATE_FEDERATED_TABLE = 1434
TRG_IN_WRONG_SCHEMA = 1435
STACK_OVERRUN_NEED_MORE = 1436
TOO_LONG_BODY = 1437
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
TOO_BIG_DISPLAYWIDTH = 1439
XAER_DUPID = 1440
DATETIME_FUNCTION_OVERFLOW = 1441
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
VIEW_PREVENT_UPDATE = 1443
PS_NO_RECURSION = 1444
SP_CANT_SET_AUTOCOMMIT = 1445
MALFORMED_DEFINER = 1446
VIEW_FRM_NO_USER = 1447
VIEW_OTHER_USER = 1448
NO_SUCH_USER = 1449
FORBID_SCHEMA_CHANGE = 1450
ROW_IS_REFERENCED_2 = 1451
NO_REFERENCED_ROW_2 = 1452
SP_BAD_VAR_SHADOW = 1453
TRG_NO_DEFINER = 1454
OLD_FILE_FORMAT = 1455
SP_RECURSION_LIMIT = 1456
SP_PROC_TABLE_CORRUPT = 1457
ERROR_LAST = 1457
|
double12gzh/nova | refs/heads/master | nova/db/__init__.py | 123 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DB abstraction for Nova
"""
from nova.db.api import * # noqa
|
klenks/jobsportal | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/admin/views/decorators.py | 638 | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def staff_member_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME,
                          login_url='admin:login'):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, redirecting to the login page if necessary.

    Usable both as a bare decorator (``@staff_member_required``) and as a
    decorator factory (``@staff_member_required(login_url=...)``).
    """
    def _is_active_staff(user):
        # Both flags must be set: account enabled AND marked as staff.
        return user.is_active and user.is_staff

    decorator = user_passes_test(
        _is_active_staff,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    # Bare-decorator usage: a view function was passed directly, wrap it now.
    return decorator(view_func) if view_func else decorator
|
kdwink/intellij-community | refs/heads/master | python/lib/Lib/xml/etree/ElementTree.py | 79 | #
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring",
"iselement", "iterparse",
"parse",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring",
"TreeBuilder",
"VERSION", "XML",
"XMLParser", "XMLTreeBuilder",
]
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} or {@link
# #SubElement} factory functions.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import string, sys, re
class _SimpleElementPath:
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None):
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
def findall(self, element, tag):
if tag[:3] == ".//":
return element.getiterator(tag[3:])
result = []
for elem in element:
if elem.tag == tag:
result.append(elem)
return result
# Prefer the full ElementPath module (XPath-like expressions) when it is
# installed; otherwise fall back to the limited _SimpleElementPath emulation
# defined above, which supports plain tag names only.
try:
    import ElementPath
except ImportError:
    # FIXME: issue warning in this case?
    ElementPath = _SimpleElementPath()
# TODO: add support for custom namespace resolvers/default namespaces
# TODO: add improved support for incremental parsing
VERSION = "1.2.6"
##
# Internal element class. This class defines the Element interface,
# and provides a reference implementation of this interface.
# <p>
# You should not create instances of this class directly. Use the
# appropriate factory functions instead, such as {@link #Element}
# and {@link #SubElement}.
#
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class _ElementInterface:
    # <tag attrib>text<child/>...</tag>tail
    # NOTE(review): legacy (pre-xml.etree) ElementTree code — an old-style
    # class; the __getslice__/__setslice__/__delslice__ dunders below are
    # only honoured by Python 2-era interpreters.
    ##
    # (Attribute) Element tag.
    tag = None
    ##
    # (Attribute) Element attribute dictionary. Where possible, use
    # {@link #_ElementInterface.get},
    # {@link #_ElementInterface.set},
    # {@link #_ElementInterface.keys}, and
    # {@link #_ElementInterface.items} to access
    # element attributes.
    attrib = None
    ##
    # (Attribute) Text before first subelement. This is either a
    # string or the value None, if there was no text.
    text = None
    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag. This is either a string or
    # the value None, if there was no text.
    tail = None # text after end tag, if any
    def __init__(self, tag, attrib):
        """Create an element with the given tag and attribute dict.

        The dict is stored as-is (not copied) — callers such as the
        Element/SubElement factories copy it first.
        """
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        """Return a debug representation including tag and object id."""
        return "<Element %s at %x>" % (self.tag, id(self))
    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.
    def makeelement(self, tag, attrib):
        """Create a new element of the same type (used by SubElement)."""
        return Element(tag, attrib)
    ##
    # Returns the number of subelements.
    #
    # @return The number of subelements.
    def __len__(self):
        """Return the number of subelements."""
        return len(self._children)
    ##
    # Returns the given subelement.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.
    def __getitem__(self, index):
        """Return the subelement at *index*."""
        return self._children[index]
    ##
    # Replaces the given subelement.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.
    # @exception AssertionError If element is not a valid object.
    def __setitem__(self, index, element):
        """Replace the subelement at *index*."""
        assert iselement(element)
        self._children[index] = element
    ##
    # Deletes the given subelement.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.
    def __delitem__(self, index):
        """Delete the subelement at *index*."""
        del self._children[index]
    ##
    # Returns a list containing subelements in the given range.
    #
    # @param start The first subelement to return.
    # @param stop The first subelement that shouldn't be returned.
    # @return A sequence object containing subelements.
    def __getslice__(self, start, stop):
        """Return subelements in [start:stop] (Python 2 slice protocol)."""
        return self._children[start:stop]
    ##
    # Replaces a number of subelements with elements from a sequence.
    #
    # @param start The first subelement to replace.
    # @param stop The first subelement that shouldn't be replaced.
    # @param elements A sequence object with zero or more elements.
    # @exception AssertionError If a sequence member is not a valid object.
    def __setslice__(self, start, stop, elements):
        """Replace subelements in [start:stop] (Python 2 slice protocol)."""
        for element in elements:
            assert iselement(element)
        self._children[start:stop] = list(elements)
    ##
    # Deletes a number of subelements.
    #
    # @param start The first subelement to delete.
    # @param stop The first subelement to leave in there.
    def __delslice__(self, start, stop):
        """Delete subelements in [start:stop] (Python 2 slice protocol)."""
        del self._children[start:stop]
    ##
    # Adds a subelement to the end of this element.
    #
    # @param element The element to add.
    # @exception AssertionError If a sequence member is not a valid object.
    def append(self, element):
        """Append *element* as the last subelement."""
        assert iselement(element)
        self._children.append(element)
    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.
    # @exception AssertionError If the element is not a valid object.
    def insert(self, index, element):
        """Insert *element* at position *index*."""
        assert iselement(element)
        self._children.insert(index, element)
    ##
    # Removes a matching subelement. Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.
    # @exception AssertionError If the element is not a valid object.
    def remove(self, element):
        """Remove the first subelement equal to *element* (list semantics)."""
        assert iselement(element)
        self._children.remove(element)
    ##
    # Returns all subelements. The elements are returned in document
    # order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances
    def getchildren(self):
        """Return the internal child list (not a copy), in document order."""
        return self._children
    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path):
        """Return the first matching subelement (delegates to ElementPath)."""
        return ElementPath.find(self, path)
    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    # default value no element was found. Note that if the element
    # has is found, but has no text content, this method returns an
    # empty string.
    # @defreturn string
    def findtext(self, path, default=None):
        """Return the first matching subelement's text, or *default*."""
        return ElementPath.findtext(self, path, default)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    # in document order.
    # @defreturn list of Element instances
    def findall(self, path):
        """Return all matching subelements (delegates to ElementPath)."""
        return ElementPath.findall(self, path)
    ##
    # Resets an element. This function removes all subelements, clears
    # all attributes, and sets the text and tail attributes to None.
    def clear(self):
        """Reset the element: drop children, attributes, text and tail."""
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    ##
    # Gets an element attribute.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    # attribute was not found.
    # @defreturn string or None
    def get(self, key, default=None):
        """Return the attribute value for *key*, or *default*."""
        return self.attrib.get(key, default)
    ##
    # Sets an element attribute.
    #
    # @param key What attribute to set.
    # @param value The attribute value.
    def set(self, key, value):
        """Set the attribute *key* to *value*."""
        self.attrib[key] = value
    ##
    # Gets a list of attribute names. The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    #
    # @return A list of element attribute names.
    # @defreturn list of strings
    def keys(self):
        """Return the attribute names (dict ordering)."""
        return self.attrib.keys()
    ##
    # Gets element attributes, as a sequence. The attributes are
    # returned in an arbitrary order.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples
    def items(self):
        """Return the attributes as (name, value) pairs."""
        return self.attrib.items()
    ##
    # Creates a tree iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, the result
    # is undefined.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return A list or iterator containing all the matching elements.
    # @defreturn list or iterator
    def getiterator(self, tag=None):
        """Collect this element and all matching descendants, in document order.

        A tag of "*" (or None) matches every element.  Returns an eagerly
        built list, not a lazy iterator.
        """
        nodes = []
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            nodes.append(self)
        for node in self._children:
            nodes.extend(node.getiterator(tag))
        return nodes
# compatibility
_Element = _ElementInterface
##
# Element factory. This function returns an object implementing the
# standard Element interface. The exact class or type of that object
# is implementation dependent, but it will always be compatible with
# the {@link #_ElementInterface} class in this module.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def Element(tag, attrib={}, **extra):
    """Standard element factory.

    Returns an object implementing the standard Element interface
    (currently an _ElementInterface instance).

    tag -- the element name.
    attrib -- optional dictionary of element attributes.
    **extra -- additional attributes, given as keyword arguments.
    """
    # Copy first so neither the caller's mapping nor the shared default
    # dict is ever mutated.
    attributes = dict(attrib)
    attributes.update(extra)
    return _ElementInterface(tag, attributes)
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    """Subelement factory: create an element and append it to *parent*.

    parent -- the parent element.
    tag -- the subelement name.
    attrib -- optional dictionary of element attributes.
    **extra -- additional attributes, given as keyword arguments.

    Returns the newly created subelement.
    """
    # Copy first so neither the caller's mapping nor the shared default
    # dict is ever mutated.
    attributes = dict(attrib)
    attributes.update(extra)
    child = parent.makeelement(tag, attributes)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    """Comment element factory.

    Creates a special element that is serialized as an XML comment;
    the Comment function object itself is used as the sentinel tag.

    text -- optional string containing the comment body.
    """
    node = Element(Comment)
    node.text = text
    return node
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    """PI element factory.

    Creates a special element that is serialized as an XML processing
    instruction; the ProcessingInstruction function object itself is
    used as the sentinel tag.

    target -- string containing the PI target.
    text -- optional string with the PI contents, appended after a space.
    """
    node = Element(ProcessingInstruction)
    if text:
        node.text = "%s %s" % (target, text)
    else:
        node.text = target
    return node
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
    """Wrapper for a qualified (namespace-aware) name.

    Two calling forms: ``QName("{uri}local")`` stores the value as given;
    ``QName(uri, local)`` combines the parts into ``{uri}local``.  Wrapping
    an attribute value this way gets proper namespace handling on output.
    """
    def __init__(self, text_or_uri, tag=None):
        if tag:
            self.text = "{%s}%s" % (text_or_uri, tag)
        else:
            self.text = text_or_uri
    def __str__(self):
        return self.text
    def __hash__(self):
        # Hash by textual value so a QName and its string form collide.
        return hash(self.text)
    def __cmp__(self, other):
        # Python 2 comparison fallback; compare by textual value, so a
        # QName compares equal to the plain "{uri}local" string.
        if isinstance(other, QName):
            other = other.text
        return cmp(self.text, other)
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
def __init__(self, element=None, file=None):
assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return The document root element.
# @defreturn Element
def parse(self, source, parser=None):
managed_file = not hasattr(source, "read")
if managed_file:
source = open(source, "rb")
try:
if not parser:
parser = XMLTreeBuilder()
while 1:
data = source.read(32768)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if managed_file:
source.close()
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def getiterator(self, tag=None):
assert self._root is not None
return self._root.getiterator(tag)
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.find(path)
##
# Finds the element text for the first toplevel element with the
# given tag.  Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @return The text content of the first matching element, or the
#     default value if no element was found.  Note that if the
#     element is found but has no text content, this method returns
#     an empty string.
# @defreturn string

def findtext(self, path, default=None):
    root = self._root
    assert root is not None
    # absolute paths are searched relative to the root element
    if path.startswith("/"):
        path = "." + path
    return root.findtext(path, default)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @return A list or iterator containing all matching elements,
#    in document order.
# @defreturn list of Element instances

def findall(self, path):
    root = self._root
    assert root is not None
    # absolute paths are searched relative to the root element
    if path.startswith("/"):
        path = "." + path
    return root.findall(path)
##
# Writes the element tree to a file, as XML.
#
# @param file A file name, or a file object opened for writing.
# @param encoding Optional output encoding (default is US-ASCII).

def write(self, file, encoding="us-ascii"):
    assert self._root is not None
    # True when we opened the file ourselves and must close it again
    managed_file = not hasattr(file, "write")
    if managed_file:
        file = open(file, "wb")
    try:
        if not encoding:
            encoding = "us-ascii"
        elif encoding != "utf-8" and encoding != "us-ascii":
            # utf-8 and us-ascii need no declaration; everything else does
            file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
        # {} is the (initially empty) namespace-prefix map for this run
        self._write(file, self._root, encoding, {})
    finally:
        if managed_file:
            file.close()
def _write(self, file, node, encoding, namespaces):
    # write XML to file.  *namespaces* maps namespace URI -> prefix and
    # is shared down the recursion; entries added for this element are
    # removed again before returning.
    tag = node.tag
    if tag is Comment:
        file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
    elif tag is ProcessingInstruction:
        file.write("<?%s?>" % _escape_cdata(node.text, encoding))
    else:
        items = node.items()
        xmlns_items = [] # new namespaces in this scope
        try:
            # {uri}tag and QName tags are rewritten to prefix:tag form
            if isinstance(tag, QName) or tag[:1] == "{":
                tag, xmlns = fixtag(tag, namespaces)
                if xmlns: xmlns_items.append(xmlns)
        except TypeError:
            _raise_serialization_error(tag)
        file.write("<" + _encode(tag, encoding))
        if items or xmlns_items:
            items.sort() # lexical order
            for k, v in items:
                try:
                    if isinstance(k, QName) or k[:1] == "{":
                        k, xmlns = fixtag(k, namespaces)
                        if xmlns: xmlns_items.append(xmlns)
                except TypeError:
                    _raise_serialization_error(k)
                try:
                    if isinstance(v, QName):
                        v, xmlns = fixtag(v, namespaces)
                        if xmlns: xmlns_items.append(xmlns)
                except TypeError:
                    _raise_serialization_error(v)
                file.write(" %s=\"%s\"" % (_encode(k, encoding),
                                           _escape_attrib(v, encoding)))
            # declare any namespaces introduced by this element
            for k, v in xmlns_items:
                file.write(" %s=\"%s\"" % (_encode(k, encoding),
                                           _escape_attrib(v, encoding)))
        if node.text or len(node):
            file.write(">")
            if node.text:
                file.write(_escape_cdata(node.text, encoding))
            for n in node:
                self._write(file, n, encoding, namespaces)
            file.write("</" + _encode(tag, encoding) + ">")
        else:
            # no text and no children: use the short empty-element form
            file.write(" />")
        # pop this element's namespace declarations from the shared map
        for k, v in xmlns_items:
            del namespaces[v]
    if node.tail:
        file.write(_escape_cdata(node.tail, encoding))
# --------------------------------------------------------------------
# helpers

##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag

def iselement(element):
    # FIXME: not sure about this; might be a better idea to look
    # for tag/attrib/text attributes
    if isinstance(element, _ElementInterface):
        return True
    return hasattr(element, "tag")
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.

def dump(elem):
    # debugging
    # bare elements are wrapped so ElementTree.write can handle them
    if not isinstance(elem, ElementTree):
        elem = ElementTree(elem)
    elem.write(sys.stdout)
    tail = elem.getroot().tail
    # make sure the dump ends with a newline
    if not tail or tail[-1] != "\n":
        sys.stdout.write("\n")
def _encode(s, encoding):
try:
return s.encode(encoding)
except AttributeError:
return s # 1.5.2: assume the string uses the right encoding
if sys.version[:3] == "1.5":
_escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
_escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
_escape_map = {
"&": "&",
"<": "<",
">": ">",
'"': """,
}
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode_entity(text, pattern=_escape):
    # map reserved and non-ascii characters to numerical entities
    def escape_entities(m, map=_escape_map):
        out = []
        append = out.append
        for char in m.group():
            text = map.get(char)
            if text is None:
                # no named entity for this character; use a numeric
                # character reference instead
                text = "&#%d;" % ord(char)
            append(text)
        return string.join(out, "")
    try:
        return _encode(pattern.sub(escape_entities, text), "ascii")
    except TypeError:
        # pattern.sub failed: *text* is not a string-like object
        _raise_serialization_error(text)
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")

def _escape_cdata(text, encoding=None, replace=string.replace):
    # escape character data for element text/tail content.
    # NOTE: the replacement strings had lost their escaping
    # (e.g. replace(text, "&", "&")), producing ill-formed XML;
    # restored to the proper entity references.  The ampersand must
    # be escaped first so later replacements are not double-escaped.
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                # target encoding cannot represent the text; fall
                # back to numeric character references
                return _encode_entity(text)
        text = replace(text, "&", "&amp;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text, encoding=None, replace=string.replace):
    # escape attribute value.
    # NOTE: the replacement strings had lost their escaping
    # (e.g. replace(text, "&", "&")), producing ill-formed XML;
    # restored to the proper entity references.  The ampersand must
    # be escaped first so later replacements are not double-escaped.
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                # target encoding cannot represent the text; fall
                # back to numeric character references
                return _encode_entity(text)
        text = replace(text, "&", "&amp;")
        text = replace(text, "'", "&apos;") # FIXME: overkill
        text = replace(text, "\"", "&quot;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def fixtag(tag, namespaces):
    # given a decorated tag (of the form {uri}tag), return prefixed
    # tag and namespace declaration, if any.  *namespaces* (a map of
    # URI -> prefix) is updated in place when a new prefix is assigned.
    if isinstance(tag, QName):
        tag = tag.text
    namespace_uri, tag = string.split(tag[1:], "}", 1)
    prefix = namespaces.get(namespace_uri)
    if prefix is None:
        # unseen namespace: prefer a well-known prefix, else invent one
        prefix = _namespace_map.get(namespace_uri)
        if prefix is None:
            prefix = "ns%d" % len(namespaces)
        namespaces[namespace_uri] = prefix
        if prefix == "xml":
            # the xml prefix is predeclared and must not be re-declared
            xmlns = None
        else:
            # new in this scope; the caller must emit this declaration
            xmlns = ("xmlns:%s" % prefix, namespace_uri)
    else:
        xmlns = None
    return "%s:%s" % (prefix, tag), xmlns
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
#     standard {@link XMLTreeBuilder} parser is used.
# @return An ElementTree instance

def parse(source, parser=None):
    # delegate to ElementTree.parse, which accepts both filenames
    # and file-like objects
    tree = ElementTree()
    tree.parse(source, parser)
    return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
#     events are reported.
# @return A (event, elem) iterator.

class iterparse:

    def __init__(self, source, events=None):
        # remember whether we opened the file ourselves (and therefore
        # must close it again when parsing finishes)
        self._managed_file = not hasattr(source, "read")
        if self._managed_file:
            source = open(source, "rb")
        self._file = source
        self._events = []          # buffered (event, elem) tuples
        self._index = 0            # read position inside the buffer
        self.root = self._root = None
        self._parser = XMLTreeBuilder()
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer the list-based attribute protocol, if the
                    # installed expat supports it
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = _encode(uri, "ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri)))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler

    def next(self):
        # return the next buffered event, reading and parsing more of
        # the input whenever the buffer runs dry
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                if self._parser is None:
                    # parsing finished: publish the root and stop
                    self.root = self._root
                    try:
                        raise StopIteration
                    except NameError:
                        # Python 1.5.2 has no StopIteration; IndexError
                        # terminates the __getitem__ iteration protocol
                        raise IndexError
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    # end of input: finish the parse and release resources
                    self._root = self._parser.close()
                    self._parser = None
                    if self._managed_file:
                        self._file.close()
            else:
                self._index = self._index + 1
                return item

    # publish whichever iteration protocol this Python version supports
    try:
        iter
        def __iter__(self):
            return self
    except NameError:
        def __getitem__(self, index):
            return self.next()
##
# Parses an XML document from a string constant.  This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element

def XML(text):
    builder = XMLTreeBuilder()
    builder.feed(text)
    return builder.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)

def XMLID(text):
    builder = XMLTreeBuilder()
    builder.feed(text)
    root = builder.close()
    # index every element that carries a (non-empty) id attribute
    id_map = {}
    for node in root.getiterator():
        node_id = node.get("id")
        if node_id:
            id_map[node_id] = node
    return root, id_map
##
# Parses an XML document from a string constant.  Same as {@link #XML};
# provided under the name used by later ElementTree versions.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element

fromstring = XML
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string

def tostring(element, encoding=None):
    # serialize through ElementTree.write into an in-memory sink,
    # then join the collected chunks into a single string
    chunks = []
    class _Sink:
        pass
    sink = _Sink()
    sink.write = chunks.append
    ElementTree(element).write(sink, encoding)
    return string.join(chunks, "")
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
#    is called to create new Element instances, as necessary.

class TreeBuilder:

    def __init__(self, element_factory=None):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = _ElementInterface
        self._factory = element_factory

    ##
    # Flushes the builder buffers, and returns the toplevel document
    # element.
    #
    # @return An Element instance.
    # @defreturn Element

    def close(self):
        assert len(self._elem) == 0, "missing end tags"
        # "is not None" instead of "!= None": identity is the correct
        # way to test for None
        assert self._last is not None, "missing toplevel element"
        return self._last

    def _flush(self):
        # move buffered character data onto the most recent element:
        # tail text when we are just past an end tag, element text
        # otherwise
        if self._data:
            if self._last is not None:
                text = string.join(self._data, "")
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []

    ##
    # Adds text to the current element.
    #
    # @param data A string. This should be either an 8-bit string
    #    containing ASCII text, or a Unicode string.

    def data(self, data):
        self._data.append(data)

    ##
    # Opens a new element.
    #
    # @param tag The element name.
    # @param attrib A dictionary containing element attributes.
    # @return The opened element.
    # @defreturn Element

    def start(self, tag, attrs):
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            # attach to the current parent on the open-element stack
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem

    ##
    # Closes the current element.
    #
    # @param tag The element name.
    # @return The closed element.
    # @defreturn Element

    def end(self, tag):
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
#     instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
#     by the current implementation.
# @see #ElementTree
# @see #TreeBuilder

class XMLTreeBuilder:

    def __init__(self, html=0, target=None):
        try:
            from xml.parsers import expat
        except ImportError:
            raise ImportError(
                "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # "}" makes expat report namespaced names as "uri}local";
        # _fixname adds the leading "{" back
        self._parser = parser = expat.ParserCreate(None, "}")
        if target is None:
            target = TreeBuilder()
        self._target = target
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        encoding = None
        if not parser.returns_unicode:
            encoding = "utf-8"
        # target.xml(encoding, None)
        self._doctype = None
        self.entity = {}

    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return _encode(text, "ascii")
        except UnicodeError:
            return text

    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible;
        # results are memoized in self._names
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name

    def _start(self, tag, attrib_in):
        # expat start handler (dict-style attributes)
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = self._fixtext(value)
        return self._target.start(tag, attrib)

    def _start_list(self, tag, attrib_in):
        # expat start handler (ordered list of alternating name/value)
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
        return self._target.start(tag, attrib)

    def _data(self, text):
        return self._target.data(self._fixtext(text))

    def _end(self, tag):
        return self._target.end(self._fixname(tag))

    def _default(self, text):
        # catch-all handler: resolves user-supplied entities and does a
        # minimal parse of doctype declarations
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self._target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                raise expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                    self._parser.ErrorColumnNumber)
                    )
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = string.strip(text)
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    # strip the surrounding quotes
                    pubid = pubid[1:-1]
                self.doctype(name, pubid, system[1:-1])
                self._doctype = None

    ##
    # Handles a doctype declaration.  Subclasses may override this to
    # act on the document type.
    #
    # @param name Doctype name.
    # @param pubid Public identifier.
    # @param system System identifier.

    def doctype(self, name, pubid, system):
        pass

    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.

    def feed(self, data):
        self._parser.Parse(data, 0)

    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element

    def close(self):
        self._parser.Parse("", 1) # end of data
        tree = self._target.close()
        del self._target, self._parser # get rid of circular references
        return tree

# compatibility
XMLParser = XMLTreeBuilder
|
markovianlabs/pychain | refs/heads/master | setup.py | 1 | from distutils.core import setup
setup(
    name = 'pychain',
    packages = ['pychain'], # must match the package directory name
    version = '0.0.1',
    description = 'Markov Chain Monte Carlo (MCMC) method using Metropolis-Hastings algorithm.',
    author = 'Irshad Mohammed, Janu Verma',
    author_email = 'creativeishu@gmail.com, j.verma5@gmail.com',
    url = 'https://github.com/creativeishu/pychain.git', # project homepage (github repo)
    download_url = '', # tarball URL for this release (not yet published)
    keywords = ["MCMC", "Metropolis-Hastings","Sampling","Markov Chain","Monte-Carlo"], # PyPI search keywords
    classifiers = [],
) |
ocadotechnology/django-tastypie | refs/heads/master | tests/validation/tests.py | 9 | import json
from basic.models import Note
from testcases import TestCaseWithFixture
class FilteringErrorsTestCase(TestCaseWithFixture):
    """An invalid filter value must yield HTTP 400, not a server error."""
    urls = 'validation.api.urls'

    def test_valid_date(self):
        # a well-formed date filter returns 200 and the filtered set
        resp = self.client.get('/api/v1/notes/', data={
            'format': 'json',
            'created__gte': '2010-03-31'
        })
        self.assertEqual(resp.status_code, 200)
        deserialized = json.loads(resp.content.decode('utf-8'))
        self.assertEqual(len(deserialized['objects']), Note.objects.filter(created__gte='2010-03-31').count())

    def test_invalid_date(self):
        # an unparsable date is a client error (400)
        resp = self.client.get('/api/v1/notes/', data={
            'format': 'json',
            'created__gte': 'foo-baz-bar'
        })
        self.assertEqual(resp.status_code, 400)
class PostNestResouceValidationTestCase(TestCaseWithFixture):
    """POST with a nested resource validates both the parent note and
    the nested 'annotated' resource, reporting errors per resource."""
    urls = 'validation.api.urls'

    def test_valid_data(self):
        data = json.dumps({
            'title': 'Test Title',
            'slug': 'test-title',
            'content': 'This is the content',
            'user': {'pk': 1}, # loaded from fixtures
            'annotated': {'annotations': 'This is an annotations'},
        })

        resp = self.client.post('/api/v1/notes/', data=data, content_type='application/json')
        self.assertEqual(resp.status_code, 201)
        # fetch the created note and verify the nested object was saved
        note = json.loads(self.client.get(resp['location']).content.decode('utf-8'))
        self.assertTrue(note['annotated'])

    def test_invalid_data(self):
        data = json.dumps({
            'title': '', # invalid: required field left empty
            'slug': 'test-title',
            'content': 'This is the content',
            'user': {'pk': 1}, # loaded from fixtures
            'annotated': {'annotations': ''}, # invalid: required field left empty
        })

        resp = self.client.post('/api/v1/notes/', data=data, content_type='application/json')
        self.assertEqual(resp.status_code, 400)
        # errors are keyed by resource name, one entry per resource
        self.assertEqual(json.loads(resp.content.decode('utf-8')), {
            'notes': {
                'title': ['This field is required.']
            },
            'annotated': {
                'annotations': ['This field is required.']
            }
        })
class PutDetailNestResouceValidationTestCase(TestCaseWithFixture):
    """PUT to a detail endpoint validates the nested resource along
    with the parent note."""
    urls = 'validation.api.urls'

    def test_valid_data(self):
        data = json.dumps({
            'title': 'Test Title',
            'slug': 'test-title',
            'content': 'This is the content',
            'annotated': {'annotations': 'This is another annotations'},
        })

        resp = self.client.put('/api/v1/notes/1/', data=data, content_type='application/json')
        self.assertEqual(resp.status_code, 204)
        # re-fetch and check the update (including the nested object) stuck
        note = json.loads(self.client.get('/api/v1/notes/1/', content_type='application/json').content.decode('utf-8'))
        self.assertTrue(note['annotated'])
        self.assertEqual('test-title', note['slug'])

    def test_invalid_data(self):
        data = json.dumps({
            'title': '', # invalid: required field left empty
            'slug': '', # invalid: required field left empty
            'content': 'This is the content',
            'annotated': {'annotations': None}, # invalid: required field missing
        })

        resp = self.client.put('/api/v1/notes/1/', data=data, content_type='application/json')
        self.assertEqual(resp.status_code, 400)
        # errors are keyed by resource name, one entry per resource
        self.assertEqual(json.loads(resp.content.decode('utf-8')), {
            'notes': {
                'slug': ['This field is required.'],
                'title': ['This field is required.']
            },
            'annotated': {
                'annotations': ['This field is required.']
            }
        })
class PutListNestResouceValidationTestCase(TestCaseWithFixture):
    """PUT to the list endpoint validates every object in the payload,
    including each object's nested resource."""
    urls = 'validation.api.urls'

    def test_valid_data(self):
        data = json.dumps({'objects': [
            {
                'id': 1,
                'title': 'Test Title',
                'slug': 'test-title',
                'content': 'This is the content',
                'annotated': {'annotations': 'This is another annotations'},
                'user': {'id': 1}
            },
            {
                'id': 2,
                'title': 'Test Title',
                'slug': 'test-title',
                'content': 'This is the content',
                'annotated': {'annotations': 'This is the third annotations'},
                'user': {'id': 1}
            }
        ]})

        resp = self.client.put('/api/v1/notes/', data=data, content_type='application/json')
        self.assertEqual(resp.status_code, 204)
        # both objects should have been updated with their nested data
        note = json.loads(self.client.get('/api/v1/notes/1/', content_type='application/json').content.decode('utf-8'))
        self.assertTrue(note['annotated'])
        note = json.loads(self.client.get('/api/v1/notes/2/', content_type='application/json').content.decode('utf-8'))
        self.assertTrue(note['annotated'])

    def test_invalid_data(self):
        # both objects omit 'content' and carry empty annotations
        data = json.dumps({'objects': [
            {
                'id': 1,
                'title': 'Test Title',
                'slug': 'test-title',
                'annotated': {'annotations': None},
                'user': {'id': 1}
            },
            {
                'id': 2,
                'title': 'Test Title',
                'annotated': {'annotations': None},
                'user': {'id': 1}
            }
        ]})

        resp = self.client.put('/api/v1/notes/', data=data, content_type='application/json')
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content.decode('utf-8')), {
            'notes': {
                'content': ['This field is required.']
            },
            'annotated': {
                'annotations': ['This field is required.']
            }
        })
|
waytai/xhtml2pdf | refs/heads/master | xhtml2pdf/context.py | 28 | # -*- coding: utf-8 -*-
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.fonts import addMapping
from reportlab.lib.pagesizes import landscape, A4
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.paraparser import ParaFrag, ps2tt, tt2ps
from xhtml2pdf.util import getSize, getCoords, getFile, pisaFileObject, \
getFrameDimensions, getColor
from xhtml2pdf.w3c import css
from xhtml2pdf.xhtml2pdf_reportlab import PmlPageTemplate, PmlTableOfContents, \
PmlParagraph, PmlParagraphAndImage, PmlPageCount
import copy
import logging
import os
import re
import reportlab
import types
import urlparse
import xhtml2pdf.default
import xhtml2pdf.parser
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# silence reportlab's warnings about glyphs missing from a font
reportlab.rl_config.warnOnMissingFontGlyphs = 0

log = logging.getLogger("xhtml2pdf")

sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.4 # fraction of font size that a sub script should be lowered
superFraction = 0.4 # fraction of font size that a super script should be raised

# non-breaking space character
NBSP = u"\u00a0"
def clone(self, **kwargs):
    # Shallow-copy this fragment, optionally overriding attributes via
    # keyword arguments.
    n = ParaFrag(**self.__dict__)
    if kwargs:
        d = n.__dict__
        d.update(kwargs)
        # This else could cause trouble in Paragraphs with images etc.
        if "cbDefn" in d:
            del d["cbDefn"]
    n.bulletText = None
    return n

# monkey-patch reportlab's ParaFrag so every fragment can be cloned
ParaFrag.clone = clone
def getParaFrag(style):
    """Create a ParaFrag pre-loaded with pisa's defaults for *style*."""
    frag = ParaFrag()

    # script/decoration state
    frag.sub = 0
    frag.super = 0
    frag.rise = 0
    frag.underline = 0 # XXX Need to be able to set color to fit CSS tests
    frag.strike = 0
    frag.greek = 0
    frag.link = None
    frag.text = ""

    # font: the style's postscript name is split into family + flags
    frag.fontName = "Times-Roman"
    frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
    frag.fontSize = style.fontSize
    frag.textColor = style.textColor

    # Extras
    frag.leading = 0
    frag.letterSpacing = "normal"
    frag.leadingSource = "150%"
    frag.leadingSpace = 0
    frag.backColor = None
    frag.spaceBefore = 0
    frag.spaceAfter = 0
    frag.leftIndent = 0
    frag.rightIndent = 0
    frag.firstLineIndent = 0
    frag.keepWithNext = False
    frag.alignment = TA_LEFT
    frag.vAlign = None

    # borders: per-side values default to the shorthand values
    frag.borderWidth = 1
    frag.borderStyle = None
    frag.borderPadding = 0
    frag.borderColor = None

    frag.borderLeftWidth = frag.borderWidth
    frag.borderLeftColor = frag.borderColor
    frag.borderLeftStyle = frag.borderStyle
    frag.borderRightWidth = frag.borderWidth
    frag.borderRightColor = frag.borderColor
    frag.borderRightStyle = frag.borderStyle
    frag.borderTopWidth = frag.borderWidth
    frag.borderTopColor = frag.borderColor
    frag.borderTopStyle = frag.borderStyle
    frag.borderBottomWidth = frag.borderWidth
    frag.borderBottomColor = frag.borderColor
    frag.borderBottomStyle = frag.borderStyle

    # paddings
    frag.paddingLeft = 0
    frag.paddingRight = 0
    frag.paddingTop = 0
    frag.paddingBottom = 0

    # list markers and whitespace handling
    frag.listStyleType = None
    frag.listStyleImage = None
    frag.whiteSpace = "normal"
    frag.wordWrap = None

    # page-number/page-count placeholder flags
    frag.pageNumber = False
    frag.pageCount = False
    frag.height = None
    frag.width = None

    # bullets
    frag.bulletIndent = 0
    frag.bulletText = None
    frag.bulletFontName = "Helvetica"

    frag.zoom = 1.0

    # PDF outline (bookmark) state
    frag.outline = False
    frag.outlineLevel = 0
    frag.outlineOpen = False

    frag.insideStaticFrame = 0

    return frag
def getDirName(path):
    """Return the base location for *path*.

    A URL (anything carrying a scheme) is returned unchanged; a plain
    file path is resolved to the absolute directory containing it.
    """
    scheme = urlparse.urlparse(path).scheme
    if scheme:
        return path
    return os.path.dirname(os.path.abspath(path))
class pisaCSSBuilder(css.CSSBuilder):
    """CSS builder that additionally understands the pisa/xhtml2pdf
    at-rules: @font-face, @page and @frame."""

    def atFontFace(self, declarations):
        """
        Embed fonts.

        Registers the font file given by ``src`` under every name in
        ``font-family``, honouring font-weight and font-style.  The
        rule is consumed here, so empty rule dicts are returned.
        """
        result = self.ruleset([self.selector('*')], declarations)
        data = result[0].values()[0]
        if "src" not in data:
            # invalid - source is required, ignore this specification
            return {}, {}
        names = data["font-family"]

        # Font weight: anything from "bold"/500 upwards counts as bold.
        # (The deprecated Python 2 "<>" operator was replaced by "!=".)
        fweight = str(data.get("font-weight", "normal")).lower()
        bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
        if not bold and fweight != "normal":
            log.warn(self.c.warning("@fontface, unknown value font-weight '%s'", fweight))

        # Font style
        italic = str(data.get("font-style", "")).lower() in ("italic", "oblique")

        src = self.c.getFile(data["src"], relative=self.c.cssParser.rootPath)
        self.c.loadFont(
            names,
            src,
            bold=bold,
            italic=italic)
        return {}, {}

    def _pisaAddFrame(self, name, data, first=False, border=None, size=(0, 0)):
        """Build the internal frame tuple (name, static-content, border,
        x, y, w, h, data) consumed by atPage; invents a unique frame
        name when none was given."""
        c = self.c
        if not name:
            name = "-pdf-frame-%d" % c.UID()
        if data.get('is_landscape', False):
            size = (size[1], size[0])
        x, y, w, h = getFrameDimensions(data, size[0], size[1])
        # print name, x, y, w, h
        #if not (w and h):
        #    return None
        if first:
            # the first frame of a template never carries static content
            return name, None, data.get("-pdf-frame-border", border), x, y, w, h, data

        return (name, data.get("-pdf-frame-content", None),
                data.get("-pdf-frame-border", border), x, y, w, h, data)

    def _getFromData(self, data, attr, default=None, func=None):
        """Return the first of *attr* (a name or a sequence of names)
        present in *data*, passed through *func*; *default* otherwise."""
        if not func:
            func = lambda x: x

        if type(attr) in (list, tuple):
            for a in attr:
                if a in data:
                    return func(data[a])
            return default
        else:
            if attr in data:
                return func(data[attr])
            return default

    def atPage(self, name, pseudopage, declarations):
        """Handle an @page rule: resolve the page size, build all frames
        queued by @frame rules (plus an implicit content frame when
        needed), and register the resulting PmlPageTemplate."""
        c = self.c
        data = {}
        name = name or "body"
        pageBorder = None

        if declarations:
            result = self.ruleset([self.selector('*')], declarations)
            if declarations:
                data = result[0].values()[0]
                pageBorder = data.get("-pdf-frame-border", None)

        if name in c.templateList:
            log.warn(self.c.warning("template '%s' has already been defined", name))

        if "-pdf-page-size" in data:
            c.pageSize = xhtml2pdf.default.PML_PAGESIZES.get(str(data["-pdf-page-size"]).lower(), c.pageSize)

        isLandscape = False
        if "size" in data:
            size = data["size"]
            if type(size) is not types.ListType:
                size = [size]
            sizeList = []
            for value in size:
                valueStr = str(value).lower()
                if type(value) is types.TupleType:
                    sizeList.append(getSize(value))
                elif valueStr == "landscape":
                    isLandscape = True
                elif valueStr in xhtml2pdf.default.PML_PAGESIZES:
                    c.pageSize = xhtml2pdf.default.PML_PAGESIZES[valueStr]
                else:
                    log.warn(c.warning("Unknown size value for @page"))

            if len(sizeList) == 2:
                c.pageSize = tuple(sizeList)
            if isLandscape:
                c.pageSize = landscape(c.pageSize)

        # page-level defaults, inherited by frames that do not override them
        padding_top = self._getFromData(data, 'padding-top', 0, getSize)
        padding_left = self._getFromData(data, 'padding-left', 0, getSize)
        padding_right = self._getFromData(data, 'padding-right', 0, getSize)
        padding_bottom = self._getFromData(data, 'padding-bottom', 0, getSize)
        border_color = self._getFromData(data, ('border-top-color', 'border-bottom-color',
                                                'border-left-color', 'border-right-color'), None, getColor)
        border_width = self._getFromData(data, ('border-top-width', 'border-bottom-width',
                                                'border-left-width', 'border-right-width'), 0, getSize)

        # if the @page rule itself carries geometry, it defines the first frame
        for prop in ("margin-top", "margin-left", "margin-right", "margin-bottom",
                     "top", "left", "right", "bottom", "width", "height"):
            if prop in data:
                c.frameList.append(self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize))
                break

        # Frames have to be calculated after we know the pagesize
        frameList = []
        staticList = []
        for fname, static, border, x, y, w, h, fdata in c.frameList:
            fpadding_top = self._getFromData(fdata, 'padding-top', padding_top, getSize)
            fpadding_left = self._getFromData(fdata, 'padding-left', padding_left, getSize)
            fpadding_right = self._getFromData(fdata, 'padding-right', padding_right, getSize)
            fpadding_bottom = self._getFromData(fdata, 'padding-bottom', padding_bottom, getSize)
            fborder_color = self._getFromData(fdata, ('border-top-color', 'border-bottom-color',
                                                      'border-left-color', 'border-right-color'), border_color, getColor)
            fborder_width = self._getFromData(fdata, ('border-top-width', 'border-bottom-width',
                                                      'border-left-width', 'border-right-width'), border_width, getSize)

            if border or pageBorder:
                frame_border = ShowBoundaryValue()
            else:
                frame_border = ShowBoundaryValue(color=fborder_color, width=fborder_width)

            #fix frame sizing problem.
            if static:
                x, y, w, h = getFrameDimensions(fdata, c.pageSize[0], c.pageSize[1])
            x, y, w, h = getCoords(x, y, w, h, c.pageSize)
            if w <= 0 or h <= 0:
                log.warn(self.c.warning("Negative width or height of frame. Check @frame definitions."))

            frame = Frame(
                x, y, w, h,
                id=fname,
                leftPadding=fpadding_left,
                rightPadding=fpadding_right,
                bottomPadding=fpadding_bottom,
                topPadding=fpadding_top,
                showBoundary=frame_border)

            if static:
                frame.pisaStaticStory = []
                c.frameStatic[static] = [frame] + c.frameStatic.get(static, [])
                staticList.append(frame)
            else:
                frameList.append(frame)

        background = data.get("background-image", None)
        if background:
            #should be relative to the css file
            background = self.c.getFile(background, relative=self.c.cssParser.rootPath)

        if not frameList:
            # no content frame declared: synthesize one from the page data
            log.warn(c.warning("missing explicit frame definition for content or just static frames"))
            fname, static, border, x, y, w, h, data = self._pisaAddFrame(name, data, first=True, border=pageBorder,
                                                                         size=c.pageSize)
            x, y, w, h = getCoords(x, y, w, h, c.pageSize)
            if w <= 0 or h <= 0:
                log.warn(c.warning("Negative width or height of frame. Check @page definitions."))
            if border or pageBorder:
                frame_border = ShowBoundaryValue()
            else:
                frame_border = ShowBoundaryValue(color=border_color, width=border_width)
            frameList.append(Frame(
                x, y, w, h,
                id=fname,
                leftPadding=padding_left,
                rightPadding=padding_right,
                bottomPadding=padding_bottom,
                topPadding=padding_top,
                showBoundary=frame_border))

        pt = PmlPageTemplate(
            id=name,
            frames=frameList,
            pagesize=c.pageSize,
        )
        pt.pisaStaticList = staticList
        pt.pisaBackground = background
        pt.pisaBackgroundList = c.pisaBackgroundList

        if isLandscape:
            pt.pageorientation = pt.LANDSCAPE

        # register the template and reset per-page frame state
        c.templateList[name] = pt
        c.template = None
        c.frameList = []
        c.frameStaticList = []

        return {}, {}

    def atFrame(self, name, declarations):
        """Handle an @frame rule by queueing a frame definition for the
        next @page rule to pick up."""
        if declarations:
            result = self.ruleset([self.selector('*')], declarations)
            # print "@BOX", name, declarations, result
            data = result[0]
            if data:
                data = data.values()[0]
                self.c.frameList.append(
                    self._pisaAddFrame(name, data, size=self.c.pageSize))

        return {}, {} # TODO: It always returns empty dicts?
class pisaCSSParser(css.CSSParser):

    def parseExternal(self, cssResourceName):
        # Fetch and parse an external stylesheet, temporarily switching
        # rootPath so that relative references inside it resolve against
        # the stylesheet's own location.
        oldRootPath = self.rootPath
        cssFile = self.c.getFile(cssResourceName, relative=self.rootPath)
        if not cssFile:
            return None
        if self.rootPath and urlparse.urlparse(self.rootPath).scheme:
            # current root is a URL: resolve the stylesheet against it
            self.rootPath = urlparse.urljoin(self.rootPath, cssResourceName)
        else:
            self.rootPath = getDirName(cssFile.uri)
        result = self.parse(cssFile.getData())
        # restore the caller's root path before returning
        self.rootPath = oldRootPath
        return result
class pisaContext(object):
"""
Helper class for creation of reportlab story and container for
various data.
"""
def __init__(self, path, debug=0, capacity=-1):
"""Initialize the rendering context: font table, story buffers, fragment state, CSS holders, and the resolved document path."""
self.fontList = copy.copy(xhtml2pdf.default.DEFAULT_FONT)
self.path = []
self.capacity = capacity
self.node = None
self.toc = PmlTableOfContents()
self.story = []
self.indexing_story = None
# NOTE(review): self.text is assigned a list here but overwritten with a
# unicode string a few lines below -- this first assignment looks dead.
self.text = []
self.log = []
self.err = 0
self.warn = 0
self.text = u""
self.uidctr = 0
self.multiBuild = False
self.pageSize = A4
self.template = None
self.templateList = {}
self.frameList = []
self.frameStatic = {}
self.frameStaticList = []
self.pisaBackgroundList = []
self.keepInFrameIndex = None
self.baseFontSize = getSize("12pt")
self.anchorFrag = []
self.anchorName = []
self.tableData = None
# The current fragment and the fragment that defines block-level style.
self.frag = self.fragBlock = getParaFrag(ParagraphStyle('default%d' % self.UID()))
self.fragList = []
self.fragAnchor = []
self.fragStack = []
self.fragStrip = True
self.listCounter = 0
self.cssText = ""
self.cssDefaultText = ""
self.image = None
self.imageData = {}
self.force = False
self.pathCallback = None # External callback function for path calculations
# Store path to document
self.pathDocument = path or "__dummy__"
parts = urlparse.urlparse(self.pathDocument)
if not parts.scheme:
# Plain filesystem path: make it absolute so relative lookups work.
self.pathDocument = os.path.abspath(self.pathDocument)
self.pathDirectory = getDirName(self.pathDocument)
self.meta = dict(
author="",
title="",
subject="",
keywords="",
pagesize=A4,
)
def UID(self):
    """Return the next value of the context-wide unique-id counter."""
    self.uidctr = self.uidctr + 1
    return self.uidctr
# METHODS FOR CSS
def addCSS(self, value):
"""Accumulate author CSS text, stripping CDATA / HTML-comment wrappers."""
value = value.strip()
if value.startswith("<![CDATA["):
# Drop the 9-char "<![CDATA[" prefix and 3-char "]]>" suffix.
value = value[9: - 3]
if value.startswith("<!--"):
# Drop the 4-char "<!--" prefix and 3-char "-->" suffix.
# NOTE(review): the closing marker is assumed, not verified.
value = value[4: - 3]
self.cssText += value.strip() + "\n"
# METHODS FOR CSS
def addDefaultCSS(self, value):
"""Accumulate user-agent default CSS text; same unwrapping as addCSS()."""
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssDefaultText += value.strip() + "\n"
def parseCSS(self):
"""Build the CSS builder/parser pair, parse accumulated CSS text, and set up the cascade strategy."""
# This self-reference really should be refactored. But for now
# we'll settle for using weak references. This avoids memory
# leaks because the garbage collector (at least on cPython
# 2.7.3) isn't aggressive enough.
import weakref
self.cssBuilder = pisaCSSBuilder(mediumSet=["all", "print", "pdf"])
#self.cssBuilder.c = self
self.cssBuilder._c = weakref.ref(self)
# NOTE(review): this installs `c` as a property on the CLASS, so all
# builder instances share the descriptor -- intentional hack, see above.
pisaCSSBuilder.c = property(lambda self: self._c())
self.cssParser = pisaCSSParser(self.cssBuilder)
self.cssParser.rootPath = self.pathDirectory
#self.cssParser.c = self
self.cssParser._c = weakref.ref(self)
pisaCSSParser.c = property(lambda self: self._c())
self.css = self.cssParser.parse(self.cssText)
self.cssDefault = self.cssParser.parse(self.cssDefaultText)
# Author CSS wins over the user-agent defaults via the cascade.
self.cssCascade = css.CSSCascadeStrategy(userAgent=self.cssDefault, user=self.css)
self.cssCascade.parser = self.cssParser
# METHODS FOR STORY
def addStory(self, data):
"""Append a flowable (paragraph, table, ...) to the reportlab story."""
self.story.append(data)
def swapStory(self, story=None):
    """
    Replace the current story with a copy of *story* and return a copy of
    the previous story.

    Uses None instead of a mutable default argument (the original used
    ``story=[]``); behavior for callers is unchanged -- omitting the
    argument still swaps in an empty story.
    """
    if story is None:
        story = []
    self.story, story = copy.copy(story), copy.copy(self.story)
    return story
def toParagraphStyle(self, first):
"""Build a fresh reportlab ParagraphStyle from the fragment *first* that opens a block."""
style = ParagraphStyle('default%d' % self.UID(), keepWithNext=first.keepWithNext)
# NOTE(review): fontName is overwritten again at the end via tt2ps(),
# so this first assignment appears redundant.
style.fontName = first.fontName
style.fontSize = first.fontSize
style.letterSpacing = first.letterSpacing
style.leading = max(first.leading + first.leadingSpace, first.fontSize * 1.25)
style.backColor = first.backColor
style.spaceBefore = first.spaceBefore
style.spaceAfter = first.spaceAfter
style.leftIndent = first.leftIndent
style.rightIndent = first.rightIndent
style.firstLineIndent = first.firstLineIndent
style.textColor = first.textColor
style.alignment = first.alignment
style.bulletFontName = first.bulletFontName or first.fontName
style.bulletFontSize = first.fontSize
style.bulletIndent = first.bulletIndent
style.wordWrap = first.wordWrap
# Border handling for Paragraph
# Transfer the styles for each side of the border, *not* the whole
# border values that reportlab supports. We'll draw them ourselves in
# PmlParagraph.
style.borderTopStyle = first.borderTopStyle
style.borderTopWidth = first.borderTopWidth
style.borderTopColor = first.borderTopColor
style.borderBottomStyle = first.borderBottomStyle
style.borderBottomWidth = first.borderBottomWidth
style.borderBottomColor = first.borderBottomColor
style.borderLeftStyle = first.borderLeftStyle
style.borderLeftWidth = first.borderLeftWidth
style.borderLeftColor = first.borderLeftColor
style.borderRightStyle = first.borderRightStyle
style.borderRightWidth = first.borderRightWidth
style.borderRightColor = first.borderRightColor
# If no border color is given, the text color is used (XXX Tables!)
if (style.borderTopColor is None) and style.borderTopWidth:
style.borderTopColor = first.textColor
if (style.borderBottomColor is None) and style.borderBottomWidth:
style.borderBottomColor = first.textColor
if (style.borderLeftColor is None) and style.borderLeftWidth:
style.borderLeftColor = first.textColor
if (style.borderRightColor is None) and style.borderRightWidth:
style.borderRightColor = first.textColor
style.borderPadding = first.borderPadding
style.paddingTop = first.paddingTop
style.paddingBottom = first.paddingBottom
style.paddingLeft = first.paddingLeft
style.paddingRight = first.paddingRight
# Map logical bold/italic flags to the concrete font face name.
style.fontName = tt2ps(first.fontName, first.bold, first.italic)
return style
def addTOC(self):
"""Create per-level TOC paragraph styles (classes pdftoclevel0..19) and append the table of contents to the story."""
styles = []
for i in xrange(20):
# Temporarily tag the current node so CSS for the level class applies.
self.node.attributes["class"] = "pdftoclevel%d" % i
self.cssAttr = xhtml2pdf.parser.CSSCollect(self.node, self)
xhtml2pdf.parser.CSS2Frag(self, {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}, True)
pstyle = self.toParagraphStyle(self.frag)
styles.append(pstyle)
self.toc.levelStyles = styles
self.addStory(self.toc)
# A TOC requires a multi-pass build; drop any pending indexing story.
self.indexing_story = None
def addPageCount(self):
"""Enable total-page-count support; idempotent (only installs the counter once)."""
if not self.multiBuild:
self.indexing_story = PmlPageCount()
# Page counts need a second build pass.
self.multiBuild = True
def dumpPara(self, frags, style):
"""Debug hook called before a paragraph is emitted; intentionally a no-op."""
return
def addPara(self, force=False):
"""Flush the accumulated fragments into a PmlParagraph on the story and reset the fragment state. *force* emits even an empty paragraph."""
force = (force or self.force)
self.force = False
# Cleanup the trail
try:
rfragList = reversed(self.fragList)
except:
# For Python 2.3 compatibility
rfragList = copy.copy(self.fragList)
rfragList.reverse()
# NOTE(review): rfragList is computed above but never used -- looks like
# leftover from removed trailing-whitespace cleanup.
# Find maximum lead
maxLeading = 0
#fontSize = 0
for frag in self.fragList:
leading = getSize(frag.leadingSource, frag.fontSize) + frag.leadingSpace
maxLeading = max(leading, frag.fontSize + frag.leadingSpace, maxLeading)
frag.leading = leading
if force or (self.text.strip() and self.fragList):
# Update paragraph style by style of first fragment
first = self.fragBlock
style = self.toParagraphStyle(first)
# style.leading = first.leading + first.leadingSpace
if first.leadingSpace:
style.leading = maxLeading
else:
style.leading = getSize(first.leadingSource, first.fontSize) + first.leadingSpace
bulletText = copy.copy(first.bulletText)
first.bulletText = None
# Add paragraph to story
if force or len(self.fragAnchor + self.fragList) > 0:
# We need this empty fragment to work around problems in
# Reportlab paragraphs regarding backGround etc.
if self.fragList:
self.fragList.append(self.fragList[- 1].clone(text=''))
else:
blank = self.frag.clone()
blank.fontName = "Helvetica"
blank.text = ''
self.fragList.append(blank)
self.dumpPara(self.fragAnchor + self.fragList, style)
para = PmlParagraph(
self.text,
style,
frags=self.fragAnchor + self.fragList,
bulletText=bulletText)
para.outline = first.outline
para.outlineLevel = first.outlineLevel
para.outlineOpen = first.outlineOpen
para.keepWithNext = first.keepWithNext
para.autoLeading = "max"
if self.image:
# Wrap the paragraph so the pending image floats beside it.
para = PmlParagraphAndImage(
para,
self.image,
side=self.imageData.get("align", "left"))
self.addStory(para)
self.fragAnchor = []
first.bulletText = None
# Reset data
self.image = None
self.imageData = {}
self.clearFrag()
# METHODS FOR FRAG
def clearFrag(self):
"""Reset the fragment list, leading-whitespace stripping flag, and text buffer."""
self.fragList = []
self.fragStrip = True
self.text = u""
def copyFrag(self, **kw):
"""Return a clone of the current fragment with *kw* overrides (does not change self.frag)."""
return self.frag.clone(**kw)
def newFrag(self, **kw):
"""Replace the current fragment with a clone carrying *kw* overrides and return it."""
self.frag = self.frag.clone(**kw)
return self.frag
def _appendFrag(self, frag):
"""Append *frag* to the fragment list; local anchors ("#...") are also recorded for later link resolution."""
if frag.link and frag.link.startswith("#"):
self.anchorFrag.append((frag, frag.link[1:]))
self.fragList.append(frag)
# XXX Argument frag is useless!
def addFrag(self, text="", frag=None):
"""Split *text* into reportlab fragments (honoring sub/super, white-space:pre, and NBSP handling) and append them. The *frag* parameter is unused (see XXX above)."""
frag = baseFrag = self.frag.clone()
# if sub and super are both on they will cancel each other out
if frag.sub == 1 and frag.super == 1:
frag.sub = 0
frag.super = 0
# XXX Has to be replaced by CSS styles like vertical-align and font-size
if frag.sub:
frag.rise = - frag.fontSize * subFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
elif frag.super:
frag.rise = frag.fontSize * superFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
# bold, italic, and underline
frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic)
# Replace soft hyphen (U+00AD) with empty and normalize NBSP variants.
text = (text
.replace(u"\xad", u"")
.replace(u"\xc2\xa0", NBSP)
.replace(u"\xa0", NBSP))
if frag.whiteSpace == "pre":
# Handle by lines
for text in re.split(r'(\r\n|\n|\r)', text):
# This is an exceptionally expensive piece of code
self.text += text
if ("\n" in text) or ("\r" in text):
# If EOL insert a linebreak
frag = baseFrag.clone()
frag.text = ""
frag.lineBreak = 1
self._appendFrag(frag)
else:
# Handle tabs in a simple way
text = text.replace(u"\t", 8 * u" ")
# Somehow for Reportlab NBSP have to be inserted
# as single character fragments
for text in re.split(r'(\ )', text):
frag = baseFrag.clone()
if text == " ":
text = NBSP
frag.text = text
self._appendFrag(frag)
else:
for text in re.split(u'(' + NBSP + u')', text):
frag = baseFrag.clone()
if text == NBSP:
# NBSP forces paragraph emission even for "empty" text.
self.force = True
frag.text = NBSP
self.text += text
self._appendFrag(frag)
else:
# Collapse runs of whitespace; the x...x trick preserves
# a single leading/trailing space across split().
frag.text = " ".join(("x" + text + "x").split())[1: - 1]
if self.fragStrip:
frag.text = frag.text.lstrip()
if frag.text:
self.fragStrip = False
self.text += frag.text
self._appendFrag(frag)
def pushFrag(self):
"""Save the current fragment on the stack and start a fresh clone."""
self.fragStack.append(self.frag)
self.newFrag()
def pullFrag(self):
"""Restore the most recently pushed fragment."""
self.frag = self.fragStack.pop()
# XXX
def _getFragment(self, l=20):
"""Return a repr of the first *l* whitespace-separated tokens of the current node's XML, for log context. Best-effort: any failure yields ""."""
try:
return repr(" ".join(self.node.toxml().split()[:l]))
except:
# Bare except is deliberate: context extraction must never raise.
return ""
def _getLineNumber(self):
"""Line numbers are not tracked; always 0."""
return 0
def context(self, msg):
"""Return *msg* followed by a snippet of the current node for diagnostics."""
return "%s\n%s" % (
str(msg),
self._getFragment(50))
def warning(self, msg, *args):
"""Record a warning in the log and return a contextualized message string."""
self.warn += 1
self.log.append((xhtml2pdf.default.PML_WARNING, self._getLineNumber(), str(msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
# %-formatting may fail if args don't match the format; fall back.
return self.context(msg)
def error(self, msg, *args):
"""Record an error in the log and return a contextualized message string."""
self.err += 1
self.log.append((xhtml2pdf.default.PML_ERROR, self._getLineNumber(), str(msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
# UTILS
def _getFileDeprecated(self, name, relative):
"""Legacy path resolution: return a filename for *name* (data: URIs pass through), via pathCallback when set, else relative to *relative* or the document directory. Returns None when the file does not exist."""
try:
path = relative or self.pathDirectory
if name.startswith("data:"):
# Inline data URIs need no filesystem resolution.
return name
if self.pathCallback is not None:
nv = self.pathCallback(name, relative)
else:
if path is None:
log.warn("Could not find main directory for getting filename. Use CWD")
path = os.getcwd()
nv = os.path.normpath(os.path.join(path, name))
if not (nv and os.path.isfile(nv)):
nv = None
if nv is None:
log.warn(self.warning("File '%s' does not exist", name))
return nv
except:
# Best-effort: log with traceback and implicitly return None.
log.warn(self.warning("getFile %r %r %r", name, relative, path), exc_info=1)
def getFile(self, name, relative=None):
"""
Returns a file name or None
"""
# Prefer the deprecated callback-based resolution when a callback is set.
if self.pathCallback is not None:
return getFile(self._getFileDeprecated(name, relative))
return getFile(name, relative or self.pathDirectory)
def getFontName(self, names, default="helvetica"):
"""
Name of a font

Accepts a list of candidate names or a comma-separated string; returns
the first registered match (case-insensitive) or the *default* entry.
"""
# print names, self.fontList
if type(names) is not types.ListType:
if type(names) not in types.StringTypes:
names = str(names)
# Comma-separated CSS-style font stack.
names = names.strip().split(",")
for name in names:
if type(name) not in types.StringTypes:
name = str(name)
font = self.fontList.get(name.strip().lower(), None)
if font is not None:
return font
return self.fontList.get(default, None)
def registerFont(self, fontname, alias=None):
    """
    Register *fontname* in the font lookup table, optionally under the
    additional *alias* names.

    The font is always stored under its lowercased name; alias keys are
    stored as given (loadFont passes already-lowercased aliases).
    Uses None instead of a mutable default argument for *alias*.
    """
    if alias is None:
        alias = []
    self.fontList[str(fontname).lower()] = str(fontname)
    # The conversion of fontname is loop-invariant, so perform it once
    # up front instead of re-testing on every alias as the original did.
    if type(fontname) not in types.StringTypes:
        fontname = str(fontname)
    for a in alias:
        self.fontList[str(a)] = fontname
def loadFont(self, names, src, encoding="WinAnsiEncoding", bold=0, italic=0):
"""Embed a TTF/TTC or AFM/PFB font file and register it (and its bold/italic placeholders) under the given alias names."""
# XXX Just works for local filenames!
if names and src:
file = src
src = file.uri
log.debug("Load font %r", src)
if type(names) is types.ListType:
fontAlias = names
else:
# Comma-separated alias list, normalized to lowercase.
fontAlias = (x.lower().strip() for x in names.split(",") if x)
# XXX Problems with unicode here
fontAlias = [str(x) for x in fontAlias]
fontName = fontAlias[0]
parts = src.split(".")
baseName, suffix = ".".join(parts[: - 1]), parts[- 1]
suffix = suffix.lower()
if suffix in ["ttc", "ttf"]:
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(self.warning("Repeated font embed for %s, skip new embed ", fullFontName))
else:
# Register TTF font and special name
filename = file.getNamedFile()
pdfmetrics.registerFont(TTFont(fullFontName, filename))
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(fontName, bold, italic, fullFontName)
# Register "normal" name and the place holder for style
self.registerFont(fontName, fontAlias + [fullFontName])
elif suffix in ("afm", "pfb"):
# Type1 fonts come as an AFM/PFB pair; derive the missing half
# from the same base name.
if suffix == "afm":
afm = file.getNamedFile()
tfile = pisaFileObject(baseName + ".pfb")
pfb = tfile.getNamedFile()
else:
pfb = file.getNamedFile()
tfile = pisaFileObject(baseName + ".afm")
afm = tfile.getNamedFile()
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(self.warning("Repeated font embed for %s, skip new embed", fontName))
else:
# Include font
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
fontNameOriginal = face.name
pdfmetrics.registerTypeFace(face)
# print fontName, fontNameOriginal, fullFontName
justFont = pdfmetrics.Font(fullFontName, fontNameOriginal, encoding)
pdfmetrics.registerFont(justFont)
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(fontName, bold, italic, fontNameOriginal)
# Register "normal" name and the place holder for style
self.registerFont(fontName, fontAlias + [fullFontName, fontNameOriginal])
else:
log.warning(self.warning("wrong attributes for <pdf:font>"))
|
alobbs/ansible | refs/heads/devel | lib/ansible/module_utils/rax.py | 280 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from uuid import UUID
FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
'error', 'error_deleting')
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
    """Prepend a key with rax_ and normalize the key name.

    Characters that are not alphanumeric, underscore or hyphen are
    replaced with underscores; the result is lowercased and leading
    underscores are stripped before the ``rax_`` prefix is applied.
    """
    # Raw string for the regex: '[^\w-]' in a plain literal is an invalid
    # escape sequence on modern Pythons (DeprecationWarning/SyntaxWarning).
    return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def rax_clb_node_to_dict(obj):
    """Convert a CLB Node object into a plain dict (empty dict for a falsy obj)."""
    if not obj:
        return {}
    node_dict = obj.to_dict()
    # id and weight are attributes of the node object that to_dict() does
    # not include, so copy them across explicitly.
    node_dict['id'] = obj.id
    node_dict['weight'] = obj.weight
    return node_dict
def rax_to_dict(obj, obj_type='standard'):
"""Generic function to convert a pyrax object to a dict

obj_type values:
standard
clb
server

Walks dir(obj), recursing into lists of pyrax objects, slugifying keys
with rax_ prefixes for servers, and skipping private/callable members.
"""
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if obj_type == 'clb' and key == 'nodes':
# CLB nodes get their own dedicated conversion.
instance[key] = []
for node in value:
instance[key].append(rax_clb_node_to_dict(node))
elif (isinstance(value, list) and len(value) > 0 and
not isinstance(value[0], NON_CALLABLES)):
# A non-empty list of pyrax objects: convert each recursively.
instance[key] = []
for item in value:
instance[key].append(rax_to_dict(item))
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
if obj_type == 'server':
if key == 'image':
# An empty image means the server boots from a volume.
if not value:
instance['rax_boot_source'] = 'volume'
else:
instance['rax_boot_source'] = 'local'
key = rax_slugify(key)
instance[key] = value
if obj_type == 'server':
# Re-expose a few well-known attributes under their original names.
for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))
return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
"""Find a servers bootable volume

Returns the single bootable volume attached to *server*. When *exit* is
True, zero or multiple matches abort via module.fail_json; otherwise
False is returned in those cases.
"""
cs = rax_module.cloudservers
cbs = rax_module.cloud_blockstorage
server_id = rax_module.utils.get_id(server)
volumes = cs.volumes.get_server_volumes(server_id)
bootable_volumes = []
for volume in volumes:
vol = cbs.get(volume)
if module.boolean(vol.bootable):
bootable_volumes.append(vol)
if not bootable_volumes:
if exit:
module.fail_json(msg='No bootable volumes could be found for '
'server %s' % server_id)
else:
return False
elif len(bootable_volumes) > 1:
if exit:
module.fail_json(msg='Multiple bootable volumes found for server '
'%s' % server_id)
else:
return False
return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
"""Find a server image by ID or Name

Tries, in order: literal UUID, human_id lookup, then name lookup.
Returns the image id, or False (or fail_json when *exit*) on no match.
"""
cs = rax_module.cloudservers
try:
# A parseable UUID is taken to be the image id itself.
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
except(cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
if exit:
module.fail_json(msg='No matching image found (%s)' %
image)
else:
return False
return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
"""Find a Block storage volume by ID or name

Returns the volume, or None when a name lookup finds nothing.
(Python 2 syntax: `except Exception, e`.)
"""
cbs = rax_module.cloud_blockstorage
try:
# UUID() raising ValueError means *name* is not an id.
UUID(name)
volume = cbs.get(name)
except ValueError:
try:
volume = cbs.find(name=name)
except rax_module.exc.NotFound:
volume = None
except Exception, e:
module.fail_json(msg='%s' % e)
return volume
def rax_find_network(module, rax_module, network):
"""Find a cloud network by ID or name

'public' and 'private' map to the well-known PublicNet/ServiceNet ids;
anything else is looked up by label. Failures abort via fail_json.
"""
cnw = rax_module.cloud_networks
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
return cnw.get_server_networks(PUBLIC_NET_ID)
elif network.lower() == 'private':
return cnw.get_server_networks(SERVICE_NET_ID)
else:
try:
network_obj = cnw.find_network_by_label(network)
except (rax_module.exceptions.NetworkNotFound,
rax_module.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
return cnw.get_server_networks(network_obj)
else:
# The value parsed as a UUID: treat it as a network id directly.
return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
    """Find a Cloud Server by ID or name.

    A value that parses as a UUID is fetched directly by id; anything else
    is matched by exact name, failing the module on zero or multiple hits.
    """
    cs = rax_module.cloudservers
    try:
        UUID(server)
        server = cs.servers.get(server)
    except ValueError:
        # Not an id: search for an exact name match (anchored regex).
        candidates = cs.servers.list(search_opts=dict(name='^%s$' % server))
        if not candidates:
            module.fail_json(msg='No Server was matched by name, '
                                 'try using the Server ID instead')
        if len(candidates) > 1:
            module.fail_json(msg='Multiple servers matched by name, '
                                 'try using the Server ID instead')
        # Exactly one candidate remains at this point.
        server = candidates[0]
    return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
    """Find a Cloud Load Balancer by ID or name.

    First tries a direct lookup by id; if that fails, scans all load
    balancers for an exact name match. Zero or multiple matches abort
    via module.fail_json.
    """
    clb = rax_module.cloud_loadbalancers
    try:
        found = clb.get(loadbalancer)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Fall back to a name scan.
        found = []
        for lb in clb.list():
            if loadbalancer == lb.name:
                found.append(lb)

        if not found:
            module.fail_json(msg='No loadbalancer was matched')

        if len(found) > 1:
            module.fail_json(msg='Multiple loadbalancers matched')

        # We made it this far, grab the first and hopefully only item
        # in the list
        found = found[0]

    return found
def rax_argument_spec():
    """Return standard base dictionary used for the argument_spec
    argument in AnsibleModule
    """
    # Spelled as a dict literal; keys match the pyrax/Rackspace settings.
    return {
        'api_key': dict(type='str', aliases=['password'], no_log=True),
        'auth_endpoint': dict(type='str'),
        'credentials': dict(type='str', aliases=['creds_file']),
        'env': dict(type='str'),
        'identity_type': dict(type='str', default='rackspace'),
        'region': dict(type='str'),
        'tenant_id': dict(type='str'),
        'tenant_name': dict(type='str'),
        'username': dict(type='str'),
        'verify_ssl': dict(choices=BOOLEANS, type='bool'),
    }
def rax_required_together():
    """Return the default list used for the required_together argument to
    AnsibleModule"""
    # api_key and username are only meaningful when supplied as a pair.
    credentials_pair = ['api_key', 'username']
    return [credentials_pair]
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules

Applies module params (falling back to RAX_* environment variables and
pyrax settings), then authenticates via keyring, api key, or a
credentials file. Fails the module on any authentication problem.
(Python 2 syntax: `except KeyError, e`.)
"""
rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')
auth_endpoint = module.params.get('auth_endpoint')
credentials = module.params.get('credentials')
env = module.params.get('env')
identity_type = module.params.get('identity_type')
region = module.params.get('region')
tenant_id = module.params.get('tenant_id')
tenant_name = module.params.get('tenant_name')
username = module.params.get('username')
verify_ssl = module.params.get('verify_ssl')
if env is not None:
rax_module.set_environment(env)
rax_module.set_setting('identity_type', identity_type)
if verify_ssl is not None:
rax_module.set_setting('verify_ssl', verify_ssl)
if auth_endpoint is not None:
rax_module.set_setting('auth_endpoint', auth_endpoint)
if tenant_id is not None:
rax_module.set_setting('tenant_id', tenant_id)
if tenant_name is not None:
rax_module.set_setting('tenant_name', tenant_name)
try:
username = username or os.environ.get('RAX_USERNAME')
if not username:
# A configured keyring username implies keyring authentication.
username = rax_module.get_setting('keyring_username')
if username:
api_key = 'USE_KEYRING'
if not api_key:
api_key = os.environ.get('RAX_API_KEY')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = (region or os.environ.get('RAX_REGION') or
rax_module.get_setting('region'))
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
try:
if api_key and username:
if api_key == 'USE_KEYRING':
rax_module.keyring_auth(username, region=region)
else:
rax_module.set_credentials(username, api_key=api_key,
region=region)
elif credentials:
credentials = os.path.expanduser(credentials)
rax_module.set_credential_file(credentials, region=region)
else:
raise Exception('No credentials supplied!')
except Exception, e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
if region_required and region not in rax_module.regions:
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
(region, ','.join(rax_module.regions)))
return rax_module
|
icereval/osf.io | refs/heads/develop | addons/wiki/tests/conftest.py | 447 | from osf_tests.conftest import * # noqa
|
lekum/ansible | refs/heads/devel | v1/ansible/runner/lookup_plugins/inventory_hostnames.py | 173 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
import ansible.inventory as inventory
def flatten(terms):
    """Flatten *terms* by exactly one level: list items are spliced into
    the result, everything else is appended as-is."""
    flat = []
    for item in terms:
        # Only lists are unwrapped, and only one level deep.
        if isinstance(item, list):
            flat += item
        else:
            flat += [item]
    return flat
class LookupModule(object):
"""Lookup plugin resolving inventory host patterns (e.g. 'all', group names) to matching hostnames."""
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
# The runner carries the inventory; without it this plugin cannot work.
if 'runner' in kwargs:
self.host_list = kwargs['runner'].inventory.host_list
else:
raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
def run(self, terms, inject=None, **kwargs):
"""Return the flat list of hostnames matching the given pattern terms."""
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list):
raise errors.AnsibleError("with_inventory_hostnames expects a list")
# Re-parse the inventory and expand each pattern into hosts.
return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
|
virlos/virl-salt | refs/heads/master | _states/consul_check.py | 4 | # -*- coding: utf-8 -*-
'''
Management of consul checks
==========================
:maintainer: Aaron Bell <aarontbell@gmail.com>
:maturity: new
:depends: - python-consul (http://python-consul.readthedocs.org/en/latest/)
:platform: Linux
.. versionadded:: 2014.7.0
:depends: - consul Python module
:configuration: See :py:mod:`salt.modules.consul` for setup instructions.
.. code-block:: yaml
check_in_consul:
consul_check.present:
- name: foo
- script: nc -z localhost 6969
- interval: 10s
check_not_in_consul:
consul_check.absent:
- name: foo
'''
__virtualname__ = 'consul_check'
def __virtual__():
    '''
    Only load if the consul module is in __salt__
    '''
    # The state module is usable only when the consul execution module
    # loaded successfully (checked via one of its functions).
    return __virtualname__ if 'consul.key_put' in __salt__ else False
def present(name, check_id=None, script=None, interval=None, ttl=None, notes=None):
    '''
    Ensure the named check is present in Consul

    name
        consul check to manage
    check_id
        alternative check id
    script + interval
        path to script for health checks, paired with invocation interval
    ttl
        ttl for status
    notes
        not used internally by consul, meant to be human-readable
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Check "%s" updated' % (name),
    }
    existed = __salt__['consul.check_get'](name)
    # Registration is used both to create a new check and to update an
    # existing one; only the reported state message differs.
    __salt__['consul.check_register'](name, check_id, script, interval, ttl, notes)
    if not existed:
        ret['changes'][name] = 'Check created'
        ret['comment'] = 'Check "%s" created' % (name)
    return ret
def absent(name):
    '''
    Ensure the named check is absent in Consul

    name
        consul check to manage
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Check "%s" removed' % (name),
    }
    if __salt__['consul.check_get'](name):
        # The check exists: remove it.
        __salt__['consul.check_deregister'](name)
    else:
        ret['comment'] = 'Check "%s" already absent' % (name)
    return ret
def ttl_set(name, status, notes=None):
    '''
    Update a ttl-based service check to either passing, warning, or failing

    name
        consul service to manage
    status
        passing, warning, or failing
    notes
        optional notes for operators
    '''
    # Renamed from `type` to avoid shadowing the builtin.
    check_type = 'check'
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Check set to %s' % (status)}
    statuses = ['passing', 'warning', 'failing']
    # NOTE(review): this looks the name up via consul.service_get although
    # it manages a check -- confirm whether consul.check_get was intended.
    if not __salt__['consul.service_get'](name):
        # BUG FIX: the original format string had no %s placeholder, so
        # `'Check does not exist' % (name)` raised TypeError at runtime.
        ret['comment'] = 'Check "%s" does not exist' % (name)
    if status not in statuses:
        ret['result'] = False
        # BUG FIX: the original joined an undefined name `s`
        # (NameError); join the list of valid statuses instead.
        ret['comment'] = 'Check must be one of: %s' % (" ".join(statuses))
    else:
        # 'passing'/'warning'/'failing' map onto the execution-module
        # functions ttl_pass/ttl_warn/ttl_fail by dropping the last three
        # characters of the status word.
        __salt__['consul.ttl_' + status[:-3]](name, check_type, notes)
    return ret
|
GdZ/scriptfile | refs/heads/master | software/googleAppEngine/google/appengine/_internal/django/utils/translation/trans_null.py | 23 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
import warnings
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.utils.encoding import force_unicode
from google.appengine._internal.django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
    """Null pluralization: return *singular* for exactly one item and
    *plural* otherwise; no translation is performed."""
    return singular if number == 1 else plural

# Without a real catalog the lazy variant is just the eager function.
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
"""Unicode variant: same selection as ngettext(), coerced via force_unicode."""
return force_unicode(ngettext(singular, plural, number))
# Null implementations: (de)activation are no-ops and the current
# language is always the configured LANGUAGE_CODE.
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
# date formats shouldn't be used using gettext anymore. This
# is kept for backward compatibility
TECHNICAL_ID_MAP = {
"DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
"DATE_FORMAT": settings.DATE_FORMAT,
"DATETIME_FORMAT": settings.DATETIME_FORMAT,
"TIME_FORMAT": settings.TIME_FORMAT,
"YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
"MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
    """Null translation: map legacy technical format ids to the matching
    settings value; otherwise return the message unchanged, preserving
    safe-string marking."""
    translated = TECHNICAL_ID_MAP.get(message, message)
    if isinstance(message, SafeData):
        # Carry the input's safe marking over to the output.
        translated = mark_safe(translated)
    return translated
def ugettext(message):
"""Unicode variant of gettext()."""
return force_unicode(gettext(message))
# With no real catalog, noop/lazy/_ all behave exactly like gettext.
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def get_language_from_request(request):
"""Null implementation: the request is ignored; always the configured LANGUAGE_CODE."""
return settings.LANGUAGE_CODE
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# but are kept for backward compatibility.
def get_date_formats():
"""Deprecated: return (DATE_FORMAT, DATETIME_FORMAT, TIME_FORMAT) from settings, warning callers to migrate."""
warnings.warn(
'`django.utils.translation.get_date_formats` is deprecated. '
'Please update your code to use the new i18n aware formatting.',
PendingDeprecationWarning
)
return settings.DATE_FORMAT, settings.DATETIME_FORMAT, settings.TIME_FORMAT
def get_partial_date_formats():
"""Deprecated: return (YEAR_MONTH_FORMAT, MONTH_DAY_FORMAT) from settings, warning callers to migrate."""
warnings.warn(
'`django.utils.translation.get_partial_date_formats` is deprecated. '
'Please update your code to use the new i18n aware formatting.',
PendingDeprecationWarning
)
return settings.YEAR_MONTH_FORMAT, settings.MONTH_DAY_FORMAT
|
pdonadeo/django-oscar | refs/heads/master | tests/unit/shipping/method_tests.py | 44 | from decimal import Decimal as D
from django.test import TestCase
import mock
from oscar.apps.shipping import methods
from oscar.apps.basket.models import Basket
class TestFreeShipppingForEmptyBasket(TestCase):
"""Free shipping on an empty basket: zero charge, known tax, basket currency. (NOTE: 'Shippping' typo kept -- renaming could break references.)"""
def setUp(self):
self.method = methods.Free()
self.basket = Basket()
self.charge = self.method.calculate(self.basket)
def test_is_free(self):
self.assertEqual(D('0.00'), self.charge.incl_tax)
self.assertEqual(D('0.00'), self.charge.excl_tax)
def test_has_tax_known(self):
self.assertTrue(self.charge.is_tax_known)
def test_has_same_currency_as_basket(self):
self.assertEqual(self.basket.currency, self.charge.currency)
class TestFreeShipppingForNonEmptyBasket(TestCase):
"""Free shipping stays zero even when the basket has items (basket mocked)."""
def setUp(self):
self.method = methods.Free()
self.basket = mock.Mock()
self.basket.num_items = 1
self.charge = self.method.calculate(self.basket)
def test_is_free(self):
self.assertEqual(D('0.00'), self.charge.incl_tax)
self.assertEqual(D('0.00'), self.charge.excl_tax)
class TestNoShippingRequired(TestCase):
    """NoShippingRequired is free but distinguishable from Free."""

    def setUp(self):
        self.method = methods.NoShippingRequired()
        self.charge = self.method.calculate(Basket())

    def test_is_free_for_empty_basket(self):
        for amount in (self.charge.incl_tax, self.charge.excl_tax):
            self.assertEqual(D('0.00'), amount)

    def test_has_a_different_code_to_free(self):
        # The two built-in methods must stay distinguishable by code.
        self.assertNotEqual(methods.NoShippingRequired.code,
                            methods.Free.code)
class TestFixedPriceShippingWithoutTax(TestCase):
    """FixedPrice with only an ex-tax charge leaves tax unknown."""

    def setUp(self):
        fixed = methods.FixedPrice(D('10.00'))
        self.method = fixed
        self.charge = fixed.calculate(Basket())

    def test_has_correct_charge(self):
        self.assertEqual(D('10.00'), self.charge.excl_tax)

    def test_does_not_include_tax(self):
        self.assertFalse(self.charge.is_tax_known)
class TestFixedPriceShippingWithTax(TestCase):
    """FixedPrice with both charges exposes a known tax amount."""

    def setUp(self):
        fixed = methods.FixedPrice(charge_excl_tax=D('10.00'),
                                   charge_incl_tax=D('12.00'))
        self.method = fixed
        self.charge = fixed.calculate(Basket())

    def test_has_correct_charge(self):
        self.assertEqual(D('10.00'), self.charge.excl_tax)
        self.assertEqual(D('12.00'), self.charge.incl_tax)

    def test_does_include_tax(self):
        self.assertTrue(self.charge.is_tax_known)
|
Kloudless/kloudless-python | refs/heads/master | tests/unit/test_resources.py | 1 | import json
import requests
import copy
import datetime
import pytz
from mock import MagicMock, patch, call
from requests.models import Response
import helpers
import kloudless
from kloudless.resources import Account, Folder, File
@helpers.configured_test
def test_account_list():
    """Account.all() deserialises every fixture entry into an Account."""
    canned = Response()
    canned.encoding = 'utf-8'
    canned._content = helpers.account_list.encode('utf-8')
    with patch('kloudless.resources.request') as mock_req:
        mock_req.return_value = canned
        accounts = Account().all()
        assert len(accounts) > 0
        assert all(isinstance(entry, Account) for entry in accounts)
@helpers.configured_test
def test_account_retrieve():
    """Account.retrieve(id) GETs accounts/<id> and maps the JSON fields
    onto the returned Account instance."""
    with patch('kloudless.resources.request') as mock_req:
        # Canned HTTP response carrying the account fixture JSON.
        resp = Response()
        resp._content = helpers.account.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        account_data = json.loads(helpers.account)
        account = Account().retrieve(account_data['id'])
        assert isinstance(account, Account)
        # The deserialised object must mirror the fixture field-for-field.
        for attr in ['id', 'service', 'active', 'account']:
            assert account_data[attr] == getattr(account, attr)
        # Verify the exact request the resource layer issued.
        mock_req.assert_called_with(account._api_session.get,
                                    'accounts/%s' % account_data['id'],
                                    configuration=None,
                                    headers=None,
                                    params={})
@helpers.configured_test
def test_folder_contents():
    """Listing a folder's contents yields File/Folder objects and issues a
    GET on the root contents endpoint with pass-through headers."""
    account = Account.create_from_data(json.loads(helpers.account))
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.root_folder_contents.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        folder = account.folders()
        # Custom headers supplied here must reach the request layer intact.
        contents = folder.contents(headers={'k': 'v'})
        assert len(contents) > 0
        assert all([(isinstance(x, Folder) or isinstance(x, File)) for x in contents])
        mock_req.assert_called_with(folder._api_session.get,
                                    ('accounts/%s/storage/folders/root/contents'
                                     % account.id),
                                    configuration=account._configuration,
                                    headers={'k': 'v'})
@helpers.configured_test
def test_folder_metadata():
    """Folder.retrieve(id) GETs the folder endpoint and maps the fixture
    metadata onto the returned Folder."""
    account = Account.create_from_data(json.loads(helpers.account))
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.folder_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        folder_data = json.loads(helpers.folder_data)
        folder = Folder.retrieve(id=folder_data['id'],
                                 parent_resource=account)
        assert isinstance(folder, Folder)
        # Returned attributes must match the fixture.
        for attr in ['id', 'name', 'type', 'size', 'account']:
            assert folder_data[attr] == getattr(folder, attr)
        mock_req.assert_called_with(Folder._api_session.get,
                                    ('accounts/%s/storage/folders/%s'
                                     % (account.id, folder_data['id'])),
                                    configuration=None,
                                    headers=None,
                                    params={})
@helpers.configured_test
def test_folder_creation():
    """Folder.create POSTs name/parent_id and builds a Folder from the
    201 response body."""
    account = Account.create_from_data(json.loads(helpers.account))
    folder_data = json.loads(helpers.folder_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp.status_code = 201  # Created
        resp._content = helpers.folder_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        folder = Folder.create(parent_resource=account,
                               data={'name': "TestFolder",
                                     'parent_id': "root"})
        assert isinstance(folder, Folder)
        for attr in ['id', 'name', 'type', 'size', 'account']:
            assert folder_data[attr] == getattr(folder, attr)
        # The POST must carry the creation payload verbatim.
        mock_req.assert_called_with(Folder._api_session.post,
                                    'accounts/%s/storage/folders' % account.id,
                                    configuration=None, params={},
                                    headers=None,
                                    data={'name': 'TestFolder',
                                          'parent_id': 'root'})
@helpers.configured_test
def test_folder_delete():
    """Folder.delete() issues a DELETE to the folder's endpoint."""
    account = Account.create_from_data(json.loads(helpers.account))
    folder_data = json.loads(helpers.folder_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp.status_code = 204  # No Content: successful delete
        # BUG FIX: this previously read `mock_req.resturn_value = resp`
        # (typo), which just set an unused attribute on the mock instead of
        # stubbing its return value.  Matches test_file_delete's setup.
        mock_req.return_value = resp
        folder = Folder.create_from_data(json.loads(helpers.folder_data),
                                         parent_resource=account)
        folder.delete()
        mock_req.assert_called_with(Folder._api_session.delete,
                                    ('accounts/%s/storage/folders/%s'
                                     % (account.id, folder_data['id'])),
                                    configuration=account._configuration,
                                    headers=None,
                                    params={})
@helpers.configured_test
def test_folder_permissions_list():
    """Permission.all() on a folder returns the fixture permission set and
    GETs the folder's permissions endpoint."""
    account = Account.create_from_data(json.loads(helpers.account))
    folder_data = json.loads(helpers.folder_data)
    folder_obj = Folder.create_from_data(folder_data, parent_resource=account)
    permission_data = json.loads(helpers.permission_data)
    permissions = permission_data.get('permissions')
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.permission_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        # (Removed a stray second `resp = Response()` that used to be
        # created here and immediately discarded - dead code.)
        permissions_obj = kloudless.resources.Permission.all(parent_resource=folder_obj)
        assert permissions_obj is not None
        perms = permissions_obj
        assert len(perms) > 0
        # Each returned permission must mirror its fixture entry.
        for index, perm in enumerate(perms):
            for attr in ['role', 'email', 'id', 'name']:
                assert perm.get(attr) == permissions[index].get(attr)
        mock_req.assert_called_with(
            kloudless.resources.Permission._api_session.get,
            'accounts/%s/storage/folders/%s/permissions' % (account['id'], folder_obj['id']),
            configuration=None,
            headers=None,
            params={}
        )
@helpers.configured_test
def test_folder_permissions_patch_and_put():
    """Creating then updating folder permissions round-trips the fixture
    data through Permission.create and the mocked request layer."""
    account = Account.create_from_data(json.loads(helpers.account))
    folder_data = json.loads(helpers.folder_data)
    folder_obj = Folder.create_from_data(folder_data, parent_resource=account)
    permission_data = json.loads(helpers.permission_data)
    permissions = permission_data.get('permissions')
    # Prepare new permissions data to create
    new_permissions = copy.deepcopy(permissions)
    new_permissions[0]["role"] = "reader"
    new_permissions[0]["email"] = "test1@example.com"
    new_permissions[0]["id"] = "test1"
    new_permissions[0]["name"] = "Test1"
    # Prepare the canned response for the create step.
    permissions.append(new_permissions[0])
    permission_data['permissions'] = permissions
    content = json.dumps(permission_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = content.encode('utf-8')
        resp.encoding = 'utf-8'
        resp.status_code = 200
        mock_req.return_value = resp
        permissions_obj = kloudless.resources.Permission.create(
            parent_resource=folder_obj, data=new_permissions)
        assert permissions_obj is not None
        perms = permissions_obj.get("permissions")
        assert len(perms) > 0
        for index, perm in enumerate(perms):
            for attr in ['role', 'email', 'id', 'name']:
                assert perm.get(attr) == permissions[index].get(attr)
        # BUG FIX: this previously called `mock_req.asssert_called_with(...)`
        # (typo) - Mock auto-creates unknown attributes, so nothing was ever
        # verified.  The originally-written expectation also omitted the
        # request's `data` payload kwarg, so it needs re-verifying against
        # the live request signature before a strict assert_called_with can
        # be restored.  TODO(review): tighten once the expected call is
        # confirmed; until then, require that a request was actually made.
        assert mock_req.called
    # Prepare new permissions data to update
    new_permissions = copy.deepcopy(new_permissions)
    new_permissions[0]["role"] = "writer"
    # Prepare the canned response for the update step.
    permission_data = json.loads(helpers.permission_data)
    permissions = permission_data.get('permissions')
    permissions.append(new_permissions[0])
    permission_data['permissions'] = permissions
    content = json.dumps(permission_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = content.encode('utf-8')
        resp.encoding = 'utf-8'
        resp.status_code = 200
        mock_req.return_value = resp
        permissions_obj = kloudless.resources.Permission.create(
            parent_resource=folder_obj, data=new_permissions)
        assert permissions_obj is not None
        perms = permissions_obj.get("permissions")
        assert len(perms) > 0
        for index, perm in enumerate(perms):
            for attr in ['role', 'email', 'id', 'name']:
                assert perm.get(attr) == permissions[index].get(attr)
        # Same `asssert_called_with` typo fixed here (see note above).
        assert mock_req.called
@helpers.configured_test
def test_file_metadata():
    """File.retrieve(id) GETs the file endpoint and maps the fixture
    metadata onto the returned File."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.file_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        file_obj = File.retrieve(id=file_data['id'],
                                 parent_resource=account)
        assert isinstance(file_obj, File)
        # Returned attributes must match the fixture.
        for attr in ['id', 'name', 'type', 'size', 'account']:
            assert file_data[attr] == getattr(file_obj, attr)
        mock_req.assert_called_with(File._api_session.get,
                                    ('accounts/%s/storage/files/%s'
                                     % (account.id, file_data['id'])),
                                    configuration=None,
                                    headers=None,
                                    params={})
@helpers.configured_test
def test_file_contents():
    """file.contents() streams the raw download (GET with stream=True) and
    hands back the Response object untouched."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        # Raw bytes fixture: no encoding is set, mirroring a binary download.
        resp._content = helpers.file_contents
        mock_req.return_value = resp
        file_contents = file_obj.contents()
        assert isinstance(file_contents, Response)
        mock_req.assert_called_with(file_obj._api_session.get,
                                    ('accounts/%s/storage/files/%s/contents'
                                     % (account.id, file_data['id'])),
                                    configuration=file_obj._configuration,
                                    headers=None,
                                    stream=True)
@helpers.configured_test
def test_file_delete():
    """file.delete() issues a DELETE to the file's endpoint."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp.status_code = 204  # No Content: successful delete
        mock_req.return_value = resp
        file_obj.delete()
        mock_req.assert_called_with(file_obj._api_session.delete,
                                    ('accounts/%s/storage/files/%s'
                                     % (account.id, file_data['id'])),
                                    configuration=file_obj._configuration,
                                    headers=None,
                                    params={})
@helpers.configured_test
def test_file_upload():
    """File.create uploads raw bytes via POST, carrying file metadata in the
    X-Kloudless-Metadata header alongside any caller-supplied headers."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.file_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        file_obj = File.create(parent_resource=account,
                               file_name=file_data['name'],
                               parent_id='root',
                               headers={'k': 'v'},
                               file_data=helpers.file_contents)
        assert isinstance(file_obj, File)
        for attr in ['id', 'name', 'type', 'size', 'account']:
            assert file_data[attr] == getattr(file_obj, attr)
        # Caller headers must be merged with the upload's content-type and
        # metadata headers; the body is the raw file bytes.
        mock_req.assert_called_with(File._api_session.post,
                                    'accounts/%s/storage/files' % account.id,
                                    data=helpers.file_contents,
                                    headers={
                                        'k': 'v',
                                        'Content-Type':
                                        'application/octet-stream',
                                        'X-Kloudless-Metadata': json.dumps({
                                            'name': file_data['name'],
                                            'parent_id': 'root'})
                                    },
                                    params=None,
                                    configuration=None)
@helpers.configured_test
def test_file_update():
    """save() on a dirty File PATCHes the changed fields, then refreshes the
    parent account resource with a follow-up GET."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        new_data = file_data.copy()
        new_data['name'] = 'NewFileName'
        resp._content = json.dumps(new_data).encode('utf-8')
        resp.encoding = 'utf-8'
        account_resp = Response()
        account_resp._content = helpers.account.encode('utf-8')
        account_resp.encoding = 'utf-8'
        # Two requests fire in order: the PATCH, then the parent refresh -
        # side_effect serves them in sequence.
        mock_req.side_effect = (resp,account_resp)
        file_obj.name = 'NewFileName'
        file_obj.parent_id = 'root'
        file_obj.save()
        expected_calls = [
            # This is updating the file
            call(file_obj._api_session.patch,
                 'accounts/%s/storage/files/%s' % (account.id,
                                                   file_data['id']),
                 params={},
                 data={'name': u'NewFileName',
                       'parent_id': 'root'},
                 headers=None,
                 configuration=file_obj._configuration),
            # This is refreshing the parent resource
            call(account._api_session.get,
                 'accounts/%s' % account.id,
                 headers=None,
                 configuration=account._configuration),
        ]
        mock_req.assert_has_calls(expected_calls)
@helpers.configured_test
def test_file_copy():
    """copy_file() POSTs to the file's /copy endpoint with the new name and
    destination parent."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        new_data = file_data.copy()
        new_data['name'] = 'NewFileName'
        resp._content = json.dumps(new_data).encode('utf-8')
        resp.encoding = 'utf-8'
        account_resp = Response()
        # BUG FIX: the account fixture was previously assigned as a str;
        # Response._content must be bytes (and encoding set), matching how
        # every other test in this module - e.g. test_file_update - builds
        # the refresh response.
        account_resp._content = helpers.account.encode('utf-8')
        account_resp.encoding = 'utf-8'
        mock_req.side_effect = (resp, account_resp)
        file_obj.copy_file(name='NewFileName', parent_id='root')
        expected_calls = [
            # This is copying the file
            call(file_obj._api_session.post,
                 'accounts/%s/storage/files/%s/copy' % (account.id,
                                                        file_data['id']),
                 data={'name': u'NewFileName',
                       'parent_id': 'root'},
                 headers=None,
                 configuration=file_obj._configuration),
        ]
        mock_req.assert_has_calls(expected_calls)
@helpers.configured_test
def test_file_property_list():
    """Property.all() on a file returns the fixture properties, with the
    timestamp fields parsed into UTC-aware datetimes."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    property_data = json.loads(helpers.property_data)
    properties = property_data.get('properties')
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.property_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        properties_obj = kloudless.resources.Property.all(parent_resource=file_obj)
        assert properties_obj is not None
        assert len(properties_obj) > 0
        index = 0
        for prop in properties_obj:
            for attr in ['key', 'value', 'created', 'modified']:
                if attr == 'created' or attr == 'modified':
                    # The client is expected to have converted these ISO
                    # strings to timezone-aware datetimes; rebuild the
                    # expected value from the raw fixture for comparison.
                    datetime_obj = datetime.datetime.strptime(
                        properties[index].get(attr),
                        '%Y-%m-%dT%H:%M:%S.%fZ')
                    datetime_obj_utc = datetime_obj.replace(
                        tzinfo=pytz.timezone('UTC'))
                    assert prop.get(attr) == datetime_obj_utc
                else:
                    assert prop.get(attr) == properties[index].get(attr)
            index += 1
        mock_req.assert_called_with(
            kloudless.resources.Property._api_session.get,
            'accounts/%s/storage/files/%s/properties' % (account['id'], file_obj['id']),
            configuration=None,
            headers=None,
            params={}
        )
@helpers.configured_test
def test_file_property_patch():
    """Property.update replaces/merges the property set: one value updated,
    one removed, one added; timestamps come back as UTC datetimes."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    property_data = json.loads(helpers.property_data)
    properties = property_data.get('properties')
    new_properties = copy.deepcopy(properties)
    new_properties[0]['value'] = 'test update'
    # remove the second one
    new_properties.remove(new_properties[1])
    new_properties.append({
        'key': 'newkey',
        'value': 'newvalue',
        'created': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
        'modified': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
    })
    content = json.dumps(new_properties)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = content.encode('utf-8')
        resp.encoding = 'utf-8'
        resp.status_code = 200
        mock_req.return_value = resp
        updated_properties = kloudless.resources.Property.update(
            parent_resource=file_obj, data=new_properties)
        assert updated_properties is not None
        assert len(updated_properties) == 2
        for index, prop in enumerate(updated_properties):
            for attr in ['key', 'value', 'created', 'modified']:
                if attr == 'created' or attr == 'modified':
                    # Compare against the fixture timestamp, re-parsed and
                    # made UTC-aware the same way the client does.
                    datetime_obj = datetime.datetime.strptime(
                        new_properties[index].get(attr),
                        '%Y-%m-%dT%H:%M:%S.%fZ')
                    datetime_obj_utc = datetime_obj.replace(
                        tzinfo=pytz.timezone('UTC'))
                    assert prop.get(attr) == datetime_obj_utc
                else:
                    assert prop.get(attr) == new_properties[index].get(attr)
        # BUG FIX: this previously called `mock_req.asssert_called_with(...)`
        # (typo) - Mock auto-creates unknown attributes, so the check was a
        # silent no-op.  The written expectation omitted the `data` kwarg and
        # must be re-verified against the live request signature before a
        # strict assert_called_with is restored; assert the request layer was
        # exercised at minimum.  TODO(review): tighten.
        assert mock_req.called
@helpers.configured_test
def test_file_property_delete():
    """Property.delete_all() DELETEs the whole properties collection."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp.status_code = 204  # No Content: successful delete
        mock_req.return_value = resp
        deleted_properties = kloudless.resources.Property.delete_all(
            parent_resource=file_obj)
        assert deleted_properties is not None
        mock_req.assert_called_with(kloudless.resources.Property._api_session.delete,
                                    'accounts/%s/storage/files/%s/properties' %
                                    (account['id'], file_obj['id']), configuration=None,
                                    headers=None)
@helpers.configured_test
def test_file_permissions_list():
    """Permission.all() on a file returns the fixture permission set and
    GETs the file's permissions endpoint."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    permission_data = json.loads(helpers.permission_data)
    permissions = permission_data.get('permissions')
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = helpers.permission_data.encode('utf-8')
        resp.encoding = 'utf-8'
        mock_req.return_value = resp
        permissions_obj = kloudless.resources.Permission.all(parent_resource=file_obj)
        assert permissions_obj is not None
        perms = permissions_obj
        assert len(perms) > 0
        index = 0
        # Each returned permission must mirror its fixture entry.
        for perm in perms:
            for attr in ['role', 'email', 'id', 'name']:
                assert perm.get(attr) == permissions[index].get(attr)
            index += 1
        mock_req.assert_called_with(
            kloudless.resources.Permission._api_session.get,
            'accounts/%s/storage/files/%s/permissions' % (account['id'], file_obj['id']),
            configuration=None,
            headers=None,
            params={}
        )
@helpers.configured_test
def test_file_permissions_patch_and_put():
    """Creating then updating file permissions round-trips the fixture data
    through Permission.create and the mocked request layer."""
    account = Account.create_from_data(json.loads(helpers.account))
    file_data = json.loads(helpers.file_data)
    file_obj = File.create_from_data(file_data, parent_resource=account)
    permission_data = json.loads(helpers.permission_data)
    permissions = permission_data.get('permissions')
    # Prepare new permissions data to create
    new_permissions = copy.deepcopy(permissions)
    new_permissions[0]["role"] = "reader"
    new_permissions[0]["email"] = "test1@example.com"
    new_permissions[0]["id"] = "test1"
    new_permissions[0]["name"] = "Test1"
    # Prepare the canned response for the create step.
    permissions.append(new_permissions[0])
    permission_data['permissions'] = permissions
    content = json.dumps(permission_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = content.encode('utf-8')
        resp.encoding = 'utf-8'
        resp.status_code = 200
        mock_req.return_value = resp
        permissions_obj = kloudless.resources.Permission.create(
            parent_resource=file_obj, data=new_permissions)
        assert permissions_obj is not None
        perms = permissions_obj.get("permissions")
        assert len(perms) > 0
        for index, perm in enumerate(perms):
            for attr in ['role', 'email', 'id', 'name']:
                assert perm.get(attr) == permissions[index].get(attr)
        # BUG FIX: this previously called `mock_req.asssert_called_with(...)`
        # (typo) - a silent no-op since Mock auto-creates unknown attributes.
        # The written expectation omitted the `data` kwarg, and notably
        # asserted `put` here where the folder twin asserts `patch` for the
        # create step - both points need verifying against the live request
        # signature before a strict assert_called_with can be restored.
        # TODO(review): tighten; for now require a request was made.
        assert mock_req.called
    # Prepare new permissions data to update
    new_permissions = copy.deepcopy(new_permissions)
    new_permissions[0]["role"] = "writer"
    # Prepare the canned response for the update step.
    permission_data = json.loads(helpers.permission_data)
    permissions = permission_data.get('permissions')
    permissions.append(new_permissions[0])
    permission_data['permissions'] = permissions
    content = json.dumps(permission_data)
    with patch('kloudless.resources.request') as mock_req:
        resp = Response()
        resp._content = content.encode('utf-8')
        resp.encoding = 'utf-8'
        resp.status_code = 200
        mock_req.return_value = resp
        permissions_obj = kloudless.resources.Permission.create(
            parent_resource=file_obj, data=new_permissions)
        assert permissions_obj is not None
        perms = permissions_obj.get("permissions")
        assert len(perms) > 0
        for index, perm in enumerate(perms):
            for attr in ['role', 'email', 'id', 'name']:
                assert perm.get(attr) == permissions[index].get(attr)
        # Same `asssert_called_with` typo fixed here (see note above).
        assert mock_req.called
|
CiscoSystems/cognitive | refs/heads/master | cognitive/app/api/experiments.py | 1 | # Copyright 2015 Cisco Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ..models import Experiment
from ..serializers import ExperimentSerializer
from ..views import send_response
from rest_framework import viewsets
class ExperimentViewSet(viewsets.ViewSet):
    # CRUD endpoints for Experiment records.  Serialization is delegated to
    # ExperimentSerializer and response shaping to send_response().  The
    # `---` markers in the docstrings below are consumed by the API-doc
    # generator and must be preserved verbatim.

    def list(self, request):
        """
        Lists all experiments for a particular user
        ---
        """
        # NOTE(review): despite the docstring, no per-user filtering is
        # applied - every Experiment row is returned.  Confirm intent.
        exp = Experiment.objects.all()
        serializer = ExperimentSerializer(exp, many=True)
        return send_response(request.method, serializer)

    def retrieve(self, request, pk=None):
        """
        Retrieve an experiment for a particular user
        ---
        """
        # .get() raises Experiment.DoesNotExist for an unknown pk; that
        # surfaces as an unhandled exception rather than a 404.
        exp = Experiment.objects.get(pk=pk)
        serializer = ExperimentSerializer(exp)
        return send_response(request.method, serializer)

    def create(self, request):
        """
        Create an experiment for a particular user
        ---
        request_serializer: ExperimentSerializer
        """
        serializer = ExperimentSerializer(data=request.data)
        # NOTE(review): invalid payloads are not rejected - when is_valid()
        # is False, nothing is saved but the serializer is still passed to
        # send_response with no error signal.  Confirm intent.
        if serializer.is_valid():
            serializer.save()
        return send_response(request.method, serializer)

    def update(self, request, pk=None):
        """
        Update an experiment for a particular user
        ---
        request_serializer: ExperimentSerializer
        """
        exp = Experiment.objects.get(pk=pk)
        serializer = ExperimentSerializer(exp, data=request.data)
        # Same silent-on-invalid behaviour as create() above.
        if serializer.is_valid():
            serializer.save()
        return send_response(request.method, serializer)

    def destroy(self, request, pk=None):
        """
        Delete an experiment for a particular user
        ---
        """
        exp = Experiment.objects.get(pk=pk)
        # No serializer payload for a delete response.
        serializer = None
        exp.delete()
        return send_response(request.method, serializer)
|
sileht/deb-openstack-python-keystoneclient | refs/heads/debian/unstable | tests/test_http.py | 1 | import httplib2
import mock
from keystoneclient import client
from tests import utils
# Canned successful HTTP exchange shared by every test below: the
# (response, body) tuple mirrors httplib2.Http.request's return value.
fake_response = httplib2.Response({"status": 200})
fake_body = '{"hi": "there"}'
mock_request = mock.Mock(return_value=(fake_response, fake_body))
def get_client():
    """Build an unauthenticated HTTPClient with dummy credentials."""
    return client.HTTPClient(username="username",
                             password="password",
                             tenant_id="tenant",
                             auth_url="auth_test")
def get_authed_client():
    """Return a client pre-seeded with a management URL and auth token,
    as if authentication had already succeeded."""
    authed = get_client()
    authed.management_url = "http://127.0.0.1:5000"
    authed.auth_token = "token"
    return authed
class ClientTest(utils.TestCase):
    """HTTPClient request tests against a mocked httplib2 transport."""

    def test_get(self):
        cl = get_authed_client()
        # Patch at the httplib2 level so the client's full request path runs;
        # time.time is pinned so any time-based bookkeeping is deterministic.
        @mock.patch.object(httplib2.Http, "request", mock_request)
        @mock.patch('time.time', mock.Mock(return_value=1234))
        def test_get_call():
            resp, body = cl.get("/hi")
            headers = {"X-Auth-Token": "token",
                       "User-Agent": cl.USER_AGENT,
                       }
            mock_request.assert_called_with("http://127.0.0.1:5000/hi",
                                            "GET", headers=headers)
            # Automatic JSON parsing
            self.assertEqual(body, {"hi": "there"})
        # The decorated closure is invoked immediately so the patches only
        # cover this call.
        test_get_call()

    def test_post(self):
        cl = get_authed_client()
        @mock.patch.object(httplib2.Http, "request", mock_request)
        def test_post_call():
            # Body must be JSON-serialised and content-type set accordingly.
            cl.post("/hi", body=[1, 2, 3])
            headers = {
                "X-Auth-Token": "token",
                "Content-Type": "application/json",
                "User-Agent": cl.USER_AGENT
            }
            mock_request.assert_called_with("http://127.0.0.1:5000/hi", "POST",
                                            headers=headers, body='[1, 2, 3]')
        test_post_call()
|
theicfire/djangofun | refs/heads/master | django/templatetags/cache.py | 309 | from django.template import Library, Node, TemplateSyntaxError, Variable, VariableDoesNotExist
from django.template import resolve_variable
from django.core.cache import cache
from django.utils.encoding import force_unicode
from django.utils.http import urlquote
from django.utils.hashcompat import md5_constructor
register = Library()
class CacheNode(Node):
    """Template node that caches its rendered contents under a key derived
    from the fragment name and the resolved vary-on values."""

    def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
        self.nodelist = nodelist
        # Stored as a Variable so the timeout may itself be a template var.
        self.expire_time_var = Variable(expire_time_var)
        self.fragment_name = fragment_name
        self.vary_on = vary_on

    def render(self, context):
        """Render from cache if possible, otherwise render and store."""
        try:
            expire_time = self.expire_time_var.resolve(context)
        except VariableDoesNotExist:
            raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
        try:
            expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
        # Build a unicode key for this fragment and all vary-on's.
        args = md5_constructor(u':'.join([urlquote(resolve_variable(var, context)) for var in self.vary_on]))
        cache_key = 'template.cache.%s.%s' % (self.fragment_name, args.hexdigest())
        value = cache.get(cache_key)
        if value is None:
            # Cache miss: render the wrapped nodes and store the result.
            value = self.nodelist.render(context)
            cache.set(cache_key, value, expire_time)
        return value
def do_cache(parser, token):
    """
    This will cache the contents of a template fragment for a given amount
    of time.
    Usage::
        {% load cache %}
        {% cache [expire_time] [fragment_name] %}
            .. some expensive processing ..
        {% endcache %}
    This tag also supports varying by a list of arguments::
        {% load cache %}
        {% cache [expire_time] [fragment_name] [var1] [var2] .. %}
            .. some expensive processing ..
        {% endcache %}
    Each unique set of arguments will result in a unique cache entry.
    """
    # Consume everything up to {% endcache %}; the closing token itself is
    # then discarded.
    nodelist = parser.parse(('endcache',))
    parser.delete_first_token()
    tokens = token.contents.split()
    # tokens[0] is the tag name; at least expire_time and fragment_name
    # must follow.
    if len(tokens) < 3:
        raise TemplateSyntaxError(u"'%r' tag requires at least 2 arguments." % tokens[0])
    # Remaining tokens (if any) are the vary-on variable names.
    return CacheNode(nodelist, tokens[1], tokens[2], tokens[3:])
register.tag('cache', do_cache)
|
ademuk/django-oscar | refs/heads/master | src/oscar/apps/offer/models.py | 4 | import itertools
import os
import re
import operator
from decimal import Decimal as D, ROUND_DOWN, ROUND_UP
from django.core import exceptions
from django.template.defaultfilters import date as date_filter
from django.db import models
from django.db.models.query import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now, get_current_timezone
from django.utils.translation import ungettext, ugettext_lazy as _
from django.utils.importlib import import_module
from django.utils import six
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.conf import settings
from oscar.core.compat import AUTH_USER_MODEL
from oscar.core.loading import get_class, get_model
from oscar.apps.offer.managers import ActiveOfferManager
from oscar.templatetags.currency_filters import currency
from oscar.models import fields
BrowsableRangeManager = get_class('offer.managers', 'BrowsableRangeManager')
def load_proxy(proxy_class):
    """Import and return the attribute named by the dotted path
    *proxy_class* (e.g. ``'myapp.offers.CustomBenefit'``)."""
    module_path, attr_name = proxy_class.rsplit('.', 1)
    try:
        imported = import_module(module_path)
    except ImportError as e:
        raise exceptions.ImproperlyConfigured(
            "Error importing module %s: %s" % (module_path, e))
    # The module imported cleanly - now look up the requested attribute.
    if not hasattr(imported, attr_name):
        raise exceptions.ImproperlyConfigured(
            "Module %s does not define a %s" % (module_path, attr_name))
    return getattr(imported, attr_name)
def range_anchor(range):
    """Return an HTML anchor linking to the dashboard edit page for *range*."""
    return u'<a href="%s">%s</a>' % (
        reverse('dashboard:range-update', kwargs={'pk': range.pk}),
        range.name)
def unit_price(offer, line):
    """
    Return the relevant price for a given basket line.
    This is required so offers can apply in circumstances where tax isn't known
    """
    # `offer` is accepted for interface uniformity but unused here.
    return line.unit_effective_price
def apply_discount(line, discount, quantity):
    """
    Apply a given discount to the passed basket
    """
    # Discounts computed by offers are on ex-tax prices.
    line.discount(discount, quantity, incl_tax=False)
@python_2_unicode_compatible
class ConditionalOffer(models.Model):
"""
A conditional offer (eg buy 1, get 10% off)
"""
name = models.CharField(
_("Name"), max_length=128, unique=True,
help_text=_("This is displayed within the customer's basket"))
slug = fields.AutoSlugField(
_("Slug"), max_length=128, unique=True, populate_from='name')
description = models.TextField(_("Description"), blank=True,
help_text=_("This is displayed on the offer"
" browsing page"))
# Offers come in a few different types:
# (a) Offers that are available to all customers on the site. Eg a
# 3-for-2 offer.
# (b) Offers that are linked to a voucher, and only become available once
# that voucher has been applied to the basket
# (c) Offers that are linked to a user. Eg, all students get 10% off. The
# code to apply this offer needs to be coded
# (d) Session offers - these are temporarily available to a user after some
# trigger event. Eg, users coming from some affiliate site get 10%
# off.
SITE, VOUCHER, USER, SESSION = ("Site", "Voucher", "User", "Session")
TYPE_CHOICES = (
(SITE, _("Site offer - available to all users")),
(VOUCHER, _("Voucher offer - only available after entering "
"the appropriate voucher code")),
(USER, _("User offer - available to certain types of user")),
(SESSION, _("Session offer - temporary offer, available for "
"a user for the duration of their session")),
)
offer_type = models.CharField(
_("Type"), choices=TYPE_CHOICES, default=SITE, max_length=128)
# We track a status variable so it's easier to load offers that are
# 'available' in some sense.
OPEN, SUSPENDED, CONSUMED = "Open", "Suspended", "Consumed"
status = models.CharField(_("Status"), max_length=64, default=OPEN)
condition = models.ForeignKey(
'offer.Condition', verbose_name=_("Condition"))
benefit = models.ForeignKey('offer.Benefit', verbose_name=_("Benefit"))
# Some complicated situations require offers to be applied in a set order.
priority = models.IntegerField(
_("Priority"), default=0,
help_text=_("The highest priority offers are applied first"))
# AVAILABILITY
# Range of availability. Note that if this is a voucher offer, then these
# dates are ignored and only the dates from the voucher are used to
# determine availability.
start_datetime = models.DateTimeField(
_("Start date"), blank=True, null=True)
end_datetime = models.DateTimeField(
_("End date"), blank=True, null=True,
help_text=_("Offers are active until the end of the 'end date'"))
# Use this field to limit the number of times this offer can be applied in
# total. Note that a single order can apply an offer multiple times so
# this is not the same as the number of orders that can use it.
max_global_applications = models.PositiveIntegerField(
_("Max global applications"),
help_text=_("The number of times this offer can be used before it "
"is unavailable"), blank=True, null=True)
# Use this field to limit the number of times this offer can be used by a
# single user. This only works for signed-in users - it doesn't really
# make sense for sites that allow anonymous checkout.
max_user_applications = models.PositiveIntegerField(
_("Max user applications"),
help_text=_("The number of times a single user can use this offer"),
blank=True, null=True)
# Use this field to limit the number of times this offer can be applied to
# a basket (and hence a single order).
max_basket_applications = models.PositiveIntegerField(
_("Max basket applications"),
blank=True, null=True,
help_text=_("The number of times this offer can be applied to a "
"basket (and order)"))
# Use this field to limit the amount of discount an offer can lead to.
# This can be helpful with budgeting.
max_discount = models.DecimalField(
_("Max discount"), decimal_places=2, max_digits=12, null=True,
blank=True,
help_text=_("When an offer has given more discount to orders "
"than this threshold, then the offer becomes "
"unavailable"))
# TRACKING
total_discount = models.DecimalField(
_("Total Discount"), decimal_places=2, max_digits=12,
default=D('0.00'))
num_applications = models.PositiveIntegerField(
_("Number of applications"), default=0)
num_orders = models.PositiveIntegerField(
_("Number of Orders"), default=0)
redirect_url = fields.ExtendedURLField(
_("URL redirect (optional)"), blank=True)
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
objects = models.Manager()
active = ActiveOfferManager()
# We need to track the voucher that this offer came from (if it is a
# voucher offer)
_voucher = None
    class Meta:
        # Model options: highest-priority offers are applied first.
        app_label = 'offer'
        ordering = ['-priority']
        verbose_name = _("Conditional offer")
        verbose_name_plural = _("Conditional offers")
def save(self, *args, **kwargs):
# Check to see if consumption thresholds have been broken
if not self.is_suspended:
if self.get_max_applications() == 0:
self.status = self.CONSUMED
else:
self.status = self.OPEN
return super(ConditionalOffer, self).save(*args, **kwargs)
    def get_absolute_url(self):
        """Return the URL of this offer's detail page (by slug)."""
        return reverse('offer:detail', kwargs={'slug': self.slug})
    def __str__(self):
        # Display the offer by its (human-entered) name.
        return self.name
def clean(self):
if (self.start_datetime and self.end_datetime and
self.start_datetime > self.end_datetime):
raise exceptions.ValidationError(
_('End date should be later than start date'))
    @property
    def is_open(self):
        # True when the offer's status field is OPEN.
        return self.status == self.OPEN
    @property
    def is_suspended(self):
        # True when the offer's status field is SUSPENDED.
        return self.status == self.SUSPENDED
    def suspend(self):
        """Mark the offer as suspended and persist the change."""
        self.status = self.SUSPENDED
        self.save()
    # Tell Django templates this method has side effects.
    suspend.alters_data = True
    def unsuspend(self):
        """Re-open a suspended offer and persist the change."""
        self.status = self.OPEN
        self.save()
    # Tell Django templates this method has side effects.
    unsuspend.alters_data = True
def is_available(self, user=None, test_date=None):
"""
Test whether this offer is available to be used
"""
if self.is_suspended:
return False
if test_date is None:
test_date = now()
predicates = []
if self.start_datetime:
predicates.append(self.start_datetime > test_date)
if self.end_datetime:
predicates.append(test_date > self.end_datetime)
if any(predicates):
return False
return self.get_max_applications(user) > 0
    def is_condition_satisfied(self, basket):
        # Delegate to the condition's proxy implementation.
        return self.condition.proxy().is_satisfied(self, basket)
    def is_condition_partially_satisfied(self, basket):
        # Delegate to the condition's proxy implementation.
        return self.condition.proxy().is_partially_satisfied(self, basket)
    def get_upsell_message(self, basket):
        # Delegate to the condition's proxy implementation.
        return self.condition.proxy().get_upsell_message(self, basket)
def apply_benefit(self, basket):
"""
Applies the benefit to the given basket and returns the discount.
"""
if not self.is_condition_satisfied(basket):
return ZERO_DISCOUNT
return self.benefit.proxy().apply(
basket, self.condition.proxy(), self)
    def apply_deferred_benefit(self, basket, order, application):
        """
        Applies any deferred benefits. These are things like adding loyalty
        points to someone's account.
        """
        return self.benefit.proxy().apply_deferred(basket, order, application)
    def set_voucher(self, voucher):
        # Remember the voucher this offer application came from.
        self._voucher = voucher
    def get_voucher(self):
        # Return the voucher set by set_voucher(), or None.
        return self._voucher
def get_max_applications(self, user=None):
"""
Return the number of times this offer can be applied to a basket for a
given user.
"""
if self.max_discount and self.total_discount >= self.max_discount:
return 0
# Hard-code a maximum value as we need some sensible upper limit for
# when there are not other caps.
limits = [10000]
if self.max_user_applications and user:
limits.append(max(0, self.max_user_applications -
self.get_num_user_applications(user)))
if self.max_basket_applications:
limits.append(self.max_basket_applications)
if self.max_global_applications:
limits.append(
max(0, self.max_global_applications - self.num_applications))
return min(limits)
def get_num_user_applications(self, user):
OrderDiscount = get_model('order', 'OrderDiscount')
aggregates = OrderDiscount.objects.filter(offer_id=self.id,
order__user=user)\
.aggregate(total=models.Sum('frequency'))
return aggregates['total'] if aggregates['total'] is not None else 0
    def shipping_discount(self, charge):
        # Delegate shipping-discount calculation to the benefit proxy.
        return self.benefit.proxy().shipping_discount(charge)
    def record_usage(self, discount):
        """
        Record one order's use of this offer.

        ``discount`` is a dict with 'freq' (number of applications within
        the order) and 'discount' (monetary amount given).
        """
        self.num_applications += discount['freq']
        self.total_discount += discount['discount']
        self.num_orders += 1
        self.save()
    # Tell Django templates this method has side effects.
    record_usage.alters_data = True
def availability_description(self):
"""
Return a description of when this offer is available
"""
restrictions = self.availability_restrictions()
descriptions = [r['description'] for r in restrictions]
return "<br/>".join(descriptions)
def availability_restrictions(self): # noqa (too complex (15))
restrictions = []
if self.is_suspended:
restrictions.append({
'description': _("Offer is suspended"),
'is_satisfied': False})
if self.max_global_applications:
remaining = self.max_global_applications - self.num_applications
desc = _("Limited to %(total)d uses (%(remainder)d remaining)") \
% {'total': self.max_global_applications,
'remainder': remaining}
restrictions.append({'description': desc,
'is_satisfied': remaining > 0})
if self.max_user_applications:
if self.max_user_applications == 1:
desc = _("Limited to 1 use per user")
else:
desc = _("Limited to %(total)d uses per user") \
% {'total': self.max_user_applications}
restrictions.append({'description': desc,
'is_satisfied': True})
if self.max_basket_applications:
if self.max_user_applications == 1:
desc = _("Limited to 1 use per basket")
else:
desc = _("Limited to %(total)d uses per basket") \
% {'total': self.max_basket_applications}
restrictions.append({
'description': desc,
'is_satisfied': True})
def hide_time_if_zero(dt):
# Only show hours/minutes if they have been specified
if dt.tzinfo:
localtime = dt.astimezone(get_current_timezone())
else:
localtime = dt
if localtime.hour == 0 and localtime.minute == 0:
return date_filter(localtime, settings.DATE_FORMAT)
return date_filter(localtime, settings.DATETIME_FORMAT)
if self.start_datetime or self.end_datetime:
today = now()
if self.start_datetime and self.end_datetime:
desc = _("Available between %(start)s and %(end)s") \
% {'start': hide_time_if_zero(self.start_datetime),
'end': hide_time_if_zero(self.end_datetime)}
is_satisfied \
= self.start_datetime <= today <= self.end_datetime
elif self.start_datetime:
desc = _("Available from %(start)s") % {
'start': hide_time_if_zero(self.start_datetime)}
is_satisfied = today >= self.start_datetime
elif self.end_datetime:
desc = _("Available until %(end)s") % {
'end': hide_time_if_zero(self.end_datetime)}
is_satisfied = today <= self.end_datetime
restrictions.append({
'description': desc,
'is_satisfied': is_satisfied})
if self.max_discount:
desc = _("Limited to a cost of %(max)s") % {
'max': currency(self.max_discount)}
restrictions.append({
'description': desc,
'is_satisfied': self.total_discount < self.max_discount})
return restrictions
    @property
    def has_products(self):
        # The offer targets products when its condition has a range.
        return self.condition.range is not None
    def products(self):
        """
        Return a queryset of products in this offer

        Only discountable, non-child products are included.
        """
        Product = get_model('catalogue', 'Product')
        if not self.has_products:
            return Product.objects.none()
        cond_range = self.condition.range
        if cond_range.includes_all_products:
            # Return ALL the products
            queryset = Product.browsable
        else:
            queryset = cond_range.included_products
        return queryset.filter(is_discountable=True).exclude(
            structure=Product.CHILD)
@python_2_unicode_compatible
class Condition(models.Model):
    """
    A condition for an offer to be applied. You can either specify a custom
    proxy class, or need to specify a type, range and value.
    """
    # Built-in condition types; each maps to a proxy class in proxy().
    COUNT, VALUE, COVERAGE = ("Count", "Value", "Coverage")
    TYPE_CHOICES = (
        (COUNT, _("Depends on number of items in basket that are in "
                  "condition range")),
        (VALUE, _("Depends on value of items in basket that are in "
                  "condition range")),
        (COVERAGE, _("Needs to contain a set number of DISTINCT items "
                     "from the condition range")))
    range = models.ForeignKey(
        'offer.Range', verbose_name=_("Range"), null=True, blank=True)
    type = models.CharField(_('Type'), max_length=128, choices=TYPE_CHOICES,
                            blank=True)
    # Interpreted as a count (COUNT/COVERAGE) or a monetary amount (VALUE).
    value = fields.PositiveDecimalField(
        _('Value'), decimal_places=2, max_digits=12, null=True, blank=True)
    # Dotted path to a custom condition class; overrides type/range/value.
    proxy_class = fields.NullCharField(
        _("Custom class"), max_length=255, unique=True, default=None)

    class Meta:
        app_label = 'offer'
        verbose_name = _("Condition")
        verbose_name_plural = _("Conditions")

    def proxy(self):
        """
        Return the proxy model
        """
        klassmap = {
            self.COUNT: CountCondition,
            self.VALUE: ValueCondition,
            self.COVERAGE: CoverageCondition}
        # Short-circuit logic if current class is already a proxy class.
        if self.__class__ in klassmap.values():
            return self
        # Copy the instance's field values (dropping private/_state attrs)
        # so the proxy instance mirrors this row.
        field_dict = dict(self.__dict__)
        for field in list(field_dict.keys()):
            if field.startswith('_'):
                del field_dict[field]
        if self.proxy_class:
            klass = load_proxy(self.proxy_class)
            # Short-circuit again.
            if self.__class__ == klass:
                return self
            return klass(**field_dict)
        if self.type in klassmap:
            return klassmap[self.type](**field_dict)
        raise RuntimeError("Unrecognised condition type (%s)" % self.type)

    def __str__(self):
        return self.name

    @property
    def name(self):
        """
        A plaintext description of the condition. Every proxy class has to
        implement it.

        This is used in the dropdowns within the offer dashboard.
        """
        return self.proxy().name

    @property
    def description(self):
        """
        A description of the condition.

        Defaults to the name. May contain HTML.
        """
        return self.name

    def consume_items(self, offer, basket, affected_lines):
        # No-op by default; proxy subclasses mark basket lines as consumed.
        pass

    def is_satisfied(self, offer, basket):
        """
        Determines whether a given basket meets this condition. This is
        stubbed in this top-class object. The subclassing proxies are
        responsible for implementing it correctly.
        """
        return False

    def is_partially_satisfied(self, offer, basket):
        """
        Determine if the basket partially meets the condition. This is useful
        for up-selling messages to entice customers to buy something more in
        order to qualify for an offer.
        """
        return False

    def get_upsell_message(self, offer, basket):
        # Proxy subclasses return a customer-facing up-sell string.
        return None

    def can_apply_condition(self, line):
        """
        Determines whether the condition can be applied to a given basket line
        """
        if not line.stockrecord_id:
            return False
        product = line.product
        return (self.range.contains_product(product)
                and product.get_is_discountable())

    def get_applicable_lines(self, offer, basket, most_expensive_first=True):
        """
        Return line data for the lines that can be consumed by this condition

        Returns a list of (unit price, line) tuples, sorted by price.
        Zero-priced lines are skipped.
        """
        line_tuples = []
        for line in basket.all_lines():
            if not self.can_apply_condition(line):
                continue
            price = unit_price(offer, line)
            if not price:
                continue
            line_tuples.append((price, line))
        key = operator.itemgetter(0)
        if most_expensive_first:
            return sorted(line_tuples, reverse=True, key=key)
        return sorted(line_tuples, key=key)
@python_2_unicode_compatible
class Benefit(models.Model):
    """
    The reward given when an offer's condition is satisfied - either a
    built-in type/value pair, or a custom proxy class.
    """
    range = models.ForeignKey(
        'offer.Range', null=True, blank=True, verbose_name=_("Range"))

    # Benefit types
    PERCENTAGE, FIXED, MULTIBUY, FIXED_PRICE = (
        "Percentage", "Absolute", "Multibuy", "Fixed price")
    SHIPPING_PERCENTAGE, SHIPPING_ABSOLUTE, SHIPPING_FIXED_PRICE = (
        'Shipping percentage', 'Shipping absolute', 'Shipping fixed price')
    TYPE_CHOICES = (
        (PERCENTAGE, _("Discount is a percentage off of the product's value")),
        (FIXED, _("Discount is a fixed amount off of the product's value")),
        (MULTIBUY, _("Discount is to give the cheapest product for free")),
        (FIXED_PRICE,
         _("Get the products that meet the condition for a fixed price")),
        (SHIPPING_ABSOLUTE,
         _("Discount is a fixed amount of the shipping cost")),
        (SHIPPING_FIXED_PRICE, _("Get shipping for a fixed price")),
        (SHIPPING_PERCENTAGE, _("Discount is a percentage off of the shipping"
                                " cost")),
    )
    type = models.CharField(
        _("Type"), max_length=128, choices=TYPE_CHOICES, blank=True)

    # The value to use with the designated type. This can be either an integer
    # (eg for multibuy) or a decimal (eg an amount) which is slightly
    # confusing.
    value = fields.PositiveDecimalField(
        _("Value"), decimal_places=2, max_digits=12, null=True, blank=True)

    # If this is not set, then there is no upper limit on how many products
    # can be discounted by this benefit.
    max_affected_items = models.PositiveIntegerField(
        _("Max Affected Items"), blank=True, null=True,
        help_text=_("Set this to prevent the discount consuming all items "
                    "within the range that are in the basket."))

    # A custom benefit class can be used instead. This means the
    # type/value/max_affected_items fields should all be None.
    proxy_class = fields.NullCharField(
        _("Custom class"), max_length=255, unique=True, default=None)

    class Meta:
        app_label = 'offer'
        verbose_name = _("Benefit")
        verbose_name_plural = _("Benefits")

    def proxy(self):
        """
        Return a proxy-class instance for this benefit's type (or
        custom proxy_class), mirroring this row's field values.
        """
        klassmap = {
            self.PERCENTAGE: PercentageDiscountBenefit,
            self.FIXED: AbsoluteDiscountBenefit,
            self.MULTIBUY: MultibuyDiscountBenefit,
            self.FIXED_PRICE: FixedPriceBenefit,
            self.SHIPPING_ABSOLUTE: ShippingAbsoluteDiscountBenefit,
            self.SHIPPING_FIXED_PRICE: ShippingFixedPriceBenefit,
            self.SHIPPING_PERCENTAGE: ShippingPercentageDiscountBenefit}
        # Short-circuit logic if current class is already a proxy class.
        if self.__class__ in klassmap.values():
            return self
        # Copy the instance's field values (dropping private/_state attrs).
        field_dict = dict(self.__dict__)
        for field in list(field_dict.keys()):
            if field.startswith('_'):
                del field_dict[field]
        if self.proxy_class:
            klass = load_proxy(self.proxy_class)
            # Short-circuit again.
            if self.__class__ == klass:
                return self
            return klass(**field_dict)
        if self.type in klassmap:
            return klassmap[self.type](**field_dict)
        raise RuntimeError("Unrecognised benefit type (%s)" % self.type)

    def __str__(self):
        return self.name

    @property
    def name(self):
        """
        A plaintext description of the benefit. Every proxy class has to
        implement it.

        This is used in the dropdowns within the offer dashboard.
        """
        return self.proxy().name

    @property
    def description(self):
        """
        A description of the benefit.

        Defaults to the name. May contain HTML.
        """
        return self.name

    def apply(self, basket, condition, offer):
        # Stub: proxy subclasses implement the actual discount logic.
        return ZERO_DISCOUNT

    def apply_deferred(self, basket, order, application):
        # Stub: proxy subclasses implement post-order (deferred) benefits.
        return None

    def clean(self):
        # Dispatch to a type-specific clean_* method if one exists, e.g.
        # type "Shipping absolute" -> clean_shipping_absolute().
        if not self.type:
            return
        method_name = 'clean_%s' % self.type.lower().replace(' ', '_')
        if hasattr(self, method_name):
            getattr(self, method_name)()

    def clean_multibuy(self):
        # Multibuy needs a range but no value or item cap.
        if not self.range:
            raise ValidationError(
                _("Multibuy benefits require a product range"))
        if self.value:
            raise ValidationError(
                _("Multibuy benefits don't require a value"))
        if self.max_affected_items:
            raise ValidationError(
                _("Multibuy benefits don't require a 'max affected items' "
                  "attribute"))

    def clean_percentage(self):
        # Percentage discounts need a range and a value of at most 100.
        if not self.range:
            raise ValidationError(
                _("Percentage benefits require a product range"))
        if self.value > 100:
            raise ValidationError(
                _("Percentage discount cannot be greater than 100"))

    def clean_shipping_absolute(self):
        # Shipping benefits apply to the shipping charge, not products.
        if not self.value:
            raise ValidationError(
                _("A discount value is required"))
        if self.range:
            raise ValidationError(
                _("No range should be selected as this benefit does not "
                  "apply to products"))
        if self.max_affected_items:
            raise ValidationError(
                _("Shipping discounts don't require a 'max affected items' "
                  "attribute"))

    def clean_shipping_percentage(self):
        # Shipping percentage: <= 100, no range, no item cap.
        if self.value > 100:
            raise ValidationError(
                _("Percentage discount cannot be greater than 100"))
        if self.range:
            raise ValidationError(
                _("No range should be selected as this benefit does not "
                  "apply to products"))
        if self.max_affected_items:
            raise ValidationError(
                _("Shipping discounts don't require a 'max affected items' "
                  "attribute"))

    def clean_shipping_fixed_price(self):
        # Fixed shipping price: no range, no item cap.
        if self.range:
            raise ValidationError(
                _("No range should be selected as this benefit does not "
                  "apply to products"))
        if self.max_affected_items:
            raise ValidationError(
                _("Shipping discounts don't require a 'max affected items' "
                  "attribute"))

    def clean_fixed_price(self):
        # Fixed price benefits use the condition's range instead.
        if self.range:
            raise ValidationError(
                _("No range should be selected as the condition range will "
                  "be used instead."))

    def clean_absolute(self):
        # Absolute discounts need both a range and a value.
        if not self.range:
            raise ValidationError(
                _("Fixed discount benefits require a product range"))
        if not self.value:
            raise ValidationError(
                _("Fixed discount benefits require a value"))

    def round(self, amount):
        """
        Apply rounding to discount amount

        A project-level OSCAR_OFFER_ROUNDING_FUNCTION setting takes
        precedence; otherwise round down to 2 decimal places.
        """
        if hasattr(settings, 'OSCAR_OFFER_ROUNDING_FUNCTION'):
            return settings.OSCAR_OFFER_ROUNDING_FUNCTION(amount)
        return amount.quantize(D('.01'), ROUND_DOWN)

    def _effective_max_affected_items(self):
        """
        Return the maximum number of items that can have a discount applied
        during the application of this benefit

        Falls back to a hard-coded cap of 10000 when no limit is set.
        """
        return self.max_affected_items if self.max_affected_items else 10000

    def can_apply_benefit(self, line):
        """
        Determines whether the benefit can be applied to a given basket line
        """
        return line.stockrecord and line.product.is_discountable

    def get_applicable_lines(self, offer, basket, range=None):
        """
        Return the basket lines that are available to be discounted

        :basket: The basket
        :range: The range of products to use for filtering.  The fixed-price
                benefit ignores its range and uses the condition range

        Returns (unit price, line) tuples sorted cheapest-first; lines with
        zero price or no undiscounted quantity are skipped.
        """
        if range is None:
            range = self.range
        line_tuples = []
        for line in basket.all_lines():
            product = line.product

            if (not range.contains(product) or
                    not self.can_apply_benefit(line)):
                continue

            price = unit_price(offer, line)
            if not price:
                # Avoid zero price products
                continue
            if line.quantity_without_discount == 0:
                continue
            line_tuples.append((price, line))

        # We sort lines to be cheapest first to ensure consistent applications
        return sorted(line_tuples, key=operator.itemgetter(0))

    def shipping_discount(self, charge):
        # Stub: shipping benefit proxies override this.
        return D('0.00')
@python_2_unicode_compatible
class Range(models.Model):
    """
    Represents a range of products that can be used within an offer.

    Ranges only support adding parent or stand-alone products. Offers will
    consider child products automatically.
    """
    name = models.CharField(_("Name"), max_length=128, unique=True)
    slug = fields.AutoSlugField(
        _("Slug"), max_length=128, unique=True, populate_from="name")

    description = models.TextField(blank=True)

    # Whether this range is public
    is_public = models.BooleanField(
        _('Is public?'), default=False,
        help_text=_("Public ranges have a customer-facing page"))

    includes_all_products = models.BooleanField(
        _('Includes all products?'), default=False)

    # Membership is the union of included products/classes/categories,
    # minus excluded_products - see contains_product() / all_products().
    included_products = models.ManyToManyField(
        'catalogue.Product', related_name='includes', blank=True,
        verbose_name=_("Included Products"), through='offer.RangeProduct')
    excluded_products = models.ManyToManyField(
        'catalogue.Product', related_name='excludes', blank=True,
        verbose_name=_("Excluded Products"))
    classes = models.ManyToManyField(
        'catalogue.ProductClass', related_name='classes', blank=True,
        verbose_name=_("Product Types"))
    included_categories = models.ManyToManyField(
        'catalogue.Category', related_name='includes', blank=True,
        verbose_name=_("Included Categories"))

    # Allow a custom range instance to be specified
    proxy_class = fields.NullCharField(
        _("Custom class"), max_length=255, default=None, unique=True)

    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)

    # Per-instance caches of id sets, lazily filled by the _*_ids() helpers.
    __included_product_ids = None
    __excluded_product_ids = None
    __class_ids = None
    __category_ids = None

    objects = models.Manager()
    browsable = BrowsableRangeManager()

    class Meta:
        app_label = 'offer'
        verbose_name = _("Range")
        verbose_name_plural = _("Ranges")

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Customer-facing range page, keyed by slug.
        return reverse(
            'catalogue:range', kwargs={'slug': self.slug})

    def add_product(self, product, display_order=None):
        """ Add product to the range

        When adding product that is already in the range, prevent re-adding it.
        If display_order is specified, update it.

        Default display_order for a new product in the range is 0; this puts
        the product at the top of the list.
        """
        if product.is_child:
            raise ValueError(
                "Ranges can only contain parent and stand-alone products.")
        initial_order = display_order or 0
        relation, __ = RangeProduct.objects.get_or_create(
            range=self, product=product,
            defaults={'display_order': initial_order})

        if (display_order is not None and
                relation.display_order != display_order):
            relation.display_order = display_order
            relation.save()

    def remove_product(self, product):
        """
        Remove product from range. To save on queries, this function does not
        check if the product is in fact in the range.
        """
        RangeProduct.objects.filter(range=self, product=product).delete()

    def contains_product(self, product):  # noqa (too complex (12))
        """
        Check whether the passed product is part of this range.
        """
        # Child products are never part of the range, but the parent may be.
        if product.is_child:
            product = product.parent

        # Delegate to a proxy class if one is provided
        if self.proxy_class:
            return load_proxy(self.proxy_class)().contains_product(product)

        # Exclusions take precedence over every inclusion rule.
        excluded_product_ids = self._excluded_product_ids()
        if product.id in excluded_product_ids:
            return False
        if self.includes_all_products:
            return True
        if product.product_class_id in self._class_ids():
            return True
        included_product_ids = self._included_product_ids()
        if product.id in included_product_ids:
            return True
        # Finally, check category membership (including descendants).
        test_categories = self.included_categories.all()
        if test_categories:
            for category in product.get_categories().all():
                for test_category in test_categories:
                    if category == test_category \
                            or category.is_descendant_of(test_category):
                        return True
        return False

    # Shorter alias
    contains = contains_product

    def __get_pks_and_child_pks(self, queryset):
        """
        Expects a product queryset; gets the primary keys of the passed
        products and their children.

        Verbose, but database and memory friendly.
        """
        # One query to get parent and children; [(4, None), (5, 10), (5, 11)]
        pk_tuples_iterable = queryset.values_list('pk', 'children__pk')
        # Flatten list without unpacking; [4, None, 5, 10, 5, 11]
        flat_iterable = itertools.chain.from_iterable(pk_tuples_iterable)
        # Ensure uniqueness and remove None; {4, 5, 10, 11}
        return set(flat_iterable) - {None}

    def _included_product_ids(self):
        # Cached set of included product (and child) ids; empty pre-save.
        if not self.id:
            return []
        if self.__included_product_ids is None:
            self.__included_product_ids = self.__get_pks_and_child_pks(
                self.included_products)
        return self.__included_product_ids

    def _excluded_product_ids(self):
        # Cached set of excluded product (and child) ids; empty pre-save.
        if not self.id:
            return []
        if self.__excluded_product_ids is None:
            self.__excluded_product_ids = self.__get_pks_and_child_pks(
                self.excluded_products)
        return self.__excluded_product_ids

    def _class_ids(self):
        # Cached ids of the included product classes.
        if None is self.__class_ids:
            self.__class_ids = self.classes.values_list('pk', flat=True)
        return self.__class_ids

    def _category_ids(self):
        # Cached ids of the included categories plus all their descendants.
        if self.__category_ids is None:
            category_ids_list = list(
                self.included_categories.values_list('pk', flat=True))
            for category in self.included_categories.all():
                children_ids = category.get_descendants().values_list(
                    'pk', flat=True)
                category_ids_list.extend(list(children_ids))

            self.__category_ids = category_ids_list

        return self.__category_ids

    def num_products(self):
        # Delegate to a proxy class if one is provided
        if self.proxy_class:
            return load_proxy(self.proxy_class)().num_products()
        if self.includes_all_products:
            return None
        return self.all_products().count()

    def all_products(self):
        """
        Return a queryset containing all the products in the range

        This includes included_products plus the products contained in the
        included classes and categories, minus the products in
        excluded_products.
        """
        if self.proxy_class:
            return load_proxy(self.proxy_class)().all_products()

        Product = get_model("catalogue", "Product")
        if self.includes_all_products:
            # Filter out child products
            return Product.browsable.all()

        return Product.objects.filter(
            Q(id__in=self._included_product_ids()) |
            Q(product_class_id__in=self._class_ids()) |
            Q(productcategory__category_id__in=self._category_ids())
        ).exclude(id__in=self._excluded_product_ids())

    @property
    def is_editable(self):
        """
        Test whether this product can be edited in the dashboard
        """
        # Proxy-class ranges are code-defined and hence read-only.
        return not self.proxy_class
class RangeProduct(models.Model):
    """ Allow ordering products inside ranges """
    range = models.ForeignKey('offer.Range')
    product = models.ForeignKey('catalogue.Product')
    display_order = models.IntegerField(default=0)

    class Meta:
        app_label = 'offer'
        # A product can appear at most once in a given range.
        unique_together = ('range', 'product')
# ==========
# Conditions
# ==========
class CountCondition(Condition):
    """
    An offer condition dependent on the NUMBER of matching items from the
    basket.
    """
    _description = _("Basket includes %(count)d item(s) from %(range)s")

    @property
    def name(self):
        return self._description % {
            'count': self.value,
            'range': six.text_type(self.range).lower()}

    @property
    def description(self):
        return self._description % {
            'count': self.value,
            'range': range_anchor(self.range)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Count condition")
        verbose_name_plural = _("Count conditions")

    def is_satisfied(self, offer, basket):
        """
        Determines whether a given basket meets this condition
        """
        # Count undiscounted quantities of in-range lines until the
        # threshold is reached.
        num_matches = 0
        for line in basket.all_lines():
            if (self.can_apply_condition(line)
                    and line.quantity_without_discount > 0):
                num_matches += line.quantity_without_discount
            if num_matches >= self.value:
                return True
        return False

    def _get_num_matches(self, basket):
        # Memoised count of matching (undiscounted) items in the basket.
        if hasattr(self, '_num_matches'):
            return getattr(self, '_num_matches')
        num_matches = 0
        for line in basket.all_lines():
            if (self.can_apply_condition(line)
                    and line.quantity_without_discount > 0):
                num_matches += line.quantity_without_discount
        self._num_matches = num_matches
        return num_matches

    def is_partially_satisfied(self, offer, basket):
        num_matches = self._get_num_matches(basket)
        return 0 < num_matches < self.value

    def get_upsell_message(self, offer, basket):
        num_matches = self._get_num_matches(basket)
        delta = self.value - num_matches
        return ungettext('Buy %(delta)d more product from %(range)s',
                         'Buy %(delta)d more products from %(range)s', delta) \
            % {'delta': delta, 'range': self.range}

    def consume_items(self, offer, basket, affected_lines):
        """
        Marks items within the basket lines as consumed so they
        can't be reused in other offers.

        :basket: The basket
        :affected_lines: The lines that have been affected by the discount.
                         This should be list of tuples (line, discount, qty)
        """
        # We need to count how many items have already been consumed as part of
        # applying the benefit, so we don't consume too many items.
        num_consumed = 0
        for line, __, quantity in affected_lines:
            num_consumed += quantity
        to_consume = max(0, self.value - num_consumed)
        if to_consume == 0:
            return

        # Consume the most expensive matching lines first.
        for __, line in self.get_applicable_lines(offer, basket,
                                                  most_expensive_first=True):
            quantity_to_consume = min(line.quantity_without_discount,
                                      to_consume)
            line.consume(quantity_to_consume)
            to_consume -= quantity_to_consume
            if to_consume == 0:
                break
class CoverageCondition(Condition):
    """
    An offer condition dependent on the number of DISTINCT matching items from
    the basket.
    """
    _description = _("Basket includes %(count)d distinct item(s) from"
                     " %(range)s")

    @property
    def name(self):
        return self._description % {
            'count': self.value,
            'range': six.text_type(self.range).lower()}

    @property
    def description(self):
        return self._description % {
            'count': self.value,
            'range': range_anchor(self.range)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Coverage Condition")
        verbose_name_plural = _("Coverage Conditions")

    def is_satisfied(self, offer, basket):
        """
        Determines whether a given basket meets this condition
        """
        # Collect distinct in-range product ids until the threshold is hit.
        covered_ids = []
        for line in basket.all_lines():
            if not line.is_available_for_discount:
                continue
            product = line.product
            if (self.can_apply_condition(line) and product.id not in
                    covered_ids):
                covered_ids.append(product.id)
            if len(covered_ids) >= self.value:
                return True
        return False

    def _get_num_covered_products(self, basket):
        # Number of distinct, discountable, in-range products in the basket.
        covered_ids = []
        for line in basket.all_lines():
            if not line.is_available_for_discount:
                continue
            product = line.product
            if (self.can_apply_condition(line) and product.id not in
                    covered_ids):
                covered_ids.append(product.id)
        return len(covered_ids)

    def get_upsell_message(self, offer, basket):
        delta = self.value - self._get_num_covered_products(basket)
        return ungettext('Buy %(delta)d more product from %(range)s',
                         'Buy %(delta)d more products from %(range)s', delta) \
            % {'delta': delta, 'range': self.range}

    def is_partially_satisfied(self, offer, basket):
        return 0 < self._get_num_covered_products(basket) < self.value

    def consume_items(self, offer, basket, affected_lines):
        """
        Marks items within the basket lines as consumed so they
        can't be reused in other offers.
        """
        # Determine products that have already been consumed by applying the
        # benefit
        consumed_products = []
        for line, __, quantity in affected_lines:
            consumed_products.append(line.product)

        to_consume = max(0, self.value - len(consumed_products))
        if to_consume == 0:
            return

        for line in basket.all_lines():
            product = line.product
            if not self.can_apply_condition(line):
                continue
            if product in consumed_products:
                continue
            if not line.is_available_for_discount:
                continue
            # Only consume a quantity of 1 from each line
            line.consume(1)
            consumed_products.append(product)
            to_consume -= 1
            if to_consume == 0:
                break

    def get_value_of_satisfying_items(self, offer, basket):
        # Sum unit prices of the distinct items that satisfy the condition,
        # stopping once the required coverage count is reached.
        covered_ids = []
        value = D('0.00')
        for line in basket.all_lines():
            if (self.can_apply_condition(line) and line.product.id not in
                    covered_ids):
                covered_ids.append(line.product.id)
                value += unit_price(offer, line)
            if len(covered_ids) >= self.value:
                return value
        return value
class ValueCondition(Condition):
    """
    An offer condition dependent on the VALUE of matching items from the
    basket.
    """
    _description = _("Basket includes %(amount)s from %(range)s")

    @property
    def name(self):
        return self._description % {
            'amount': currency(self.value),
            'range': six.text_type(self.range).lower()}

    @property
    def description(self):
        return self._description % {
            'amount': currency(self.value),
            'range': range_anchor(self.range)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Value condition")
        verbose_name_plural = _("Value conditions")

    def is_satisfied(self, offer, basket):
        """
        Determine whether a given basket meets this condition
        """
        # Accumulate the undiscounted value of in-range lines until the
        # threshold is reached.
        value_of_matches = D('0.00')
        for line in basket.all_lines():
            if (self.can_apply_condition(line) and
                    line.quantity_without_discount > 0):
                price = unit_price(offer, line)
                value_of_matches += price * int(line.quantity_without_discount)
            if value_of_matches >= self.value:
                return True
        return False

    def _get_value_of_matches(self, offer, basket):
        # Memoised total undiscounted value of matching lines.
        if hasattr(self, '_value_of_matches'):
            return getattr(self, '_value_of_matches')
        value_of_matches = D('0.00')
        for line in basket.all_lines():
            if (self.can_apply_condition(line) and
                    line.quantity_without_discount > 0):
                price = unit_price(offer, line)
                value_of_matches += price * int(line.quantity_without_discount)
        self._value_of_matches = value_of_matches
        return value_of_matches

    def is_partially_satisfied(self, offer, basket):
        value_of_matches = self._get_value_of_matches(offer, basket)
        return D('0.00') < value_of_matches < self.value

    def get_upsell_message(self, offer, basket):
        value_of_matches = self._get_value_of_matches(offer, basket)
        return _('Spend %(value)s more from %(range)s') % {
            'value': currency(self.value - value_of_matches),
            'range': self.range}

    def consume_items(self, offer, basket, affected_lines):
        """
        Marks items within the basket lines as consumed so they
        can't be reused in other offers.

        We allow lines to be passed in as sometimes we want them sorted
        in a specific order.
        """
        # Determine value of items already consumed as part of discount
        value_consumed = D('0.00')
        for line, __, qty in affected_lines:
            price = unit_price(offer, line)
            value_consumed += price * qty

        to_consume = max(0, self.value - value_consumed)
        if to_consume == 0:
            return

        # Consume the most expensive lines first; ROUND_UP guarantees the
        # consumed quantity covers the remaining value.
        for price, line in self.get_applicable_lines(
                offer, basket, most_expensive_first=True):
            quantity_to_consume = min(
                line.quantity_without_discount,
                (to_consume / price).quantize(D(1), ROUND_UP))
            line.consume(quantity_to_consume)
            to_consume -= price * quantity_to_consume
            if to_consume <= 0:
                break
# ============
# Result types
# ============
class ApplicationResult(object):
    """
    Base class for the result of applying an offer to a basket.

    Subclasses set ``affects`` to one of BASKET, SHIPPING or POST_ORDER
    to say what part of the order the application touches, and override
    ``is_final`` / ``is_successful`` / ``discount`` as appropriate.
    """
    is_final = is_successful = False
    # Monetary discount off the basket total (zero by default).
    discount = D('0.00')
    description = None

    # An offer application can affect one of three things: the basket
    # total, the shipping total, or a post-order action.
    BASKET, SHIPPING, POST_ORDER = 0, 1, 2
    affects = None

    @property
    def affects_basket(self):
        return self.BASKET == self.affects

    @property
    def affects_shipping(self):
        return self.SHIPPING == self.affects

    @property
    def affects_post_order(self):
        return self.POST_ORDER == self.affects
class BasketDiscount(ApplicationResult):
    """
    For when an offer application leads to a simple discount off the basket's
    total
    """
    affects = ApplicationResult.BASKET

    def __init__(self, amount):
        self.discount = amount

    @property
    def is_successful(self):
        # Only a strictly positive discount counts as a success.
        return self.discount > 0

    def __str__(self):
        return '<Basket discount of %s>' % self.discount

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.discount)
# Helper global as returning zero discount is quite common
# NOTE(review): this is a shared instance -- callers must treat it as
# read-only and never mutate its ``discount`` attribute.
ZERO_DISCOUNT = BasketDiscount(D('0.00'))
class ShippingDiscount(ApplicationResult):
    """
    For when an offer application leads to a discount from the shipping cost
    """
    # The actual amount is computed later by the shipping benefit's
    # ``shipping_discount()`` method; this result object only flags the
    # outcome.
    is_successful = is_final = True
    affects = ApplicationResult.SHIPPING
# Shared singleton result -- it carries no per-application state.
SHIPPING_DISCOUNT = ShippingDiscount()
class PostOrderAction(ApplicationResult):
    """
    For when an offer condition is met but the benefit is deferred until after
    the order has been placed. Eg buy 2 books and get 100 loyalty points.
    """
    # Marked final and successful up-front: there is no monetary discount to
    # compute (``discount`` stays at the base-class zero).
    is_final = is_successful = True
    affects = ApplicationResult.POST_ORDER
    def __init__(self, description):
        # Human-readable description of the deferred benefit.
        self.description = description
# ========
# Benefits
# ========
class PercentageDiscountBenefit(Benefit):
    """
    An offer benefit that gives a percentage discount
    """
    _description = _("%(value)s%% discount on %(range)s")

    @property
    def name(self):
        # Name variant using the plain range name.
        return self._description % {
            'value': self.value,
            'range': self.range.name}

    @property
    def description(self):
        # Description variant with the range rendered via range_anchor.
        return self._description % {
            'value': self.value,
            'range': range_anchor(self.range)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Percentage discount benefit")
        verbose_name_plural = _("Percentage discount benefits")

    def apply(self, basket, condition, offer, discount_percent=None,
              max_total_discount=None):
        """
        Discount each applicable line by ``discount_percent`` (defaults to
        ``self.value``), optionally capped at ``max_total_discount``, and
        return a :class:`BasketDiscount` for the total amount given.
        """
        if discount_percent is None:
            discount_percent = self.value
        # None means "no cap"; note that ``None == 0`` is False, so the
        # early-exit check below only triggers once a real cap is exhausted.
        discount_amount_available = max_total_discount
        line_tuples = self.get_applicable_lines(offer, basket)
        discount = D('0.00')
        affected_items = 0
        max_affected_items = self._effective_max_affected_items()
        affected_lines = []
        for price, line in line_tuples:
            if affected_items >= max_affected_items:
                break
            if discount_amount_available == 0:
                break
            quantity_affected = min(line.quantity_without_discount,
                                    max_affected_items - affected_items)
            line_discount = self.round(discount_percent / D('100.0') * price
                                       * int(quantity_affected))
            if discount_amount_available is not None:
                # Respect whatever remains of the cap.
                line_discount = min(line_discount, discount_amount_available)
                discount_amount_available -= line_discount
            apply_discount(line, line_discount, quantity_affected)
            affected_lines.append((line, line_discount, quantity_affected))
            affected_items += quantity_affected
            discount += line_discount
        if discount > 0:
            # Mark condition items as consumed so they can't also trigger
            # other offers.
            condition.consume_items(offer, basket, affected_lines)
        return BasketDiscount(discount)
class AbsoluteDiscountBenefit(Benefit):
    """
    An offer benefit that gives an absolute discount
    """
    _description = _("%(value)s discount on %(range)s")

    @property
    def name(self):
        # Name variant using the plain (lower-cased) range name.
        return self._description % {
            'value': currency(self.value),
            'range': self.range.name.lower()}

    @property
    def description(self):
        # Description variant with the range rendered via range_anchor.
        return self._description % {
            'value': currency(self.value),
            'range': range_anchor(self.range)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Absolute discount benefit")
        verbose_name_plural = _("Absolute discount benefits")

    def apply(self, basket, condition, offer, discount_amount=None,
              max_total_discount=None):
        """
        Spread ``discount_amount`` (defaults to ``self.value``) over the
        applicable lines, weighted by line value, and return the resulting
        :class:`BasketDiscount`.
        """
        if discount_amount is None:
            discount_amount = self.value
        # Fetch basket lines that are in the range and available to be used in
        # an offer.
        line_tuples = self.get_applicable_lines(offer, basket)
        # Determine which lines can have the discount applied to them
        max_affected_items = self._effective_max_affected_items()
        num_affected_items = 0
        affected_items_total = D('0.00')
        lines_to_discount = []
        for price, line in line_tuples:
            if num_affected_items >= max_affected_items:
                break
            qty = min(line.quantity_without_discount,
                      max_affected_items - num_affected_items)
            lines_to_discount.append((line, price, qty))
            num_affected_items += qty
            affected_items_total += qty * price
        # Ensure we don't try to apply a discount larger than the total of the
        # matching items.
        discount = min(discount_amount, affected_items_total)
        if max_total_discount is not None:
            discount = min(discount, max_total_discount)
        if discount == 0:
            return ZERO_DISCOUNT
        # Apply discount equally amongst them
        affected_lines = []
        applied_discount = D('0.00')
        for i, (line, price, qty) in enumerate(lines_to_discount):
            if i == len(lines_to_discount) - 1:
                # If last line, then take the delta as the discount to ensure
                # the total discount is correct and doesn't mismatch due to
                # rounding.
                line_discount = discount - applied_discount
            else:
                # Calculate a weighted discount for the line
                line_discount = self.round(
                    ((price * qty) / affected_items_total) * discount)
            apply_discount(line, line_discount, qty)
            affected_lines.append((line, line_discount, qty))
            applied_discount += line_discount
        condition.consume_items(offer, basket, affected_lines)
        return BasketDiscount(discount)
class FixedPriceBenefit(Benefit):
    """
    An offer benefit that gives the items in the condition for a
    fixed price.  This is useful for "bundle" offers.

    Note that we ignore the benefit range here and only give a fixed price
    for the products in the condition range.  The condition cannot be a value
    condition.

    We also ignore the max_affected_items setting.
    """
    _description = _("The products that meet the condition are sold "
                     "for %(amount)s")

    @property
    def name(self):
        return self._description % {
            'amount': currency(self.value)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Fixed price benefit")
        verbose_name_plural = _("Fixed price benefits")

    def apply(self, basket, condition, offer):  # noqa (too complex (10))
        """
        Sell the condition's products for ``self.value`` in total, returning
        the difference from their actual value as a :class:`BasketDiscount`.
        """
        if isinstance(condition, ValueCondition):
            # Fixed-price bundles are only meaningful for count/coverage
            # conditions.
            return ZERO_DISCOUNT
        # Fetch basket lines that are in the range and available to be used in
        # an offer.
        line_tuples = self.get_applicable_lines(offer, basket,
                                                range=condition.range)
        if not line_tuples:
            return ZERO_DISCOUNT
        # Determine the lines to consume
        num_permitted = int(condition.value)
        num_affected = 0
        value_affected = D('0.00')
        covered_lines = []
        for price, line in line_tuples:
            if isinstance(condition, CoverageCondition):
                # Coverage conditions take one unit from each distinct line.
                quantity_affected = 1
            else:
                quantity_affected = min(
                    line.quantity_without_discount,
                    num_permitted - num_affected)
            num_affected += quantity_affected
            value_affected += quantity_affected * price
            covered_lines.append((price, line, quantity_affected))
            if num_affected >= num_permitted:
                break
        # Discount is whatever the covered items are worth above the fixed
        # price; never negative.
        discount = max(value_affected - self.value, D('0.00'))
        if not discount:
            return ZERO_DISCOUNT
        # Apply discount to the affected lines
        discount_applied = D('0.00')
        last_line = covered_lines[-1][1]
        for price, line, quantity in covered_lines:
            if line == last_line:
                # If last line, we just take the difference to ensure that
                # rounding doesn't lead to an off-by-one error
                line_discount = discount - discount_applied
            else:
                line_discount = self.round(
                    discount * (price * quantity) / value_affected)
            apply_discount(line, line_discount, quantity)
            discount_applied += line_discount
        return BasketDiscount(discount)
class MultibuyDiscountBenefit(Benefit):
    """
    An offer benefit that gives the cheapest applicable product for free.
    """
    _description = _("Cheapest product from %(range)s is free")

    @property
    def name(self):
        return self._description % {
            'range': self.range.name.lower()}

    @property
    def description(self):
        return self._description % {
            'range': range_anchor(self.range)}

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Multibuy discount benefit")
        verbose_name_plural = _("Multibuy discount benefits")

    def apply(self, basket, condition, offer):
        """
        Give one unit of the cheapest applicable line away for free.
        """
        line_tuples = self.get_applicable_lines(offer, basket)
        if not line_tuples:
            return ZERO_DISCOUNT
        # Cheapest line gives free product
        # NOTE(review): this assumes get_applicable_lines() returns tuples
        # sorted cheapest-first by default -- confirm against its definition.
        discount, line = line_tuples[0]
        apply_discount(line, discount, 1)
        affected_lines = [(line, discount, 1)]
        condition.consume_items(offer, basket, affected_lines)
        return BasketDiscount(discount)
# =================
# Shipping benefits
# =================
class ShippingBenefit(Benefit):
    """
    Base class for benefits that discount shipping rather than basket lines.
    """
    def apply(self, basket, condition, offer):
        # No basket lines are discounted here; consume the condition's items
        # and signal a shipping discount.  The amount itself is computed later
        # by the subclass's ``shipping_discount()`` method.
        condition.consume_items(offer, basket, affected_lines=())
        return SHIPPING_DISCOUNT
    class Meta:
        app_label = 'offer'
        proxy = True
class ShippingAbsoluteDiscountBenefit(ShippingBenefit):
    """
    Shipping benefit that takes a fixed amount off the shipping charge.
    """
    _description = _("%(amount)s off shipping cost")

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Shipping absolute discount benefit")
        verbose_name_plural = _("Shipping absolute discount benefits")

    @property
    def name(self):
        return self._description % {'amount': currency(self.value)}

    def shipping_discount(self, charge):
        # The discount can never exceed the charge itself.
        return min(charge, self.value)
class ShippingFixedPriceBenefit(ShippingBenefit):
    """
    Shipping benefit that sells shipping at a fixed price.
    """
    _description = _("Get shipping for %(amount)s")

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Fixed price shipping benefit")
        verbose_name_plural = _("Fixed price shipping benefits")

    @property
    def name(self):
        return self._description % {'amount': currency(self.value)}

    def shipping_discount(self, charge):
        # Charges already at or below the fixed price get no discount, so the
        # result is never negative.
        return D('0.00') if charge < self.value else charge - self.value
class ShippingPercentageDiscountBenefit(ShippingBenefit):
    """
    Shipping benefit that takes a percentage off the shipping charge.
    """
    _description = _("%(value)s%% off of shipping cost")

    class Meta:
        app_label = 'offer'
        proxy = True
        verbose_name = _("Shipping percentage discount benefit")
        verbose_name_plural = _("Shipping percentage discount benefits")

    @property
    def name(self):
        return self._description % {'value': self.value}

    def shipping_discount(self, charge):
        # Quantize to two decimal places for a money value.
        return (charge * self.value / D('100.0')).quantize(D('0.01'))
class RangeProductFileUpload(models.Model):
    """
    Audit record for an uploaded file of product identifiers (SKUs/UPCs)
    used to populate a Range.
    """
    range = models.ForeignKey('offer.Range', related_name='file_uploads',
                              verbose_name=_("Range"))
    filepath = models.CharField(_("File Path"), max_length=255)
    size = models.PositiveIntegerField(_("Size"))
    uploaded_by = models.ForeignKey(AUTH_USER_MODEL,
                                    verbose_name=_("Uploaded By"))
    date_uploaded = models.DateTimeField(_("Date Uploaded"), auto_now_add=True)

    # Processing status values double as their own display labels.
    PENDING, FAILED, PROCESSED = 'Pending', 'Failed', 'Processed'
    choices = (
        (PENDING, PENDING),
        (FAILED, FAILED),
        (PROCESSED, PROCESSED),
    )
    status = models.CharField(_("Status"), max_length=32, choices=choices,
                              default=PENDING)
    error_message = models.CharField(_("Error Message"), max_length=255,
                                     blank=True)

    # Post-processing audit fields
    date_processed = models.DateTimeField(_("Date Processed"), null=True)
    num_new_skus = models.PositiveIntegerField(_("Number of New SKUs"),
                                               null=True)
    num_unknown_skus = models.PositiveIntegerField(_("Number of Unknown SKUs"),
                                                   null=True)
    num_duplicate_skus = models.PositiveIntegerField(
        _("Number of Duplicate SKUs"), null=True)

    class Meta:
        app_label = 'offer'
        ordering = ('-date_uploaded',)
        verbose_name = _("Range Product Uploaded File")
        verbose_name_plural = _("Range Product Uploaded Files")

    @property
    def filename(self):
        return os.path.basename(self.filepath)

    def mark_as_failed(self, message=None):
        """Record a processing failure along with an optional message."""
        self.date_processed = now()
        # NOTE(review): error_message is a non-null CharField; callers should
        # pass a string -- confirm no caller relies on the None default.
        self.error_message = message
        self.status = self.FAILED
        self.save()

    def mark_as_processed(self, num_new, num_unknown, num_duplicate):
        """Record successful processing along with the audit counts."""
        self.status = self.PROCESSED
        self.date_processed = now()
        self.num_new_skus = num_new
        self.num_unknown_skus = num_unknown
        self.num_duplicate_skus = num_duplicate
        self.save()

    def was_processing_successful(self):
        return self.status == self.PROCESSED

    def process(self):
        """
        Process the file upload and add products to the range
        """
        all_ids = set(self.extract_ids())
        products = self.range.included_products.all()
        existing_skus = products.values_list(
            'stockrecords__partner_sku', flat=True)
        existing_skus = set(filter(bool, existing_skus))
        existing_upcs = products.values_list('upc', flat=True)
        existing_upcs = set(filter(bool, existing_upcs))
        existing_ids = existing_skus.union(existing_upcs)
        new_ids = all_ids - existing_ids
        Product = models.get_model('catalogue', 'Product')
        products = Product._default_manager.filter(
            models.Q(stockrecords__partner_sku__in=new_ids) |
            models.Q(upc__in=new_ids))
        for product in products:
            self.range.add_product(product)
        # Processing stats
        found_skus = products.values_list(
            'stockrecords__partner_sku', flat=True)
        found_skus = set(filter(bool, found_skus))
        found_upcs = set(filter(bool, products.values_list('upc', flat=True)))
        found_ids = found_skus.union(found_upcs)
        missing_ids = new_ids - found_ids
        dupes = set(all_ids).intersection(existing_ids)
        self.mark_as_processed(products.count(), len(missing_ids), len(dupes))

    def extract_ids(self):
        """
        Extract all SKU- or UPC-like strings from the file
        """
        # Fix: use a context manager so the file handle is always closed
        # (the original left it open), and a raw string for the regex so the
        # escapes are explicit.  Also avoid shadowing the builtin ``id``.
        with open(self.filepath, 'r') as uploaded_file:
            for line in uploaded_file:
                for identifier in re.split(r'[^\w:\.-]', line):
                    if identifier:
                        yield identifier

    def delete_file(self):
        os.unlink(self.filepath)
|
cindyker/p2pool | refs/heads/master | p2pool/test/util/test_skiplist.py | 287 | from p2pool.util import skiplist
class NotSkipList(object):
    """Reference implementation of the SkipList lookup protocol.

    Walks backwards one position at a time instead of skipping; subclasses
    supply initial_solution/judge/get_delta/apply_delta/previous.
    """

    def __call__(self, start, *args):
        position = start
        solution = self.initial_solution(start, args)
        while True:
            verdict = self.judge(solution, args)
            if verdict == 0:
                # Solution complete.
                return self.finalize(solution)
            if verdict > 0:
                # Overshot the target -- the protocol was violated.
                raise AssertionError()
            solution = self.apply_delta(
                solution, self.get_delta(position), args)
            position = self.previous(position)

    def finalize(self, solution):
        # Identity by default; subclasses may post-process the solution.
        return solution
skiplist.SkipList
|
Salat-Cx65/python-for-android | refs/heads/master | python-build/python-libs/gdata/src/gdata/apps/groups/service.py | 137 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage groups, groups memembers and groups owners.
EmailSettingsService: Set various email settings.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
GROUP_ID_URL = BASE_URL + '/%s'
# Pagination URL for listing groups from a given start key.  Fix: this and
# MEMBER_START_URL below are referenced by GroupsService._ServiceUrl but were
# missing from this module, so any paginated request raised NameError.
GROUP_START_URL = BASE_URL + '?start=%s'
MEMBER_URL = BASE_URL + '/%s/member'
MEMBER_ID_URL = MEMBER_URL + '/%s'
# Pagination URL for listing a group's members from a given start key.
MEMBER_START_URL = MEMBER_URL + '?start=%s'
OWNER_URL = BASE_URL + '/%s/owner'
OWNER_ID_URL = OWNER_URL + '/%s'

PERMISSION_OWNER = 'Owner'
PERMISSION_MEMBER = 'Member'
PERMISSION_DOMAIN = 'Domain'
PERMISSION_ANYONE = 'Anyone'
class GroupsService(gdata.apps.service.PropertyService):
  """Client for the Google Apps Groups service."""

  def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
                  start_key, direct_only=None, domain=None):
    """Build the request URI for the 'group', 'member' or 'owner' sub-service.

    Args:
      service_type: One of 'group', 'member' or 'owner'.
      is_existed: True to address an existing entity (retrieve/update/delete),
          False for the collection URL (create/list).
      group_id: The group ID, or '' when not applicable.
      member_id: The member's email address, or '' when not applicable.
      owner_email: The owner's email address, or '' when not applicable.
      start_key: Pagination start key, or '' for the first page.
      direct_only: Optional bool restricting results to direct membership.
      domain: Optional domain override; defaults to self.domain.
    """
    if domain is None:
      domain = self.domain
    if service_type == 'group':
      if group_id != '' and is_existed:
        return GROUP_ID_URL % (domain, group_id)
      if member_id != '':
        if direct_only is not None:
          return GROUP_MEMBER_DIRECT_URL % (domain, member_id,
                                            self._Bool2Str(direct_only))
        else:
          return GROUP_MEMBER_URL % (domain, member_id)
      if start_key != '':
        # Relies on a module-level GROUP_START_URL pagination constant.
        return GROUP_START_URL % (domain, start_key)
      return BASE_URL % (domain)
    if service_type == 'member':
      if member_id != '' and is_existed:
        return MEMBER_ID_URL % (domain, group_id, member_id)
      if start_key != '':
        # Relies on a module-level MEMBER_START_URL pagination constant.
        return MEMBER_START_URL % (domain, group_id, start_key)
      return MEMBER_URL % (domain, group_id)
    if service_type == 'owner':
      if owner_email != '' and is_existed:
        return OWNER_ID_URL % (domain, group_id, owner_email)
      return OWNER_URL % (domain, group_id)

  def _Bool2Str(self, b):
    # Render a Python bool as the API's 'true'/'false'; pass None through.
    if b is None:
      return None
    return str(b is True).lower()

  def _IsExisted(self, uri):
    # Probe the URI: a missing-entity error means "does not exist"; any
    # other API error is re-raised.
    try:
      properties = self._GetProperties(uri)
      return True
    except gdata.apps.service.AppsForYourDomainException, e:
      if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
        return False
      else:
        raise e

  def CreateGroup(self, group_id, group_name, description, email_permission):
    """Create a group.

    Args:
      group_id: The ID of the group (e.g. us-sales).
      group_name: The name of the group.
      description: A description of the group
      email_permission: The subscription permission of the group.

    Returns:
      A dict containing the result of the create operation.
    """
    uri = self._ServiceUrl('group', False, group_id, '', '', '', '')
    properties = {}
    properties['groupId'] = group_id
    properties['groupName'] = group_name
    properties['description'] = description
    properties['emailPermission'] = email_permission
    return self._PostProperties(uri, properties)

  def UpdateGroup(self, group_id, group_name, description, email_permission):
    """Update a group's name, description and/or permission.

    Args:
      group_id: The ID of the group (e.g. us-sales).
      group_name: The name of the group.
      description: A description of the group
      email_permission: The subscription permission of the group.

    Returns:
      A dict containing the result of the update operation.
    """
    uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
    properties = {}
    properties['groupId'] = group_id
    properties['groupName'] = group_name
    properties['description'] = description
    properties['emailPermission'] = email_permission
    return self._PutProperties(uri, properties)

  def RetrieveGroup(self, group_id):
    """Retrieve a group based on its ID.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
    return self._GetProperties(uri)

  def RetrieveAllGroups(self):
    """Retrieve all groups in the domain.

    Args:
      None.

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('group', True, '', '', '', '', '')
    return self._GetPropertiesList(uri)

  def RetrieveGroups(self, member_id, direct_only=False):
    """Retrieve all groups that belong to the given member_id.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      direct_only: Boolean whether only return groups that this member directly belongs to.

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('group', True, '', member_id, '', '', direct_only)
    return self._GetPropertiesList(uri)

  def DeleteGroup(self, group_id):
    """Delete a group based on its ID.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the delete operation.
    """
    uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
    return self._DeleteProperties(uri)

  def AddMemberToGroup(self, member_id, group_id):
    """Add a member to a group.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the add operation.
    """
    uri = self._ServiceUrl('member', False, group_id, member_id, '', '', '')
    properties = {}
    properties['memberId'] = member_id
    return self._PostProperties(uri, properties)

  def IsMember(self, member_id, group_id):
    """Check whether the given member already exists in the given group

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      True if the member exists in the group. False otherwise.
    """
    uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
    return self._IsExisted(uri)

  def RetrieveMember(self, member_id, group_id):
    """Retrieve the given member in the given group

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
    return self._GetProperties(uri)

  def RetrieveAllMembers(self, group_id):
    """Retrieve all members in the given group.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('member', True, group_id, '', '', '', '')
    return self._GetPropertiesList(uri)

  def RemoveMemberFromGroup(self, member_id, group_id):
    """Remove the given member from the given group

    Args:
      group_id: The ID of the group (e.g. us-sales).
      member_id: The member's email address (e.g. member@example.com).

    Returns:
      A dict containing the result of the remove operation.
    """
    uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
    return self._DeleteProperties(uri)

  def AddOwnerToGroup(self, owner_email, group_id):
    """Add an owner to a group.

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the add operation.
    """
    uri = self._ServiceUrl('owner', False, group_id, '', owner_email, '', '')
    properties = {}
    properties['email'] = owner_email
    return self._PostProperties(uri, properties)

  def IsOwner(self, owner_email, group_id):
    """Check whether the given member an owner of the given group.

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      True if the member is an owner of the given group. False otherwise.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
    return self._IsExisted(uri)

  def RetrieveOwner(self, owner_email, group_id):
    """Retrieve the given owner in the given group

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
    return self._GetProperties(uri)

  def RetrieveAllOwners(self, group_id):
    """Retrieve all owners of the given group

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', '', '', '')
    return self._GetPropertiesList(uri)

  def RemoveOwnerFromGroup(self, owner_email, group_id):
    """Remove the given owner from the given group

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the remove operation.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
    return self._DeleteProperties(uri)
|
p0psicles/SickRage | refs/heads/master | lib/sqlalchemy/ext/compiler.py | 78 | # ext/compiler.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an API for creation of custom ClauseElements and compilers.
Synopsis
========
Usage involves the creation of one or more
:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or
more callables defining its compilation::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ColumnClause
class MyColumn(ColumnClause):
pass
@compiles(MyColumn)
def compile_mycolumn(element, compiler, **kw):
return "[%s]" % element.name
Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`,
the base expression element for named column objects. The ``compiles``
decorator registers itself with the ``MyColumn`` class so that it is invoked
when the object is compiled to a string::
from sqlalchemy import select
s = select([MyColumn('x'), MyColumn('y')])
print str(s)
Produces::
SELECT [x], [y]
Dialect-specific compilation rules
==================================
Compilers can also be made dialect-specific. The appropriate compiler will be
invoked for the dialect in use::
from sqlalchemy.schema import DDLElement
class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER COLUMN %s ..." % element.column.name
@compiles(AlterColumn, 'postgresql')
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, element.column.name)
The second ``visit_alter_column`` will be invoked when any ``postgresql``
dialect is used.
Compiling sub-elements of a custom expression construct
=======================================================
The ``compiler`` argument is the
:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object
can be inspected for any information about the in-progress compilation,
including ``compiler.dialect``, ``compiler.statement`` etc. The
:class:`~sqlalchemy.sql.compiler.SQLCompiler` and
:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
method which can be used for compilation of embedded attributes::
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s (%s)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select)
)
insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
print insert
Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)"
.. note::
The above ``InsertFromSelect`` construct is only an example, this actual
functionality is already available using the
:meth:`.Insert.from_select` method.
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
---------------------------------------------
SQL and DDL constructs are each compiled using different base compilers -
``SQLCompiler`` and ``DDLCompiler``. A common need is to access the
compilation rules of SQL expressions from within a DDL expression. The
``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as
below where we generate a CHECK constraint that embeds a SQL expression::
@compiles(MyConstraint)
def compile_my_constraint(constraint, ddlcompiler, **kw):
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
ddlcompiler.sql_compiler.process(constraint.expression)
)
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
==================================
Recall from the section :ref:`autocommit` that the :class:`.Engine`, when
asked to execute a construct in the absence of a user-defined transaction,
detects if the given construct represents DML or DDL, that is, a data
modification or data definition statement, which requires (or may require,
in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what
SQLAlchemy does). Checking for this is actually accomplished by checking for
the "autocommit" execution option on the construct. When building a
construct like an INSERT derivation, a new DDL type, or perhaps a stored
procedure that alters data, the "autocommit" option needs to be set in order
for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
Currently a quick way to do this is to subclass :class:`.Executable`, then
add the "autocommit" flag to the ``_execution_options`` dictionary (note this
is a "frozen" dictionary which supplies a generative ``union()`` method)::
from sqlalchemy.sql.expression import Executable, ClauseElement
class MyInsertThing(Executable, ClauseElement):
_execution_options = \\
Executable._execution_options.union({'autocommit': True})
More succinctly, if the construct is truly similar to an INSERT, UPDATE, or
DELETE, :class:`.UpdateBase` can be used, which already is a subclass
of :class:`.Executable`, :class:`.ClauseElement` and includes the
``autocommit`` flag::
from sqlalchemy.sql.expression import UpdateBase
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the
"autocommit" flag turned on.
Changing the default compilation of existing constructs
=======================================================
The compiler extension applies just as well to the existing constructs. When
overriding the compilation of a built in SQL construct, the @compiles
decorator is invoked upon the appropriate class (be sure to use the class,
i.e. ``Insert`` or ``Select``, instead of the creation function such
as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation
routine, use the appropriate visit_XXX method - this
because compiler.process() will call upon the overriding routine and cause
an endless loop. Such as, to add "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@compiles(Insert)
def prefix_inserts(insert, compiler, **kw):
return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
The above compiler will prefix all INSERT statements with "some prefix" when
compiled.
.. _type_compilation_extension:
Changing Compilation of Types
=============================
``compiler`` works for types, too, such as below where we implement the
MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
@compiles(String, 'mssql')
@compiles(VARCHAR, 'mssql')
def compile_varchar(element, compiler, **kw):
if element.length == 'max':
return "VARCHAR('max')"
else:
return compiler.visit_VARCHAR(element, **kw)
foo = Table('foo', metadata,
Column('data', VARCHAR('max'))
)
Subclassing Guidelines
======================
A big part of using the compiler extension is subclassing SQLAlchemy
expression constructs. To make this easier, the expression and
schema packages feature a set of "bases" intended for common tasks.
A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
the object will automatically have Python "comparison" behavior.
:class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a
``type`` member which is expression's return type. This can be established
at the instance level in the constructor, or at the class level if its
generally constant::
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
statements along the line of "SELECT FROM <some function>"
``FunctionElement`` adds in the ability to be used in the FROM clause of a
``select()`` construct::
from sqlalchemy.sql.expression import FunctionElement
class coalesce(FunctionElement):
name = 'coalesce'
@compiles(coalesce)
def compile(element, compiler, **kw):
return "coalesce(%s)" % compiler.process(element.clauses)
@compiles(coalesce, 'oracle')
def compile(element, compiler, **kw):
if len(element.clauses) > 2:
raise TypeError("coalesce only supports two arguments on Oracle")
return "nvl(%s)" % compiler.process(element.clauses)
* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
``execute_at()`` method, allowing the construct to be invoked during CREATE
TABLE and DROP TABLE sequences.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
should be used with any expression class that represents a "standalone"
SQL statement that can be passed directly to an ``execute()`` method. It
is already implicit within ``DDLElement`` and ``FunctionElement``.
Further Examples
================
"UTC timestamp" function
-------------------------
A function that works like "CURRENT_TIMESTAMP" except applies the
appropriate conversions so that the time is in UTC time. Timestamps are best
stored in relational databases as UTC, without time zones. UTC so that your
database doesn't think time has gone backwards in the hour when daylight
savings ends, without timezones because timezones are like character
encodings - they're best applied only at the endpoints of an application
(i.e. convert to UTC upon user input, re-apply desired timezone upon display).
For Postgresql and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
metadata = MetaData()
event = Table("event", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50), nullable=False),
Column("timestamp", DateTime, server_default=utcnow())
)
"GREATEST" function
-------------------
The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - it's equivalent to Python's ``max``
function. A SQL standard version versus a CASE based version which only
accommodates two arguments::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return "CASE WHEN %s > %s THEN %s ELSE %s END" % (
compiler.process(arg1),
compiler.process(arg2),
compiler.process(arg1),
compiler.process(arg2),
)
Example usage::
Session.query(Account).\\
filter(
greatest(
Account.checking_balance,
Account.savings_balance) > 10000
)
"false" expression
------------------
Render a "false" constant expression, rendering as "0" on platforms that
don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@compiles(sql_false)
def default_false(element, compiler, **kw):
return "false"
@compiles(sql_false, 'mssql')
@compiles(sql_false, 'mysql')
@compiles(sql_false, 'oracle')
def int_false(element, compiler, **kw):
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from .. import exc
from ..sql import visitors
def compiles(class_, *specs):
    """Register a function as a compiler for a
    given :class:`.ClauseElement` type.

    Returns a decorator.  The decorated function is stored in the class's
    ``_dispatcher`` registry under each dialect name in ``*specs``, or under
    ``'default'`` when no specs are given.
    """
    def decorate(fn):
        # Look in class_.__dict__ (not via attribute access) so that a
        # dispatcher inherited from a superclass is not mutated here.
        existing = class_.__dict__.get('_compiler_dispatcher', None)
        existing_dispatch = class_.__dict__.get('_compiler_dispatch')
        if not existing:
            existing = _dispatcher()

            if existing_dispatch:
                # preserve the class's pre-existing compiled dispatch as
                # the fallback 'default' handler
                existing.specs['default'] = existing_dispatch

            # TODO: why is the lambda needed ?
            setattr(class_, '_compiler_dispatch',
                    lambda *arg, **kw: existing(*arg, **kw))
            setattr(class_, '_compiler_dispatcher', existing)

        if specs:
            for s in specs:
                existing.specs[s] = fn
        else:
            existing.specs['default'] = fn
        return fn
    return decorate
def deregister(class_):
    """Remove all custom compilers associated with a given
    :class:`.ClauseElement` type.

    Restores the visitor-generated default ``_compiler_dispatch`` and
    drops the custom ``_dispatcher`` registry installed by @compiles.
    """
    if hasattr(class_, '_compiler_dispatcher'):
        # regenerate default _compiler_dispatch
        visitors._generate_dispatch(class_)
        # remove custom directive
        del class_._compiler_dispatcher
class _dispatcher(object):
def __init__(self):
self.specs = {}
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
try:
fn = self.specs['default']
except KeyError:
raise exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element))
return fn(element, compiler, **kw)
|
ogenstad/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_snmp_contact.py | 43 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_contact
version_added: "2.4"
short_description: Manages SNMP contact configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP contact configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
contact:
description:
- Contact information.
required: true
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp contact test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP contact"
ce_snmp_contact:
state: present
contact: call Operator at 010-99999999
provider: "{{ cli }}"
- name: "Undo SNMP contact"
ce_snmp_contact:
state: absent
contact: call Operator at 010-99999999
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info contact call Operator at 010-99999999"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, ce_argument_spec
class SnmpContact(object):
    """Manages SNMP contact configuration on a CloudEngine switch.

    Reads the current ``snmp-agent sys-info contact`` line via the CLI,
    compares it with the requested value, and issues the set/undo command
    when they differ.  Results are reported through ``module.exit_json``.
    """

    def __init__(self, **kwargs):
        """Class init: build the AnsibleModule and capture its parameters."""

        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)

        # config: cache of the device's current contact setting
        self.cur_cfg = dict()

        # module args
        self.state = self.module.params['state']
        self.contact = self.module.params['contact']

        # state: accumulated results for exit_json()
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def check_args(self):
        """Validate the contact string (device limit: 1-255 characters)."""

        if self.contact:
            if len(self.contact) > 255 or len(self.contact) < 1:
                self.module.fail_json(
                    msg='Error: The len of contact %s is out of [1 - 255].' % self.contact)
        else:
            self.module.fail_json(
                msg='Error: The len of contact is 0.')

    def get_proposed(self):
        """Record the requested state/contact in the 'proposed' result."""

        self.proposed["state"] = self.state

        if self.contact:
            self.proposed["contact"] = self.contact

    def get_existing(self):
        """Read the device's current contact into cur_cfg/existing."""

        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            # NOTE(review): splitting on "contact " assumes the configured
            # contact text itself never contains that substring, otherwise
            # temp_data[1] would be truncated -- confirm against real output.
            temp_data = tmp_cfg.split(r"contact ")
            self.cur_cfg["contact"] = temp_data[1]
            self.existing["contact"] = temp_data[1]

    def get_end_state(self):
        """Re-read the device config after changes into 'end_state'."""

        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_data = tmp_cfg.split(r"contact ")
            self.end_state["contact"] = temp_data[1]

    def cli_load_config(self, commands):
        """Push config lines to the device (no-op in check mode)."""

        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_config(self):
        """Fetch the running-config line holding the SNMP contact."""

        # Filter the running config down to the snmp contact line.
        regular = "| include snmp | include contact"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)

        return tmp_cfg

    def set_config(self):
        """Configure the SNMP contact and record the command issued."""

        cmd = "snmp-agent sys-info contact %s" % self.contact
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def undo_config(self):
        """Remove the SNMP contact and record the command issued."""

        cmd = "undo snmp-agent sys-info contact"
        self.updates_cmd.append(cmd)

        cmds = list()
        cmds.append(cmd)

        self.cli_load_config(cmds)
        self.changed = True

    def work(self):
        """Main work function: validate, diff, apply, and report."""

        self.check_args()
        self.get_proposed()
        self.get_existing()

        if self.state == "present":
            # Idempotent: only push config when the device value differs.
            if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
                pass
            else:
                self.set_config()
        else:
            # Only undo when the device's contact matches the requested one.
            if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
                self.undo_config()

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd

        self.module.exit_json(**self.results)
def main():
    """Entry point: assemble the argument spec and run the module."""

    spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        contact=dict(type='str', required=True),
    )
    # Merge in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)

    SnmpContact(argument_spec=spec).work()


if __name__ == '__main__':
    main()
|
lgarren/spack | refs/heads/develop | var/spack/repos/builtin/packages/r-illuminaio/package.py | 1 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RIlluminaio(RPackage):
    """Tools for parsing Illumina's microarray output files, including
    IDAT."""

    homepage = "http://bioconductor.org/packages/illuminaio/"
    # Bioconductor packages are fetched from the Bioconductor git mirror.
    url = "https://git.bioconductor.org/packages/illuminaio"
    list_url = homepage

    # Pinned to the exact commit of the illuminaio 0.18.0 release.
    version('0.18.0', git='https://git.bioconductor.org/packages/illuminaio', commit='e6b8ab1f8eacb760aebdb4828e9cfbf07da06eda')

    depends_on('r-base64', type=('build', 'run'))
    # This Bioconductor release series requires the R 3.4.x line.
    depends_on('r@3.4.0:3.4.9', when='@0.18.0')
|
kevin-hannegan/vps-droplet | refs/heads/master | website/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py | 335 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
try:
import ssl
except ImportError:
ssl = None
# Python 2 / Python 3 shim: expose one set of names (StringIO, string_types,
# urllib helpers, queue, HTMLParser, ...) regardless of interpreter version.
if sys.version_info[0] < 3:  # pragma: no cover
    from StringIO import StringIO
    string_types = basestring,
    text_type = unicode
    from types import FileType as file_type
    import __builtin__ as builtins
    import ConfigParser as configparser
    from ._backport import shutil
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
    from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
                        pathname2url, ContentTooShortError, splittype)

    def quote(s):
        # Python 2's urllib.quote cannot handle unicode; encode to UTF-8 first.
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        return _quote(s)

    import urllib2
    from urllib2 import (Request, urlopen, URLError, HTTPError,
                         HTTPBasicAuthHandler, HTTPPasswordMgr,
                         HTTPHandler, HTTPRedirectHandler,
                         build_opener)
    if ssl:
        from urllib2 import HTTPSHandler
    import httplib
    import xmlrpclib
    import Queue as queue
    from HTMLParser import HTMLParser
    import htmlentitydefs
    raw_input = raw_input
    from itertools import ifilter as filter
    from itertools import ifilterfalse as filterfalse

    # Python 2 has no public splituser; provide an equivalent.
    _userprog = None
    def splituser(host):
        """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
        global _userprog
        if _userprog is None:
            import re
            # Compiled lazily on first call and cached in _userprog.
            _userprog = re.compile('^(.*)@(.*)$')

        match = _userprog.match(host)
        if match: return match.group(1, 2)
        return None, host

else:  # pragma: no cover
    from io import StringIO
    string_types = str,
    text_type = str
    from io import TextIOWrapper as file_type
    import builtins
    import configparser
    import shutil
    from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
                              unquote, urlsplit, urlunsplit, splittype)
    from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
                                pathname2url,
                                HTTPBasicAuthHandler, HTTPPasswordMgr,
                                HTTPHandler, HTTPRedirectHandler,
                                build_opener)
    if ssl:
        from urllib.request import HTTPSHandler
    from urllib.error import HTTPError, URLError, ContentTooShortError
    import http.client as httplib
    import urllib.request as urllib2
    import xmlrpc.client as xmlrpclib
    import queue
    from html.parser import HTMLParser
    import html.entities as htmlentitydefs
    raw_input = input
    from itertools import filterfalse
    filter = filter
# Backport of ssl.match_hostname / CertificateError for interpreters whose
# ssl module lacks them (pre-3.2).  Mirrors CPython's implementation.
try:
    from ssl import match_hostname, CertificateError
except ImportError: # pragma: no cover
    class CertificateError(ValueError):
        pass

    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3

        http://tools.ietf.org/html/rfc6125#section-6.4.3
        """
        pats = []
        if not dn:
            return False

        parts = dn.split('.')
        leftmost, remainder = parts[0], parts[1:]

        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment.  A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))

        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()

        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))

        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)

    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate, match_hostname needs a "
                             "SSL socket or SSL context with either "
                             "CERT_OPTIONAL or CERT_REQUIRED")
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                                   "doesn't match either of %s"
                                   % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                                   "doesn't match %r"
                                   % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                                   "subjectAltName fields were found")
# SimpleNamespace-style attribute container; backported when absent (pre-3.3).
try:
    from types import SimpleNamespace as Container
except ImportError:  # pragma: no cover
    class Container(object):
        """
        A generic container for when multiple values need to be returned
        """
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)


# shutil.which backport for interpreters older than 3.3.
try:
    from shutil import which
except ImportError:  # pragma: no cover
    # Implementation from Python 3.3
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.

        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.

        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode)
                    and not os.path.isdir(fn))

        # If we're given a path with a directory part, look it up directly rather
        # than referring to PATH directories. This includes checking relative to the
        # current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None

        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)

        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if not os.curdir in path:
                path.insert(0, os.curdir)

            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path extensions.
            # This will allow us to short circuit when given "python.exe".
            # If it does match, only test that one, otherwise we have to try
            # others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell you
            # what file suffixes are executable, so just pass on cmd as-is.
            files = [cmd]

        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            if not normdir in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None


# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile

if hasattr(BaseZipFile, '__enter__'):  # pragma: no cover
    ZipFile = BaseZipFile
else:
    from zipfile import ZipExtFile as BaseZipExtFile

    class ZipExtFile(BaseZipExtFile):
        def __init__(self, base):
            self.__dict__.update(base.__dict__)

        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate

    class ZipFile(BaseZipFile):
        def __enter__(self):
            return self

        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate

        def open(self, *args, **kwargs):
            base = BaseZipFile.open(self, *args, **kwargs)
            return ZipExtFile(base)

# platform.python_implementation backport (added in 2.6/3.x platform module).
try:
    from platform import python_implementation
except ImportError: # pragma: no cover
    def python_implementation():
        """Return a string identifying the Python implementation."""
        if 'PyPy' in sys.version:
            return 'PyPy'
        if os.name == 'java':
            return 'Jython'
        if sys.version.startswith('IronPython'):
            return 'IronPython'
        return 'CPython'

try:
    import sysconfig
except ImportError: # pragma: no cover
    from ._backport import sysconfig

# callable() was removed in 3.0-3.1; emulate via collections.Callable.
try:
    callable = callable
except NameError: # pragma: no cover
    from collections import Callable

    def callable(obj):
        return isinstance(obj, Callable)


# os.fsencode/os.fsdecode backports using the filesystem encoding.
try:
    fsencode = os.fsencode
    fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
    _fsencoding = sys.getfilesystemencoding()
    if _fsencoding == 'mbcs':
        # Windows 'mbcs' does not support surrogateescape.
        _fserrors = 'strict'
    else:
        _fserrors = 'surrogateescape'

    def fsencode(filename):
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, text_type):
            return filename.encode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)

    def fsdecode(filename):
        if isinstance(filename, text_type):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
# tokenize.detect_encoding backport (PEP 263 / PEP 3120 source-encoding
# detection) for interpreters whose tokenize module lacks it.
try:
    from tokenize import detect_encoding
except ImportError: # pragma: no cover
    from codecs import BOM_UTF8, lookup
    import re

    # Raw string literal: the pattern contains \s and \w; the previous plain
    # literal relied on Python passing unknown escapes through and triggers
    # invalid-escape-sequence warnings on modern interpreters.
    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc

    def detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file.  It requires one argument, readline,
        in the same way as the tokenize() generator.

        It will call readline a maximum of twice, and return the encoding used
        (as a string) and a list of any lines (left as bytes) it has read in.

        It detects the encoding from the presence of a utf-8 bom or an encoding
        cookie as specified in pep-0263.  If both a bom and a cookie are present,
        but disagree, a SyntaxError will be raised.  If the encoding cookie is an
        invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
        'utf-8-sig' is returned.

        If no encoding is specified, then the default of 'utf-8' will be returned.
        """
        try:
            filename = readline.__self__.name
        except AttributeError:
            filename = None
        bom_found = False
        encoding = None
        default = 'utf-8'

        def read_or_stop():
            # Treat end-of-input as an empty line.
            try:
                return readline()
            except StopIteration:
                return b''

        def find_cookie(line):
            try:
                # Decode as UTF-8. Either the line is an encoding declaration,
                # in which case it should be pure ASCII, or it must be UTF-8
                # per default encoding.
                line_string = line.decode('utf-8')
            except UnicodeDecodeError:
                msg = "invalid or missing encoding declaration"
                if filename is not None:
                    msg = '{} for {!r}'.format(msg, filename)
                raise SyntaxError(msg)

            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "unknown encoding: " + encoding
                else:
                    msg = "unknown encoding for {!r}: {}".format(filename,
                                                                 encoding)
                raise SyntaxError(msg)

            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    if filename is None:
                        msg = 'encoding problem: utf-8'
                    else:
                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
                    raise SyntaxError(msg)
                encoding += '-sig'
            return encoding

        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []

        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]

        second = read_or_stop()
        if not second:
            return default, [first]

        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]

        return default, [first, second]
# For converting & <-> &amp; etc.
try:
    from html import escape
except ImportError:
    from cgi import escape
if sys.version_info[:2] < (3, 4):
    # html.unescape was added in 3.4; fall back to the HTMLParser method.
    unescape = HTMLParser().unescape
else:
    from html import unescape

# collections.ChainMap backport (added in 3.3).
try:
    from collections import ChainMap
except ImportError: # pragma: no cover
    from collections import MutableMapping

    try:
        from reprlib import recursive_repr as _recursive_repr
    except ImportError:
        def _recursive_repr(fillvalue='...'):
            '''
            Decorator to make a repr function return fillvalue for a recursive
            call
            '''

            def decorating_function(user_function):
                repr_running = set()

                def wrapper(self):
                    # NOTE(review): get_ident is not imported anywhere on this
                    # fallback path -- calling this repr would raise NameError
                    # on interpreters old enough to lack reprlib; confirm.
                    key = id(self), get_ident()
                    if key in repr_running:
                        return fillvalue
                    repr_running.add(key)
                    try:
                        result = user_function(self)
                    finally:
                        repr_running.discard(key)
                    return result

                # Can't use functools.wraps() here because of bootstrap issues
                wrapper.__module__ = getattr(user_function, '__module__')
                wrapper.__doc__ = getattr(user_function, '__doc__')
                wrapper.__name__ = getattr(user_function, '__name__')
                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
                return wrapper

            return decorating_function

    class ChainMap(MutableMapping):
        ''' A ChainMap groups multiple dicts (or other mappings) together
        to create a single, updateable view.

        The underlying mappings are stored in a list.  That list is public and can
        accessed or updated using the *maps* attribute.  There is no other state.

        Lookups search the underlying mappings successively until a key is found.
        In contrast, writes, updates, and deletions only operate on the first
        mapping.

        '''

        def __init__(self, *maps):
            '''Initialize a ChainMap by setting *maps* to the given mappings.
            If no mappings are provided, a single empty dictionary is used.

            '''
            self.maps = list(maps) or [{}]          # always at least one map

        def __missing__(self, key):
            raise KeyError(key)

        def __getitem__(self, key):
            for mapping in self.maps:
                try:
                    return mapping[key]             # can't use 'key in mapping' with defaultdict
                except KeyError:
                    pass
            return self.__missing__(key)            # support subclasses that define __missing__

        def get(self, key, default=None):
            return self[key] if key in self else default

        def __len__(self):
            return len(set().union(*self.maps))     # reuses stored hash values if possible

        def __iter__(self):
            return iter(set().union(*self.maps))

        def __contains__(self, key):
            return any(key in m for m in self.maps)

        def __bool__(self):
            return any(self.maps)

        @_recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))

        @classmethod
        def fromkeys(cls, iterable, *args):
            'Create a ChainMap with a single dict created from the iterable.'
            return cls(dict.fromkeys(iterable, *args))

        def copy(self):
            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
            return self.__class__(self.maps[0].copy(), *self.maps[1:])

        __copy__ = copy

        def new_child(self):                        # like Django's Context.push()
            'New ChainMap with a new dict followed by all previous maps.'
            return self.__class__({}, *self.maps)

        @property
        def parents(self):                          # like Django's Context.pop()
            'New ChainMap from maps[1:].'
            return self.__class__(*self.maps[1:])

        def __setitem__(self, key, value):
            self.maps[0][key] = value

        def __delitem__(self, key):
            try:
                del self.maps[0][key]
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
            try:
                return self.maps[0].popitem()
            except KeyError:
                raise KeyError('No keys found in the first mapping.')

        def pop(self, key, *args):
            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
            try:
                return self.maps[0].pop(key, *args)
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def clear(self):
            'Clear maps[0], leaving maps[1:] intact.'
            self.maps[0].clear()

# imp.cache_from_source backport: naive pre-PEP 3147 foo.py -> foo.pyc/.pyo.
try:
    from imp import cache_from_source
except ImportError: # pragma: no cover
    def cache_from_source(path, debug_override=None):
        assert path.endswith('.py')
        if debug_override is None:
            debug_override = __debug__
        if debug_override:
            suffix = 'c'
        else:
            suffix = 'o'
        return path + suffix
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
    'od.iteritems -> an iterator over the (key, value) items in od'
    # Lazily pair keys with their values in insertion order.
    return ((key, self[key]) for key in self)
def update(*args, **kwds):
    '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
    If E is a dict instance, does: for k in E: od[k] = E[k]
    If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
    Or if E is an iterable of items, does: for k, v in E: od[k] = v
    In either case, this is followed by: for k, v in F.items(): od[k] = v
    '''
    # *args is used instead of named 'self'/'other' parameters so that
    # keyword arguments literally named 'self' or 'other' land in kwds
    # rather than clashing with the positionals.
    if len(args) > 2:
        raise TypeError('update() takes at most 2 positional '
                        'arguments (%d given)' % (len(args),))
    elif not args:
        raise TypeError('update() takes at least 1 argument (0 given)')
    self = args[0]
    # Make progressively weaker assumptions about "other"
    other = ()
    if len(args) == 2:
        other = args[1]
    if isinstance(other, dict):
        # Real mapping: iterate keys directly.
        for key in other:
            self[key] = other[key]
    elif hasattr(other, 'keys'):
        # Duck-typed mapping: anything exposing .keys().
        for key in other.keys():
            self[key] = other[key]
    else:
        # Fallback: an iterable of (key, value) pairs.
        for key, value in other:
            self[key] = value
    for key, value in kwds.items():
        self[key] = value

__update = update # let subclasses override update without breaking __init__
# Unique sentinel: lets callers legitimately pass default=None to pop().
__marker = object()

def pop(self, key, default=__marker):
    '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
    If key is not found, d is returned if given, otherwise KeyError is raised.
    '''
    if key in self:
        result = self[key]
        # Delegates to __delitem__, which also unlinks the node from the
        # ordering structure.
        del self[key]
        return result
    if default is self.__marker:
        raise KeyError(key)
    return default
def setdefault(self, key, default=None):
    'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
    # Insert the default only for missing keys, then return whatever the
    # mapping now holds for the key.
    if key not in self:
        self[key] = default
    return self[key]
def __repr__(self, _repr_running=None):
    'od.__repr__() <==> repr(od)'
    # _repr_running tracks (id(self), thread-id) pairs currently being
    # rendered so self-referential mappings print '...' instead of
    # recursing forever.  _get_ident is imported elsewhere in this
    # module (thread.get_ident or a fallback -- TODO confirm).
    if not _repr_running: _repr_running = {}
    call_key = id(self), _get_ident()
    if call_key in _repr_running:
        return '...'
    _repr_running[call_key] = 1
    try:
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    finally:
        # Always unregister, even if rendering items raised.
        del _repr_running[call_key]
def __reduce__(self):
    'Return state information for pickling'
    # Pickle as (class, (items,), extra_instance_state) so the ordering
    # is rebuilt by the constructor rather than by restoring internals.
    items = [[k, self[k]] for k in self]
    inst_dict = vars(self).copy()
    # Drop attributes a plain OrderedDict also has (the linked-list
    # internals); only genuinely extra subclass state is pickled.
    for k in vars(OrderedDict()):
        inst_dict.pop(k, None)
    if inst_dict:
        return (self.__class__, (items,), inst_dict)
    return self.__class__, (items,)
def copy(self):
    'od.copy() -> a shallow copy of od'
    # Rebuild through the constructor so subclasses copy into their own
    # type; values are shared (shallow copy).
    return type(self)(self)
@classmethod
def fromkeys(cls, iterable, value=None):
    '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
    and values equal to v (which defaults to None).
    '''
    # Insert keys one at a time so the new mapping preserves the
    # iteration order of *iterable*.
    result = cls()
    for key in iterable:
        result[key] = value
    return result
def __eq__(self, other):
    '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
    while comparison to a regular mapping is order-insensitive.
    '''
    if isinstance(other, OrderedDict):
        # Order-sensitive: compare the item *lists*, not just the sets.
        # The length check short-circuits the more expensive item build.
        return len(self)==len(other) and self.items() == other.items()
    # Plain mappings compare order-insensitively via dict semantics.
    return dict.__eq__(self, other)
def __ne__(self, other):
    'od.__ne__(y) <==> od!=y'
    # Defined as the strict negation of __eq__ so the two stay in sync.
    return not (self == other)
# -- the following methods are only used in Python 2.7 --

def viewkeys(self):
    "od.viewkeys() -> a set-like object providing a view on od's keys"
    # KeysView is expected to be imported elsewhere in this module
    # (collections ABCs) -- TODO confirm.
    return KeysView(self)
def viewvalues(self):
    "od.viewvalues() -> an object providing a view on od's values"
    # ValuesView is expected to be imported elsewhere in this module
    # (collections ABCs) -- TODO confirm.
    return ValuesView(self)
def viewitems(self):
    "od.viewitems() -> a set-like object providing a view on od's items"
    # ItemsView is expected to be imported elsewhere in this module
    # (collections ABCs) -- TODO confirm.
    return ItemsView(self)
try:
    from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
    # Older Pythons lack logging.config.valid_ident; provide a local
    # fallback that accepts ASCII identifiers (case-insensitive).
    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)

    def valid_ident(s):
        # Raises instead of returning False so badly named configuration
        # keys fail loudly at configuration time.
        m = IDENTIFIER.match(s)
        if not m:
            raise ValueError('Not a valid Python identifier: %r' % s)
        return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Values fetched from the dict pass through ``self.configurator.convert``
    so nested containers get wrapped and ``prefix://`` strings get resolved
    lazily, on first access.  Converted results replace the raw value so
    the conversion runs at most once per key.  The owning configurator
    must assign the ``configurator`` attribute before items are accessed.
    """

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time and
        # backlink it to this container so converters can navigate the
        # configuration tree.
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # Same cache-and-backlink behaviour as __getitem__.
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            # The entry was removed, so nothing is written back; only
            # the parent/key backlinks are recorded.
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper.

    Same lazy-conversion scheme as ConvertingDict, keyed by index:
    element reads go through ``self.configurator.convert`` and converted
    results are stored back so the work happens only once per index.
    """

    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time and
        # backlink it to this container.
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # NOTE(review): no result.key is recorded here,
                # presumably because the index is stale once popped --
                # confirm against ConvertingDict.pop, which does set it.
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Tuples are immutable, so converted values cannot be cached back into
    the container; conversion therefore re-runs on every access and only
    the parent/key backlinks are recorded.
    """

    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    # Matches "prefix://suffix" strings, e.g. "ext://sys.stderr".
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Tokens used when walking a cfg:// path: a leading word,
    # ".attr" segments and "[index]" segments.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # Maps URL-style prefixes to converter method names on this class.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = staticmethod(__import__)

    def __init__(self, config):
        # Wrap the raw config so nested values are converted lazily.
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # The attribute may be a not-yet-imported submodule;
                    # importing it binds it on the parent, then retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError while preserving the original
            # cause and traceback for debugging.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol.

        Walks a path like "handlers[0].target" through self.config,
        consuming one word/attribute/index token per loop iteration.
        """
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            # Numeric-looking index: try it as an int
                            # first (lists); fall back to a string key.
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, string_types):
            # string_types comes from the surrounding compat module
            # (str/unicode on Python 2, str on Python 3 -- TODO confirm).
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        # '()' names the factory callable; '.' holds post-construction
        # attribute assignments; everything else becomes kwargs.
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
|
ericfc/django | refs/heads/master | django/conf/locale/bs/formats.py | 702 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# (j = day, N = month abbreviation, F = full month name, Y = 4-digit
# year, G = 24-hour hour without leading zero, i = minutes, T = timezone)
DATE_FORMAT = 'j. N Y.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. N. Y. G:i T'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'Y M j'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =

# Bosnian number formatting: comma decimal mark, dot thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
Vector35/binaryninja-api | refs/heads/dev | python/examples/kaitai/lvm2.py | 2 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
# Refuse to run against an older runtime: this generated module relies on
# APIs introduced in Kaitai Struct Python runtime 0.7.
if parse_version(ks_version) < parse_version('0.7'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Lvm2(KaitaiStruct):
    """Parser for the on-disk layout of an LVM2 physical volume.

    ### Building a test file
    ```
    dd if=/dev/zero of=image.img bs=512 count=$(( 4 * 1024 * 2 ))
    sudo losetup /dev/loop1 image.img
    sudo pvcreate /dev/loop1
    sudo vgcreate vg_test /dev/loop1
    sudo lvcreate --name lv_test1 vg_test
    sudo losetup -d /dev/loop1
    ```
    .. seealso::
       Source - https://github.com/libyal/libvslvm/blob/master/documentation/Logical%20Volume%20Manager%20(LVM)%20format.asciidoc
    """
    SEQ_FIELDS = ["pv"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream offsets, for the --debug runtime mode.
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['pv']['start'] = self._io.pos()
        self.pv = self._root.PhysicalVolume(self._io, self, self._root)
        self.pv._read()
        self._debug['pv']['end'] = self._io.pos()

    class PhysicalVolume(KaitaiStruct):
        """One physical volume: an empty first sector followed by the label."""
        SEQ_FIELDS = ["empty_sector", "label"]

        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['empty_sector']['start'] = self._io.pos()
            self.empty_sector = self._io.read_bytes(self._root.sector_size)
            self._debug['empty_sector']['end'] = self._io.pos()
            self._debug['label']['start'] = self._io.pos()
            self.label = self._root.PhysicalVolume.Label(self._io, self, self._root)
            self.label._read()
            self._debug['label']['end'] = self._io.pos()

        class Label(KaitaiStruct):
            SEQ_FIELDS = ["label_header", "volume_header"]

            def __init__(self, _io, _parent=None, _root=None):
                self._io = _io
                self._parent = _parent
                self._root = _root if _root else self
                self._debug = collections.defaultdict(dict)

            def _read(self):
                self._debug['label_header']['start'] = self._io.pos()
                self.label_header = self._root.PhysicalVolume.Label.LabelHeader(self._io, self, self._root)
                self.label_header._read()
                self._debug['label_header']['end'] = self._io.pos()
                self._debug['volume_header']['start'] = self._io.pos()
                self.volume_header = self._root.PhysicalVolume.Label.VolumeHeader(self._io, self, self._root)
                self.volume_header._read()
                self._debug['volume_header']['end'] = self._io.pos()

            class LabelHeader(KaitaiStruct):
                SEQ_FIELDS = ["signature", "sector_number", "checksum", "label_header_"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['signature']['start'] = self._io.pos()
                    # Magic: "LABELONE".
                    self.signature = self._io.ensure_fixed_contents(b"\x4C\x41\x42\x45\x4C\x4F\x4E\x45")
                    self._debug['signature']['end'] = self._io.pos()
                    self._debug['sector_number']['start'] = self._io.pos()
                    self.sector_number = self._io.read_u8le()
                    self._debug['sector_number']['end'] = self._io.pos()
                    self._debug['checksum']['start'] = self._io.pos()
                    self.checksum = self._io.read_u4le()
                    self._debug['checksum']['end'] = self._io.pos()
                    self._debug['label_header_']['start'] = self._io.pos()
                    self.label_header_ = self._root.PhysicalVolume.Label.LabelHeader.LabelHeader(self._io, self, self._root)
                    self.label_header_._read()
                    self._debug['label_header_']['end'] = self._io.pos()

                class LabelHeader(KaitaiStruct):
                    SEQ_FIELDS = ["data_offset", "type_indicator"]

                    def __init__(self, _io, _parent=None, _root=None):
                        self._io = _io
                        self._parent = _parent
                        self._root = _root if _root else self
                        self._debug = collections.defaultdict(dict)

                    def _read(self):
                        self._debug['data_offset']['start'] = self._io.pos()
                        self.data_offset = self._io.read_u4le()
                        self._debug['data_offset']['end'] = self._io.pos()
                        self._debug['type_indicator']['start'] = self._io.pos()
                        # Magic: "LVM2 001".
                        self.type_indicator = self._io.ensure_fixed_contents(b"\x4C\x56\x4D\x32\x20\x30\x30\x31")
                        self._debug['type_indicator']['end'] = self._io.pos()

            class VolumeHeader(KaitaiStruct):
                SEQ_FIELDS = ["id", "size", "data_area_descriptors", "metadata_area_descriptors"]

                def __init__(self, _io, _parent=None, _root=None):
                    self._io = _io
                    self._parent = _parent
                    self._root = _root if _root else self
                    self._debug = collections.defaultdict(dict)

                def _read(self):
                    self._debug['id']['start'] = self._io.pos()
                    self.id = (self._io.read_bytes(32)).decode(u"ascii")
                    self._debug['id']['end'] = self._io.pos()
                    self._debug['size']['start'] = self._io.pos()
                    self.size = self._io.read_u8le()
                    self._debug['size']['end'] = self._io.pos()
                    # Both descriptor lists are repeat-until sequences
                    # terminated by an entry with non-zero size *and*
                    # offset (ksy "repeat-until" semantics).
                    self._debug['data_area_descriptors']['start'] = self._io.pos()
                    self.data_area_descriptors = []
                    i = 0
                    while True:
                        if not 'arr' in self._debug['data_area_descriptors']:
                            self._debug['data_area_descriptors']['arr'] = []
                        self._debug['data_area_descriptors']['arr'].append({'start': self._io.pos()})
                        _t_data_area_descriptors = self._root.PhysicalVolume.Label.VolumeHeader.DataAreaDescriptor(self._io, self, self._root)
                        _t_data_area_descriptors._read()
                        _ = _t_data_area_descriptors
                        self.data_area_descriptors.append(_)
                        self._debug['data_area_descriptors']['arr'][len(self.data_area_descriptors) - 1]['end'] = self._io.pos()
                        if ((_.size != 0) and (_.offset != 0)) :
                            break
                        i += 1
                    self._debug['data_area_descriptors']['end'] = self._io.pos()
                    self._debug['metadata_area_descriptors']['start'] = self._io.pos()
                    self.metadata_area_descriptors = []
                    i = 0
                    while True:
                        if not 'arr' in self._debug['metadata_area_descriptors']:
                            self._debug['metadata_area_descriptors']['arr'] = []
                        self._debug['metadata_area_descriptors']['arr'].append({'start': self._io.pos()})
                        _t_metadata_area_descriptors = self._root.PhysicalVolume.Label.VolumeHeader.MetadataAreaDescriptor(self._io, self, self._root)
                        _t_metadata_area_descriptors._read()
                        _ = _t_metadata_area_descriptors
                        self.metadata_area_descriptors.append(_)
                        self._debug['metadata_area_descriptors']['arr'][len(self.metadata_area_descriptors) - 1]['end'] = self._io.pos()
                        if ((_.size != 0) and (_.offset != 0)) :
                            break
                        i += 1
                    self._debug['metadata_area_descriptors']['end'] = self._io.pos()

                class DataAreaDescriptor(KaitaiStruct):
                    SEQ_FIELDS = ["offset", "size"]

                    def __init__(self, _io, _parent=None, _root=None):
                        self._io = _io
                        self._parent = _parent
                        self._root = _root if _root else self
                        self._debug = collections.defaultdict(dict)

                    def _read(self):
                        self._debug['offset']['start'] = self._io.pos()
                        self.offset = self._io.read_u8le()
                        self._debug['offset']['end'] = self._io.pos()
                        self._debug['size']['start'] = self._io.pos()
                        self.size = self._io.read_u8le()
                        self._debug['size']['end'] = self._io.pos()

                    @property
                    def data(self):
                        # Lazily read the pointed-to bytes; cached in _m_data.
                        if hasattr(self, '_m_data'):
                            return self._m_data if hasattr(self, '_m_data') else None

                        if self.size != 0:
                            _pos = self._io.pos()
                            self._io.seek(self.offset)
                            self._debug['_m_data']['start'] = self._io.pos()
                            self._m_data = (self._io.read_bytes(self.size)).decode(u"ascii")
                            self._debug['_m_data']['end'] = self._io.pos()
                            # Restore the stream position for subsequent reads.
                            self._io.seek(_pos)
                        return self._m_data if hasattr(self, '_m_data') else None

                class MetadataAreaDescriptor(KaitaiStruct):
                    SEQ_FIELDS = ["offset", "size"]

                    def __init__(self, _io, _parent=None, _root=None):
                        self._io = _io
                        self._parent = _parent
                        self._root = _root if _root else self
                        self._debug = collections.defaultdict(dict)

                    def _read(self):
                        self._debug['offset']['start'] = self._io.pos()
                        self.offset = self._io.read_u8le()
                        self._debug['offset']['end'] = self._io.pos()
                        self._debug['size']['start'] = self._io.pos()
                        self.size = self._io.read_u8le()
                        self._debug['size']['end'] = self._io.pos()

                    @property
                    def data(self):
                        # Lazily parse the pointed-to metadata area from a
                        # bounded substream; cached in _m_data.
                        if hasattr(self, '_m_data'):
                            return self._m_data if hasattr(self, '_m_data') else None

                        if self.size != 0:
                            _pos = self._io.pos()
                            self._io.seek(self.offset)
                            self._debug['_m_data']['start'] = self._io.pos()
                            self._raw__m_data = self._io.read_bytes(self.size)
                            io = KaitaiStream(BytesIO(self._raw__m_data))
                            self._m_data = self._root.PhysicalVolume.Label.VolumeHeader.MetadataArea(io, self, self._root)
                            self._m_data._read()
                            self._debug['_m_data']['end'] = self._io.pos()
                            self._io.seek(_pos)
                        return self._m_data if hasattr(self, '_m_data') else None

                class MetadataArea(KaitaiStruct):
                    """According to `[REDHAT]` the metadata area is a circular buffer. New metadata is appended to the old metadata and then the pointer to the start of it is updated. The metadata area, therefore, can contain copies of older versions of the metadata."""
                    SEQ_FIELDS = ["header"]

                    def __init__(self, _io, _parent=None, _root=None):
                        self._io = _io
                        self._parent = _parent
                        self._root = _root if _root else self
                        self._debug = collections.defaultdict(dict)

                    def _read(self):
                        self._debug['header']['start'] = self._io.pos()
                        self.header = self._root.PhysicalVolume.Label.VolumeHeader.MetadataArea.MetadataAreaHeader(self._io, self, self._root)
                        self.header._read()
                        self._debug['header']['end'] = self._io.pos()

                    class MetadataAreaHeader(KaitaiStruct):
                        SEQ_FIELDS = ["checksum", "signature", "version", "metadata_area_offset", "metadata_area_size", "raw_location_descriptors"]

                        def __init__(self, _io, _parent=None, _root=None):
                            self._io = _io
                            self._parent = _parent
                            self._root = _root if _root else self
                            self._debug = collections.defaultdict(dict)

                        def _read(self):
                            self._debug['checksum']['start'] = self._io.pos()
                            # FIX: the generated code instantiated a nested
                            # MetadataAreaHeader here and called _read() on it,
                            # which recursed without bound.  Per the libvslvm
                            # format documentation the checksum is a 4-byte
                            # little-endian integer, matching the other
                            # checksum fields in this parser.
                            self.checksum = self._io.read_u4le()
                            self._debug['checksum']['end'] = self._io.pos()
                            self._debug['signature']['start'] = self._io.pos()
                            # Magic: " LVM2 x[5A%r0N*>".
                            self.signature = self._io.ensure_fixed_contents(b"\x20\x4C\x56\x4D\x32\x20\x78\x5B\x35\x41\x25\x72\x30\x4E\x2A\x3E")
                            self._debug['signature']['end'] = self._io.pos()
                            self._debug['version']['start'] = self._io.pos()
                            self.version = self._io.read_u4le()
                            self._debug['version']['end'] = self._io.pos()
                            self._debug['metadata_area_offset']['start'] = self._io.pos()
                            self.metadata_area_offset = self._io.read_u8le()
                            self._debug['metadata_area_offset']['end'] = self._io.pos()
                            self._debug['metadata_area_size']['start'] = self._io.pos()
                            self.metadata_area_size = self._io.read_u8le()
                            self._debug['metadata_area_size']['end'] = self._io.pos()
                            self._debug['raw_location_descriptors']['start'] = self._io.pos()
                            self.raw_location_descriptors = []
                            i = 0
                            while True:
                                if not 'arr' in self._debug['raw_location_descriptors']:
                                    self._debug['raw_location_descriptors']['arr'] = []
                                self._debug['raw_location_descriptors']['arr'].append({'start': self._io.pos()})
                                _t_raw_location_descriptors = self._root.PhysicalVolume.Label.VolumeHeader.MetadataArea.MetadataAreaHeader.RawLocationDescriptor(self._io, self, self._root)
                                _t_raw_location_descriptors._read()
                                _ = _t_raw_location_descriptors
                                self.raw_location_descriptors.append(_)
                                self._debug['raw_location_descriptors']['arr'][len(self.raw_location_descriptors) - 1]['end'] = self._io.pos()
                                if ((_.offset != 0) and (_.size != 0) and (_.checksum != 0)) :
                                    break
                                i += 1
                            self._debug['raw_location_descriptors']['end'] = self._io.pos()

                        class RawLocationDescriptor(KaitaiStruct):
                            """The data area size can be 0. It is assumed it represents the remaining available data."""

                            class RawLocationDescriptorFlags(Enum):
                                raw_location_ignored = 1
                            SEQ_FIELDS = ["offset", "size", "checksum", "flags"]

                            def __init__(self, _io, _parent=None, _root=None):
                                self._io = _io
                                self._parent = _parent
                                self._root = _root if _root else self
                                self._debug = collections.defaultdict(dict)

                            def _read(self):
                                self._debug['offset']['start'] = self._io.pos()
                                self.offset = self._io.read_u8le()
                                self._debug['offset']['end'] = self._io.pos()
                                self._debug['size']['start'] = self._io.pos()
                                self.size = self._io.read_u8le()
                                self._debug['size']['end'] = self._io.pos()
                                self._debug['checksum']['start'] = self._io.pos()
                                self.checksum = self._io.read_u4le()
                                self._debug['checksum']['end'] = self._io.pos()
                                self._debug['flags']['start'] = self._io.pos()
                                self.flags = KaitaiStream.resolve_enum(self._root.PhysicalVolume.Label.VolumeHeader.MetadataArea.MetadataAreaHeader.RawLocationDescriptor.RawLocationDescriptorFlags, self._io.read_u4le())
                                self._debug['flags']['end'] = self._io.pos()

                        @property
                        def metadata(self):
                            # Lazily read the raw metadata bytes the header
                            # points at; cached in _m_metadata.
                            if hasattr(self, '_m_metadata'):
                                return self._m_metadata if hasattr(self, '_m_metadata') else None

                            _pos = self._io.pos()
                            self._io.seek(self.metadata_area_offset)
                            self._debug['_m_metadata']['start'] = self._io.pos()
                            self._m_metadata = self._io.read_bytes(self.metadata_area_size)
                            self._debug['_m_metadata']['end'] = self._io.pos()
                            self._io.seek(_pos)
                            return self._m_metadata if hasattr(self, '_m_metadata') else None

    @property
    def sector_size(self):
        # Fixed 512-byte sectors, as assumed throughout the LVM2 on-disk
        # format; cached in _m_sector_size.
        if hasattr(self, '_m_sector_size'):
            return self._m_sector_size if hasattr(self, '_m_sector_size') else None

        self._m_sector_size = 512
        return self._m_sector_size if hasattr(self, '_m_sector_size') else None
|
huihoo/reader | refs/heads/master | apps/rss_feeds/migrations/0041_deuniqueify_feed_address_on_dupes.py | 18 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Drop (forwards) / restore (backwards) the unique constraint on
    DuplicateFeed.duplicate_address, allowing several DuplicateFeed rows
    to share the same address.
    """

    def forwards(self, orm):
        # Removing unique constraint on 'DuplicateFeed', fields ['duplicate_address']
        db.delete_unique('rss_feeds_duplicatefeed', ['duplicate_address'])

    def backwards(self, orm):
        # Adding unique constraint on 'DuplicateFeed', fields ['duplicate_address']
        db.create_unique('rss_feeds_duplicatefeed', ['duplicate_address'])

    # Frozen snapshot of the app's ORM at the time this migration was
    # generated (South convention); must not be edited by hand.
    models = {
        'rss_feeds.duplicatefeed': {
            'Meta': {'object_name': 'DuplicateFeed'},
            'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'rss_feeds.feed': {
            'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
            'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
            'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'rss_feeds.feeddata': {
            'Meta': {'object_name': 'FeedData'},
            'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
            'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
            'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'rss_feeds.feedicon': {
            'Meta': {'object_name': 'FeedIcon'},
            'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'icon'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['rss_feeds.Feed']"}),
            'icon_url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'rss_feeds.feedloadtime': {
            'Meta': {'object_name': 'FeedLoadtime'},
            'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loadtime': ('django.db.models.fields.FloatField', [], {})
        },
        'rss_feeds.feedupdatehistory': {
            'Meta': {'object_name': 'FeedUpdateHistory'},
            'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
            'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
            'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['rss_feeds']
|
coder-james/mxnet | refs/heads/master | example/speech-demo/speechSGD.py | 15 | import mxnet as mx
from mxnet.ndarray import NDArray, zeros, clip, sqrt
from mxnet.random import normal
@mx.optimizer.register
class speechSGD(mx.optimizer.Optimizer):
    """A very simple SGD optimizer with momentum and weight regularization.

    Parameters
    ----------
    learning_rate : float, optional
        learning_rate of SGD
    momentum : float, optional
        momentum value
    wd : float, optional
        L2 regularization coefficient add to all the weights
    rescale_grad : float, optional
        rescaling factor of gradient.
    clip_gradient : float, optional
        clip gradient in range [-clip_gradient, clip_gradient]
    param_idx2name : dict of string/int to float, optional
        special treat weight decay in parameter ends with bias, gamma, and beta
    """
    def __init__(self, momentum=0.0, **kwargs):
        super(speechSGD, self).__init__(**kwargs)
        self.momentum = momentum

    def create_state(self, index, weight):
        """Create additional optimizer state such as momentum.

        Parameters
        ----------
        weight : NDArray
            The weight data

        Returns
        -------
        NDArray or None
            A zero-initialized momentum buffer matching `weight`, or
            None when momentum is disabled.
        """
        if self.momentum == 0.0:
            return None
        else:
            return zeros(weight.shape, weight.context, dtype=weight.dtype)

    def _get_lr(self, index):
        """Get the (learning rate, momentum) pair for `index`.

        Unlike the base class, the lr_scheduler here is expected to
        return a (lr, momentum) tuple, so the scheduled momentum can
        override the constructor value.

        Parameters
        ----------
        index : int
            The index for weight

        Returns
        -------
        (lr, mom) : (float, float)
            learning rate and momentum for this index
        """
        mom = 0.0
        if self.lr_scheduler is not None:
            (lr, mom) = self.lr_scheduler(self.num_update)
        else:
            lr = self.lr

        # Apply per-parameter learning-rate multipliers.
        if index in self.lr_mult:
            lr *= self.lr_mult[index]
        elif index in self.idx2name:
            lr *= self.lr_mult.get(self.idx2name[index], 1.0)
        return lr, mom

    def update(self, index, weight, grad, state):
        """Update the parameters.

        Parameters
        ----------
        index : int
            An unique integer key used to index the parameters

        weight : NDArray
            weight ndarray

        grad : NDArray
            grad ndarray

        state : NDArray or other objects returned by init_state
            The auxiliary state used in optimization.
        """
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        (lr, momentum) = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)

        if state:
            mom = state
            mom[:] *= momentum
            mom[:] += -lr * (1.0 - momentum) * (grad + wd * weight)
            weight[:] += mom
        else:
            assert self.momentum == 0.0
            # BUG FIX: use the per-index weight decay computed above.
            # The original used self.wd here, which silently ignored
            # _get_wd's per-parameter wd_mult handling and was
            # inconsistent with the momentum branch.
            weight[:] += -lr * (grad + wd * weight)
|
Chilledheart/chromium | refs/heads/master | tools/telemetry/catapult_base/refactor/module.py | 16 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from catapult_base.refactor import annotated_symbol
class Module(object):
    """A Python source file parsed into an annotated snippet tree.

    The file is read once at construction time; Write() persists any
    in-memory modifications back to disk.
    """

    def __init__(self, file_path):
        self._file_path = file_path
        with open(self._file_path, 'r') as source_file:
            self._snippet = annotated_symbol.Annotate(source_file)

    @property
    def file_path(self):
        return self._file_path

    @property
    def modified(self):
        return self._snippet.modified

    def FindAll(self, snippet_type):
        return self._snippet.FindAll(snippet_type)

    def FindChildren(self, snippet_type):
        return self._snippet.FindChildren(snippet_type)

    def Write(self):
        """Write modifications to the file."""
        if not self.modified:
            return
        # Render to a string before opening for writing: if rendering
        # fails we have not yet truncated the file.
        rendered = str(self._snippet)
        with open(self._file_path, 'w') as out:
            out.write(rendered)
|
ArcEye/MK-Qt5 | refs/heads/master | src/emc/usr_intf/gmoccapy/player.py | 20 | #!/usr/bin/env python
'''
This class is used to handle sound messages from gmoccapy,
it is just a copy of a class from gscreen and has been slightly modified
Copyright 2014 Norbert Schechner
nieson@web.de
original Author = Chris Morley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import gobject
import gst
# the player class does the work of playing the audio hints
# http://pygstdocs.berlios.de/pygst-tutorial/introduction.html
class Player:
    """Plays audio hint files through a GStreamer 'playbin' pipeline."""

    def __init__(self):
        import gst
        # 'playbin' automatically plays any URI it is given.
        self.player = gst.element_factory_make("playbin", "player")
        # Watch the pipeline's message bus for end-of-stream and errors.
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)
        self.loop = gobject.MainLoop()

    def run(self):
        """Start playback and block in the main loop until it ends."""
        self.player.set_state(gst.STATE_PLAYING)
        self.loop.run()

    def set_sound(self, file):
        """Point the pipeline at a local audio file."""
        self.player.set_property("uri", "file://" + file)

    def on_message(self, bus, message):
        """Bus callback: stop on end-of-stream; report and stop on error."""
        kind = message.type
        if kind == gst.MESSAGE_EOS:
            # File finished playing normally — tear down and exit the loop.
            self.player.set_state(gst.STATE_NULL)
            self.loop.quit()
        elif kind == gst.MESSAGE_ERROR:
            self.player.set_state(gst.STATE_NULL)
            err, debug = message.parse_error()
            print ("Error: %s" % err, debug)
            self.loop.quit()
|
riveridea/gnuradio | refs/heads/master | grc/gui/Connection.py | 4 | """
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import Utils
from Element import Element
import Colors
from Constants import CONNECTOR_ARROW_BASE, CONNECTOR_ARROW_HEIGHT
import gtk
class Connection(Element):
    """
    A graphical connection for ports.
    The connection has 2 parts, the arrow and the wire.
    The coloring of the arrow and wire exposes the status of 3 states:
    enabled/disabled, valid/invalid, highlighted/non-highlighted.
    The wire coloring exposes the enabled and highlighted states.
    The arrow coloring exposes the enabled and valid states.
    """

    def __init__(self): Element.__init__(self)

    def get_coordinate(self):
        """
        Get the 0,0 coordinate.
        Coordinates are irrelevant in connection.

        Returns:
            0, 0
        """
        return (0, 0)

    def get_rotation(self):
        """
        Get the 0 degree rotation.
        Rotations are irrelevant in connection.

        Returns:
            0
        """
        return 0

    def create_shapes(self):
        """Precalculate relative coordinates."""
        Element.create_shapes(self)
        # invalidate the cached endpoint state; draw() compares against
        # these to detect when a port has rotated or moved
        self._sink_rot = None
        self._source_rot = None
        self._sink_coor = None
        self._source_coor = None
        #get the source coordinate
        try:
            connector_length = self.get_source().get_connector_length()
        except:
            # source port may not be resolvable yet — bail out quietly
            return
        self.x1, self.y1 = Utils.get_rotated_coordinate((connector_length, 0), self.get_source().get_rotation())
        #get the sink coordinate
        connector_length = self.get_sink().get_connector_length() + CONNECTOR_ARROW_HEIGHT
        self.x2, self.y2 = Utils.get_rotated_coordinate((-connector_length, 0), self.get_sink().get_rotation())
        #build the arrow (triangle pointing into the sink port, rotated to match it)
        self.arrow = [(0, 0),
            Utils.get_rotated_coordinate((-CONNECTOR_ARROW_HEIGHT, -CONNECTOR_ARROW_BASE/2), self.get_sink().get_rotation()),
            Utils.get_rotated_coordinate((-CONNECTOR_ARROW_HEIGHT, CONNECTOR_ARROW_BASE/2), self.get_sink().get_rotation()),
        ]
        self._update_after_move()
        # arrow color encodes the enabled and valid states
        if not self.get_enabled(): self._arrow_color = Colors.CONNECTION_DISABLED_COLOR
        elif not self.is_valid(): self._arrow_color = Colors.CONNECTION_ERROR_COLOR
        else: self._arrow_color = Colors.CONNECTION_ENABLED_COLOR

    def _update_after_move(self):
        """Calculate coordinates."""
        self.clear() #FIXME do i want this here?
        #source connector
        source = self.get_source()
        X, Y = source.get_connector_coordinate()
        x1, y1 = self.x1 + X, self.y1 + Y
        self.add_line((x1, y1), (X, Y))
        #sink connector
        sink = self.get_sink()
        X, Y = sink.get_connector_coordinate()
        x2, y2 = self.x2 + X, self.y2 + Y
        self.add_line((x2, y2), (X, Y))
        #adjust arrow (translate the precalculated triangle to the sink)
        self._arrow = [(x+X, y+Y) for x,y in self.arrow]
        #add the horizontal and vertical lines in this connection
        if abs(source.get_connector_direction() - sink.get_connector_direction()) == 180:
            #2 possible point sets to create a 3-line connector
            mid_x, mid_y = (x1 + x2)/2.0, (y1 + y2)/2.0
            points = [((mid_x, y1), (mid_x, y2)), ((x1, mid_y), (x2, mid_y))]
            #source connector -> points[0][0] should be in the direction of source (if possible)
            if Utils.get_angle_from_coordinates((x1, y1), points[0][0]) != source.get_connector_direction(): points.reverse()
            #points[0][0] -> sink connector should not be in the direction of sink
            if Utils.get_angle_from_coordinates(points[0][0], (x2, y2)) == sink.get_connector_direction(): points.reverse()
            #points[0][0] -> source connector should not be in the direction of source
            if Utils.get_angle_from_coordinates(points[0][0], (x1, y1)) == source.get_connector_direction(): points.reverse()
            #create 3-line connector
            p1, p2 = map(int, points[0][0]), map(int, points[0][1])
            self.add_line((x1, y1), p1)
            self.add_line(p1, p2)
            self.add_line((x2, y2), p2)
        else:
            #2 possible points to create a right-angled connector
            points = [(x1, y2), (x2, y1)]
            #source connector -> points[0] should be in the direction of source (if possible)
            if Utils.get_angle_from_coordinates((x1, y1), points[0]) != source.get_connector_direction(): points.reverse()
            #points[0] -> sink connector should not be in the direction of sink
            if Utils.get_angle_from_coordinates(points[0], (x2, y2)) == sink.get_connector_direction(): points.reverse()
            #points[0] -> source connector should not be in the direction of source
            if Utils.get_angle_from_coordinates(points[0], (x1, y1)) == source.get_connector_direction(): points.reverse()
            #create right-angled connector
            self.add_line((x1, y1), points[0])
            self.add_line((x2, y2), points[0])

    def draw(self, gc, window):
        """
        Draw the connection.

        Args:
            gc: the graphics context
            window: the gtk window to draw on
        """
        sink = self.get_sink()
        source = self.get_source()
        #check for changes; a rotation requires a full reshape, a move only a recompute
        if self._sink_rot != sink.get_rotation() or self._source_rot != source.get_rotation(): self.create_shapes()
        elif self._sink_coor != sink.get_coordinate() or self._source_coor != source.get_coordinate():
            try:
                self._update_after_move()
            except:
                return
        #cache values
        self._sink_rot = sink.get_rotation()
        self._source_rot = source.get_rotation()
        self._sink_coor = sink.get_coordinate()
        self._source_coor = source.get_coordinate()
        #draw: wire color encodes the highlighted and enabled states
        if self.is_highlighted(): border_color = Colors.HIGHLIGHT_COLOR
        elif self.get_enabled(): border_color = Colors.CONNECTION_ENABLED_COLOR
        else: border_color = Colors.CONNECTION_DISABLED_COLOR
        # make message connections dashed (no areas here)
        normal_line_style = gc.line_style
        if source.get_type() == "message": gc.line_style = gtk.gdk.LINE_ON_OFF_DASH
        Element.draw(self, gc, window, bg_color=None, border_color=border_color)
        gc.line_style = normal_line_style # restore line style
        #draw arrow on sink port
        try:
            gc.set_foreground(self._arrow_color)
            window.draw_polygon(gc, True, self._arrow)
        except:
            # _arrow/_arrow_color may be unset if create_shapes bailed early
            return
|
sudo87/cm_api | refs/heads/master | cm_api/endpoints/types.py | 2 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import copy
import datetime
import time
__docformat__ = "epytext"
class Attr(object):
    """
    Encapsulates information about an attribute in the JSON encoding of the
    object. It identifies properties of the attribute such as whether it's
    read-only, its type, etc.
    """
    DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"

    def __init__(self, atype=None, rw=True, is_api_list=False):
        self._atype = atype
        self._is_api_list = is_api_list
        self.rw = rw

    def to_json(self, value, preserve_ro):
        """
        Returns the JSON encoding of the given attribute value.

        Objects providing 'to_json_dict' serialize themselves; otherwise:
          - ApiConfig-typed dictionaries become an ApiConfig list
          - datetime.datetime becomes an API-format date string
          - lists/tuples become a JSON list (or an ApiList wrapper)
          - anything else is passed through unchanged
        """
        if hasattr(value, 'to_json_dict'):
            return value.to_json_dict(preserve_ro)
        if isinstance(value, dict) and self._atype == ApiConfig:
            return config_to_api_list(value)
        if isinstance(value, datetime.datetime):
            return value.strftime(self.DATE_FMT)
        if isinstance(value, (list, tuple)):
            if self._is_api_list:
                return ApiList(value).to_json_dict()
            return [ self.to_json(item, preserve_ro) for item in value ]
        return value

    def from_json(self, resource_root, data):
        """
        Parses the given JSON value into an appropriate python object:
          - a datetime.datetime if 'atype' is datetime.datetime
          - a converted config dictionary or config list if 'atype' is ApiConfig
          - an ApiList of 'atype' instances when the attr is an API list
          - a python list of decoded members when the input is a list
          - an instance of 'atype' when it defines 'from_json_dict'
          - the raw value otherwise
        """
        if data is None:
            return None
        if self._atype == datetime.datetime:
            return datetime.datetime.strptime(data, self.DATE_FMT)
        if self._atype == ApiConfig:
            # ApiConfig is special: summary views decode to a plain dict,
            # full views to an ApiList. Sniff the JSON items to tell them
            # apart (summary entries have exactly two keys).
            if not data['items']:
                return { }
            first = data['items'][0]
            return json_to_config(data, len(first) == 2)
        if self._is_api_list:
            return ApiList.from_json_dict(data, resource_root, self._atype)
        if isinstance(data, list):
            return [ self.from_json(resource_root, elem) for elem in data ]
        if hasattr(self._atype, 'from_json_dict'):
            return self._atype.from_json_dict(data, resource_root)
        return data
class ROAttr(Attr):
    """
    Subclass that just defines the attribute as read-only.
    """
    def __init__(self, atype=None, is_api_list=False):
        super(ROAttr, self).__init__(atype=atype, rw=False, is_api_list=is_api_list)
def check_api_version(resource_root, min_version):
    """
    Checks if the resource_root's API version is at least the given minimum
    version.

    @raise Exception: when the API version in use is older than min_version.
    """
    if min_version > resource_root.version:
        raise Exception("API version %s is required but %s is in use."
                        % (min_version, resource_root.version))
def call(method, path, ret_type,
    ret_is_list=False, data=None, params=None, api_version=1):
    """
    Generic function for calling a resource method and automatically dealing with
    serialization of parameters and deserialization of return values.

    @param method: method to call (must be bound to a resource;
                   e.g., "resource_root.get").
    @param path: the full path of the API method to call.
    @param ret_type: return type of the call.
    @param ret_is_list: whether the return type is an ApiList.
    @param data: Optional data to send as payload to the call.
    @param params: Optional query parameters for the call.
    @param api_version: minimum API version for the call.
    """
    check_api_version(method.im_self, api_version)
    if data is None:
        ret = method(path, params=params)
    else:
        # Serialize the payload; the ApiList-aware Attr handles raw lists.
        payload = json.dumps(Attr(is_api_list=True).to_json(data, False))
        ret = method(path, data=payload, params=params)
    if ret_type is None:
        return None
    if ret_is_list:
        return ApiList.from_json_dict(ret, method.im_self, ret_type)
    if isinstance(ret, list):
        return [ ret_type.from_json_dict(elem, method.im_self) for elem in ret ]
    return ret_type.from_json_dict(ret, method.im_self)
class BaseApiObject(object):
    """
    The BaseApiObject helps with (de)serialization from/to JSON.

    The derived class has two ways of defining custom attributes:
     - Overwriting the '_ATTRIBUTES' field with the attribute dictionary
     - Override the _get_attributes() method, in case static initialization of
       the above field is not possible.

    It's recommended that the _get_attributes() implementation do caching to
    avoid computing the dictionary on every invocation.

    The derived class's constructor must call the base class's init() static
    method. All constructor arguments (aside from self and resource_root) must
    be keywords arguments with default values (typically None), or
    from_json_dict() will not work.
    """
    _ATTRIBUTES = { }
    # Internal fields that __setattr__ accepts without a declared-attribute check.
    _WHITELIST = ( '_resource_root', '_attributes' )

    @classmethod
    def _get_attributes(cls):
        """
        Returns a map of property names to attr instances (or None for default
        attribute behavior) describing the properties of the object.

        By default, this method will return the class's _ATTRIBUTES field.
        Classes can override this method to do custom initialization of the
        attributes when needed.
        """
        return cls._ATTRIBUTES

    @staticmethod
    def init(obj, resource_root, attrs=None):
        """
        Wrapper around the real constructor to avoid issues with the 'self'
        argument. Call like this, from a subclass's constructor:

         - BaseApiObject.init(self, locals())
        """
        # This works around http://bugs.python.org/issue2646
        # We use unicode strings as keys in kwargs.
        str_attrs = { }
        if attrs:
            for k, v in attrs.iteritems():
                # 'self' and 'resource_root' come in via locals(); drop them.
                if k not in ('self', 'resource_root'):
                    str_attrs[k] = v
        BaseApiObject.__init__(obj, resource_root, **str_attrs)

    def __init__(self, resource_root, **attrs):
        """
        Initializes internal state and sets all known writable properties of the
        object to None. Then initializes the properties given in the provided
        attributes dictionary.

        @param resource_root: API resource object.
        @param attrs: optional dictionary of attributes to set. This should only
                      contain r/w attributes.
        """
        self._resource_root = resource_root
        # object.__setattr__ bypasses this class's own __setattr__ checks.
        for name, attr in self._get_attributes().iteritems():
            object.__setattr__(self, name, None)
        if attrs:
            self._set_attrs(attrs, from_json=False)

    def _set_attrs(self, attrs, allow_ro=False, from_json=True):
        """
        Sets all the attributes in the dictionary. Optionally, allows setting
        read-only attributes (e.g. when deserializing from JSON) and skipping
        JSON deserialization of values.
        """
        for k, v in attrs.iteritems():
            attr = self._check_attr(k, allow_ro)
            if attr and from_json:
                v = attr.from_json(self._get_resource_root(), v)
            object.__setattr__(self, k, v)

    def __setattr__(self, name, val):
        # Enforce the declared-attribute and read-only rules for everything
        # outside the whitelist of internal fields.
        if name not in BaseApiObject._WHITELIST:
            self._check_attr(name, False)
        object.__setattr__(self, name, val)

    def _check_attr(self, name, allow_ro):
        # Validates that `name` is a declared attribute (and writable, unless
        # allow_ro); returns its Attr descriptor, which may be None when the
        # attribute uses default behavior.
        if name not in self._get_attributes():
            raise AttributeError('Invalid property %s for class %s.' %
                (name, self.__class__.__name__))
        attr = self._get_attributes()[name]
        if not allow_ro and attr and not attr.rw:
            raise AttributeError('Attribute %s of class %s is read only.' %
                (name, self.__class__.__name__))
        return attr

    def _get_resource_root(self):
        # Accessor for the API root this object was created from.
        return self._resource_root

    def _update(self, api_obj):
        """Copy state from api_obj to this object."""
        if not isinstance(self, api_obj.__class__):
            raise ValueError(
                "Class %s does not derive from %s; cannot update attributes." %
                (self.__class__, api_obj.__class__))
        for name in self._get_attributes().keys():
            try:
                val = getattr(api_obj, name)
                setattr(self, name, val)
            except AttributeError, ignored:
                # The source object may not have every declared attribute set.
                pass

    def to_json_dict(self, preserve_ro=False):
        # Serializes the declared attributes into a plain dict, skipping
        # read-only attributes unless preserve_ro, and omitting None values.
        dic = { }
        for name, attr in self._get_attributes().iteritems():
            if not preserve_ro and attr and not attr.rw:
                continue
            try:
                value = getattr(self, name)
                if value is not None:
                    if attr:
                        dic[name] = attr.to_json(value, preserve_ro)
                    else:
                        dic[name] = value
            except AttributeError:
                pass
        return dic

    def __str__(self):
        """
        Default implementation of __str__. Uses the type name and the first
        attribute retrieved from the attribute map to create the string.
        """
        name = self._get_attributes().keys()[0]
        value = getattr(self, name, None)
        return "<%s>: %s = %s" % (self.__class__.__name__, name, value)

    @classmethod
    def from_json_dict(cls, dic, resource_root):
        # Builds an instance from a decoded JSON dict; read-only attributes
        # are allowed since the data comes from the server.
        obj = cls(resource_root)
        obj._set_attrs(dic, allow_ro=True)
        return obj
class BaseApiResource(BaseApiObject):
    """
    A specialization of BaseApiObject that provides some utility methods for
    resources. This class allows easier serialization / deserialization of
    parameters and return values.
    """

    def _api_version(self):
        """
        Returns the minimum API version for this resource. Defaults to 1.
        """
        return 1

    def _path(self):
        """
        Returns the path to the resource.

        e.g., for a service 'foo' in cluster 'bar', this should return
        '/clusters/bar/services/foo'.
        """
        raise NotImplementedError

    def _require_min_api_version(self, version):
        """
        Raise an exception if the version of the api is less than the given version.

        @param version: The minimum required version.
        """
        actual_version = self._get_resource_root().version
        # The resource itself may demand a higher floor than the caller asked for.
        version = max(version, self._api_version())
        if actual_version < version:
            raise Exception("API version %s is required but %s is in use."
                % (version, actual_version))

    def _cmd(self, command, data=None, params=None, api_version=1):
        """
        Invokes a command on the resource. Commands are expected to be under the
        "commands/" sub-resource.
        """
        return self._post("commands/" + command, ApiCommand,
            data=data, params=params, api_version=api_version)

    def _get_config(self, rel_path, view, api_version=1):
        """
        Retrieves an ApiConfig list from the given relative path.
        """
        self._require_min_api_version(api_version)
        # py2 and/or idiom: send a 'view' query parameter only when one was given.
        params = view and dict(view=view) or None
        resp = self._get_resource_root().get(self._path() + '/' + rel_path,
            params=params)
        # 'full' view responses carry extra metadata per config entry.
        return json_to_config(resp, view == 'full')

    def _update_config(self, rel_path, config, api_version=1):
        # Serializes `config` and PUTs it to the relative path; returns the
        # server's summary view of the updated configuration.
        self._require_min_api_version(api_version)
        resp = self._get_resource_root().put(self._path() + '/' + rel_path,
            data=config_to_json(config))
        return json_to_config(resp, False)

    def _delete(self, rel_path, ret_type, ret_is_list=False, params=None,
        api_version=1):
        # Convenience wrapper: HTTP DELETE on a sub-path of this resource.
        return self._call('delete', rel_path, ret_type, ret_is_list, None, params,
            api_version)

    def _get(self, rel_path, ret_type, ret_is_list=False, params=None,
        api_version=1):
        # Convenience wrapper: HTTP GET on a sub-path of this resource.
        return self._call('get', rel_path, ret_type, ret_is_list, None, params,
            api_version)

    def _post(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
        api_version=1):
        # Convenience wrapper: HTTP POST with an optional payload.
        return self._call('post', rel_path, ret_type, ret_is_list, data, params,
            api_version)

    def _put(self, rel_path, ret_type, ret_is_list=False, data=None, params=None,
        api_version=1):
        # Convenience wrapper: HTTP PUT with an optional payload.
        return self._call('put', rel_path, ret_type, ret_is_list, data, params,
            api_version)

    def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None,
        params=None, api_version=1):
        # Dispatches through the module-level call() helper, using the HTTP
        # verb named by `method` as resolved on the resource root.
        path = self._path()
        if rel_path:
            path += '/' + rel_path
        return call(getattr(self._get_resource_root(), method),
            path,
            ret_type,
            ret_is_list,
            data,
            params,
            api_version)
class ApiList(BaseApiObject):
    """A list of some api object, serialized under the 'items' JSON key."""
    LIST_KEY = "items"

    def __init__(self, objects, resource_root=None, **attrs):
        BaseApiObject.__init__(self, resource_root, **attrs)
        # Bypass checks in BaseApiObject.__setattr__
        object.__setattr__(self, 'objects', objects)

    def __str__(self):
        return "<ApiList>(%d): [%s]" % (
            len(self.objects),
            ", ".join([str(item) for item in self.objects]))

    def to_json_dict(self, preserve_ro=False):
        # Serialize any custom attributes, then the wrapped items under LIST_KEY.
        ret = BaseApiObject.to_json_dict(self, preserve_ro)
        attr = Attr()
        ret[ApiList.LIST_KEY] = [ attr.to_json(x, preserve_ro) for x in self.objects ]
        return ret

    def __len__(self):
        return self.objects.__len__()

    def __iter__(self):
        return self.objects.__iter__()

    def __getitem__(self, i):
        return self.objects.__getitem__(i)

    def __getslice__(self, i, j):
        # BUGFIX: this was misspelled '__getslice' (missing the trailing
        # underscores), so name mangling turned it into a dead private method
        # and the Python 2 slicing protocol never found it. With the correct
        # name, slicing an ApiList now delegates to the wrapped list.
        return self.objects.__getslice__(i, j)

    @classmethod
    def from_json_dict(cls, dic, resource_root, member_cls=None):
        """Deserialize a JSON dict into an ApiList (or subclass) instance."""
        if not member_cls:
            member_cls = cls._MEMBER_CLASS
        attr = Attr(atype=member_cls)
        items = []
        if ApiList.LIST_KEY in dic:
            items = [ attr.from_json(resource_root, x) for x in dic[ApiList.LIST_KEY] ]
        ret = cls(items)
        # If the class declares custom attributes, populate them based on the input
        # dict. The check avoids extra overhead for the common case, where we just
        # have a plain list. _set_attrs() also does not understand the "items"
        # attribute, so it can't be in the input data.
        if cls._ATTRIBUTES:
            if ApiList.LIST_KEY in dic:
                dic = copy.copy(dic)
                del dic[ApiList.LIST_KEY]
            ret._set_attrs(dic, allow_ro=True)
        return ret
class ApiHostRef(BaseApiObject):
    """A lightweight reference to a host, keyed by its host id."""
    _ATTRIBUTES = {
        'hostId' : None,
    }

    def __init__(self, resource_root, hostId=None):
        BaseApiObject.init(self, resource_root, locals())

    def __str__(self):
        return "<ApiHostRef>: %s" % (self.hostId)
class ApiServiceRef(BaseApiObject):
    """A reference to a service, optionally qualified by cluster and peer name."""
    _ATTRIBUTES = {
        'clusterName' : None,
        'serviceName' : None,
        'peerName' : None,
    }

    def __init__(self, resource_root, serviceName=None, clusterName=None,
        peerName=None):
        BaseApiObject.init(self, resource_root, locals())
class ApiClusterRef(BaseApiObject):
    """A reference to a cluster, keyed by its name."""
    _ATTRIBUTES = {
        'clusterName' : None,
    }

    def __init__(self, resource_root, clusterName = None):
        BaseApiObject.init(self, resource_root, locals())
class ApiRoleRef(BaseApiObject):
    """A reference to a role, qualified by its cluster and service."""
    _ATTRIBUTES = {
        'clusterName' : None,
        'serviceName' : None,
        'roleName' : None,
    }

    def __init__(self, resource_root, serviceName=None, roleName=None,
        clusterName=None):
        BaseApiObject.init(self, resource_root, locals())
class ApiRoleConfigGroupRef(BaseApiObject):
    """A reference to a role config group, keyed by its name."""
    _ATTRIBUTES = {
        'roleConfigGroupName' : None,
    }

    def __init__(self, resource_root, roleConfigGroupName=None):
        BaseApiObject.init(self, resource_root, locals())
class ApiCommand(BaseApiObject):
    """A command issued to Cloudera Manager, with fetch/wait/abort helpers."""
    # Id used by the server for commands that completed synchronously;
    # fetch/wait/abort short-circuit for these.
    SYNCHRONOUS_COMMAND_ID = -1

    @classmethod
    def _get_attributes(cls):
        # Built lazily (and cached on the class) because the attribute types
        # reference ApiCommand itself, which does not exist yet at
        # class-definition time.
        if not cls.__dict__.has_key('_ATTRIBUTES'):
            cls._ATTRIBUTES = {
                'id' : ROAttr(),
                'name' : ROAttr(),
                'startTime' : ROAttr(datetime.datetime),
                'endTime' : ROAttr(datetime.datetime),
                'active' : ROAttr(),
                'success' : ROAttr(),
                'resultMessage' : ROAttr(),
                'clusterRef' : ROAttr(ApiClusterRef),
                'serviceRef' : ROAttr(ApiServiceRef),
                'roleRef' : ROAttr(ApiRoleRef),
                'hostRef' : ROAttr(ApiHostRef),
                'children' : ROAttr(ApiCommand, is_api_list=True),
                'parent' : ROAttr(ApiCommand),
                'resultDataUrl' : ROAttr(),
            }
        return cls._ATTRIBUTES

    def __str__(self):
        return "<ApiCommand>: '%s' (id: %s; active: %s; success: %s)" % (
            self.name, self.id, self.active, self.success)

    def _path(self):
        return '/commands/%d' % self.id

    def fetch(self):
        """
        Retrieve updated data about the command from the server.

        @return: A new ApiCommand object.
        """
        if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
            return self

        resp = self._get_resource_root().get(self._path())
        return ApiCommand.from_json_dict(resp, self._get_resource_root())

    def wait(self, timeout=None):
        """
        Wait for command to finish.

        @param timeout: (Optional) Max amount of time (in seconds) to wait. Wait
                        forever by default.
        @return: The final ApiCommand object, containing the last known state.
                 The command may still be running in case of timeout.
        """
        if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
            return self

        SLEEP_SEC = 5  # polling interval

        if timeout is None:
            deadline = None
        else:
            deadline = time.time() + timeout

        while True:
            cmd = self.fetch()
            if not cmd.active:
                return cmd

            if deadline is not None:
                now = time.time()
                if deadline < now:
                    # Timed out: return the last snapshot, possibly still active.
                    return cmd
                else:
                    # Never sleep past the deadline.
                    time.sleep(min(SLEEP_SEC, deadline - now))
            else:
                time.sleep(SLEEP_SEC)

    def abort(self):
        """
        Abort a running command.

        @return: A new ApiCommand object with the updated information.
        """
        if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
            return self

        path = self._path() + '/abort'
        resp = self._get_resource_root().post(path)
        return ApiCommand.from_json_dict(resp, self._get_resource_root())
class ApiBulkCommandList(ApiList):
    """A list of commands from a bulk operation, plus any per-item errors."""
    _ATTRIBUTES = {
        'errors' : ROAttr(),
    }
    _MEMBER_CLASS = ApiCommand
class ApiCommandMetadata(BaseApiObject):
    """Metadata describing an available command and its argument schema."""
    _ATTRIBUTES = {
        'name' : ROAttr(),
        'argSchema' : ROAttr(),
    }

    def __init__(self, resource_root):
        BaseApiObject.init(self, resource_root)

    def __str__(self):
        return "<ApiCommandMetadata>: %s (%s)" % (self.name, self.argSchema)
#
# Metrics.
#
class ApiMetricData(BaseApiObject):
    """Metric reading data."""
    _ATTRIBUTES = {
        'timestamp' : ROAttr(datetime.datetime),
        'value' : ROAttr(),
    }

    def __init__(self, resource_root):
        BaseApiObject.init(self, resource_root)
class ApiMetric(BaseApiObject):
    """Metric information, including its readings (a list of ApiMetricData)."""
    _ATTRIBUTES = {
        'name' : ROAttr(),
        'context' : ROAttr(),
        'unit' : ROAttr(),
        'data' : ROAttr(ApiMetricData),
        'displayName' : ROAttr(),
        'description' : ROAttr(),
    }

    def __init__(self, resource_root):
        BaseApiObject.init(self, resource_root)
#
# Activities.
#
class ApiActivity(BaseApiObject):
    """A cluster activity (read-only view as reported by the server)."""
    _ATTRIBUTES = {
        'name' : ROAttr(),
        'type' : ROAttr(),
        'parent' : ROAttr(),
        'startTime' : ROAttr(),
        'finishTime' : ROAttr(),
        'id' : ROAttr(),
        'status' : ROAttr(),
        'user' : ROAttr(),
        'group' : ROAttr(),
        'inputDir' : ROAttr(),
        'outputDir' : ROAttr(),
        'mapper' : ROAttr(),
        'combiner' : ROAttr(),
        'reducer' : ROAttr(),
        'queueName' : ROAttr(),
        'schedulerPriority' : ROAttr(),
    }

    def __init__(self, resource_root):
        BaseApiObject.init(self, resource_root)

    def __str__(self):
        return "<ApiActivity>: %s (%s)" % (self.name, self.status)
#
# Replication
#
class ApiCmPeer(BaseApiObject):
    """A Cloudera Manager peer used for cross-deployment replication."""
    _ATTRIBUTES = {
        'name' : None,
        'url' : None,
        'username' : None,
        'password' : None,
    }

    def __str__(self):
        # NOTE(review): the label says "<ApiPeer>" while the class is
        # ApiCmPeer — confirm whether the label should be updated.
        return "<ApiPeer>: %s (%s)" % (self.name, self.url)
class ApiLicensedFeatureUsage(BaseApiObject):
    """Usage of licensed features, in total and broken down per cluster."""
    _ATTRIBUTES = {
        'totals' : ROAttr(),
        'clusters' : ROAttr(),
    }
class ApiHdfsReplicationArguments(BaseApiObject):
    """Arguments for an HDFS replication (distcp-style copy) job."""
    _ATTRIBUTES = {
        'sourceService' : Attr(ApiServiceRef),
        'sourcePath' : None,
        'destinationPath' : None,
        'mapreduceServiceName' : None,
        'userName' : None,
        'numMaps' : None,
        'dryRun' : None,
        'schedulerPoolName' : None,
        'abortOnError' : None,
        'preservePermissions' : None,
        'preserveBlockSize' : None,
        'preserveReplicationCount' : None,
        'removeMissingFiles' : None,
        'skipChecksumChecks' : None,
        'skipTrash' : None,
        'replicationStrategy' : None,
        'preserveXAttrs' : None,
    }
class ApiHdfsReplicationResult(BaseApiObject):
    """Read-only result counters and status for an HDFS replication job."""
    _ATTRIBUTES = {
        'progress' : ROAttr(),
        'counters' : ROAttr(),
        'numBytesDryRun' : ROAttr(),
        'numFilesDryRun' : ROAttr(),
        'numFilesExpected' : ROAttr(),
        'numBytesExpected' : ROAttr(),
        'numFilesCopied' : ROAttr(),
        'numBytesCopied' : ROAttr(),
        'numFilesSkipped' : ROAttr(),
        'numBytesSkipped' : ROAttr(),
        'numFilesDeleted' : ROAttr(),
        'numFilesCopyFailed' : ROAttr(),
        'numBytesCopyFailed' : ROAttr(),
        'setupError' : ROAttr(),
        'jobId' : ROAttr(),
        'jobDetailsUri' : ROAttr(),
        'dryRun' : ROAttr(),
        'snapshottedDirs' : ROAttr(),
    }
class ApiHiveTable(BaseApiObject):
    """Identifies a Hive table by database and table name."""
    _ATTRIBUTES = {
        'database' : None,
        'tableName' : None,
    }

    def __str__(self):
        return "<ApiHiveTable>: %s, %s" % (self.database, self.tableName)
class ApiImpalaUDF(BaseApiObject):
    """Identifies an Impala UDF by database and signature."""
    _ATTRIBUTES = {
        'database' : ROAttr(),
        'signature' : ROAttr(),
    }

    def __str__(self):
        return "<ApiImpalaUDF>: %s, %s" % (self.database, self.signature)
class ApiHiveReplicationArguments(BaseApiObject):
    """Arguments for a Hive replication job (metadata and optional data copy)."""
    _ATTRIBUTES = {
        'sourceService' : Attr(ApiServiceRef),
        'tableFilters' : Attr(ApiHiveTable),
        'exportDir' : None,
        'force' : None,
        'replicateData' : None,
        # Nested HDFS arguments used when table data is replicated too.
        'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
        'dryRun' : None,
        'replicateImpalaMetadata' : None,
    }
class ApiHiveReplicationResult(BaseApiObject):
    """Read-only result of a Hive replication job, including any data copy."""
    _ATTRIBUTES = {
        'tableCount' : ROAttr(),
        'tables' : ROAttr(ApiHiveTable),
        'impalaUDFCount' : ROAttr(),
        'impalaUDFs' : ROAttr(ApiImpalaUDF),
        'errorCount' : ROAttr(),
        'errors' : ROAttr(),
        'dataReplicationResult' : ROAttr(ApiHdfsReplicationResult),
        'dryRun' : ROAttr(),
        'phase' : ROAttr(),
    }
class ApiReplicationCommand(ApiCommand):
    """An ApiCommand extended with HDFS/Hive replication results."""
    @classmethod
    def _get_attributes(cls):
        # Lazily merges the replication result attributes with the base
        # command attributes, caching the merged dict on this class.
        if not cls.__dict__.has_key('_ATTRIBUTES'):
            attrs = {
                'hdfsResult' : ROAttr(ApiHdfsReplicationResult),
                'hiveResult' : ROAttr(ApiHiveReplicationResult),
            }
            attrs.update(ApiCommand._get_attributes())
            cls._ATTRIBUTES = attrs
        return cls._ATTRIBUTES
class ApiReplicationSchedule(BaseApiObject):
    """A recurring replication schedule and its (read-only) run history."""
    _ATTRIBUTES = {
        'startTime' : Attr(datetime.datetime),
        'endTime' : Attr(datetime.datetime),
        'interval' : None,
        'intervalUnit' : None,
        'paused' : None,
        # Exactly which arguments apply depends on the replicated service type.
        'hdfsArguments' : Attr(ApiHdfsReplicationArguments),
        'hiveArguments' : Attr(ApiHiveReplicationArguments),
        'alertOnStart' : None,
        'alertOnSuccess' : None,
        'alertOnFail' : None,
        'alertOnAbort' : None,
        'id' : ROAttr(),
        'nextRun' : ROAttr(datetime.datetime),
        'history' : ROAttr(ApiReplicationCommand),
    }
class ApiHBaseSnapshotPolicyArguments(BaseApiObject):
    """HBase-specific arguments for a snapshot policy."""
    _ATTRIBUTES = {
        'tableRegExps' : None,
        'storage' : None,
    }
class ApiHdfsSnapshotPolicyArguments(BaseApiObject):
    """HDFS-specific arguments for a snapshot policy."""
    _ATTRIBUTES = {
        'pathPatterns' : None,
    }
class ApiHBaseSnapshot(BaseApiObject):
    """An HBase table snapshot."""
    _ATTRIBUTES = {
        'snapshotName' : None,
        'tableName' : None,
        'creationTime' : ROAttr(datetime.datetime),
        'storage' : None,
    }
class ApiHBaseSnapshotError(BaseApiObject):
    """An error encountered while creating or deleting an HBase snapshot."""
    _ATTRIBUTES = {
        'tableName' : ROAttr(),
        'snapshotName' : ROAttr(),
        'error' : ROAttr(),
        'storage' : ROAttr(),
    }
class ApiHdfsSnapshot(BaseApiObject):
    """An HDFS directory snapshot."""
    _ATTRIBUTES = {
        'path' : None,
        'snapshotName' : None,
        'snapshotPath' : None,
        'creationTime' : ROAttr(datetime.datetime),
    }
class ApiHdfsSnapshotError(BaseApiObject):
    """An error encountered while creating or deleting an HDFS snapshot."""
    _ATTRIBUTES = {
        'path' : ROAttr(),
        'snapshotName' : ROAttr(),
        'snapshotPath' : ROAttr(),
        'error' : ROAttr(),
    }
class ApiHBaseSnapshotResult(BaseApiObject):
    """Read-only result of an HBase snapshot command."""
    _ATTRIBUTES = {
        'processedTableCount' : ROAttr(),
        'processedTables' : ROAttr(),
        'unprocessedTableCount' : ROAttr(),
        'unprocessedTables' : ROAttr(),
        'createdSnapshotCount' : ROAttr(),
        'createdSnapshots' : ROAttr(ApiHBaseSnapshot),
        'deletedSnapshotCount' : ROAttr(),
        'deletedSnapshots' : ROAttr(ApiHBaseSnapshot),
        'creationErrorCount' : ROAttr(),
        'creationErrors' : ROAttr(ApiHBaseSnapshotError),
        'deletionErrorCount' : ROAttr(),
        'deletionErrors' : ROAttr(ApiHBaseSnapshotError),
    }
class ApiHdfsSnapshotResult(BaseApiObject):
    """Read-only result of an HDFS snapshot command."""
    _ATTRIBUTES = {
        'processedPathCount' : ROAttr(),
        'processedPaths' : ROAttr(),
        'unprocessedPathCount' : ROAttr(),
        'unprocessedPaths' : ROAttr(),
        'createdSnapshotCount' : ROAttr(),
        'createdSnapshots' : ROAttr(ApiHdfsSnapshot),
        'deletedSnapshotCount' : ROAttr(),
        'deletedSnapshots' : ROAttr(ApiHdfsSnapshot),
        'creationErrorCount' : ROAttr(),
        'creationErrors' : ROAttr(ApiHdfsSnapshotError),
        'deletionErrorCount' : ROAttr(),
        'deletionErrors' : ROAttr(ApiHdfsSnapshotError),
    }
class ApiSnapshotCommand(BaseApiObject):
    """A snapshot command result, carrying HDFS and/or HBase outcomes."""
    @classmethod
    def _get_attributes(cls):
        # Lazily merges the snapshot result attributes with ApiCommand's
        # attribute set, caching the merged dict on this class.
        # NOTE(review): this class extends BaseApiObject yet reuses
        # ApiCommand's attributes — confirm it shouldn't subclass ApiCommand
        # like ApiReplicationCommand does.
        if not cls.__dict__.has_key('_ATTRIBUTES'):
            attrs = {
                'hdfsResult' : ROAttr(ApiHdfsSnapshotResult),
                'hbaseResult' : ROAttr(ApiHBaseSnapshotResult),
            }
            attrs.update(ApiCommand._get_attributes())
            cls._ATTRIBUTES = attrs
        return cls._ATTRIBUTES
class ApiSnapshotPolicy(BaseApiObject):
  """
  @type name: str
  @ivar name: Name of the snapshot policy.
  @type description: str
  @ivar description: Description of the snapshot policy.
  @type hourly_snapshots: int
  @ivar hourly_snapshots: Number of hourly snapshots to be retained (default: 0).
  @type daily_snapshots: int
  @ivar daily_snapshots: Number of daily snapshots to be retained (default: 0).
  @type weekly_snapshots: int
  @ivar weekly_snapshots: Number of weekly snapshots to be retained (default: 0).
  @type monthly_snapshots: int
  @ivar monthly_snapshots: Number of monthly snapshots to be retained (default: 0).
  @type yearly_snapshots: int
  @ivar yearly_snapshots: Number of yearly snapshots to be retained (default: 0).
  @type hours_for_hourly_snapshots: list of int
  @ivar hours_for_hourly_snapshots: Hours of the day that hourly snapshots should be created.
         Valid values are 0 to 23. If this list is empty, then hourly snapshots are
         created for every hour.
  @type minute_of_hour: int
  @ivar minute_of_hour: Minute in the hour that hourly, daily, weekly, monthly and yearly
         snapshots should be created. Valid values are 0 to 59 (default: 0).
  @type hour_of_day: int
  @ivar hour_of_day: Hour in the day that daily, weekly, monthly and yearly snapshots should be created.
         Valid values are 0 to 23 (default: 0).
  @type day_of_week: int
  @ivar day_of_week: Day of the week that weekly snapshots should be created.
         Valid values are 1 to 7, 1 representing Sunday (default: 1).
  @type day_of_month: int
  @ivar day_of_month: Day of the month that monthly and yearly snapshots should be created.
         Values from 1 to 31 are allowed. Additionally 0 to -30 can be used to
         specify offsets from the last day of the month (default: 1).
  @type month_of_year: int
  @ivar month_of_year: Month of the year that yearly snapshots should be created.
         Valid values are 1 to 12, 1 representing January (default: 1).
  @ivar alert_on_start: whether to generate alerts on start of snapshot creation/deletion activity.
  @ivar alert_on_success: whether to generate alerts on successful completion of snapshot creation/deletion activity.
  @ivar alert_on_fail: whether to generate alerts on failure of snapshot creation/deletion activity.
  @ivar alert_on_abort: whether to generate alerts on abort of snapshot creation/deletion activity.
  @type hbaseArguments: ApiHBaseSnapshotPolicyArguments
  @ivar hbaseArguments: HBase specific arguments for the snapshot policy.
  @type hdfsArguments: ApiHdfsSnapshotPolicyArguments
  @ivar hdfsArguments: HDFS specific arguments for the snapshot policy.
  """
  # Keys are the camelCase names used on the wire by the API.
  _ATTRIBUTES = {
    'name' : None,
    'description' : None,
    'hourlySnapshots' : None,
    'dailySnapshots' : None,
    'weeklySnapshots' : None,
    'monthlySnapshots' : None,
    'yearlySnapshots' : None,
    'minuteOfHour' : None,
    'hourOfDay' : None,
    'dayOfWeek' : None,
    'dayOfMonth' : None,
    'monthOfYear' : None,
    'hoursForHourlySnapshots' : None,
    'alertOnStart' : None,
    'alertOnSuccess' : None,
    'alertOnFail' : None,
    'alertOnAbort' : None,
    'hbaseArguments' : Attr(ApiHBaseSnapshotPolicyArguments),
    'hdfsArguments' : Attr(ApiHdfsSnapshotPolicyArguments),
    # Server-populated status of the most recent (successful) runs.
    'lastCommand' : ROAttr(ApiSnapshotCommand),
    'lastSuccessfulCommand' : ROAttr(ApiSnapshotCommand),
  }
#
# Batch.
#
class ApiBatchRequestElement(BaseApiObject):
  """One element in a batch request."""
  _ATTRIBUTES = {
    'method' : None,        # HTTP method for this sub-request
    'url' : None,
    'body' : None,
    'contentType' : None,
    'acceptType' : None,
  }
class ApiBatchResponseElement(BaseApiObject):
  """One element in a batch response."""
  _ATTRIBUTES = {
    'statusCode' : ROAttr(),   # HTTP status of the corresponding sub-request
    'response' : ROAttr(),
  }
class ApiBatchResponseList(ApiList):
  """A list of batch response objects."""
  _ATTRIBUTES = {
    'success' : ROAttr(),   # overall success flag for the whole batch
  }
  _MEMBER_CLASS = ApiBatchResponseElement
#
# Configuration helpers.
#
class ApiConfig(BaseApiObject):
  """A single configuration parameter: writable name/value plus
  server-populated metadata (default, description, validation state...)."""
  _ATTRIBUTES = {
    'name' : None,
    'value' : None,
    'required' : ROAttr(),
    'default' : ROAttr(),
    'displayName' : ROAttr(),
    'description' : ROAttr(),
    'relatedName' : ROAttr(),
    'validationState' : ROAttr(),
    'validationMessage' : ROAttr(),
  }

  def __init__(self, resource_root, name=None, value=None):
    # BaseApiObject.init picks the attribute values out of locals().
    BaseApiObject.init(self, resource_root, locals())

  def __str__(self):
    return "<ApiConfig>: %s = %s" % (self.name, self.value)
class ApiImpalaQuery(BaseApiObject):
  """A single Impala query as reported by the API; all fields are ROAttr
  (server-populated). start/end times are parsed into datetimes."""
  _ATTRIBUTES = {
    'queryId' : ROAttr(),
    'queryState' : ROAttr(),
    'queryType' : ROAttr(),
    'statement' : ROAttr(),
    'database' : ROAttr(),
    'rowsProduced' : ROAttr(),
    'coordinator' : ROAttr(ApiHostRef),
    'user' : ROAttr(),
    'startTime' : ROAttr(datetime.datetime),
    'endTime' : ROAttr(datetime.datetime),
    'detailsAvailable' : ROAttr(),
    'attributes' : ROAttr(),
    'durationMillis' : ROAttr()
  }

  def __str__(self):
    return "<ApiImpalaQuery>: %s" % (self.queryId)
class ApiImpalaQueryResponse(BaseApiObject):
  """Response to an Impala query listing: the queries plus any warnings."""
  _ATTRIBUTES = {
    'queries' : ROAttr(ApiImpalaQuery),
    'warnings' : ROAttr()
  }
class ApiImpalaQueryDetailsResponse(BaseApiObject):
  """Detailed information about a single Impala query."""
  _ATTRIBUTES = {
    'details' : ROAttr()
  }

  def __str__(self):
    # Fixed typo in the repr tag: was "<AipImpalaQueryDetailsResponse>".
    return "<ApiImpalaQueryDetailsResponse> %s" % self.details
class ApiImpalaCancelResponse(BaseApiObject):
  """Response to an Impala query cancel request."""
  _ATTRIBUTES = {
    'warning' : ROAttr()
  }

  def __str__(self):
    return "<ApiImpalaCancelResponse> %s" % self.warning
class ApiImpalaQueryAttribute(BaseApiObject):
  """Metadata about one Impala query attribute (name, type, etc.)."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'displayName' : ROAttr(),
    'supportsHistograms' : ROAttr(),
    'description' : ROAttr()
  }

  def __str__(self):
    # Bug fix: the original referenced a bare `name`, which is not defined in
    # this scope and raised NameError at runtime; the instance attribute was
    # clearly intended.
    return "<ApiImpalaQueryAttribute> %s" % self.name
class ApiMr2AppInformation(BaseApiObject):
  """MapReduce-2-specific information attached to a YARN application."""
  _ATTRIBUTES = {
    'jobState' : ROAttr()
  }

  def __str__(self):
    return "<ApiMr2AppInformation>: %s" % (self.jobState)
class ApiYarnApplication(BaseApiObject):
  """A single YARN application; all fields are server-populated ROAttrs,
  with start/end times parsed into datetimes."""
  _ATTRIBUTES = {
    'applicationId' : ROAttr(),
    'name' : ROAttr(),
    'user' : ROAttr(),
    'startTime' : ROAttr(datetime.datetime),
    'endTime' : ROAttr(datetime.datetime),
    'pool' : ROAttr(),
    'state' : ROAttr(),
    'progress' : ROAttr(),
    'mr2AppInformation' : ROAttr(ApiMr2AppInformation),
    'attributes' : ROAttr(),
  }

  def __str__(self):
    return "<ApiYarnApplication>: %s" % (self.applicationId)
class ApiYarnApplicationResponse(BaseApiObject):
  """Response to a YARN application listing: applications plus warnings."""
  _ATTRIBUTES = {
    'applications' : ROAttr(ApiYarnApplication),
    'warnings' : ROAttr()
  }
class ApiYarnKillResponse(BaseApiObject):
  """Response to a YARN application kill request."""
  _ATTRIBUTES = {
    'warning' : ROAttr()
  }

  def __str__(self):
    return "<ApiYarnKillResponse> %s" % self.warning
class ApiYarnApplicationAttribute(BaseApiObject):
  """Metadata about one YARN application attribute (name, type, etc.)."""
  _ATTRIBUTES = {
    'name' : ROAttr(),
    'type' : ROAttr(),
    'displayName' : ROAttr(),
    'supportsHistograms' : ROAttr(),
    'description' : ROAttr()
  }

  def __str__(self):
    # Bug fix: the original referenced a bare `name`, which is not defined in
    # this scope and raised NameError at runtime; the instance attribute was
    # clearly intended.
    return "<ApiYarnApplicationAttribute> %s" % self.name
def config_to_api_list(dic):
  """
  Converts a python dictionary into a list containing the proper
  ApiConfig encoding for configuration data.

  @param dic: Key-value pairs to convert.
  @return: JSON dictionary of an ApiConfig list (*not* an ApiList).
  """
  config = [ ]
  # items() behaves the same as the Python-2-only iteritems() here and also
  # works under Python 3.
  for k, v in dic.items():
    config.append({ 'name' : k, 'value': v })
  return { ApiList.LIST_KEY : config }
def config_to_json(dic):
  """
  Converts a python dictionary into a JSON payload.

  The payload matches the expected "apiConfig list" type used to update
  configuration parameters using the API.

  @param dic: Key-value pairs to convert.
  @return: String with the JSON-encoded data.
  """
  # Delegates the structural encoding to config_to_api_list().
  return json.dumps(config_to_api_list(dic))
def json_to_config(dic, full = False):
  """
  Converts a JSON-decoded config dictionary to a python dictionary.

  When materializing the full view, the values in the dictionary will be
  instances of ApiConfig, instead of strings.

  @param dic: JSON-decoded config dictionary.
  @param full: Whether to materialize the full view of the config data.
  @return: Python dictionary with config data.
  """
  config = { }
  for entry in dic['items']:
    key = entry['name']
    config[key] = ApiConfig.from_json_dict(entry, None) if full else entry.get('value')
  return config
|
edx-solutions/edx-platform | refs/heads/master | openedx/core/djangoapps/course_date_signals/tests.py | 3 | from datetime import timedelta
import ddt
from unittest.mock import patch
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from . import utils
@ddt.ddt
class SelfPacedDueDatesTests(ModuleStoreTestCase):
    """Tests for utils.spaced_out_sections over a 4-section course."""

    def setUp(self):
        super().setUp()
        self.course = CourseFactory.create()
        for i in range(4):
            ItemFactory(parent=self.course, category="sequential", display_name="Section {}".format(i))

    def _assert_sections_spaced_weekly(self):
        """Assert the 4 visible sections are spread one week apart over a
        4-week expected duration (hidden sections must not shift offsets)."""
        # The expected list and the patching of get_expected_duration were
        # duplicated verbatim in both tests; factored out here.
        expected_sections = [
            (0, 'Section 0', timedelta(days=7)),
            (1, 'Section 1', timedelta(days=14)),
            (2, 'Section 2', timedelta(days=21)),
            (3, 'Section 3', timedelta(days=28)),
        ]
        with patch.object(utils, 'get_expected_duration', return_value=timedelta(weeks=4)):
            actual = [(idx, section.display_name, offset) for (idx, section, offset) in utils.spaced_out_sections(self.course)]
        self.assertEqual(actual, expected_sections)

    def test_basic_spacing(self):
        self._assert_sections_spaced_weekly()

    def test_hidden_sections(self):
        # Staff-only sections should be ignored by the spacing computation.
        for _ in range(2):
            ItemFactory(parent=self.course, category="sequential", visible_to_staff_only=True)
        self._assert_sections_spaced_weekly()
|
HalcyonChimera/osf.io | refs/heads/develop | scripts/osfstorage/usage_audit.py | 2 | """File: usage_audit.py
Find all users and projects where their total usage (current file + deleted files) is >= the set limit
Projects or users can have their GUID whitelisted via `usage_audit whitelist [GUID ...]`
User usage is defined as the total usage of all projects they have > READ access on
Project usage is defined as the total usage of it and all its children
total usage is defined as the sum of the size of all versions associated with X via OsfStorageFileNode and OsfStorageTrashedFileNode
"""
import os
import gc
import json
import logging
import functools
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
import progressbar
from framework.celery_tasks import app as celery_app
from osf.models import TrashedFile, Node
from website import mails
from website.app import init_app
from scripts import utils as scripts_utils
# App must be init'd before django models are imported
init_app(set_backends=True, routes=False)
from osf.models import BaseFileNode, FileVersion, OSFUser, AbstractNode
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
GBs = 1024 ** 3.0  # bytes per GB; float so later divisions yield fractional GBs
USER_LIMIT = 5 * GBs
PROJECT_LIMIT = 5 * GBs
WHITE_LIST_PATH = os.path.join(os.path.dirname(__file__), 'usage_whitelist.json')
# A missing whitelist file is not an error: fall back to an empty whitelist.
try:
    with open(WHITE_LIST_PATH, 'r') as fobj:
        WHITE_LIST = set(json.load(fobj))  # Cast to set for constant time look ups
        logger.info('Loaded whitelist.json from {}'.format(WHITE_LIST_PATH))
except IOError:
    WHITE_LIST = set()
    logger.warning('No whitelist found')
def add_to_white_list(gtg):
    """Merge the given GUIDs into the on-disk whitelist file.

    Only GUIDs not already whitelisted are added.  NOTE: the in-memory
    WHITE_LIST module global is not mutated here; callers in a long-running
    process would need to reload it.
    """
    gtg = set(gtg).difference(WHITE_LIST)
    logger.info('Adding {} to whitelist'.format(gtg))
    updated = WHITE_LIST.union(gtg)
    with open(WHITE_LIST_PATH, 'w') as fobj:
        json.dump(list(updated), fobj)  # Sets are not JSON serializable
    # Bug fix: previously logged the stale WHITE_LIST global, which does not
    # include the GUIDs just written; log the merged set actually persisted.
    logger.info('Whitelist updated to {}'.format(updated))
def get_usage(node):
    """Return [current_usage, deleted_usage] in bytes for ``node`` plus all
    of its primary descendants (recursive)."""
    node_content_type = ContentType.objects.get_for_model(Node)
    vids = [each for each in BaseFileNode.active.filter(provider='osfstorage', target_object_id=node.id, target_content_type=node_content_type).values_list('versions', flat=True) if each]
    # Bug fix: the loop variable was misspelled `eac` while the filter used
    # `each` — a NameError on Python 3, and on Python 2 it silently filtered
    # on the variable leaked from the previous comprehension.
    t_vids = [each for each in TrashedFile.objects.filter(provider='osfstorage', target_object_id=node.id, target_content_type=node_content_type).values_list('versions', flat=True) if each]
    usage = sum([v.size or 0 for v in FileVersion.objects.filter(id__in=vids)])
    trashed_usage = sum([v.size or 0 for v in FileVersion.objects.filter(id__in=t_vids)])
    return list(map(sum, zip(*([(usage, trashed_usage)] + [get_usage(child) for child in node.nodes_primary]))))  # Adds tuples together, map(sum, zip((a, b), (c, d))) -> (a+c, b+d)
def limit_filter(limit, (item, usage)):
    """Note: usage is a tuple(current_usage, deleted_usage)"""
    # Python 2 tuple-unpacking parameter: called with (guid, (used, deleted))
    # pairs via functools.partial(limit_filter, limit) in main().
    # Whitelisted GUIDs never count as offenders, regardless of size.
    return item not in WHITE_LIST and sum(usage) >= limit
def main(send_email=False):
    """Audit storage usage of every top-level node and its contributors.

    Logs each project/user exceeding PROJECT_LIMIT/USER_LIMIT and, when
    ``send_email`` is True and offenders exist, emails the report to support.
    """
    logger.info('Starting Project storage audit')
    lines = []
    projects = {}
    users = defaultdict(lambda: (0, 0))  # guid -> (used, deleted) byte totals
    top_level_nodes = AbstractNode.objects.get_roots()
    progress_bar = progressbar.ProgressBar(maxval=top_level_nodes.count()).start()
    top_level_nodes = top_level_nodes.iterator()
    for i, node in enumerate(top_level_nodes):
        progress_bar.update(i+1)
        if node._id in WHITE_LIST:
            continue  # Dont count whitelisted nodes against users
        projects[node._id] = get_usage(node)
        for contrib in node.contributors:
            # Only charge usage to contributors with edit (> READ) access.
            if node.can_edit(user=contrib):
                users[contrib._id] = tuple(map(sum, zip(users[contrib._id], projects[node._id])))  # Adds tuples together, map(sum, zip((a, b), (c, d))) -> (a+c, b+d)
        if i % 25 == 0:
            gc.collect()  # keep memory bounded while walking a large queryset
    progress_bar.finish()
    for model, collection, limit in ((OSFUser, users, USER_LIMIT), (AbstractNode, projects, PROJECT_LIMIT)):
        for item, (used, deleted) in filter(functools.partial(limit_filter, limit), collection.items()):
            line = '{!r} has exceeded the limit {:.2f}GBs ({}b) with {:.2f}GBs ({}b) used and {:.2f}GBs ({}b) deleted.'.format(model.load(item), limit / GBs, limit, used / GBs, used, deleted / GBs, deleted)
            logger.info(line)
            lines.append(line)
    if lines:
        if send_email:
            logger.info('Sending email...')
            mails.send_mail('support+scripts@osf.io', mails.EMPTY, body='\n'.join(lines), subject='Script: OsfStorage usage audit', can_change_preferences=False,)
        else:
            # NOTE(review): the .format(len(lines)) here has no placeholder
            # in the message — it is a no-op; confirm intent.
            logger.info('send_email is False, not sending email'.format(len(lines)))
        logger.info('{} offending project(s) and user(s) found'.format(len(lines)))
    else:
        logger.info('No offending projects or users found')
@celery_app.task(name='scripts.osfstorage.usage_audit')
def run_main(send_mail=False, white_list=None):
    """Celery entry point for the usage audit.

    NOTE(review): when ``white_list`` is provided, only the whitelist is
    updated and the audit itself is skipped on that invocation — confirm
    this is the intended behavior.
    """
    scripts_utils.add_file_logger(logger, __file__)
    if white_list:
        add_to_white_list(white_list)
    else:
        main(send_mail)
|
shubhdev/openedx | refs/heads/master | pavelib/servers.py | 30 | """
Run and manage servers for local development.
"""
from __future__ import print_function
import sys
import argparse
from paver.easy import *
from .utils.cmd import django_cmd
from .utils.process import run_process, run_multi_processes
DEFAULT_PORT = {"lms": 8000, "studio": 8001}
DEFAULT_SETTINGS = 'devstack'
def run_server(system, settings=None, port=None, skip_assets=False, contracts=False):
    """
    Start the development server for `system` (lms or studio).

    `settings` is the Django settings module (defaults to DEFAULT_SETTINGS);
    `port` defaults to the system's entry in DEFAULT_PORT.  When
    `skip_assets` is True the asset-compilation step is skipped; when
    `contracts` is True the runserver is started with --contracts.
    """
    if system not in ('lms', 'studio'):
        print("System must be either lms or studio", file=sys.stderr)
        exit(1)

    settings = settings or DEFAULT_SETTINGS

    if not skip_assets:
        # Local dev settings use staticfiles to serve assets, so we can skip the collecstatic step
        call_task(
            'pavelib.assets.update_assets',
            args=[system, '--settings={}'.format(settings), '--skip-collect', '--watch'],
        )

    if port is None:
        port = DEFAULT_PORT[system]

    run_args = [settings, 'runserver', '--traceback', '--pythonpath=.', '0.0.0.0:{}'.format(port)]
    if contracts:
        run_args.append("--contracts")
    run_process(django_cmd(system, *run_args))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
    ("port=", "p", "Port"),
    ("fast", "f", "Skip updating assets")
])
def lms(options):
    """
    Run the LMS server.
    """
    run_server(
        'lms',
        settings=getattr(options, 'settings', None),
        port=getattr(options, 'port', None),
        skip_assets=getattr(options, 'fast', False),
    )
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
    ("port=", "p", "Port"),
    ("fast", "f", "Skip updating assets")
])
def studio(options):
    """
    Run the Studio server.
    """
    run_server(
        'studio',
        settings=getattr(options, 'settings', None),
        port=getattr(options, 'port', None),
        skip_assets=getattr(options, 'fast', False),
    )
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
@no_help
def devstack(args):
    """
    Start the devstack lms or studio server
    """
    parser = argparse.ArgumentParser(prog='paver devstack')
    parser.add_argument('system', type=str, nargs=1, help="lms or studio")
    parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets")
    parser.add_argument(
        '--no-contracts',
        action='store_true',
        default=False,
        help="Disable contracts. By default, they're enabled in devstack."
    )
    parsed = parser.parse_args(args)
    run_server(
        parsed.system[0],
        settings='devstack',
        skip_assets=parsed.fast,
        contracts=not parsed.no_contracts,
    )
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
])
def celery(options):
    """
    Runs Celery workers.
    """
    worker_settings = getattr(options, 'settings', 'dev_with_worker')
    worker_cmd = django_cmd('lms', worker_settings, 'celery', 'worker', '--loglevel=INFO', '--pythonpath=.')
    run_process(worker_cmd)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings for both LMS and Studio"),
    ("worker_settings=", "w", "Celery worker Django settings"),
    ("fast", "f", "Skip updating assets"),
    ("settings_lms=", "l", "Set LMS only, overriding the value from --settings (if provided)"),
    ("settings_cms=", "c", "Set Studio only, overriding the value from --settings (if provided)"),
])
def run_all_servers(options):
    """
    Runs Celery workers, Studio, and LMS.
    """
    # Per-system settings default to the shared --settings value.
    settings = getattr(options, 'settings', DEFAULT_SETTINGS)
    settings_lms = getattr(options, 'settings_lms', settings)
    settings_cms = getattr(options, 'settings_cms', settings)
    worker_settings = getattr(options, 'worker_settings', 'dev_with_worker')
    fast = getattr(options, 'fast', False)
    if not fast:
        # Build assets for both systems up front, then watch in the background.
        args = ['lms', '--settings={}'.format(settings_lms), '--skip-collect']
        call_task('pavelib.assets.update_assets', args=args)
        args = ['studio', '--settings={}'.format(settings_cms), '--skip-collect']
        call_task('pavelib.assets.update_assets', args=args)
        call_task('pavelib.assets.watch_assets', options={'background': True})
    # Run LMS, Studio, and the worker together until interrupted.
    run_multi_processes([
        django_cmd('lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', "0.0.0.0:{}".format(DEFAULT_PORT['lms'])),
        django_cmd('studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', "0.0.0.0:{}".format(DEFAULT_PORT['studio'])),
        django_cmd('lms', worker_settings, 'celery', 'worker', '--loglevel=INFO', '--pythonpath=.')
    ])
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
    ("settings=", "s", "Django settings"),
])
def update_db():
    """
    Runs syncdb and then migrate.
    """
    # NOTE(review): `options` is not a parameter of this task; it resolves to
    # the global imported via `from paver.easy import *` — confirm intended.
    settings = getattr(options, 'settings', DEFAULT_SETTINGS)
    for system in ('lms', 'cms'):
        sh(django_cmd(system, settings, 'syncdb', '--migrate', '--traceback', '--pythonpath=.'))
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def check_settings(args):
    """
    Checks that the given Django settings module for a system imports cleanly.
    Usage: paver check_settings <system> <settings>
    """
    parser = argparse.ArgumentParser(prog='paver check_settings')
    parser.add_argument('system', type=str, nargs=1, help="lms or studio")
    parser.add_argument('settings', type=str, nargs=1, help='Django settings')
    args = parser.parse_args(args)

    system = args.system[0]
    settings = args.settings[0]

    try:
        import_cmd = "echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
        django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
        sh("{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only shell/command failures should be reported here.
    except Exception:
        print("Failed to import settings", file=sys.stderr)
|
Cinntax/home-assistant | refs/heads/dev | homeassistant/components/opple/light.py | 4 | """Support for the Opple light."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
Light,
)
from homeassistant.const import CONF_HOST, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "opple light"
# Platform configuration: a required host plus an optional display name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Opple light platform."""
    host = config[CONF_HOST]
    light = OppleLight(config[CONF_NAME], host)
    add_entities([light])
    _LOGGER.debug("Init light %s %s", host, light.unique_id)
class OppleLight(Light):
    """Opple light device."""

    def __init__(self, name, host):
        """Initialize an Opple light."""
        # Imported lazily so pyoppleio is only required once the platform
        # is actually set up.
        from pyoppleio.OppleLightDevice import OppleLightDevice

        self._device = OppleLightDevice(host)

        self._name = name
        # Cached device state; None until the first successful update().
        self._is_on = None
        self._brightness = None
        self._color_temp = None  # stored in kelvin; converted in color_temp

    @property
    def available(self):
        """Return True if light is available."""
        return self._device.is_online

    @property
    def unique_id(self):
        """Return unique ID for light."""
        return self._device.mac

    @property
    def name(self):
        """Return the display name of this light."""
        return self._name

    @property
    def is_on(self):
        """Return true if light is on."""
        return self._is_on

    @property
    def brightness(self):
        """Return the brightness of the light."""
        return self._brightness

    @property
    def color_temp(self):
        """Return the color temperature of this light."""
        # Device reports kelvin; Home Assistant expects mireds.
        return kelvin_to_mired(self._color_temp)

    @property
    def min_mireds(self):
        """Return minimum supported color temperature."""
        return 175  # ~5714 K

    @property
    def max_mireds(self):
        """Return maximum supported color temperature."""
        return 333  # ~3003 K

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP

    def turn_on(self, **kwargs):
        """Instruct the light to turn on."""
        _LOGGER.debug("Turn on light %s %s", self._device.ip, kwargs)
        # Only push values that actually change, to avoid redundant writes.
        if not self.is_on:
            self._device.power_on = True

        if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:
            self._device.brightness = kwargs[ATTR_BRIGHTNESS]

        if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:
            # Convert the requested mireds back to kelvin for the device.
            color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
            self._device.color_temperature = color_temp

    def turn_off(self, **kwargs):
        """Instruct the light to turn off."""
        self._device.power_on = False
        _LOGGER.debug("Turn off light %s", self._device.ip)

    def update(self):
        """Synchronize state with light."""
        prev_available = self.available
        self._device.update()

        # Nothing changed since the last poll: skip the logging below.
        if (
            prev_available == self.available
            and self._is_on == self._device.power_on
            and self._brightness == self._device.brightness
            and self._color_temp == self._device.color_temperature
        ):
            return

        if not self.available:
            _LOGGER.debug("Light %s is offline", self._device.ip)
            return

        self._is_on = self._device.power_on
        self._brightness = self._device.brightness
        self._color_temp = self._device.color_temperature

        if not self.is_on:
            _LOGGER.debug("Update light %s success: power off", self._device.ip)
        else:
            _LOGGER.debug(
                "Update light %s success: power on brightness %s "
                "color temperature %s",
                self._device.ip,
                self._brightness,
                self._color_temp,
            )
|
iut-ibk/DynaMind-UrbanSim | refs/heads/master | 3rdparty/opus/src/urbansim_parcel/building/age_masked.py | 2 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from opus_core.simulation_state import SimulationState
from numpy import maximum, ma, logical_not
class age_masked(Variable):
    """The age of the building, computed by subtracting the year built
    from the current simulation year. Entries that have invalid year_built are masked."""
    year_built = "year_built"
    _return_type="float32" #TODO: this is a work around for numpy ndimage would not working with int

    def dependencies(self):
        return [my_attribute_label(self.year_built), my_attribute_label("has_valid_year_built")]

    def compute(self, dataset_pool):
        """Return building ages, masked where year_built is invalid."""
        current_year = SimulationState().get_current_time()
        # Idiom fix: compare to None with `is None` (the original used `== None`).
        if current_year is None:
            # Call-form raise is valid on Python 2 and forward-compatible
            # (the original used the statement form `raise StandardError, ...`).
            raise StandardError("'SimulationState().get_current_time()' returns None.")
        is_year_built = self.get_dataset().get_attribute("has_valid_year_built")
        # Clamp at 0 so buildings "built in the future" don't get negative age.
        building_age = maximum(0, current_year - self.get_dataset().get_attribute(self.year_built))
        return ma.masked_where( logical_not(is_year_built), building_age)

    def post_check(self, values, dataset_pool):
        self.do_check("x >= 0", values)
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        # year_built=200 is below absolute_min_year (1800), so that entry is
        # masked; -999 below is presumably the comparison's masked-fill
        # value — confirm against VariableTester's conventions.
        tester = VariableTester(
            __file__,
            package_order=['urbansim_parcel','urbansim'],
            test_data={
                'building':{
                    'building_id': array([1,2,3,4]),
                    'year_built': array([1995, 2000, 2006, 200])
                },
                'urbansim_constant':{
                    'absolute_min_year': array([1800])
                }
            }
        )
        SimulationState().set_current_time(2005)
        should_be = array([10, 5, 0, -999])
        tester.test_is_equal_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main() |
makinacorpus/odoo | refs/heads/8.0 | addons/l10n_uy/__openerp__.py | 260 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <openerp.uy@lists.launchpad.net>
# Proyecto de Localización de OperERP para Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Uruguay - Chart of Accounts',
'version': '0.1',
'author': 'Uruguay l10n Team & Guillem Barba',
'category': 'Localization/Account Charts',
'website': 'https://launchpad.net/openerp-uruguay',
'description': """
General Chart of Accounts.
==========================
Provide Templates for Chart of Accounts, Taxes for Uruguay.
""",
'license': 'AGPL-3',
'depends': ['account'],
'data': [
'account_types.xml',
'taxes_code_template.xml',
'account_chart_template.xml',
'taxes_template.xml',
'l10n_uy_wizard.xml',
],
'demo': [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
fredericmohr/mitro | refs/heads/master | mitro-mail/build/venv/lib/python2.7/site-packages/tornado/ioloop.py | 7 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, with_statement
import datetime
import errno
import heapq
import os
import logging
import select
import thread
import threading
import time
import traceback
from tornado import stack_context
try:
import signal
except ImportError:
signal = None
from tornado.platform.auto import set_close_exec, Waker
class IOLoop(object):
"""A level-triggered I/O loop.
We use epoll (Linux) or kqueue (BSD and Mac OS X; requires python
2.6+) if they are available, or else we fall back on select(). If
you are implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports either
epoll or queue.
Example usage for a simple TCP server::
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error, e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
    def __init__(self, impl=None):
        # `impl` lets tests/advanced users inject a specific poller object;
        # by default _poll() picks the best available (epoll/kqueue/select).
        self._impl = impl or _poll()
        if hasattr(self._impl, 'fileno'):
            # Don't leak the poller fd into child processes.
            set_close_exec(self._impl.fileno())
        self._handlers = {}            # fd -> wrapped handler callback
        self._events = {}              # fd -> pending event mask
        self._callbacks = []
        self._callback_lock = threading.Lock()
        self._timeouts = []            # heap of scheduled timeouts
        self._running = False
        self._stopped = False
        self._thread_ident = None
        self._blocking_signal_threshold = None

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)
    @staticmethod
    def instance():
        """Returns a global IOLoop instance.

        Most single-threaded applications have a single, global IOLoop.
        Use this method instead of passing around IOLoop instances
        throughout your code.

        A common pattern for classes that depend on IOLoops is to use
        a default argument to enable programs with multiple IOLoops
        but not require the argument for simpler applications::

            class MyClass(object):
                def __init__(self, io_loop=None):
                    self.io_loop = io_loop or IOLoop.instance()
        """
        # Double-checked locking: the unlocked hasattr avoids taking the
        # lock on every call once the singleton exists.
        if not hasattr(IOLoop, "_instance"):
            with IOLoop._instance_lock:
                if not hasattr(IOLoop, "_instance"):
                    # New instance after double check
                    IOLoop._instance = IOLoop()
        return IOLoop._instance
    @staticmethod
    def initialized():
        """Returns true if the singleton instance has been created."""
        return hasattr(IOLoop, "_instance")
    def install(self):
        """Installs this IOloop object as the singleton instance.

        This is normally not necessary as `instance()` will create
        an IOLoop on demand, but you may want to call `install` to use
        a custom subclass of IOLoop.
        """
        # Must be called before anything triggers instance(); otherwise the
        # default singleton already exists and this assertion fires.
        assert not IOLoop.initialized()
        IOLoop._instance = self
    def close(self, all_fds=False):
        """Closes the IOLoop, freeing any resources used.

        If ``all_fds`` is true, all file descriptors registered on the
        IOLoop will be closed (not just the ones created by the IOLoop itself).

        Many applications will only use a single IOLoop that runs for the
        entire lifetime of the process.  In that case closing the IOLoop
        is not necessary since everything will be cleaned up when the
        process exits.  `IOLoop.close` is provided mainly for scenarios
        such as unit tests, which create and destroy a large number of
        IOLoops.

        An IOLoop must be completely stopped before it can be closed.  This
        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
        be allowed to return before attempting to call `IOLoop.close()`.
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.
        """
        self.remove_handler(self._waker.fileno())
        if all_fds:
            # Iterate over a copy: os.close can trigger handler removal
            # which mutates self._handlers.  (Python 2: keys() is a list.)
            for fd in self._handlers.keys()[:]:
                try:
                    os.close(fd)
                except Exception:
                    # Best effort; the fd may already be closed elsewhere.
                    logging.debug("error closing fd %s", fd, exc_info=True)
        self._waker.close()
        self._impl.close()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for fd."""
self._handlers[fd] = stack_context.wrap(handler)
self._impl.register(fd, events | self.ERROR)
    def update_handler(self, fd, events):
        """Changes the events we listen for fd."""
        # ERROR is always implicitly included, mirroring add_handler.
        self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
"""Stop listening for events on fd."""
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except (OSError, IOError):
logging.debug("Error deleting fd from IOLoop", exc_info=True)
    def set_blocking_signal_threshold(self, seconds, action):
        """Sends a signal if the ioloop is blocked for more than s seconds.

        Pass seconds=None to disable.  Requires python 2.6 on a unixy
        platform.

        The action parameter is a python signal handler.  Read the
        documentation for the python 'signal' module for more information.
        If action is None, the process will be killed if it is blocked for
        too long.
        """
        if not hasattr(signal, "setitimer"):
            # setitimer is POSIX-only and python 2.6+; degrade gracefully.
            logging.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            # The timer itself is armed/disarmed around poll() in start().
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)
    def set_blocking_log_threshold(self, seconds):
        """Logs a stack trace if the ioloop is blocked for more than s seconds.

        Equivalent to set_blocking_signal_threshold(seconds, self.log_stack)
        """
        self.set_blocking_signal_threshold(seconds, self.log_stack)
    def log_stack(self, signal, frame):
        """Signal handler to log the stack trace of the current thread.

        For use with set_blocking_signal_threshold.

        NOTE(review): the `signal` parameter shadows the global ``signal``
        module.  Harmless here because the body never touches the module,
        but the name is kept for backward compatibility with any keyword
        callers.
        """
        logging.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_signal_threshold,
                        ''.join(traceback.format_stack(frame)))
    def start(self):
        """Starts the I/O loop.

        The loop will run until one of the I/O handlers calls stop(), which
        will make the loop stop after the current event iteration completes.
        """
        if self._stopped:
            # stop() was called before start(); consume the flag and return
            # immediately so start/stop pairs work in either order.
            self._stopped = False
            return
        self._thread_ident = thread.get_ident()
        self._running = True
        while True:
            poll_timeout = 3600.0
            # Prevent IO event starvation by delaying new callbacks
            # to the next iteration of the event loop.
            with self._callback_lock:
                callbacks = self._callbacks
                self._callbacks = []
            for callback in callbacks:
                self._run_callback(callback)
            if self._timeouts:
                now = time.time()
                while self._timeouts:
                    if self._timeouts[0].callback is None:
                        # the timeout was cancelled
                        heapq.heappop(self._timeouts)
                    elif self._timeouts[0].deadline <= now:
                        timeout = heapq.heappop(self._timeouts)
                        self._run_callback(timeout.callback)
                    else:
                        # Earliest timeout is in the future: don't sleep
                        # past it in poll().
                        seconds = self._timeouts[0].deadline - now
                        poll_timeout = min(seconds, poll_timeout)
                        break
            if self._callbacks:
                # If any callbacks or timeouts called add_callback,
                # we don't want to wait in poll() before we run them.
                poll_timeout = 0.0
            if not self._running:
                break
            if self._blocking_signal_threshold is not None:
                # clear alarm so it doesn't fire while poll is waiting for
                # events.
                signal.setitimer(signal.ITIMER_REAL, 0, 0)
            try:
                event_pairs = self._impl.poll(poll_timeout)
            except Exception, e:
                # Depending on python version and IOLoop implementation,
                # different exception types may be thrown and there are
                # two ways EINTR might be signaled:
                # * e.errno == errno.EINTR
                # * e.args is like (errno.EINTR, 'Interrupted system call')
                if (getattr(e, 'errno', None) == errno.EINTR or
                    (isinstance(getattr(e, 'args', None), tuple) and
                     len(e.args) == 2 and e.args[0] == errno.EINTR)):
                    continue
                else:
                    raise
            if self._blocking_signal_threshold is not None:
                # Re-arm the "blocked too long" alarm for handler execution.
                signal.setitimer(signal.ITIMER_REAL,
                                 self._blocking_signal_threshold, 0)
            # Pop one fd at a time from the set of pending fds and run
            # its handler. Since that handler may perform actions on
            # other file descriptors, there may be reentrant calls to
            # this IOLoop that update self._events
            self._events.update(event_pairs)
            while self._events:
                fd, events = self._events.popitem()
                try:
                    self._handlers[fd](fd, events)
                except (OSError, IOError), e:
                    if e.args[0] == errno.EPIPE:
                        # Happens when the client closes the connection
                        pass
                    else:
                        logging.error("Exception in I/O handler for fd %s",
                                      fd, exc_info=True)
                except Exception:
                    logging.error("Exception in I/O handler for fd %s",
                                  fd, exc_info=True)
        # reset the stopped flag so another start/stop pair can be issued
        self._stopped = False
        if self._blocking_signal_threshold is not None:
            # Disarm the alarm now that the loop is no longer running.
            signal.setitimer(signal.ITIMER_REAL, 0, 0)
    def stop(self):
        """Stop the loop after the current event loop iteration is complete.

        If the event loop is not currently running, the next call to start()
        will return immediately.

        To use asynchronous methods from otherwise-synchronous code (such as
        unit tests), you can start and stop the event loop like this::

            ioloop = IOLoop()
            async_method(ioloop=ioloop, callback=ioloop.stop)
            ioloop.start()

        ioloop.start() will return after async_method has run its callback,
        whether that callback was invoked before or after ioloop.start.

        Note that even after `stop` has been called, the IOLoop is not
        completely stopped until `IOLoop.start` has also returned.
        """
        self._running = False
        self._stopped = True
        # Wake the loop if it is sleeping in poll() so it notices the flags.
        self._waker.wake()
    def running(self):
        """Returns true if this IOLoop is currently running."""
        return self._running
    def add_timeout(self, deadline, callback):
        """Calls the given callback at the time deadline from the I/O loop.

        Returns a handle that may be passed to remove_timeout to cancel.

        ``deadline`` may be a number denoting a unix timestamp (as returned
        by ``time.time()``) or a ``datetime.timedelta`` object for a deadline
        relative to the current time.

        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        IOLoop's thread, and then call `add_timeout` from there.
        """
        timeout = _Timeout(deadline, stack_context.wrap(callback))
        # Timeouts live in a min-heap ordered by deadline (see _Timeout).
        heapq.heappush(self._timeouts, timeout)
        return timeout
    def remove_timeout(self, timeout):
        """Cancels a pending timeout.

        The argument is a handle as returned by add_timeout.
        """
        # Removing from a heap is complicated, so just leave the defunct
        # timeout object in the queue (see discussion in
        # http://docs.python.org/library/heapq.html).
        # If this turns out to be a problem, we could add a garbage
        # collection pass whenever there are too many dead timeouts.
        # start() recognizes callback=None as "cancelled" and discards it.
        timeout.callback = None
    def add_callback(self, callback):
        """Calls the given callback on the next I/O loop iteration.

        It is safe to call this method from any thread at any time.
        Note that this is the *only* method in IOLoop that makes this
        guarantee; all other interaction with the IOLoop must be done
        from that IOLoop's thread.  add_callback() may be used to transfer
        control from other threads to the IOLoop's thread.
        """
        with self._callback_lock:
            # Remember whether ours is the first pending callback; only
            # then can the loop possibly be asleep in poll().
            list_empty = not self._callbacks
            self._callbacks.append(stack_context.wrap(callback))
        if list_empty and thread.get_ident() != self._thread_ident:
            # If we're in the IOLoop's thread, we know it's not currently
            # polling.  If we're not, and we added the first callback to an
            # empty list, we may need to wake it up (it may wake up on its
            # own, but an occasional extra wake is harmless).  Waking
            # up a polling IOLoop is relatively expensive, so we try to
            # avoid it when we can.
            self._waker.wake()
    def _run_callback(self, callback):
        # Run a scheduled callback, routing any exception through the
        # overridable handle_callback_exception hook instead of letting
        # it kill the event loop.
        try:
            callback()
        except Exception:
            self.handle_callback_exception(callback)
    def handle_callback_exception(self, callback):
        """This method is called whenever a callback run by the IOLoop
        throws an exception.

        By default simply logs the exception as an error.  Subclasses
        may override this method to customize reporting of exceptions.

        The exception itself is not passed explicitly, but is available
        in sys.exc_info.
        """
        logging.error("Exception in callback %r", callback, exc_info=True)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback']
def __init__(self, deadline, callback):
if isinstance(deadline, (int, long, float)):
self.deadline = deadline
elif isinstance(deadline, datetime.timedelta):
self.deadline = time.time() + _Timeout.timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r" % deadline)
self.callback = callback
@staticmethod
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, id(self)) <
(other.deadline, id(other)))
def __le__(self, other):
return ((self.deadline, id(self)) <=
(other.deadline, id(other)))
class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every callback_time milliseconds.
    `start` must be called after the PeriodicCallback is created.
    """

    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        self.callback_time = callback_time  # interval, in milliseconds
        self.io_loop = io_loop or IOLoop.instance()
        self._running = False
        self._timeout = None

    def start(self):
        """Starts the timer."""
        self._running = True
        self._next_timeout = time.time()
        self._schedule_next()

    def stop(self):
        """Stops the timer."""
        self._running = False
        pending = self._timeout
        if pending is not None:
            self.io_loop.remove_timeout(pending)
            self._timeout = None

    def _run(self):
        if not self._running:
            # stop() raced with an already-fired timeout; ignore the call.
            return
        try:
            self.callback()
        except Exception:
            logging.error("Error in periodic callback", exc_info=True)
        self._schedule_next()

    def _schedule_next(self):
        if not self._running:
            return
        now = time.time()
        # Skip whole periods we are already past so a slow callback does
        # not cause a burst of catch-up invocations.
        while self._next_timeout <= now:
            self._next_timeout += self.callback_time / 1000.0
        self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
class _EPoll(object):
    """An epoll-based event loop using our C module for Python 2.5 systems"""

    # Values for the epoll_ctl `op` argument (see epoll_ctl(2)).
    _EPOLL_CTL_ADD = 1
    _EPOLL_CTL_DEL = 2
    _EPOLL_CTL_MOD = 3

    def __init__(self):
        self._epoll_fd = epoll.epoll_create()

    def fileno(self):
        return self._epoll_fd

    def close(self):
        os.close(self._epoll_fd)

    def register(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events)

    def modify(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_MOD, fd, events)

    def unregister(self, fd):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_DEL, fd, 0)

    def poll(self, timeout):
        # epoll_wait takes milliseconds; `timeout` arrives in seconds.
        return epoll.epoll_wait(self._epoll_fd, int(timeout * 1000))
class _KQueue(object):
    """A kqueue-based event loop for BSD/Mac systems."""

    def __init__(self):
        self._kqueue = select.kqueue()
        # fd -> registered event mask; needed because kqueue has no
        # in-place "modify" operation (see modify()).
        self._active = {}

    def fileno(self):
        return self._kqueue.fileno()

    def close(self):
        self._kqueue.close()

    def register(self, fd, events):
        if fd in self._active:
            raise IOError("fd %d already registered" % fd)
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events

    def modify(self, fd, events):
        # kqueue cannot modify an existing registration: delete + re-add.
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        events = self._active.pop(fd)
        self._control(fd, events, select.KQ_EV_DELETE)

    def _control(self, fd, events, flags):
        # Translate our READ/WRITE mask into kqueue filters and apply
        # `flags` (KQ_EV_ADD or KQ_EV_DELETE) for each.
        kevents = []
        if events & IOLoop.WRITE:
            kevents.append(select.kevent(
                    fd, filter=select.KQ_FILTER_WRITE, flags=flags))
        if events & IOLoop.READ or not kevents:
            # Always read when there is not a write
            kevents.append(select.kevent(
                    fd, filter=select.KQ_FILTER_READ, flags=flags))
        # Even though control() takes a list, it seems to return EINVAL
        # on Mac OS X (10.6) when there is more than one event in the list.
        for kevent in kevents:
            self._kqueue.control([kevent], 0)

    def poll(self, timeout):
        kevents = self._kqueue.control(None, 1000, timeout)
        events = {}
        for kevent in kevents:
            fd = kevent.ident
            if kevent.filter == select.KQ_FILTER_READ:
                events[fd] = events.get(fd, 0) | IOLoop.READ
            if kevent.filter == select.KQ_FILTER_WRITE:
                if kevent.flags & select.KQ_EV_EOF:
                    # If an asynchronous connection is refused, kqueue
                    # returns a write event with the EOF flag set.
                    # Turn this into an error for consistency with the
                    # other IOLoop implementations.
                    # Note that for read events, EOF may be returned before
                    # all data has been consumed from the socket buffer,
                    # so we only check for EOF on write events.
                    events[fd] = IOLoop.ERROR
                else:
                    events[fd] = events.get(fd, 0) | IOLoop.WRITE
            if kevent.flags & select.KQ_EV_ERROR:
                events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()
class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""

    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def close(self):
        # select() holds no OS resources beyond the fd sets themselves.
        pass

    def register(self, fd, events):
        if any(fd in fds for fds in self.fd_sets):
            raise IOError("fd %d already registered" % fd)
        if events & IOLoop.READ:
            self.read_fds.add(fd)
        if events & IOLoop.WRITE:
            self.write_fds.add(fd)
        if events & IOLoop.ERROR:
            self.error_fds.add(fd)
            # Closed connections are reported as errors by epoll and kqueue,
            # but as zero-byte reads by select, so when errors are requested
            # we need to listen for both read and error.
            self.read_fds.add(fd)

    def modify(self, fd, events):
        # No in-place update with select(): drop and re-add the fd.
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        for fds in self.fd_sets:
            fds.discard(fd)

    def poll(self, timeout):
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        # Merge the three result lists into a single fd -> event-mask map.
        for fds, mask in ((readable, IOLoop.READ),
                          (writeable, IOLoop.WRITE),
                          (errors, IOLoop.ERROR)):
            for fd in fds:
                events[fd] = events.get(fd, 0) | mask
        return events.items()
# Choose a poll implementation. Use epoll if it is available, fall back to
# select() for non-Linux platforms
if hasattr(select, "epoll"):
    # Python 2.6+ on Linux
    _poll = select.epoll
elif hasattr(select, "kqueue"):
    # Python 2.6+ on BSD or Mac
    _poll = _KQueue
else:
    try:
        # Linux systems with our C module installed
        from tornado import epoll
        _poll = _EPoll
    except Exception:
        # All other systems
        import sys
        if "linux" in sys.platform:
            # select() scales poorly compared to epoll; warn operators
            # that the C extension is missing on a Linux host.
            logging.warning("epoll module not found; using select()")
        _poll = _Select
|
savanu/servo | refs/heads/master | components/script/dom/bindings/codegen/parser/tests/test_newobject.py | 68 | # Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
    """Tests for the [NewObject] extended attribute.

    Positive case: [NewObject] on its own is accepted on attributes and
    methods.  Negative cases: it must not be combined with [Pure],
    [Cached] or [StoreInSlot].
    """
    # Basic functionality
    parser.parse(
        """
        interface Iface {
          [NewObject] readonly attribute Iface attr;
          [NewObject] Iface method();
        };
        """)
    results = parser.finish()
    harness.ok(results, "Should not have thrown on basic [NewObject] usage")

    def assert_throws(idl, message):
        # Each negative case runs against a freshly reset parser.
        # Catch Exception rather than a bare `except:` so that
        # KeyboardInterrupt/SystemExit are not silently swallowed.
        fresh = parser.reset()
        threw = False
        try:
            fresh.parse(idl)
            fresh.finish()
        except Exception:
            threw = True
        harness.ok(threw, message)

    assert_throws(
        """
        interface Iface {
          [Pure, NewObject] readonly attribute Iface attr;
        };
        """,
        "[NewObject] attributes must depend on something")

    assert_throws(
        """
        interface Iface {
          [Pure, NewObject] Iface method();
        };
        """,
        "[NewObject] methods must depend on something")

    assert_throws(
        """
        interface Iface {
          [Cached, NewObject, Affects=Nothing] readonly attribute Iface attr;
        };
        """,
        "[NewObject] attributes must not be [Cached]")

    assert_throws(
        """
        interface Iface {
          [StoreInSlot, NewObject, Affects=Nothing] readonly attribute Iface attr;
        };
        """,
        "[NewObject] attributes must not be [StoreInSlot]")
|
vied12/superdesk | refs/heads/master | server/worker.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from logging.handlers import SysLogHandler

from app import get_app
from settings import LOG_SERVER_ADDRESS, LOG_SERVER_PORT

# Route all log records through the central syslog server configured in
# settings.
logging.basicConfig(handlers=[SysLogHandler(address=(LOG_SERVER_ADDRESS, LOG_SERVER_PORT))])

# Module-level Celery app; NOTE(review): presumably the celery CLI imports
# this module to find the `celery` attribute — confirm against the
# deployment/worker startup scripts.
celery = get_app().celery
|
was4444/chromium.src | refs/heads/nw15 | third_party/cython/src/Cython/Compiler/FlowControl.py | 90 | import cython
cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object,
Builtin=object, InternalError=object,
error=object, warning=object,
py_object_type=object, unspecified_type=object,
object_expr=object, object_expr_not_none=object,
fake_rhs_expr=object, TypedExprNode=object)
import Builtin
import ExprNodes
import Nodes
import Options
from PyrexTypes import py_object_type, unspecified_type
import PyrexTypes
from Visitor import TreeVisitor, CythonTransform
from Errors import error, warning, InternalError
class TypedExprNode(ExprNodes.ExprNode):
    # Used for declaring assignments of a specified type without a known entry.
    def __init__(self, type, may_be_none=None, pos=None):
        super(TypedExprNode, self).__init__(pos)
        self.type = type
        # Tri-state: True = may be None, False = never None, None = unknown.
        self._may_be_none = may_be_none

    def may_be_none(self):
        # Only an explicit False means "definitely not None"; both True and
        # None (unknown) report as possibly-None.  Use an identity check
        # rather than `!= False` so falsy non-bool values (e.g. 0) are not
        # accidentally treated as "not None" (flake8 E712).
        return self._may_be_none is not False
# Shared placeholder expressions used when recording assignments whose
# right-hand side is synthetic rather than a real AST node.
object_expr = TypedExprNode(py_object_type, may_be_none=True)
object_expr_not_none = TypedExprNode(py_object_type, may_be_none=False)
# Fake rhs to silence "unused variable" warning
fake_rhs_expr = TypedExprNode(unspecified_type)
class ControlBlock(object):
    """Control flow graph node.  Sequence of assignments and name references.

       children  set of children nodes
       parents   set of parent nodes
       positions set of position markers
       stats     list of block statements
       gen       dict of assignments generated by this block
       bounded   set of entries that are definitely bounded in this block

       Example:

        a = 1
        b = a + c # 'c' is already bounded or exception here

        stats = [Assignment(a), NameReference(a), NameReference(c),
                     Assignment(b)]
        gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
        bounded = set([Entry(a), Entry(c)])
    """

    def __init__(self):
        self.children = set()
        self.parents = set()
        self.positions = set()
        self.stats = []
        self.gen = {}
        self.bounded = set()
        # Bitset slots used by the reaching-definitions fixed point.
        self.i_input = self.i_output = 0
        self.i_gen = self.i_kill = 0
        self.i_state = 0

    def empty(self):
        # A block with neither statements nor source positions can be
        # spliced out of the graph by ControlFlow.normalize().
        return not (self.stats or self.positions)

    def detach(self):
        """Unlink this block from the graph in both directions."""
        for node in self.children:
            node.parents.remove(self)
        for node in self.parents:
            node.children.remove(self)
        self.children.clear()
        self.parents.clear()

    def add_child(self, block):
        self.children.add(block)
        block.parents.add(self)
class ExitBlock(ControlBlock):
    """Non-empty exit point block."""

    def empty(self):
        # The exit point must never be pruned by normalize(), so it always
        # reports non-empty.
        return False
class AssignmentList(object):
    # Per-entry record created in ControlFlow.initialize(): collects all
    # NameAssignments for one entry plus their bitmask (`bit`/`mask`
    # attributes are attached there).
    def __init__(self):
        self.stats = []
class ControlFlow(object):
    """Control-flow graph.

       entry_point ControlBlock entry point for this graph
       exit_point  ControlBlock normal exit point
       block       ControlBlock current block
       blocks      set          children nodes
       entries     set          tracked entries
       loops       list         stack for loop descriptors
       exceptions  list         stack for exception descriptors
    """

    def __init__(self):
        self.blocks = set()
        self.entries = set()
        self.loops = []
        self.exceptions = []
        self.entry_point = ControlBlock()
        self.exit_point = ExitBlock()
        self.blocks.add(self.exit_point)
        self.block = self.entry_point

    def newblock(self, parent=None):
        """Create a floating block linked to `parent` if given.

           The block is added to self.blocks, but does NOT become the
           current block (self.block is unchanged).
        """
        block = ControlBlock()
        self.blocks.add(block)
        if parent:
            parent.add_child(block)
        return block

    def nextblock(self, parent=None):
        """Create a new block and make it the current block.

           Linked to `parent` if given, otherwise to the previous current
           block.  The block is added to self.blocks.
        """
        block = ControlBlock()
        self.blocks.add(block)
        if parent:
            parent.add_child(block)
        elif self.block:
            self.block.add_child(block)
        self.block = block
        return self.block

    def is_tracked(self, entry):
        # Only local-ish, named entries participate in flow analysis.
        if entry.is_anonymous:
            return False
        return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or
                entry.from_closure or entry.in_closure or
                entry.error_on_uninitialized)

    def is_statically_assigned(self, entry):
        if (entry.is_local and entry.is_variable and
                (entry.type.is_struct_or_union or
                 entry.type.is_complex or
                 entry.type.is_array or
                 entry.type.is_cpp_class)):
            # stack allocated structured variable => never uninitialised
            return True
        return False

    def mark_position(self, node):
        """Mark position, will be used to draw graph nodes."""
        if self.block:
            self.block.positions.add(node.pos[:2])

    def mark_assignment(self, lhs, rhs, entry):
        # Record "entry is assigned here"; it becomes the block's current
        # generating assignment for this entry.
        if self.block and self.is_tracked(entry):
            assignment = NameAssignment(lhs, rhs, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = assignment
            self.entries.add(entry)

    def mark_argument(self, lhs, rhs, entry):
        # Like mark_assignment, but records an Argument (is_arg=True).
        if self.block and self.is_tracked(entry):
            assignment = Argument(lhs, rhs, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = assignment
            self.entries.add(entry)

    def mark_deletion(self, node, entry):
        # `del name` resets the entry to the Uninitialized sentinel.
        if self.block and self.is_tracked(entry):
            assignment = NameDeletion(node, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = Uninitialized
            self.entries.add(entry)

    def mark_reference(self, node, entry):
        if self.block and self.is_tracked(entry):
            self.block.stats.append(NameReference(node, entry))
            ## XXX: We don't track expression evaluation order so we can't use
            ## XXX: successful reference as initialization sign.
            ## # Local variable is definitely bound after this reference
            ## if not node.allow_null:
            ##     self.block.bounded.add(entry)
            self.entries.add(entry)

    def normalize(self):
        """Delete unreachable and orphan blocks."""
        # Traverse from the entry point; anything not visited is dead.
        queue = set([self.entry_point])
        visited = set()
        while queue:
            root = queue.pop()
            visited.add(root)
            for child in root.children:
                if child not in visited:
                    queue.add(child)
        unreachable = self.blocks - visited
        for block in unreachable:
            block.detach()
        visited.remove(self.entry_point)
        for block in visited:
            if block.empty():
                # Splice empty blocks out, re-linking their parents
                # directly to their children.
                for parent in block.parents: # Re-parent
                    for child in block.children:
                        parent.add_child(child)
                block.detach()
                unreachable.add(block)
        self.blocks -= unreachable

    def initialize(self):
        """Set initial state, map assignments to bits."""
        # Each entry gets one "maybe uninitialized" bit plus one bit per
        # distinct assignment; block gen/kill sets are bitmasks over these.
        self.assmts = {}

        bit = 1
        for entry in self.entries:
            assmts = AssignmentList()
            assmts.mask = assmts.bit = bit
            self.assmts[entry] = assmts
            bit <<= 1

        for block in self.blocks:
            for stat in block.stats:
                if isinstance(stat, NameAssignment):
                    stat.bit = bit
                    assmts = self.assmts[stat.entry]
                    assmts.stats.append(stat)
                    assmts.mask |= bit
                    bit <<= 1

        for block in self.blocks:
            for entry, stat in block.gen.items():
                assmts = self.assmts[entry]
                if stat is Uninitialized:
                    block.i_gen |= assmts.bit
                else:
                    block.i_gen |= stat.bit
                block.i_kill |= assmts.mask
            block.i_output = block.i_gen
            for entry in block.bounded:
                block.i_kill |= self.assmts[entry].bit

        # At the entry point every entry starts in the "uninitialized" state.
        for assmts in self.assmts.itervalues():
            self.entry_point.i_gen |= assmts.bit
        self.entry_point.i_output = self.entry_point.i_gen

    def map_one(self, istate, entry):
        # Decode bitset `istate` into the set of assignments (or sentinel
        # classes) that may reach this point for `entry`.
        ret = set()
        assmts = self.assmts[entry]
        if istate & assmts.bit:
            if self.is_statically_assigned(entry):
                ret.add(StaticAssignment(entry))
            elif entry.from_closure:
                ret.add(Unknown)
            else:
                ret.add(Uninitialized)
        for assmt in assmts.stats:
            if istate & assmt.bit:
                ret.add(assmt)
        return ret

    def reaching_definitions(self):
        """Per-block reaching definitions analysis."""
        # Standard iterative dataflow fixed point: repeat until no block's
        # output bitset changes (monotone, so this terminates).
        dirty = True
        while dirty:
            dirty = False
            for block in self.blocks:
                i_input = 0
                for parent in block.parents:
                    i_input |= parent.i_output
                i_output = (i_input & ~block.i_kill) | block.i_gen
                if i_output != block.i_output:
                    dirty = True
                block.i_input = i_input
                block.i_output = i_output
class LoopDescr(object):
    # Book-keeping for one enclosing loop while building the CFG:
    #   next_block - block following the loop (`break` target)
    #   loop_block - loop header (`continue` target)
    #   exceptions - exception descriptors entered inside the loop
    def __init__(self, next_block, loop_block):
        self.next_block = next_block
        self.loop_block = loop_block
        self.exceptions = []
class ExceptionDescr(object):
    """Exception handling helper.

    entry_point   ControlBlock Exception handling entry point
    finally_enter ControlBlock Normal finally clause entry point
    finally_exit  ControlBlock Normal finally clause exit point

    finally_enter/finally_exit stay None when there is no finally clause.
    """

    def __init__(self, entry_point, finally_enter=None, finally_exit=None):
        self.entry_point = entry_point
        self.finally_enter = finally_enter
        self.finally_exit = finally_exit
class NameAssignment(object):
    """A single assignment to `entry`, recorded as a CFG statement."""

    def __init__(self, lhs, rhs, entry):
        # Lazily give the lhs node a cf_state set for flow annotations.
        if lhs.cf_state is None:
            lhs.cf_state = set()
        self.lhs = lhs
        self.rhs = rhs
        self.entry = entry
        self.pos = lhs.pos
        self.refs = set()          # NameReferences this assignment reaches
        self.is_arg = False
        self.is_deletion = False
        self.inferred_type = None  # filled in by infer_type()

    def __repr__(self):
        return '%s(entry=%r)' % (type(self).__name__, self.entry)

    def infer_type(self):
        self.inferred_type = self.rhs.infer_type(self.entry.scope)
        return self.inferred_type

    def type_dependencies(self):
        return self.rhs.type_dependencies(self.entry.scope)

    @property
    def type(self):
        # Prefer the declared type; fall back to the inferred one.
        if self.entry.type.is_unspecified:
            return self.inferred_type
        return self.entry.type
class StaticAssignment(NameAssignment):
    """Initialised at declaration time, e.g. stack allocation."""

    def __init__(self, entry):
        # Only Python-object entries have an unknown None-ness; everything
        # else is known to never be None.
        may_be_none = None if entry.type.is_pyobject else False
        lhs = TypedExprNode(entry.type, may_be_none=may_be_none, pos=entry.pos)
        super(StaticAssignment, self).__init__(lhs, lhs, entry)

    def infer_type(self):
        # The declared type is authoritative for static assignments.
        return self.entry.type

    def type_dependencies(self):
        return ()
class Argument(NameAssignment):
    # Assignment produced by binding a function argument on entry;
    # distinguished from ordinary assignments via is_arg.
    def __init__(self, lhs, rhs, entry):
        NameAssignment.__init__(self, lhs, rhs, entry)
        self.is_arg = True
class NameDeletion(NameAssignment):
    """A `del name` statement, modelled as an assignment with is_deletion."""

    def __init__(self, lhs, entry):
        NameAssignment.__init__(self, lhs, lhs, entry)
        self.is_deletion = True

    def infer_type(self):
        inferred_type = self.rhs.infer_type(self.entry.scope)
        if (not inferred_type.is_pyobject and
                inferred_type.can_coerce_to_pyobject(self.entry.scope)):
            # A deletable non-object that can coerce is treated as a
            # generic Python object.
            inferred_type = py_object_type
        # Fix: cache the result on *both* paths.  Previously the
        # py_object_type branch returned without updating
        # self.inferred_type, so the `type` property (inherited from
        # NameAssignment) could see a stale None.
        self.inferred_type = inferred_type
        return inferred_type
class Uninitialized(object):
    """Sentinel: definitely not initialised yet (used as a class, never
    instantiated)."""


class Unknown(object):
    """Sentinel: coming from outer closure, might be initialised or not."""
class NameReference(object):
    """A read of `entry`, recorded as a CFG statement."""

    def __init__(self, node, entry):
        # Lazily give the AST node a cf_state set for flow annotations.
        if node.cf_state is None:
            node.cf_state = set()
        self.node = node
        self.entry = entry
        self.pos = node.pos

    def __repr__(self):
        return '%s(entry=%r)' % (type(self).__name__, self.entry)
class ControlFlowState(list):
    # Keeps track of Node's entry assignments
    #
    # cf_is_null    [boolean] It is uninitialized
    # cf_maybe_null [boolean] May be uninitialized
    # is_single     [boolean] Has only one assignment at this point

    cf_maybe_null = False
    cf_is_null = False
    is_single = False

    def __init__(self, state):
        if Uninitialized in state:
            state.discard(Uninitialized)
            self.cf_maybe_null = True
            if not state:
                # No real assignment can reach here at all.
                self.cf_is_null = True
        elif Unknown in state:
            state.discard(Unknown)
            self.cf_maybe_null = True
        else:
            # Note: is_single is only claimed when no sentinel was present,
            # i.e. exactly one *definite* assignment reaches this point.
            if len(state) == 1:
                self.is_single = True
        # XXX: Remove fake_rhs_expr
        super(ControlFlowState, self).__init__(
            [i for i in state if i.rhs is not fake_rhs_expr])

    def one(self):
        # Only meaningful when is_single is true.
        return self[0]
class GVContext(object):
    """Graphviz subgraph object: accumulates subgraphs, assigns stable
    node ids and renders the enclosing digraph."""

    def __init__(self):
        self.blockids = {}   # block -> 'blockN' id, assigned lazily
        self.nextid = 0
        self.children = []
        self.sources = {}    # source descriptor -> cached list of lines

    def add(self, child):
        self.children.append(child)

    def nodeid(self, block):
        # Ids are handed out in first-seen order and then reused.
        try:
            return self.blockids[block]
        except KeyError:
            name = 'block%d' % self.nextid
            self.blockids[block] = name
            self.nextid += 1
            return name

    def extract_sources(self, block):
        # Build a label from the source lines covered by the block's
        # recorded positions; source files are read once and cached.
        if not block.positions:
            return ''
        start = min(block.positions)
        stop = max(block.positions)
        srcdescr = start[0]
        if srcdescr not in self.sources:
            self.sources[srcdescr] = list(srcdescr.get_lines())
        lines = self.sources[srcdescr]
        return '\\n'.join(line.strip() for line in lines[start[1] - 1:stop[1]])

    def render(self, fp, name, annotate_defs=False):
        """Render graphviz dot graph"""
        fp.write('digraph %s {\n' % name)
        fp.write(' node [shape=box];\n')
        for child in self.children:
            child.render(fp, self, annotate_defs)
        fp.write('}\n')

    def escape(self, text):
        return text.replace('"', '\\"').replace('\n', '\\n')
class GV(object):
    """Graphviz DOT renderer for a single flow graph (one subgraph)."""

    def __init__(self, name, flow):
        self.name = name
        self.flow = flow

    def render(self, fp, ctx, annotate_defs=False):
        """Write this flow graph as a subgraph into `fp`, using node ids
        and source caching from `ctx` (a GVContext)."""
        fp.write(' subgraph %s {\n' % self.name)
        for block in self.flow.blocks:
            label = ctx.extract_sources(block)
            if annotate_defs:
                # Append one line per definition/reference in the block.
                for stat in block.stats:
                    if isinstance(stat, NameAssignment):
                        label += '\n %s [definition]' % stat.entry.name
                    elif isinstance(stat, NameReference):
                        if stat.entry:
                            label += '\n %s [reference]' % stat.entry.name
            if not label:
                label = 'empty'
            pid = ctx.nodeid(block)
            fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label)))
        for block in self.flow.blocks:
            pid = ctx.nodeid(block)
            for child in block.children:
                fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
        fp.write(' }\n')
class MessageCollection(object):
    """Collect error/warning messages first, then report them sorted by
    source position (errors before warnings at the same position)."""

    def __init__(self):
        self.messages = []

    def error(self, pos, message):
        # Stored as (pos, is_error, text) so a plain sort orders correctly.
        self.messages.append((pos, True, message))

    def warning(self, pos, message):
        self.messages.append((pos, False, message))

    def report(self):
        self.messages.sort()
        for pos, is_error, message in self.messages:
            if is_error:
                error(pos, message)
            else:
                warning(pos, message, 2)
def check_definitions(flow, compiler_directives):
    """Run reaching-definitions analysis over *flow* and report problems.

    Annotates assignment targets and name references with control-flow
    hints (cf_state, cf_is_null, cf_maybe_null, cf_used) and emits
    "referenced before assignment" / "unused" diagnostics controlled by
    the ``warn.*`` compiler directives.
    """
    flow.initialize()
    flow.reaching_definitions()

    # Track down state
    assignments = set()
    # Node to entry map
    references = {}
    assmt_nodes = set()

    # Walk every basic block, threading the bit-set of reaching
    # assignments (i_state) through its statements in order.
    for block in flow.blocks:
        i_state = block.i_input
        for stat in block.stats:
            i_assmts = flow.assmts[stat.entry]
            state = flow.map_one(i_state, stat.entry)
            if isinstance(stat, NameAssignment):
                stat.lhs.cf_state.update(state)
                assmt_nodes.add(stat.lhs)
                # An assignment kills all earlier assignments to this entry...
                i_state = i_state & ~i_assmts.mask
                if stat.is_deletion:
                    # ...a deletion re-marks the entry as uninitialized.
                    i_state |= i_assmts.bit
                else:
                    i_state |= stat.bit
                assignments.add(stat)
                # fake_rhs_expr marks synthetic assignments (e.g. address-of
                # workarounds) that should not count as real ones.
                if stat.rhs is not fake_rhs_expr:
                    stat.entry.cf_assignments.append(stat)
            elif isinstance(stat, NameReference):
                references[stat.node] = stat.entry
                stat.entry.cf_references.append(stat)
                stat.node.cf_state.update(state)
                ## if not stat.node.allow_null:
                ##     i_state &= ~i_assmts.bit
                ## # after successful read, the state is known to be initialised
                state.discard(Uninitialized)
                state.discard(Unknown)
                # Link each reaching assignment to this reference.
                for assmt in state:
                    assmt.refs.add(stat)

    # Check variable usage
    warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized']
    warn_unused_result = compiler_directives['warn.unused_result']
    warn_unused = compiler_directives['warn.unused']
    warn_unused_arg = compiler_directives['warn.unused_arg']

    messages = MessageCollection()

    # assignment hints: derive null-ness flags for each assignment target.
    for node in assmt_nodes:
        if Uninitialized in node.cf_state:
            node.cf_maybe_null = True
            if len(node.cf_state) == 1:
                node.cf_is_null = True
            else:
                node.cf_is_null = False
        elif Unknown in node.cf_state:
            node.cf_maybe_null = True
        else:
            node.cf_is_null = False
            node.cf_maybe_null = False

    # Find uninitialized references and cf-hints
    # NOTE(review): dict.iteritems() is Python 2 only -- this module
    # predates Py3 support of the compiler itself.
    for node, entry in references.iteritems():
        if Uninitialized in node.cf_state:
            node.cf_maybe_null = True
            if not entry.from_closure and len(node.cf_state) == 1:
                node.cf_is_null = True
            if (node.allow_null or entry.from_closure
                    or entry.is_pyclass_attr or entry.type.is_error):
                pass  # Can be uninitialized here
            elif node.cf_is_null:
                # Definitely read before any assignment: error for object
                # typed (or unspecified) entries, warning otherwise.
                if entry.error_on_uninitialized or (
                        Options.error_on_uninitialized and (
                        entry.type.is_pyobject or entry.type.is_unspecified)):
                    messages.error(
                        node.pos,
                        "local variable '%s' referenced before assignment"
                        % entry.name)
                else:
                    messages.warning(
                        node.pos,
                        "local variable '%s' referenced before assignment"
                        % entry.name)
            elif warn_maybe_uninitialized:
                messages.warning(
                    node.pos,
                    "local variable '%s' might be referenced before assignment"
                    % entry.name)
        elif Unknown in node.cf_state:
            # TODO: better cross-closure analysis to know when inner functions
            # are being called before a variable is being set, and when
            # a variable is known to be set before even defining the
            # inner function, etc.
            node.cf_maybe_null = True
        else:
            node.cf_is_null = False
            node.cf_maybe_null = False

    # Unused result: assignments whose value is never read.
    for assmt in assignments:
        if (not assmt.refs and not assmt.entry.is_pyclass_attr
                and not assmt.entry.in_closure):
            if assmt.entry.cf_references and warn_unused_result:
                if assmt.is_arg:
                    messages.warning(assmt.pos, "Unused argument value '%s'" %
                                     assmt.entry.name)
                else:
                    messages.warning(assmt.pos, "Unused result in '%s'" %
                                     assmt.entry.name)
            assmt.lhs.cf_used = False

    # Unused entries: names never referenced at all.
    for entry in flow.entries:
        if (not entry.cf_references
                and not entry.is_pyclass_attr):
            if entry.name != '_':
                # '_' is often used for unused variables, e.g. in loops
                if entry.is_arg:
                    if warn_unused_arg:
                        messages.warning(entry.pos, "Unused argument '%s'" %
                                         entry.name)
                else:
                    if warn_unused:
                        messages.warning(entry.pos, "Unused entry '%s'" %
                                         entry.name)
            entry.cf_used = False

    messages.report()

    # Freeze the mutable per-node state sets into ControlFlowState objects.
    for node in assmt_nodes:
        node.cf_state = ControlFlowState(node.cf_state)
    for node in references:
        node.cf_state = ControlFlowState(node.cf_state)
class AssignmentCollector(TreeVisitor):
    """Collect (lhs, rhs) pairs from all assignment statements in a subtree.

    Used by ControlFlowAnalysis.visit_ParallelAssignmentNode to visit all
    right-hand sides before marking the left-hand side assignments.
    """

    def __init__(self):
        super(AssignmentCollector, self).__init__()
        # (lhs, rhs) tuples in visit order.
        self.assignments = []

    def visit_Node(self, node):
        # Generic fallback: just recurse into children.
        # BUG FIX: the original was `def visit_Node(self)` and passed `self`
        # to _visitchildren; visitor dispatch invokes handlers as
        # handler(node), so hitting this fallback raised a TypeError and
        # would have recursed into the visitor object instead of the node.
        self._visitchildren(node, None)

    def visit_SingleAssignmentNode(self, node):
        self.assignments.append((node.lhs, node.rhs))

    def visit_CascadedAssignmentNode(self, node):
        # a = b = ... = rhs: one entry per target, sharing the same rhs.
        for lhs in node.lhs_list:
            self.assignments.append((lhs, node.rhs))
class ControlFlowAnalysis(CythonTransform):
    """Tree transform that builds a basic-block ControlFlow graph for the
    module and for each function, runs check_definitions() over every graph,
    and optionally dumps the graphs in Graphviz DOT format (controlled by
    the ``control_flow.dot_output`` directive).
    """

    def visit_ModuleNode(self, node):
        # Shared context for DOT rendering of all function graphs.
        self.gv_ctx = GVContext()
        # Set of NameNode reductions
        self.reductions = set()
        self.in_inplace_assignment = False
        self.env_stack = []
        self.env = node.scope
        self.stack = []
        self.flow = ControlFlow()
        self.visitchildren(node)
        check_definitions(self.flow, self.current_directives)
        dot_output = self.current_directives['control_flow.dot_output']
        if dot_output:
            annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
            fp = open(dot_output, 'wt')
            try:
                self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
            finally:
                fp.close()
        return node

    def visit_FuncDefNode(self, node):
        # Argument defaults and decorators are evaluated in the outer scope,
        # before switching env/flow to the function's local scope.
        for arg in node.args:
            if arg.default:
                self.visitchildren(arg)
        self.visitchildren(node, ('decorators',))
        self.env_stack.append(self.env)
        self.env = node.local_scope
        self.stack.append(self.flow)
        self.flow = ControlFlow()
        # Collect all entries
        for entry in node.local_scope.entries.values():
            if self.flow.is_tracked(entry):
                self.flow.entries.add(entry)
        self.mark_position(node)
        # Function body block
        self.flow.nextblock()
        for arg in node.args:
            self._visit(arg)
        # *args / **kwargs are always initialised (tuple / dict, never None).
        if node.star_arg:
            self.flow.mark_argument(node.star_arg,
                                    TypedExprNode(Builtin.tuple_type,
                                                  may_be_none=False),
                                    node.star_arg.entry)
        if node.starstar_arg:
            self.flow.mark_argument(node.starstar_arg,
                                    TypedExprNode(Builtin.dict_type,
                                                  may_be_none=False),
                                    node.starstar_arg.entry)
        self._visit(node.body)
        # Workaround for generators
        if node.is_generator:
            self._visit(node.gbody.body)
        # Exit point
        if self.flow.block:
            self.flow.block.add_child(self.flow.exit_point)
        # Cleanup graph
        self.flow.normalize()
        check_definitions(self.flow, self.current_directives)
        self.flow.blocks.add(self.flow.entry_point)
        # Keep this function's graph for optional DOT output.
        self.gv_ctx.add(GV(node.local_scope.name, self.flow))
        # Restore the enclosing scope's flow/env.
        self.flow = self.stack.pop()
        self.env = self.env_stack.pop()
        return node

    def visit_DefNode(self, node):
        node.used = True
        return self.visit_FuncDefNode(node)

    def visit_GeneratorBodyDefNode(self, node):
        # The generator body is handled inside visit_FuncDefNode
        # (see the generator workaround there).
        return node

    def visit_CTypeDefNode(self, node):
        # Type declarations carry no control flow.
        return node

    def mark_assignment(self, lhs, rhs=None):
        """Record an assignment to *lhs* (recursing into unpacking targets).

        No-op while the current block is dead.  When inside a try block,
        edges to the innermost exception entry point are added both before
        and after the assignment.
        """
        if not self.flow.block:
            return
        if self.flow.exceptions:
            exc_descr = self.flow.exceptions[-1]
            self.flow.block.add_child(exc_descr.entry_point)
            self.flow.nextblock()
        if not rhs:
            rhs = object_expr
        if lhs.is_name:
            if lhs.entry is not None:
                entry = lhs.entry
            else:
                entry = self.env.lookup(lhs.name)
            if entry is None:  # TODO: This shouldn't happen...
                return
            self.flow.mark_assignment(lhs, rhs, entry)
        elif isinstance(lhs, ExprNodes.SequenceNode):
            # Tuple/list unpacking: mark each element as assigned.
            for arg in lhs.args:
                self.mark_assignment(arg)
        else:
            # Attribute/subscript targets: only visit for references.
            self._visit(lhs)
        if self.flow.exceptions:
            exc_descr = self.flow.exceptions[-1]
            self.flow.block.add_child(exc_descr.entry_point)
            self.flow.nextblock()

    def mark_position(self, node):
        """Mark position if DOT output is enabled."""
        if self.current_directives['control_flow.dot_output']:
            self.flow.mark_position(node)

    def visit_FromImportStatNode(self, node):
        # Each imported name (except '*') is an assignment to its target.
        for name, target in node.items:
            if name != "*":
                self.mark_assignment(target)
        self.visitchildren(node)
        return node

    def visit_AssignmentNode(self, node):
        raise InternalError("Unhandled assignment node")

    def visit_SingleAssignmentNode(self, node):
        self._visit(node.rhs)
        self.mark_assignment(node.lhs, node.rhs)
        return node

    def visit_CascadedAssignmentNode(self, node):
        self._visit(node.rhs)
        for lhs in node.lhs_list:
            self.mark_assignment(lhs, node.rhs)
        return node

    def visit_ParallelAssignmentNode(self, node):
        # Visit all right-hand sides first, then mark all targets, to match
        # the evaluation order of parallel assignment.
        collector = AssignmentCollector()
        collector.visitchildren(node)
        for lhs, rhs in collector.assignments:
            self._visit(rhs)
        for lhs, rhs in collector.assignments:
            self.mark_assignment(lhs, rhs)
        return node

    def visit_InPlaceAssignmentNode(self, node):
        # The flag suppresses the reduction-variable read error for the
        # lhs read that an in-place op implies (see visit_NameNode).
        self.in_inplace_assignment = True
        self.visitchildren(node)
        self.in_inplace_assignment = False
        self.mark_assignment(node.lhs, node.create_binop_node())
        return node

    def visit_DelStatNode(self, node):
        for arg in node.args:
            if arg.is_name:
                entry = arg.entry or self.env.lookup(arg.name)
                if entry.in_closure or entry.from_closure:
                    error(arg.pos,
                          "can not delete variable '%s' "
                          "referenced in nested scope" % entry.name)
                # Mark reference
                self._visit(arg)
                self.flow.mark_deletion(arg, entry)
            else:
                self._visit(arg)
        return node

    def visit_CArgDeclNode(self, node):
        entry = self.env.lookup(node.name)
        if entry:
            may_be_none = not node.not_none
            self.flow.mark_argument(
                node, TypedExprNode(entry.type, may_be_none), entry)
        return node

    def visit_NameNode(self, node):
        if self.flow.block:
            entry = node.entry or self.env.lookup(node.name)
            if entry:
                self.flow.mark_reference(node, entry)
                if entry in self.reductions and not self.in_inplace_assignment:
                    error(node.pos,
                          "Cannot read reduction variable in loop body")
        return node

    def visit_StatListNode(self, node):
        if self.flow.block:
            for stat in node.stats:
                self._visit(stat)
                if not self.flow.block:
                    # A terminator (return/raise/break/...) killed the block;
                    # the remaining statements are unreachable.
                    stat.is_terminator = True
                    break
        return node

    def visit_Node(self, node):
        # Generic fallback: visit children and record the position.
        self.visitchildren(node)
        self.mark_position(node)
        return node

    def visit_IfStatNode(self, node):
        next_block = self.flow.newblock()
        parent = self.flow.block
        # If clauses
        for clause in node.if_clauses:
            parent = self.flow.nextblock(parent)
            self._visit(clause.condition)
            self.flow.nextblock()
            self._visit(clause.body)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=parent)
            self._visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            parent.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            # All branches terminated; code after the if is unreachable.
            self.flow.block = None
        return node

    def visit_WhileStatNode(self, node):
        condition_block = self.flow.nextblock()
        next_block = self.flow.newblock()
        # Condition block
        self.flow.loops.append(LoopDescr(next_block, condition_block))
        if node.condition:
            self._visit(node.condition)
        # Body block
        self.flow.nextblock()
        self._visit(node.body)
        self.flow.loops.pop()
        # Loop it
        if self.flow.block:
            self.flow.block.add_child(condition_block)
            self.flow.block.add_child(next_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=condition_block)
            self._visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            condition_block.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def mark_forloop_target(self, node):
        # TODO: Remove redundancy with range optimization...
        is_special = False
        sequence = node.iterator.sequence
        target = node.target
        # Unwrap reversed(...) / enumerate(...) builtin calls so the
        # underlying iterable drives the target-type inference below.
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.env.lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name == 'reversed' and len(sequence.args) == 1:
                        sequence = sequence.args[0]
                    elif function.name == 'enumerate' and len(sequence.args) == 1:
                        if target.is_sequence_constructor and len(target.args) == 2:
                            iterator = sequence.args[0]
                            if iterator.is_name:
                                iterator_type = iterator.infer_type(self.env)
                                if iterator_type.is_builtin_type:
                                    # assume that builtin types have a length within Py_ssize_t
                                    self.mark_assignment(
                                        target.args[0],
                                        ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                                          type=PyrexTypes.c_py_ssize_t_type))
                            target = target.args[1]
                            sequence = sequence.args[0]
        # range()/xrange(): the target takes the bound values directly.
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.env.lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name in ('range', 'xrange'):
                        is_special = True
                        for arg in sequence.args[:2]:
                            self.mark_assignment(target, arg)
                        if len(sequence.args) > 2:
                            self.mark_assignment(
                                target,
                                ExprNodes.binop_node(node.pos,
                                                     '+',
                                                     sequence.args[0],
                                                     sequence.args[2]))
        if not is_special:
            # A for-loop basically translates to subsequent calls to
            # __getitem__(), so using an IndexNode here allows us to
            # naturally infer the base type of pointers, C arrays,
            # Python strings, etc., while correctly falling back to an
            # object type when the base type cannot be handled.
            self.mark_assignment(target, node.item)

    def visit_ForInStatNode(self, node):
        # Also handles ParallelRangeNode (dispatched via visit_ParallelRangeNode).
        condition_block = self.flow.nextblock()
        next_block = self.flow.newblock()
        # Condition with iterator
        self.flow.loops.append(LoopDescr(next_block, condition_block))
        self._visit(node.iterator)
        # Target assignment
        self.flow.nextblock()
        if isinstance(node, Nodes.ForInStatNode):
            self.mark_forloop_target(node)
        else:  # Parallel
            self.mark_assignment(node.target)
        # Body block
        if isinstance(node, Nodes.ParallelRangeNode):
            # In case of an invalid
            self._delete_privates(node, exclude=node.target.entry)
        self.flow.nextblock()
        self._visit(node.body)
        self.flow.loops.pop()
        # Loop it
        if self.flow.block:
            self.flow.block.add_child(condition_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=condition_block)
            self._visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            condition_block.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def _delete_privates(self, node, exclude=None):
        # Mark all privatized variables of a parallel block as deleted
        # (uninitialized), optionally keeping one entry (the loop target).
        for private_node in node.assigned_nodes:
            if not exclude or private_node.entry is not exclude:
                self.flow.mark_deletion(private_node, private_node.entry)

    def visit_ParallelRangeNode(self, node):
        reductions = self.reductions
        # if node.target is None or not a NameNode, an error will have
        # been previously issued
        if hasattr(node.target, 'entry'):
            self.reductions = set(reductions)
            for private_node in node.assigned_nodes:
                private_node.entry.error_on_uninitialized = True
                pos, reduction = node.assignments[private_node.entry]
                if reduction:
                    self.reductions.add(private_node.entry)
            node = self.visit_ForInStatNode(node)
        self.reductions = reductions
        return node

    def visit_ParallelWithBlockNode(self, node):
        for private_node in node.assigned_nodes:
            private_node.entry.error_on_uninitialized = True
        # Privates are undefined both on entry and on exit of the block.
        self._delete_privates(node)
        self.visitchildren(node)
        self._delete_privates(node)
        return node

    def visit_ForFromStatNode(self, node):
        condition_block = self.flow.nextblock()
        next_block = self.flow.newblock()
        # Condition with iterator
        self.flow.loops.append(LoopDescr(next_block, condition_block))
        self._visit(node.bound1)
        self._visit(node.bound2)
        if node.step is not None:
            self._visit(node.step)
        # Target assignment
        self.flow.nextblock()
        self.mark_assignment(node.target, node.bound1)
        if node.step is not None:
            self.mark_assignment(node.target,
                                 ExprNodes.binop_node(node.pos, '+',
                                                      node.bound1, node.step))
        # Body block
        self.flow.nextblock()
        self._visit(node.body)
        self.flow.loops.pop()
        # Loop it
        if self.flow.block:
            self.flow.block.add_child(condition_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=condition_block)
            self._visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            condition_block.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def visit_LoopNode(self, node):
        raise InternalError("Generic loops are not supported")

    def visit_WithTargetAssignmentStatNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        return node

    def visit_WithStatNode(self, node):
        self._visit(node.manager)
        self._visit(node.enter_call)
        self._visit(node.body)
        return node

    def visit_TryExceptStatNode(self, node):
        # After exception handling
        next_block = self.flow.newblock()
        # Body block
        self.flow.newblock()
        # Exception entry point
        entry_point = self.flow.newblock()
        self.flow.exceptions.append(ExceptionDescr(entry_point))
        self.flow.nextblock()
        ## XXX: links to exception handling point should be added by
        ## XXX: children nodes
        self.flow.block.add_child(entry_point)
        self.flow.nextblock()
        self._visit(node.body)
        self.flow.exceptions.pop()
        # After exception
        if self.flow.block:
            if node.else_clause:
                self.flow.nextblock()
                self._visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        for clause in node.except_clauses:
            self.flow.block = entry_point
            if clause.pattern:
                for pattern in clause.pattern:
                    self._visit(pattern)
            else:
                # TODO: handle * pattern
                pass
            # Unmatched exceptions fall through to the next clause's entry.
            entry_point = self.flow.newblock(parent=self.flow.block)
            self.flow.nextblock()
            if clause.target:
                self.mark_assignment(clause.target)
            self._visit(clause.body)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        if self.flow.exceptions:
            # Still-unhandled exceptions propagate to the enclosing handler.
            entry_point.add_child(self.flow.exceptions[-1].entry_point)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def visit_TryFinallyStatNode(self, node):
        body_block = self.flow.nextblock()
        # Exception entry point
        entry_point = self.flow.newblock()
        self.flow.block = entry_point
        # The finally clause is analysed twice: once on the exceptional
        # path (here) and once on the normal path (below).
        self._visit(node.finally_clause)
        if self.flow.block and self.flow.exceptions:
            self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
        # Normal execution
        finally_enter = self.flow.newblock()
        self.flow.block = finally_enter
        self._visit(node.finally_clause)
        finally_exit = self.flow.block
        descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
        self.flow.exceptions.append(descr)
        if self.flow.loops:
            self.flow.loops[-1].exceptions.append(descr)
        self.flow.block = body_block
        ## XXX: Is it still required
        body_block.add_child(entry_point)
        self.flow.nextblock()
        self._visit(node.body)
        self.flow.exceptions.pop()
        if self.flow.loops:
            self.flow.loops[-1].exceptions.pop()
        if self.flow.block:
            self.flow.block.add_child(finally_enter)
            if finally_exit:
                self.flow.block = self.flow.nextblock(parent=finally_exit)
            else:
                self.flow.block = None
        return node

    def visit_RaiseStatNode(self, node):
        self.mark_position(node)
        self.visitchildren(node)
        if self.flow.exceptions:
            self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
        # raise terminates the current block.
        self.flow.block = None
        return node

    def visit_ReraiseStatNode(self, node):
        self.mark_position(node)
        if self.flow.exceptions:
            self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
        self.flow.block = None
        return node

    def visit_ReturnStatNode(self, node):
        self.mark_position(node)
        self.visitchildren(node)
        # Route through the innermost enclosing finally clause, if any,
        # before reaching the function exit point.
        for exception in self.flow.exceptions[::-1]:
            if exception.finally_enter:
                self.flow.block.add_child(exception.finally_enter)
                if exception.finally_exit:
                    exception.finally_exit.add_child(self.flow.exit_point)
                break
        else:
            if self.flow.block:
                self.flow.block.add_child(self.flow.exit_point)
        self.flow.block = None
        return node

    def visit_BreakStatNode(self, node):
        if not self.flow.loops:
            #error(node.pos, "break statement not inside loop")
            return node
        loop = self.flow.loops[-1]
        self.mark_position(node)
        # Route through the loop's enclosing finally clauses, if any.
        for exception in loop.exceptions[::-1]:
            if exception.finally_enter:
                self.flow.block.add_child(exception.finally_enter)
                if exception.finally_exit:
                    exception.finally_exit.add_child(loop.next_block)
                break
        else:
            self.flow.block.add_child(loop.next_block)
        self.flow.block = None
        return node

    def visit_ContinueStatNode(self, node):
        if not self.flow.loops:
            #error(node.pos, "continue statement not inside loop")
            return node
        loop = self.flow.loops[-1]
        self.mark_position(node)
        for exception in loop.exceptions[::-1]:
            if exception.finally_enter:
                self.flow.block.add_child(exception.finally_enter)
                if exception.finally_exit:
                    exception.finally_exit.add_child(loop.loop_block)
                break
        else:
            self.flow.block.add_child(loop.loop_block)
        self.flow.block = None
        return node

    def visit_ComprehensionNode(self, node):
        if node.expr_scope:
            self.env_stack.append(self.env)
            self.env = node.expr_scope
        # Skip append node here
        self._visit(node.loop)
        if node.expr_scope:
            self.env = self.env_stack.pop()
        return node

    def visit_ScopedExprNode(self, node):
        if node.expr_scope:
            self.env_stack.append(self.env)
            self.env = node.expr_scope
        self.visitchildren(node)
        if node.expr_scope:
            self.env = self.env_stack.pop()
        return node

    def visit_PyClassDefNode(self, node):
        # Class creation machinery runs in the enclosing scope...
        self.visitchildren(node, ('dict', 'metaclass',
                                  'mkw', 'bases', 'class_result'))
        self.flow.mark_assignment(node.target, object_expr_not_none,
                                  self.env.lookup(node.name))
        # ...while the class body runs in the class scope.
        self.env_stack.append(self.env)
        self.env = node.scope
        self.flow.nextblock()
        self.visitchildren(node, ('body',))
        self.flow.nextblock()
        self.env = self.env_stack.pop()
        return node

    def visit_AmpersandNode(self, node):
        if node.operand.is_name:
            # Fake assignment to silence warning
            self.mark_assignment(node.operand, fake_rhs_expr)
        self.visitchildren(node)
        return node
|
XueqingLin/tensorflow | refs/heads/master | tensorflow/contrib/opt/python/training/variable_clipping_optimizer.py | 15 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Delegating optimizer to clip norm for specified variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
__all__ = ["VariableClippingOptimizer"]
class VariableClippingOptimizer(optimizer.Optimizer):
  """Wrapper optimizer that clips the norm of specified variables after update.

  This optimizer delegates all aspects of gradient calculation and application
  to an underlying optimizer.  After applying gradients, this optimizer then
  clips the variable to have a maximum L2 norm along specified dimensions.
  NB: this is quite different from clipping the norm of the gradients.

  Multiple instances of `VariableClippingOptimizer` may be chained to specify
  different max norms for different subsets of variables.

  @@__init__
  """

  def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars

  # The next three methods simply delegate to the wrapped optimizer.
  def compute_gradients(self, *args, **kwargs):
    return self._opt.compute_gradients(*args, **kwargs)

  def get_slot(self, *args, **kwargs):
    return self._opt.get_slot(*args, **kwargs)

  def get_slot_names(self, *args, **kwargs):
    return self._opt.get_slot_names(*args, **kwargs)

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients via the wrapped optimizer, then clip listed variables."""
    with ops.name_scope(name, self._name) as name:
      update_op = self._opt.apply_gradients(
          grads_and_vars, global_step=global_step)
      clip_update_ops = []
      # Clip ops depend on update_op so they run after the variable update.
      with ops.control_dependencies([update_op]):
        for grad, var in grads_and_vars:
          if grad is None or var not in self._vars_to_clip_dims:
            continue
          with ops.name_scope("clip_" + var.op.name):
            if isinstance(grad, ops.Tensor):
              clip_update_ops.append(self._clip_dense(var))
            else:
              # Sparse (IndexedSlices) gradient: clip only touched rows.
              clip_update_ops.append(self._clip_sparse(grad, var))

      # In case no var was clipped, still need to run the update_op.
      return control_flow_ops.group(*([update_op] + clip_update_ops), name=name)

  def _clip_dense(self, var):
    """Return an op that clips the whole of *var* to the max norm."""
    with self._maybe_colocate_with(var):
      updated_var_value = array_ops.identity(var.ref())
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking)

  def _clip_sparse(self, grad, var):
    """Return an op that clips only the rows of *var* indexed by *grad*."""
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      # Norms spanning the sparse dimension need the full variable anyway.
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)

    with ops.colocate_with(var):
      var_subset = array_ops.gather(var.ref(), grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking)

  @contextlib.contextmanager
  def _maybe_colocate_with(self, var):
    """Context to colocate with `var` if `colocate_clip_ops_with_vars`."""
    if self._colocate_clip_ops_with_vars:
      with ops.colocate_with(var):
        yield
    else:
      yield
|
vitaly4uk/django | refs/heads/master | django/contrib/admin/filters.py | 139 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_model_from_relation, prepare_lookup_value, reverse_field_path,
)
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.utils import timezone
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
class ListFilter(object):
    # Abstract base class for all admin changelist filters.
    title = None  # Human-readable title to appear in the right sidebar.
    template = 'admin/filter.html'

    def __init__(self, request, params, model, model_admin):
        # This dictionary will eventually contain the request's query string
        # parameters actually used by this filter.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)

    def has_output(self):
        """
        Returns True if some choices would be output for this filter.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')

    def choices(self, cl):
        """
        Returns choices ready to be output in the template.

        `cl` is the ChangeList instance rendering the filter.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a choices() method')

    def queryset(self, request, queryset):
        """
        Returns the filtered queryset.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')

    def expected_parameters(self):
        """
        Returns the list of parameter names that are expected from the
        request's query string and that will be used by this filter.
        """
        raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
    # User-facing base class: subclasses define lookups() and queryset().
    # The parameter that should be used in the query string for that filter.
    parameter_name = None

    def __init__(self, request, params, model, model_admin):
        super(SimpleListFilter, self).__init__(
            request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'parameter_name'." % self.__class__.__name__)
        if self.parameter_name in params:
            # Claim this filter's parameter so the changelist does not treat
            # it as a field lookup.
            value = params.pop(self.parameter_name)
            self.used_parameters[self.parameter_name] = value
        lookup_choices = self.lookups(request, model_admin)
        if lookup_choices is None:
            lookup_choices = ()
        self.lookup_choices = list(lookup_choices)

    def has_output(self):
        return len(self.lookup_choices) > 0

    def value(self):
        """
        Returns the value (in string format) provided in the request's
        query string for this filter, if any. If the value wasn't provided then
        returns None.
        """
        return self.used_parameters.get(self.parameter_name)

    def lookups(self, request, model_admin):
        """
        Must be overridden to return a list of tuples (value, verbose value)
        """
        raise NotImplementedError(
            'The SimpleListFilter.lookups() method must be overridden to '
            'return a list of tuples (value, verbose value)')

    def expected_parameters(self):
        return [self.parameter_name]

    def choices(self, cl):
        # First choice resets the filter ("All"); selected when no value set.
        yield {
            'selected': self.value() is None,
            'query_string': cl.get_query_string({}, [self.parameter_name]),
            'display': _('All'),
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_text(lookup),
                'query_string': cl.get_query_string({
                    self.parameter_name: lookup,
                }, []),
                'display': title,
            }
class FieldListFilter(ListFilter):
    # Base class for filters bound to a model field; concrete classes are
    # chosen via the (test, class) pairs registered in _field_list_filters.
    _field_list_filters = []
    _take_priority_index = 0

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        self.title = getattr(field, 'verbose_name', field_path)
        super(FieldListFilter, self).__init__(
            request, params, model, model_admin)
        # Claim all of this filter's query-string parameters.
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)

    def has_output(self):
        return True

    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except ValidationError as e:
            # Invalid lookup values surface as a changelist-level error.
            raise IncorrectLookupParameters(e)

    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            cls._field_list_filters.insert(
                cls._take_priority_index, (test, list_filter_class))
            cls._take_priority_index += 1
        else:
            cls._field_list_filters.append((test, list_filter_class))

    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # Return an instance of the first registered filter whose test
        # accepts the field (None if no test matches).
        for test, list_filter_class in cls._field_list_filters:
            if not test(field):
                continue
            return list_filter_class(field, request, params,
                                     model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
    # Filter on a ForeignKey / relation: one choice per related object,
    # plus an "empty" choice for nullable relations.
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        self.lookup_kwarg = '%s__%s__exact' % (field_path, field.target_field.name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        self.lookup_choices = self.field_choices(field, request, model_admin)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        # Prefer the field's verbose_name; fall back to the related model's.
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
        self.empty_value_display = model_admin.get_empty_value_display()

    def has_output(self):
        # The "empty" choice counts as one extra option for nullable fields.
        if self.field.null:
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def field_choices(self, field, request, model_admin):
        # Hook for subclasses to limit the shown related objects.
        return field.get_choices(include_blank=False)

    def choices(self, cl):
        yield {
            'selected': self.lookup_val is None and not self.lookup_val_isnull,
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_val == smart_text(pk_val),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: pk_val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if self.field.null:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': self.empty_value_display,
            }


FieldListFilter.register(lambda f: f.remote_field, RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
    """Admin list filter for boolean fields: All / Yes / No, plus an
    "Unknown" (NULL) choice for NullBooleanField."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg2 = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2)
        super(BooleanFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]

    def choices(self, cl):
        # Fixed set of links; None means "All" (parameter removed).
        options = ((None, _('All')), ('1', _('Yes')), ('0', _('No')))
        for raw_value, label in options:
            yield {
                'selected': self.lookup_val == raw_value and not self.lookup_val2,
                'query_string': cl.get_query_string(
                    {self.lookup_kwarg: raw_value}, [self.lookup_kwarg2]),
                'display': label,
            }
        if isinstance(self.field, models.NullBooleanField):
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': cl.get_query_string(
                    {self.lookup_kwarg2: 'True'}, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }


FieldListFilter.register(
    lambda f: isinstance(f, (models.BooleanField, models.NullBooleanField)),
    BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
    """Admin list filter for fields declared with ``choices``: one link
    per declared choice plus an "All" link."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        super(ChoicesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg]

    def choices(self, cl):
        # "All" removes this filter's parameter from the query string.
        yield {
            'selected': self.lookup_val is None,
            'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
            'display': _('All')
        }
        for value, label in self.field.flatchoices:
            yield {
                'selected': smart_text(value) == self.lookup_val,
                'query_string': cl.get_query_string(
                    {self.lookup_kwarg: value}),
                'display': label,
            }


# Fields with an explicit choices list use this filter.
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    """Admin list filter for DateField/DateTimeField offering common
    ranges (today, past 7 days, this month, this year) expressed as
    half-open [since, until) lookups."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = '%s__' % field_path
        # Keep every GET parameter aimed at this field (any lookup type)
        # so the currently-selected link can be recognized in choices().
        self.date_params = {k: v for k, v in params.items()
                            if k.startswith(self.field_generic)}

        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if timezone.is_aware(now):
            now = timezone.localtime(now)

        if isinstance(field, models.DateTimeField):
            # Midnight at the start of the current day, same tzinfo.
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:       # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        # First day of next month; December wraps to January of next year.
        if today.month == 12:
            next_month = today.replace(year=today.year + 1, month=1, day=1)
        else:
            next_month = today.replace(month=today.month + 1, day=1)
        next_year = today.replace(year=today.year + 1, month=1, day=1)

        self.lookup_kwarg_since = '%s__gte' % field_path
        self.lookup_kwarg_until = '%s__lt' % field_path
        # (label, GET params) pairs; upper bounds are exclusive (__lt).
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {
                self.lookup_kwarg_since: str(today),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_kwarg_since: str(today.replace(day=1)),
                self.lookup_kwarg_until: str(next_month),
            }),
            (_('This year'), {
                self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                self.lookup_kwarg_until: str(next_year),
            }),
        )
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg_since, self.lookup_kwarg_until]

    def choices(self, cl):
        for title, param_dict in self.links:
            yield {
                # Selected iff the active params exactly match this link.
                'selected': self.date_params == param_dict,
                'query_string': cl.get_query_string(
                    param_dict, [self.field_generic]),
                'display': title,
            }


FieldListFilter.register(
    lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    """Last-resort admin list filter: offers every distinct value of the
    field found in the (parent) changelist queryset."""

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        self.empty_value_display = model_admin.get_empty_value_display()
        parent_model, reverse_path = reverse_field_path(model, field_path)
        # Obey parent ModelAdmin queryset when deciding which options to show
        if model == parent_model:
            queryset = model_admin.get_queryset(request)
        else:
            # field_path traverses a relation: list values from the
            # related model's default manager instead.
            queryset = parent_model._default_manager.all()
        # Lazy queryset of distinct values; may include None for null rows.
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def choices(self, cl):
        # "All" clears both parameters of this filter.
        yield {
            'selected': (self.lookup_val is None
                         and self.lookup_val_isnull is None),
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # Defer the NULL choice so it is rendered last.
                include_none = True
                continue
            val = smart_text(val)
            yield {
                'selected': self.lookup_val == val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': self.empty_value_display,
            }


# Catch-all: matches every field, hence registered last (see note above).
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
    """Variant of RelatedFieldListFilter that only offers the related
    objects actually referenced by the ModelAdmin's queryset."""

    def field_choices(self, field, request, model_admin):
        used_pks = model_admin.get_queryset(request).values_list(field.name, flat=True)
        return field.get_choices(include_blank=False,
                                 limit_choices_to={'pk__in': set(used_pks)})
|
pvanek/nomacs | refs/heads/master | scripts/versionupdate.py | 2 | #!/usr/bin/env python
"""Increments the build version of a C++ projects.
This script increments the build version after every build of
a Visual Stuido project. It manipulates the DkVersion.h file in order
to do so. The version number defined there is displayed in help and
used for the VS_VERSION_INFO in the *.rc file
"""
import logging
__author__ = "Markus Diem"
__credits__ = ["Markus Diem"]
__license__ = "GPLv3"
__version__ = "0.3"
__maintainer__ = "Markus Diem"
__email__ = "markus@nomacs.org"
__status__ = "Production"
OUTPUT_NAME = "versionupdate"
def update(filepath, copy=False):
    """Rewrite *filepath* with refreshed version and git-tag lines.

    Reads the current version via utils.fun.version(), streams the file
    line by line through the line-level updaters, and writes the result
    to a sibling "<name>-versioned<ext>" file.

    Args:
        filepath: path of the file to update (DkVersion.h, the MSI/Inno
            installer scripts, ...).
        copy: if True, keep the "-versioned" copy and leave *filepath*
            untouched; otherwise *filepath* is replaced by the updated file.
    """
    # BUGFIX: 'os' used to be imported only under the __main__ guard, so
    # calling update() from an importing module raised NameError here.
    import os
    from shutil import move
    from os import remove
    from utils.fun import version

    v = version()

    dstpath, ext = os.path.splitext(filepath)
    dstpath += "-versioned" + ext

    with open(filepath, "r") as src:
        with open(dstpath, "w") as dst:
            for l in src.readlines():
                # Each updater returns the line unchanged unless it
                # recognizes its marker.
                l = update_version_string(v, l)
                l = update_version_rc(v, l)
                l = update_version_patch(v, l)
                l = add_git_tag_string(l)
                dst.write(l)

    # replace current file?
    if not copy:
        remove(filepath)
        move(dstpath, filepath)
def update_version_string(version, line):
    """Swap the quoted version in known version-definition lines.

    Recognizes DkVersion.h (NOMACS_VERSION_STR), the MSI installer
    (ProductVersion) and the Inno installer (MyAppVersion). Everything
    between the first and last double quote is replaced by *version*;
    other lines pass through unchanged.
    """
    markers = ("NOMACS_VERSION_STR",
               "<?define ProductVersion",
               "define MyAppVersion")
    if any(marker in line for marker in markers):
        pieces = line.split("\"")
        line = pieces[0] + "\"" + version + "\"" + pieces[-1]
    return line
def update_version_rc(version, line):
    """Rewrite a NOMACS_VERSION_RC define to the comma-separated form.

    E.g. for ``#define NOMACS_VERSION_RC 3,14,42`` the last whitespace-
    separated token becomes *version* with dots replaced by commas.
    Non-matching lines pass through unchanged.
    """
    if "NOMACS_VERSION_RC" not in line:
        return line
    tokens = line.split(" ")
    tokens[-1] = version.replace(".", ",")
    return " ".join(tokens) + "\n"
def update_version_patch(version, line):
    """Replace the patch number in a NOMACS_VER_PATCH define.

    Looks for lines like ``#define NOMACS_VER_PATCH 0`` and substitutes
    the patch component of *version* ("3.14.42" -> "42").

    Args:
        version: full version string "major.minor.patch".
        line: one line of the source file (trailing newline included).

    Returns:
        The (possibly rewritten) line. Non-matching lines — and every
        line when *version* is malformed — are returned unchanged.
    """
    # get patch from 3.14.42
    vs = version.split(".")
    if len(vs) != 3:
        print("WARNING: could not split version: " + version)
        # BUGFIX: this path used to fall off the function and return None,
        # making the caller write None / crash downstream. Pass the line
        # through unchanged instead.
        return line

    # searching: #define NOMACS_VER_PATCH 0
    if "NOMACS_VER_PATCH" in line:
        str_ver = line.split(" ")
        str_ver[-1] = vs[-1]
        line = " ".join(str_ver) + "\n"

    return line
def add_git_tag_string(line):
    """Insert the current git HEAD hash into a NOMACS_GIT_TAG define.

    Only lines of the form ``#define NOMACS_GIT_TAG "<hash>"`` (exactly
    two double quotes) are rewritten; other lines pass through unchanged.
    """
    if "NOMACS_GIT_TAG" not in line:
        return line
    stripped = line.replace("\n", "")
    parts = stripped.split("\"")
    if len(parts) == 3:
        parts[-2] = git_tag()
        return "\"".join(parts) + "\n"
    # NOTE: a matching line with an unexpected quote count loses its
    # trailing newline here (original behavior, preserved).
    return stripped
def git_tag():
    """Return the git revision hash of HEAD as a decoded string."""
    import subprocess

    raw = subprocess.check_output(["git", "rev-parse", "HEAD"])
    return raw.decode("utf-8").strip()
if __name__ == "__main__":
    # Command-line entry point: update the version strings of one file.
    import argparse
    import os
    # NOTE(review): 'mypath' appears unused in this block — confirm before
    # removing, since utils.fun may rely on import side effects.
    from utils.fun import mypath

    parser = argparse.ArgumentParser(
        description='Increments the build version of a C++ project and adds the git rev as product version.')

    parser.add_argument("inputfile", type=str,
                        help="""full path to the file who's version should be updated""")
    parser.add_argument("--copy", action='store_true',
                        help="""if set, a _ver file will be created""")

    args = parser.parse_args()

    if not os.path.isfile(args.inputfile):
        print("input file does not exist: " + args.inputfile)
        exit()

    update(args.inputfile, args.copy)
|
dexterx17/nodoSocket | refs/heads/master | clients/Python-2.7.6/setup.py | 1 | # Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision$"
import sys, os, imp, re, optparse
from glob import glob
from platform import machine as platform_machine
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.spawn import find_executable
cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ
def get_platform():
    """Return the platform string the build targets.

    Honors the _PYTHON_HOST_PLATFORM environment variable (set for cross
    builds), collapses every OSF/1 variant to 'osf1', and otherwise
    reports sys.platform.
    """
    host = os.environ.get("_PYTHON_HOST_PLATFORM")
    if host is not None:
        return host
    if sys.platform.startswith('osf1'):
        return 'osf1'
    return sys.platform
# Platform we are building for; may differ from sys.platform when
# cross-compiling (see get_platform()).
host_platform = get_platform()

# Were we compiled --with-pydebug or with #define Py_DEBUG?
# NOTE(review): assumes CONFIG_ARGS is a string in a source build — in
# other environments get_config_var may return None; confirm.
COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))

# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
    """Prepend 'dir' to 'dirlist' if it is a real, not-yet-listed directory.

    Does nothing when 'dir' is None, does not exist, is not a directory,
    or is already present in the list.
    """
    if dir is None:
        return
    if not os.path.isdir(dir):
        return
    if dir in dirlist:
        return
    dirlist.insert(0, dir)
def macosx_sdk_root():
    """
    Return the directory of the current OSX SDK,
    or '/' if no SDK was specified.
    """
    cflags = sysconfig.get_config_var('CFLAGS')
    match = re.search(r'-isysroot\s+(\S+)', cflags)
    return match.group(1) if match else '/'
def is_macosx_sdk_path(path):
    """
    Returns True if 'path' can be located in an OSX SDK
    """
    if path.startswith('/usr/'):
        # /usr/local is the user's own tree, never part of an SDK.
        return not path.startswith('/usr/local')
    return path.startswith('/System/') or path.startswith('/Library/')
def find_file(filename, std_dirs, paths):
    """Searches for the directory where a given file is located,
    and returns a possibly-empty list of additional directories, or None
    if the file couldn't be found at all.

    'filename' is the name of a file, such as readline.h or libcrypto.a.
    'std_dirs' is the list of standard system directories; if the
    file is found in one of them, no additional directives are needed.
    'paths' is a list of additional locations to check; if the file is
    found in one of them, the resulting list will contain the directory.
    """
    if host_platform == 'darwin':
        # Honor the MacOSX SDK setting when one was specified.
        # An SDK is a directory with the same structure as a real
        # system, but with only header files and libraries.
        sysroot = macosx_sdk_root()

    # Check the standard locations
    for dir in std_dirs:
        f = os.path.join(dir, filename)

        if host_platform == 'darwin' and is_macosx_sdk_path(dir):
            # Re-root the candidate inside the SDK (dir[1:] drops the
            # leading '/', so join places it under sysroot).
            f = os.path.join(sysroot, dir[1:], filename)

        if os.path.exists(f): return []

    # Check the additional directories
    for dir in paths:
        f = os.path.join(dir, filename)

        if host_platform == 'darwin' and is_macosx_sdk_path(dir):
            f = os.path.join(sysroot, dir[1:], filename)

        if os.path.exists(f):
            return [dir]

    # Not found anywhere
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate library 'libname' and report where it was found.

    Returns [] when the library lives in one of 'std_dirs' (no extra -L
    flag needed), [dir] when it was found in one of the additional
    'paths', or None when the compiler could not find it at all.
    """
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None

    if host_platform == 'darwin':
        sysroot = macosx_sdk_root()

    # Check whether the found file is in one of the standard directories
    dirname = os.path.dirname(result)
    for p in std_dirs:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)

        if host_platform == 'darwin' and is_macosx_sdk_path(p):
            # Compare against the SDK-rooted variant of the standard dir.
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ ]

        if p == dirname:
            return [ ]

    # Otherwise, it must have been in one of the additional directories,
    # so we have to figure out which one.
    for p in paths:
        # Ensure path doesn't end with path separator
        p = p.rstrip(os.sep)

        if host_platform == 'darwin' and is_macosx_sdk_path(p):
            if os.path.join(sysroot, p[1:]) == dirname:
                return [ p ]

        if p == dirname:
            return [p]
    else:
        # for/else: only reached if no 'return' fired above.
        assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Returns whether the module 'modname' is present in the list
    of extensions 'extlist' (as a truthy count of matches)."""
    return sum(1 for ext in extlist if ext.name == modname)
def find_module_file(module, dirlist):
    """Find a module in a set of possible folders. If it is not found
    return the unadorned filename"""
    found = find_file(module, [], dirlist)
    if not found:
        return module
    if len(found) > 1:
        log.info("WARNING: multiple copies of %s found" % module)
    return os.path.join(found[0], module)
class PyBuildExt(build_ext):
def __init__(self, dist):
    # Chain to distutils' build_ext, then track names of extensions whose
    # build or import check fails so they can be reported at the end.
    build_ext.__init__(self, dist)
    self.failed = []
def build_extensions(self):
    """Detect, filter and build all extension modules, then print a
    report of modules that were missing or failed to build."""

    # Detect which modules should be compiled
    missing = self.detect_modules()

    # Remove modules that are present on the disabled list
    extensions = [ext for ext in self.extensions
                  if ext.name not in disabled_module_list]
    # move ctypes to the end, it depends on other modules
    ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
    if "_ctypes" in ext_map:
        ctypes = extensions.pop(ext_map["_ctypes"])
        extensions.append(ctypes)
    self.extensions = extensions

    # Fix up the autodetected modules, prefixing all the source files
    # with Modules/ and adding Python's include directory to the path.
    (srcdir,) = sysconfig.get_config_vars('srcdir')
    if not srcdir:
        # Maybe running on Windows but not using CYGWIN?
        raise ValueError("No source directory; cannot proceed.")
    srcdir = os.path.abspath(srcdir)
    moddirlist = [os.path.join(srcdir, 'Modules')]

    # Platform-dependent module source and include directories
    incdirlist = []

    if host_platform == 'darwin' and ("--disable-toolbox-glue" not in
            sysconfig.get_config_var("CONFIG_ARGS")):
        # Mac OS X also includes some mac-specific modules
        macmoddir = os.path.join(srcdir, 'Mac/Modules')
        moddirlist.append(macmoddir)
        incdirlist.append(os.path.join(srcdir, 'Mac/Include'))

    # Fix up the paths for scripts, too
    self.distribution.scripts = [os.path.join(srcdir, filename)
                                 for filename in self.distribution.scripts]

    # Python header files
    headers = [sysconfig.get_config_h_filename()]
    headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))

    # Iterate over a copy since entries may be removed below.
    for ext in self.extensions[:]:
        ext.sources = [ find_module_file(filename, moddirlist)
                        for filename in ext.sources ]
        if ext.depends is not None:
            ext.depends = [find_module_file(filename, moddirlist)
                           for filename in ext.depends]
        else:
            ext.depends = []
        # re-compile extensions if a header file has been changed
        ext.depends.extend(headers)

        # platform specific include directories
        ext.include_dirs.extend(incdirlist)

        # If a module has already been built statically,
        # don't build it here
        if ext.name in sys.builtin_module_names:
            self.extensions.remove(ext)

    # Parse Modules/Setup and Modules/Setup.local to figure out which
    # modules are turned on in the file.
    remove_modules = []
    for filename in ('Modules/Setup', 'Modules/Setup.local'):
        input = text_file.TextFile(filename, join_lines=1)
        while 1:
            line = input.readline()
            if not line: break
            line = line.split()
            remove_modules.append(line[0])
        input.close()

    for ext in self.extensions[:]:
        if ext.name in remove_modules:
            self.extensions.remove(ext)

    # When you run "make CC=altcc" or something similar, you really want
    # those environment variables passed into the setup.py phase. Here's
    # a small set of useful ones.
    compiler = os.environ.get('CC')
    args = {}
    # unfortunately, distutils doesn't let us provide separate C and C++
    # compilers
    if compiler is not None:
        (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
        args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
    self.compiler.set_executables(**args)

    # Do the actual build via distutils.
    build_ext.build_extensions(self)

    # Column width for the three-column report below.
    longest = max([len(e.name) for e in self.extensions])
    if self.failed:
        longest = max(longest, max([len(name) for name in self.failed]))

    def print_three_column(lst):
        # Sorts and pads the list, then prints it three names per row.
        lst.sort(key=str.lower)
        # guarantee zip() doesn't drop anything
        while len(lst) % 3:
            lst.append("")
        for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
            print "%-*s %-*s %-*s" % (longest, e, longest, f,
                                      longest, g)

    if missing:
        print
        print ("Python build finished, but the necessary bits to build "
               "these modules were not found:")
        print_three_column(missing)
        print ("To find the necessary bits, look in setup.py in"
               " detect_modules() for the module's name.")
        print

    if self.failed:
        failed = self.failed[:]
        print
        print "Failed to build these modules:"
        print_three_column(failed)
        print
def build_extension(self, ext):
    """Build one extension and sanity-check it by importing it,
    renaming the built file to "*_failed*" when the import fails."""

    if ext.name == '_ctypes':
        # ctypes needs its own configure step (libffi) before building.
        if not self.configure_ctypes(ext):
            return

    try:
        build_ext.build_extension(self, ext)
    except (CCompilerError, DistutilsError), why:
        self.announce('WARNING: building of extension "%s" failed: %s' %
                      (ext.name, sys.exc_info()[1]))
        self.failed.append(ext.name)
        return
    # Workaround for Mac OS X: The Carbon-based modules cannot be
    # reliably imported into a command-line Python
    if 'Carbon' in ext.extra_link_args:
        self.announce(
            'WARNING: skipping import check for Carbon-based "%s"' %
            ext.name)
        return

    if host_platform == 'darwin' and (
            sys.maxint > 2**32 and '-arch' in ext.extra_link_args):
        # Don't bother doing an import check when an extension was
        # build with an explicit '-arch' flag on OSX. That's currently
        # only used to build 32-bit only extensions in a 4-way
        # universal build and loading 32-bit code into a 64-bit
        # process will fail.
        self.announce(
            'WARNING: skipping import check for "%s"' %
            ext.name)
        return

    # Workaround for Cygwin: Cygwin currently has fork issues when many
    # modules have been imported
    if host_platform == 'cygwin':
        self.announce('WARNING: skipping import check for Cygwin-based "%s"'
                      % ext.name)
        return
    ext_filename = os.path.join(
        self.build_lib,
        self.get_ext_filename(self.get_ext_fullname(ext.name)))

    # Don't try to load extensions for cross builds
    if cross_compiling:
        return

    try:
        imp.load_dynamic(ext.name, ext_filename)
    except ImportError, why:
        self.failed.append(ext.name)
        self.announce('*** WARNING: renaming "%s" since importing it'
                      ' failed: %s' % (ext.name, why), level=3)
        assert not self.inplace
        basename, tail = os.path.splitext(ext_filename)
        newname = basename + "_failed" + tail
        if os.path.exists(newname):
            os.remove(newname)
        os.rename(ext_filename, newname)

        # XXX -- This relies on a Vile HACK in
        # distutils.command.build_ext.build_extension(). The
        # _built_objects attribute is stored there strictly for
        # use here.
        # If there is a failure, _built_objects may not be there,
        # so catch the AttributeError and move on.
        try:
            for filename in self._built_objects:
                os.remove(filename)
        except AttributeError:
            self.announce('unable to remove files (ignored)')
    except:
        # Any other import-time failure: record it but keep building.
        exc_type, why, tb = sys.exc_info()
        self.announce('*** WARNING: importing extension "%s" '
                      'failed with %s: %s' % (ext.name, exc_type, why),
                      level=3)
        self.failed.append(ext.name)
def add_multiarch_paths(self):
    """Add Debian/Ubuntu multiarch library and include directories.

    First asks the compiler itself (gcc -print-multiarch); if that yields
    nothing, falls back to dpkg-architecture.
    """
    # Debian/Ubuntu multiarch support.
    # https://wiki.ubuntu.com/MultiarchSpec
    cc = sysconfig.get_config_var('CC')
    tmpfile = os.path.join(self.build_temp, 'multiarch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    ret = os.system(
        '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
    multiarch_path_component = ''
    try:
        # os.system returns the wait status; >> 8 extracts the exit code.
        if ret >> 8 == 0:
            with open(tmpfile) as fp:
                multiarch_path_component = fp.readline().strip()
    finally:
        os.unlink(tmpfile)

    if multiarch_path_component != '':
        add_dir_to_list(self.compiler.library_dirs,
                        '/usr/lib/' + multiarch_path_component)
        add_dir_to_list(self.compiler.include_dirs,
                        '/usr/include/' + multiarch_path_component)
        return

    if not find_executable('dpkg-architecture'):
        return
    opt = ''
    if cross_compiling:
        opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
    tmpfile = os.path.join(self.build_temp, 'multiarch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    ret = os.system(
        'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
        (opt, tmpfile))
    try:
        if ret >> 8 == 0:
            with open(tmpfile) as fp:
                multiarch_path_component = fp.readline().strip()
            add_dir_to_list(self.compiler.library_dirs,
                            '/usr/lib/' + multiarch_path_component)
            add_dir_to_list(self.compiler.include_dirs,
                            '/usr/include/' + multiarch_path_component)
    finally:
        os.unlink(tmpfile)
def add_gcc_paths(self):
    """Add gcc's own search directories (used when cross-compiling).

    Parses 'gcc -E -v' diagnostics for the #include search list and
    LIBRARY_PATH, skipping gcc-internal directories.
    """
    gcc = sysconfig.get_config_var('CC')
    tmpfile = os.path.join(self.build_temp, 'gccpaths')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
    is_gcc = False
    in_incdirs = False
    # NOTE(review): inc_dirs and lib_dirs appear unused below (paths are
    # added straight to self.compiler.*) — confirm before removing.
    inc_dirs = []
    lib_dirs = []
    try:
        # Exit code 0 (ret >> 8) means the compiler ran successfully.
        if ret >> 8 == 0:
            with open(tmpfile) as fp:
                for line in fp.readlines():
                    if line.startswith("gcc version"):
                        is_gcc = True
                    elif line.startswith("#include <...>"):
                        # Start of the system include search list.
                        in_incdirs = True
                    elif line.startswith("End of search list"):
                        in_incdirs = False
                    elif is_gcc and line.startswith("LIBRARY_PATH"):
                        for d in line.strip().split("=")[1].split(":"):
                            d = os.path.normpath(d)
                            if '/gcc/' not in d:
                                add_dir_to_list(self.compiler.library_dirs,
                                                d)
                    elif is_gcc and in_incdirs and '/gcc/' not in line:
                        add_dir_to_list(self.compiler.include_dirs,
                                        line.strip())
    finally:
        os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used
if not cross_compiling:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
if cross_compiling:
self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
try:
have_unicode = unicode
except NameError:
have_unicode = 0
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
inc_dirs = self.compiler.include_dirs[:]
lib_dirs = self.compiler.library_dirs[:]
if not cross_compiling:
for d in (
'/usr/include',
):
add_dir_to_list(inc_dirs, d)
for d in (
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
):
add_dir_to_list(lib_dirs, d)
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
srcdir = sysconfig.get_config_var('srcdir')
# Check for AtheOS which has libraries in non-standard locations
if host_platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses directories
# with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if host_platform in ['darwin', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
#exts.append( Extension('_weakref', ['_weakref.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
exts.append( Extension('datetime', ['datetimemodule.c', 'timemodule.c'],
libraries=math_libs) )
# fast iterator tools implemented in C
exts.append( Extension("itertools", ["itertoolsmodule.c"]) )
# code that will be builtins in the future, but conflict with the
# current builtins
exts.append( Extension('future_builtins', ['future_builtins.c']) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# high-performance collections
exts.append( Extension("_collections", ["_collectionsmodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# Python 3.1 _io library
exts.append( Extension("_io",
["_io/bufferedio.c", "_io/bytesio.c", "_io/fileio.c",
"_io/iobase.c", "_io/_iomodule.c", "_io/stringio.c", "_io/textio.c"],
depends=["_io/_iomodule.h"], include_dirs=["Modules/_io"]))
# _functools
exts.append( Extension("_functools", ["_functoolsmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profilers (_lsprof is for cProfile.py)
exts.append( Extension('_hotshot', ['_hotshot.c']) )
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
if have_unicode:
exts.append( Extension('unicodedata', ['unicodedata.c']) )
else:
missing.append('unicodedata')
# access to ISO C locale support
data = open('pyconfig.h').read()
m = re.search(r"#s*define\s+WITH_LIBINTL\s+1\s*", data)
if m is not None:
locale_libs = ['intl']
else:
locale_libs = []
if host_platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
exts.append( Extension('_locale', ['_localemodule.c'],
libraries=locale_libs,
extra_link_args=locale_extra_link_args) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# cStringIO and cPickle
exts.append( Extension('cStringIO', ['cStringIO.c']) )
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
if host_platform not in ['atheos']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
else:
missing.append('mmap')
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
# George Neville-Neil's timing module:
# Deprecated in PEP 4 http://www.python.org/peps/pep-0004.html
# http://mail.python.org/pipermail/python-dev/2006-January/060023.html
#exts.append( Extension('timing', ['timingmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# Disabled on 64-bit platforms
if sys.maxint != 9223372036854775807L:
# Operations on images
exts.append( Extension('imageop', ['imageop.c']) )
else:
missing.extend(['imageop'])
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Determine if readline is already linked against curses or tinfo.
if do_readline and find_executable('ldd'):
fp = os.popen("ldd %s" % do_readline)
ldd_output = fp.readlines()
ret = fp.close()
if ret is None or ret >> 8 == 0:
for ln in ldd_output:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
if 'tinfo' in ln: # termcap interface split out from ncurses
readline_termcap_library = 'tinfo'
break
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if dep_target and dep_target.split('.') < ['10', '5']:
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entiry path.
# This way a staticly linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c', 'timemodule.c'],
depends=['socketmodule.h'],
libraries=math_libs) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
incfile = open(name, 'r')
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
except IOError, msg:
print "IOError while reading opensshv.h:", msg
pass
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print ("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
# The _sha module implements the SHA1 hash algorithm.
exts.append( Extension('_sha', ['shamodule.c']) )
# The _md5 module implements the RSA Data Security, Inc. MD5
# Message-Digest Algorithm, described in RFC 1321. The
# necessary files md5.c and md5.h are included here.
exts.append( Extension('_md5',
sources = ['md5module.c', 'md5.c'],
depends = ['md5.h']) )
min_sha2_openssl_ver = 0x00908000
if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c']) )
exts.append( Extension('_sha512', ['sha512module.c']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module anydbm.py provides an
# implementation independent wrapper for these; dumbdbm.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (4, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
    """Return True if the given BerkeleyDB version is acceptable.

    Args:
      db_ver: A tuple of the version to verify.
    """
    # Reject anything outside the supported [min_db_ver, max_db_ver] window
    # (both bounds come from the enclosing detect_modules scope).
    if not (min_db_ver <= db_ver <= max_db_ver):
        return False
    # Known-bad configuration: BerkeleyDB 4.6.x is not stable on many
    # architectures, so it is only allowed on the well-tested Intel ones.
    if db_ver[:2] == (4, 6):
        stable_archs = ('i386', 'i486', 'i586', 'i686', 'x86_64', 'ia64')
        if platform_machine() not in stable_archs:
            return False
    return True
def gen_db_minor_ver_nums(major):
    """Yield acceptable minor version numbers for a BerkeleyDB major release.

    Candidates are filtered through allow_db_ver(); an unknown major
    release raises ValueError (on first iteration, as this is a generator).
    """
    # Pick the candidate minor numbers to probe for each supported major.
    if major == 5:
        candidates = range(max_db_ver[1] + 1)
    elif major == 4:
        candidates = range(9)
    elif major == 3:
        candidates = (3,)
    else:
        raise ValueError("unknown major BerkeleyDB version", major)
    for minor in candidates:
        if allow_db_ver((major, minor)):
            yield minor
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print "db: looking for db.h in", f
if os.path.exists(f):
f = open(f).read()
m = re.search(r"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(r"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(r"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print "db.h:", db_ver, "patch", db_patch,
print "being ignored (4.6.x must be >= 4.6.21)"
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print "db.h: found", db_ver, "in", d
else:
# we already found a header for this library version
if db_setup_debug: print "db.h: ignoring", d
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print "db.h: no version number version in", d
db_found_vers = db_ver_inc_map.keys()
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if host_platform != 'darwin':
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print "db lib: ", dblib, "not found"
except db_found:
if db_setup_debug:
print "bsddb using BerkeleyDB lib:", db_ver, dblib
print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
db_incs = [db_incdir]
dblibs = [dblib]
# We add the runtime_library_dirs argument because the
# BerkeleyDB lib we're linking against often isn't in the
# system dynamic library search path. This is usually
# correct and most trouble free, but may cause problems in
# some unusual system configurations (e.g. the directory
# is on an NFS server that goes away).
exts.append(Extension('_bsddb', ['_bsddb.c'],
depends = ['bsddb.h'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
libraries=dblibs))
else:
if db_setup_debug: print "db: no appropriate library found"
db_incs = None
dblibs = []
dblib_dir = None
missing.append('_bsddb')
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
if cross_compiling:
sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if host_platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print "%s/sqlite3.h: version %s"%(d, sqlite_version)
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print "%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION)
elif sqlite_setup_debug:
print "sqlite: %s had no SQLITE_VERSION"%(f,)
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Comment this out if you want the sqlite3 module to be able to load extensions.
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
# Look for Berkeley db 1.85. Note that it is built as a different
# module name so it can be included even when later versions are
# available. A very restrictive search is performed to avoid
# accidentally building this module with a later version of the
# underlying db library. May BSD-ish Unixes incorporate db 1.85
# symbols into libc and place the include file in /usr/include.
#
# If the better bsddb library can be built (db_incs is defined)
# we do not build this one. Otherwise this build will pick up
# the more recent berkeleydb's db.h file first in the include path
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
if host_platform == 'darwin':
if is_macosx_sdk_path(f):
sysroot = macosx_sdk_root()
f = os.path.join(sysroot, f[1:])
if os.path.exists(f) and not db_incs:
data = open(f).read()
m = re.search(r"#s*define\s+HASHVERSION\s+2\s*", data)
if m is not None:
# bingo - old version used hash file format version 2
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
libraries = host_platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
else:
exts.append(Extension('bsddb185', ['bsddbmodule.c']))
else:
missing.append('bsddb185')
else:
missing.append('bsddb185')
dbm_order = ['gdbm']
# The standard Unix dbm module:
if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
print "building dbm using ndbm"
dbmext = Extension('dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
print "building dbm using gdbm"
dbmext = Extension(
'dbm', ['dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if db_incs is not None:
print "building dbm using bdb"
dbmext = Extension('dbm', ['dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('gdbm', ['gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('gdbm')
# Unix-only modules
if host_platform not in ['win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if host_platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
else:
missing.append('resource')
# Sun yellow pages. Some systems have the functions in libc.
if (host_platform not in ['cygwin', 'atheos', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
fp = open(zlib_h)
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
if have_unicode:
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
else:
missing.append('_multibytecodec')
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
missing.append('_codecs_%s' % loc)
# Dynamic loading module
if sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
if (dl_inc is not None) and (host_platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
else:
missing.append('dl')
else:
missing.append('dl')
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=macros.items(),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if host_platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
else:
missing.append('linuxaudiodev')
if (host_platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
or host_platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if host_platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
else:
missing.append('sunaudiodev')
if host_platform == 'darwin':
# _scproxy
exts.append(Extension("_scproxy", [os.path.join(srcdir, "Mac/Modules/_scproxy.c")],
extra_link_args= [
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation'
]))
if host_platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if int(os.uname()[2].split('.')[0]) >= 8:
# We're on Mac OS X 10.4 or later, the compiler should
# support '-Wno-deprecated-declarations'. This will
# surpress deprecation warnings for the Carbon extensions,
# these extensions wrap the Carbon APIs and even those
# parts that are deprecated.
carbon_extra_compile_args = ['-Wno-deprecated-declarations']
else:
carbon_extra_compile_args = []
# Mac OS X specific modules.
def macSrcExists(name1, name2=''):
    """Return whether a source file exists under Mac/Modules in srcdir.

    Args:
      name1: first path component; a falsy value means "no file".
      name2: optional second path component, joined beneath name1.
    """
    if not name1:
        # Fix: previously returned None here while the other path returned
        # a bool.  Callers only test the result in boolean context, so
        # returning a proper False is backward-compatible and consistent.
        return False
    names = (name1,)
    if name2:
        names = (name1, name2)
    # srcdir (the Python source tree root) comes from the enclosing scope.
    path = os.path.join(srcdir, 'Mac', 'Modules', *names)
    return os.path.exists(path)
def addMacExtension(name, kwds, extra_srcs=None):
    """Locate the C source for a Mac toolbox module and register it.

    Looks for the source as NNN.c, NNNmodule.c, _nnn/NNN.c or
    _nnn/NNNmodule.c (the _nnn subdirectory applies to names that start
    with an underscore) and appends a matching Extension to exts.

    Args:
      name: extension module name (e.g. '_CF', 'gestalt').
      kwds: keyword arguments forwarded to the Extension constructor.
      extra_srcs: optional additional source files to compile in.

    Raises:
      RuntimeError: if no candidate source file exists.
    """
    # Fix: the default used to be a shared mutable list ([]); use a None
    # sentinel instead.  Behavior for all existing callers is unchanged.
    if extra_srcs is None:
        extra_srcs = []
    dirname = ''
    if name[0] == '_':
        dirname = name[1:].lower()
    cname = name + '.c'
    cmodulename = name + 'module.c'
    # Check for NNN.c, NNNmodule.c, _nnn/NNN.c, _nnn/NNNmodule.c
    if macSrcExists(cname):
        srcs = [cname]
    elif macSrcExists(cmodulename):
        srcs = [cmodulename]
    elif macSrcExists(dirname, cname):
        # XXX(nnorwitz): If all the names ended with module, we
        # wouldn't need this condition.  ibcarbon is the only one.
        srcs = [os.path.join(dirname, cname)]
    elif macSrcExists(dirname, cmodulename):
        srcs = [os.path.join(dirname, cmodulename)]
    else:
        raise RuntimeError("%s not found" % name)
    # Here's the whole point: add the extension with its sources.
    exts.append(Extension(name, srcs + extra_srcs, **kwds))
# Core Foundation
core_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'CoreFoundation'],
}
addMacExtension('_CF', core_kwds, ['cf/pycfbridge.c'])
addMacExtension('autoGIL', core_kwds)
# Carbon
carbon_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework', 'Carbon'],
}
CARBON_EXTS = ['ColorPicker', 'gestalt', 'MacOS', 'Nav',
'OSATerminology', 'icglue',
# All these are in subdirs
'_AE', '_AH', '_App', '_CarbonEvt', '_Cm', '_Ctl',
'_Dlg', '_Drag', '_Evt', '_File', '_Folder', '_Fm',
'_Help', '_Icn', '_IBCarbon', '_List',
'_Menu', '_Mlte', '_OSA', '_Res', '_Qd', '_Qdoffs',
'_Scrap', '_Snd', '_TE',
]
for name in CARBON_EXTS:
addMacExtension(name, carbon_kwds)
# Workaround for a bug in the version of gcc shipped with Xcode 3.
# The _Win extension should build just like the other Carbon extensions, but
# this actually results in a hard crash of the linker.
#
if '-arch ppc64' in cflags and '-arch ppc' in cflags:
win_kwds = {'extra_compile_args': carbon_extra_compile_args + ['-arch', 'i386', '-arch', 'ppc'],
'extra_link_args': ['-framework', 'Carbon', '-arch', 'i386', '-arch', 'ppc'],
}
addMacExtension('_Win', win_kwds)
else:
addMacExtension('_Win', carbon_kwds)
# Application Services & QuickTime
app_kwds = {'extra_compile_args': carbon_extra_compile_args,
'extra_link_args': ['-framework','ApplicationServices'],
}
addMacExtension('_Launch', app_kwds)
addMacExtension('_CG', app_kwds)
exts.append( Extension('_Qt', ['qt/_Qtmodule.c'],
extra_compile_args=carbon_extra_compile_args,
extra_link_args=['-framework', 'QuickTime',
'-framework', 'Carbon']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
return missing
def detect_tkinter_explicitly(self):
    """Build _tkinter from explicitly supplied Tcl/Tk locations.

    Returns 1 (and registers the extension) when both _TCLTK_INCLUDES
    and _TCLTK_LIBS are present in the environment; returns 0 so the
    normal Tcl/Tk search can resume otherwise.
    """
    # This is enabled when both arguments are given to ./configure:
    #
    #     --with-tcltk-includes="-I/path/to/tclincludes \
    #                            -I/path/to/tkincludes"
    #     --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
    #                        -L/path/to/tklibs -ltkm.n"
    #
    # These values can also be specified or overridden via make:
    #     make TCLTK_INCLUDES="..." TCLTK_LIBS="..."
    #
    # This can be useful for building and testing tkinter with multiple
    # versions of Tcl/Tk.  Note that a build of Tk depends on a particular
    # build of Tcl so you need to specify both arguments and use care when
    # overriding.

    # The _TCLTK variables are created in the Makefile sharedmods target.
    tcltk_includes = os.environ.get('_TCLTK_INCLUDES')
    tcltk_libs = os.environ.get('_TCLTK_LIBS')
    if not (tcltk_includes and tcltk_libs):
        # Resume default configuration search.
        return 0

    # Both variables hold whitespace-separated compiler/linker flags.
    extra_compile_args = tcltk_includes.split()
    extra_link_args = tcltk_libs.split()
    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)],
                    extra_compile_args = extra_compile_args,
                    extra_link_args = extra_link_args,
                    )
    self.extensions.append(ext)
    return 1
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
join(os.getenv('HOME'), '/Library/Frameworks')
]
sysroot = macosx_sdk_root()
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if is_macosx_sdk_path(F):
if not exists(join(sysroot, F[1:], fw + '.framework')):
break
else:
if not exists(join(F, fw + '.framework')):
break
else:
# ok, F is now directory with both frameworks. Continure
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In later release we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in 'Tcl', 'Tk'
for H in 'Headers', 'Versions/Current/PrivateHeaders'
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
archs = re.findall('-arch\s+(\w+)', cflags)
if is_macosx_sdk_path(F):
fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(os.path.join(sysroot, F[1:]),))
else:
fp = os.popen("file %s/Tk.framework/Tk | grep 'for architecture'"%(F,))
detected_archs = []
for ln in fp:
a = ln.split()[-1]
if a in archs:
detected_archs.append(ln.split()[-1])
fp.close()
for a in detected_archs:
frameworks.append('-arch')
frameworks.append(a)
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
    """Locate Tcl/Tk on a generic Unix system and register _tkinter.

    Tries, in order: explicit --with-tcltk-* locations, the Darwin
    framework (AquaTk) build, and finally a version-by-version search of
    lib_dirs/inc_dirs.  Registers nothing if Tcl/Tk cannot be found.
    """
    # The _tkinter module.
    #
    # Check whether --with-tcltk-includes and --with-tcltk-libs were
    # configured or passed into the make target. If so, use these values
    # to build tkinter and bypass the searches for Tcl and TK in standard
    # locations.
    if self.detect_tkinter_explicitly():
        return

    # Rather than complicate the code below, detecting and building
    # AquaTk is a separate method. Only one Tkinter will be built on
    # Darwin - either AquaTk, if it is found, or X11 based Tk.
    if (host_platform == 'darwin' and
            self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
        return

    # Assume we haven't found any of the libraries or include files
    # The versions with dots are used on Unix, and the versions without
    # dots on Windows, for detection by cygwin.
    tcllib = tklib = tcl_includes = tk_includes = None
    for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
                    '8.2', '82', '8.1', '81', '8.0', '80']:
        tklib = self.compiler.find_library_file(lib_dirs,
                                                'tk' + version)
        tcllib = self.compiler.find_library_file(lib_dirs,
                                                 'tcl' + version)
        if tklib and tcllib:
            # Exit the loop when we've found the Tcl/Tk libraries
            break

    # Now check for the header files
    if tklib and tcllib:
        # Check for the include files on Debian and {Free,Open}BSD, where
        # they're put in /usr/include/{tcl,tk}X.Y
        dotversion = version
        if '.' not in dotversion and "bsd" in host_platform.lower():
            # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
            # but the include subdirs are named like .../include/tcl8.3.
            dotversion = dotversion[:-1] + '.' + dotversion[-1]
        tcl_include_sub = []
        tk_include_sub = []
        for dir in inc_dirs:
            tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
            tk_include_sub += [dir + os.sep + "tk" + dotversion]
        tk_include_sub += tcl_include_sub
        tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
        tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)

    if (tcllib is None or tklib is None or
            tcl_includes is None or tk_includes is None):
        self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
        return

    # OK... everything seems to be present for Tcl/Tk.
    include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
    for dir in tcl_includes + tk_includes:
        if dir not in include_dirs:
            include_dirs.append(dir)

    # Check for various platform-specific directories
    if host_platform == 'sunos5':
        include_dirs.append('/usr/openwin/include')
        added_lib_dirs.append('/usr/openwin/lib')
    elif os.path.exists('/usr/X11R6/include'):
        include_dirs.append('/usr/X11R6/include')
        added_lib_dirs.append('/usr/X11R6/lib64')
        added_lib_dirs.append('/usr/X11R6/lib')
    elif os.path.exists('/usr/X11R5/include'):
        include_dirs.append('/usr/X11R5/include')
        added_lib_dirs.append('/usr/X11R5/lib')
    else:
        # Assume default location for X11
        include_dirs.append('/usr/X11/include')
        added_lib_dirs.append('/usr/X11/lib')

    # If Cygwin, then verify that X is installed before proceeding
    if host_platform == 'cygwin':
        x11_inc = find_file('X11/Xlib.h', [], include_dirs)
        if x11_inc is None:
            return

    # Check for BLT extension
    if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                       'BLT8.0'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT8.0')
    elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                         'BLT'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT')

    # Add the Tcl/Tk libraries
    libs.append('tk'+ version)
    libs.append('tcl'+ version)

    if host_platform in ['aix3', 'aix4']:
        libs.append('ld')

    # Finally, link with the X11 libraries (not appropriate on cygwin)
    if host_platform != "cygwin":
        libs.append('X11')

    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)] + defs,
                    include_dirs = include_dirs,
                    libraries = libs,
                    library_dirs = added_lib_dirs,
                    )
    self.extensions.append(ext)

    # XXX handle these, but how to detect?
    # *** Uncomment and edit for PIL (TkImaging) extension only:
    #     -DWITH_PIL -I../Extensions/Imaging/libImaging  tkImaging.c \
    # *** Uncomment and edit for TOGL extension only:
    #     -DWITH_TOGL togl.c \
    # *** Uncomment these for TOGL extension only:
    #     -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
    """Configure the _ctypes extension on OS X from preconfigured libffi.

    Darwin ships preconfigured libffi sources in
    Modules/_ctypes/libffi_osx, so no ./configure step is needed; this
    just adds those sources and include dirs to *ext*.  Always returns
    True.
    """
    srcdir = sysconfig.get_config_var('srcdir')
    ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                              '_ctypes', 'libffi_osx'))
    sources = [os.path.join(ffi_srcdir, p)
               for p in ['ffi.c',
                         'x86/darwin64.S',
                         'x86/x86-darwin.S',
                         'x86/x86-ffi_darwin.c',
                         'x86/x86-ffi64.c',
                         'powerpc/ppc-darwin.S',
                         'powerpc/ppc-darwin_closure.S',
                         'powerpc/ppc-ffi_darwin.c',
                         'powerpc/ppc64-darwin_closure.S',
                         ]]

    # Add .S (preprocessed assembly) to C compiler source extensions.
    self.compiler.src_extensions.append('.S')

    include_dirs = [os.path.join(ffi_srcdir, 'include'),
                    os.path.join(ffi_srcdir, 'powerpc')]
    ext.include_dirs.extend(include_dirs)
    ext.sources.extend(sources)
    return True
def configure_ctypes(self, ext):
    """Configure the _ctypes extension, building the bundled libffi.

    When not using the system libffi, runs libffi's ./configure into the
    build tree (Darwin uses preconfigured files instead) and folds the
    resulting sources/flags into *ext*.  Returns True on success, False
    if libffi's configure step failed.
    """
    if not self.use_system_libffi:
        if host_platform == 'darwin':
            return self.configure_ctypes_darwin(ext)

        srcdir = sysconfig.get_config_var('srcdir')
        ffi_builddir = os.path.join(self.build_temp, 'libffi')
        ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                                  '_ctypes', 'libffi'))
        ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')

        from distutils.dep_util import newer_group

        # Re-run configure only when any libffi source is newer than the
        # generated fficonfig.py (or when --force was given).
        config_sources = [os.path.join(ffi_srcdir, fname)
                          for fname in os.listdir(ffi_srcdir)
                          if os.path.isfile(os.path.join(ffi_srcdir, fname))]
        if self.force or newer_group(config_sources,
                                     ffi_configfile):
            from distutils.dir_util import mkpath
            mkpath(ffi_builddir)
            # Keep only --host=/--build= from Python's configure args;
            # the rest is irrelevant to the embedded libffi build.
            config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split()
                           if (('--host=' in arg) or ('--build=' in arg))]
            if not self.verbose:
                config_args.append("-q")

            # Pass empty CFLAGS because we'll just append the resulting
            # CFLAGS to Python's; -g or -O2 is to be avoided.
            cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                  % (ffi_builddir, ffi_srcdir, " ".join(config_args))

            res = os.system(cmd)
            if res or not os.path.exists(ffi_configfile):
                print "Failed to configure _ctypes module"
                return False

        # fficonfig.py is executable Python; run it to collect the
        # generated build settings (Python 2 exec-statement form).
        fficonfig = {}
        with open(ffi_configfile) as f:
            exec f in fficonfig

        # Add .S (preprocessed assembly) to C compiler source extensions.
        self.compiler.src_extensions.append('.S')

        include_dirs = [os.path.join(ffi_builddir, 'include'),
                        ffi_builddir,
                        os.path.join(ffi_srcdir, 'src')]
        extra_compile_args = fficonfig['ffi_cflags'].split()

        ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
                           fficonfig['ffi_sources'])
        ext.include_dirs.extend(include_dirs)
        ext.extra_compile_args.extend(extra_compile_args)
    return True
def detect_ctypes(self, inc_dirs, lib_dirs):
    """Register the _ctypes and _ctypes_test extensions.

    Sets self.use_system_libffi when --with-system-ffi was configured and
    a usable system libffi (header + library) is found; otherwise the
    bundled libffi is configured later via configure_ctypes().
    """
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c']
    depends = ['_ctypes/ctypes.h']

    if host_platform == 'darwin':
        sources.append('_ctypes/malloc_closure.c')
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
        # XXX Is this still needed?
        ##        extra_link_args.extend(['-read_only_relocs', 'warning'])
    elif host_platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')
    elif host_platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')

    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'])
    self.extensions.extend([ext, ext_test])

    if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
        return

    if host_platform == 'darwin':
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')

    ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
    if not ffi_inc or ffi_inc[0] == '':
        ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        # Scan the header for the LIBFFI_H guard; if it is never seen the
        # header is considered unusable and ffi_inc is reset to None.
        fp = open(ffi_h)
        while 1:
            line = fp.readline()
            if not line:
                ffi_inc = None
                break
            if line.startswith('#define LIBFFI_H'):
                break
        # NOTE(review): fp is never closed here; harmless in a short-lived
        # setup.py but a `with` block would be cleaner.
    ffi_lib = None
    if ffi_inc is not None:
        for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break

    if ffi_inc and ffi_lib:
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True
class PyBuildInstall(install):
    """install command that skips the lib_dynload location warning."""
    # Suppress the warning about installation into the lib_dynload
    # directory, which is not in sys.path when running Python during
    # installation:
    def initialize_options(self):
        install.initialize_options(self)
        self.warn_dir = 0
class PyBuildInstallLib(install_lib):
    # Do exactly what install_lib does but make sure correct access modes get
    # set on installed directories and files. All installed files will get
    # mode 644 unless they are a shared library in which case they will get
    # mode 755. All installed directories will get mode 755.

    # Shared-library filename suffix for this build (e.g. '.so').
    so_ext = sysconfig.get_config_var("SO")

    def install(self):
        """Install via install_lib, then normalise modes on the output."""
        outfiles = install_lib.install(self)
        self.set_file_modes(outfiles, 0644, 0755)
        self.set_dir_modes(self.install_dir, 0755)
        return outfiles

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        """chmod each installed file; shared libraries get sharedLibMode."""
        if not self.is_chmod_supported(): return
        if not files: return

        for filename in files:
            if os.path.islink(filename): continue
            mode = defaultMode
            if filename.endswith(self.so_ext): mode = sharedLibMode
            log.info("changing mode of %s to %o", filename, mode)
            if not self.dry_run: os.chmod(filename, mode)

    def set_dir_modes(self, dirname, mode):
        """Recursively chmod every directory under dirname."""
        if not self.is_chmod_supported(): return
        # Python 2 only: os.path.walk was removed in Python 3.
        os.path.walk(dirname, self.set_dir_modes_visitor, mode)

    def set_dir_modes_visitor(self, mode, dirname, names):
        # Callback for os.path.walk; skips symlinked directories.
        if os.path.islink(dirname): return
        log.info("changing mode of %s to %o", dirname, mode)
        if not self.dry_run: os.chmod(dirname, mode)

    def is_chmod_supported(self):
        # chmod is missing on some platforms.
        return hasattr(os, 'chmod')
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    """Entry point: run distutils setup() with CPython's build metadata.

    The custom cmdclass hooks in the extension-detection (PyBuildExt)
    and mode-fixing (PyBuildInstallLib) machinery defined above.
    """
    # turn off warnings when deprecated modules are imported
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    setup(# PyPI Metadata (PEP 301)
          name = "Python",
          version = sys.version.split()[0],
          url = "http://www.python.org/%s" % sys.version[:3],
          maintainer = "Guido van Rossum and the Python community",
          maintainer_email = "python-dev@python.org",
          description = "A high-level object-oriented programming language",
          long_description = SUMMARY.strip(),
          license = "PSF license",
          classifiers = filter(None, CLASSIFIERS.split("\n")),
          platforms = ["Many"],

          # Build info
          cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall,
                      'install_lib':PyBuildInstallLib},
          # The struct module is defined here, because build_ext won't be
          # called unless there's at least one extension module defined.
          ext_modules=[Extension('_struct', ['_struct.c'])],

          # Scripts to install
          scripts = ['Tools/scripts/pydoc', 'Tools/scripts/idle',
                     'Tools/scripts/2to3',
                     'Lib/smtpd.py']
          )

# --install-platlib
if __name__ == '__main__':
    main()
|
shoreflyer/cerbero | refs/heads/master | test/test_cerbero_build_filesprovider.py | 27 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import shutil
import unittest
import tempfile
from cerbero.build import filesprovider
from cerbero.config import Platform, License
from test.test_build_common import add_files
from test.test_common import DummyConfig
class Config(DummyConfig):
    """Test config that installs into a temp dir for a given platform."""
    def __init__(self, tmp, platform):
        # tmp: prefix directory files are resolved against;
        # platform: one of cerbero.config.Platform.
        self.prefix = tmp
        self.target_platform = platform
class FilesProvider(filesprovider.FilesProvider):
    """Fixture provider exercising every file category of FilesProvider."""
    # %(bext)s expands to the platform binary extension
    # ('.exe' on Windows, '' on Linux — see the expected lists in setUp).
    files_misc = ['README', 'libexec/gstreamer-0.10/pluginsloader%(bext)s']
    files_libs = ['libgstreamer-0.10']
    files_bins = ['gst-launch']
    files_devel = ['include/gstreamer.h']
    licenses_devel = [License.LGPL]
    # Per-platform entries merged with the generic lists above.
    platform_files_bins = {
        Platform.WINDOWS: ['windows'],
        Platform.LINUX: ['linux']}
    platform_files_libs = {
        Platform.WINDOWS: ['libgstreamer-win32'],
        Platform.LINUX: ['libgstreamer-x11']}
class PackageTest(unittest.TestCase):
    """Checks FilesProvider file listings for Windows and Linux targets."""

    def setUp(self):
        # One provider per platform, both resolving files in the same
        # temporary prefix; the expected lists below mirror what
        # add_files() creates there.
        self.tmp = tempfile.mkdtemp()
        win32config = Config(self.tmp, Platform.WINDOWS)
        linuxconfig = Config(self.tmp, Platform.LINUX)
        self.win32recipe = FilesProvider(win32config)
        self.linuxrecipe = FilesProvider(linuxconfig)

        self.winbin = ['bin/gst-launch.exe', 'bin/windows.exe']
        self.linuxbin = ['bin/gst-launch', 'bin/linux']
        self.winlib = ['bin/libgstreamer-0.10.dll', 'bin/libgstreamer-win32.dll']
        self.linuxlib = ['lib/libgstreamer-0.10.so.1', 'lib/libgstreamer-x11.so.1']
        self.winmisc = ['README', 'libexec/gstreamer-0.10/pluginsloader.exe']
        self.linuxmisc = ['README', 'libexec/gstreamer-0.10/pluginsloader']
        devfiles = ['include/gstreamer.h', 'lib/libgstreamer-0.10.a',
                    'lib/libgstreamer-0.10.la']

        self.windevfiles = devfiles + ['lib/libgstreamer-win32.a',
            'lib/libgstreamer-win32.la', 'lib/libgstreamer-win32.dll.a',
            'lib/libgstreamer-win32.def', 'lib/gstreamer-win32.lib',
            'lib/libgstreamer-0.10.dll.a', 'lib/libgstreamer-0.10.def',
            'lib/gstreamer-0.10.lib']
        self.lindevfiles = devfiles + ['lib/libgstreamer-0.10.so',
            'lib/libgstreamer-x11.a', 'lib/libgstreamer-x11.la',
            'lib/libgstreamer-x11.so']

    def tearDown(self):
        shutil.rmtree(self.tmp)

    def testFilesCategories(self):
        """All four categories are reported, sorted."""
        self.assertEquals(sorted(['bins', 'libs', 'misc', 'devel']),
                          self.win32recipe._files_categories())

    def testListBinaries(self):
        """Binaries get the platform extension appended."""
        self.assertEquals(self.win32recipe.files_list_by_category('bins'),
                          sorted(self.winbin))
        self.assertEquals(self.linuxrecipe.files_list_by_category('bins'),
                          sorted(self.linuxbin))

    def testListLibraries(self):
        """Libraries resolve to .dll (win) / versioned .so (linux)."""
        add_files(self.tmp)
        self.assertEquals(self.win32recipe.files_list_by_category('libs'),
                          sorted(self.winlib))
        self.assertEquals(self.linuxrecipe.files_list_by_category('libs'),
                          sorted(self.linuxlib))

    def testDevelFiles(self):
        """Development files include static/import libs per platform."""
        add_files(self.tmp)
        self.assertEquals(self.win32recipe.devel_files_list(),
                          sorted(self.windevfiles))
        self.assertEquals(self.linuxrecipe.devel_files_list(),
                          sorted(self.lindevfiles))

    def testDistFiles(self):
        """Dist list is everything except development files."""
        win32files = self.winlib + self.winbin + self.winmisc
        linuxfiles = self.linuxlib + self.linuxbin + self.linuxmisc
        add_files(self.tmp)
        self.assertEquals(self.win32recipe.dist_files_list(), sorted(win32files))
        self.assertEquals(self.linuxrecipe.dist_files_list(), sorted(linuxfiles))

    def testGetAllFiles(self):
        """files_list() is the union of dist and devel files."""
        win32files = self.winlib + self.winbin + self.winmisc + self.windevfiles
        linuxfiles = self.linuxlib + self.linuxbin + self.linuxmisc + self.lindevfiles
        add_files(self.tmp)
        self.assertEquals(self.win32recipe.files_list(), sorted(win32files))
        self.assertEquals(self.linuxrecipe.files_list(), sorted(linuxfiles))
|
erwilan/ansible | refs/heads/devel | lib/ansible/plugins/callback/profile_tasks.py | 34 | # (C) 2016, Joel, http://github.com/jjshoe
# (C) 2015, Tom Paine, <github@aioue.net>
# (C) 2014, Jharrod LaFon, @JharrodLaFon
# (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com>
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# File is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See <http://www.gnu.org/licenses/> for a copy of the
# GNU General Public License
# Provides per-task timing, ongoing playbook elapsed time and
# ordered list of top 20 longest running tasks at end
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import time
from ansible.module_utils.six.moves import reduce
from ansible.plugins.callback import CallbackBase
# Module-level clocks: t0 is the playbook start time; tn is reset to the
# start of each task by tasktime().
t0 = tn = time.time()
def secondsToStr(t):
    """Format a duration *t* in seconds as ``H:MM:SS.mmm``.

    Successively divides milliseconds into seconds, minutes and hours.
    """
    # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
    # PEP 8 (E731): a named def instead of a lambda bound to a name.
    def rediv(ll, b):
        # Split the leading accumulator by base *b*, keeping the remainders.
        return list(divmod(ll[0], b)) + ll[1:]

    return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
def filled(msg, fchar="*"):
    """Pad *msg* to a 79-column banner of *fchar*, ending in a space.

    An empty message yields a full 79-character filler; a non-empty one
    is suffixed with a space and padded to the right (minimum 3 fillers).
    """
    if not msg:
        return "%s " % (fchar * 79)
    banner = "%s " % msg
    pad = max(79 - len(banner), 3)
    return "%s%s " % (banner, fchar * pad)
def timestamp(self):
    """Convert the stored start time of the current task into a duration.

    No-op when no task is currently being tracked.
    """
    if self.current is None:
        return
    entry = self.stats[self.current]
    entry['time'] = time.time() - entry['time']
def tasktime():
    """Return a banner line with wall-clock time, per-task and total elapsed.

    Side effect: resets the module-level task clock ``tn`` to now.
    """
    global tn
    wall_clock = time.strftime('%A %d %B %Y %H:%M:%S %z')
    since_last = secondsToStr(time.time() - tn)
    since_start = secondsToStr(time.time() - t0)
    tn = time.time()
    return filled('%s (%s)%s%s' % (wall_clock, since_last, ' ' * 7, since_start))
class CallbackModule(CallbackBase):
    """
    This callback module provides per-task timing, ongoing playbook elapsed time
    and ordered list of top 20 longest running tasks at end.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'profile_tasks'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        # task uuid -> {'time': start-timestamp (later converted to a
        # duration by timestamp()), 'name': ..., optional 'path': ...}.
        # OrderedDict preserves execution order for unsorted output.
        self.stats = collections.OrderedDict()
        self.current = None

        # PROFILE_TASKS_SORT_ORDER: 'ascending' disables the reverse sort;
        # the default (True) sorts descending.
        # NOTE(review): the 'none' check in playbook_on_stats only fires
        # when the env var is literally set to 'none' — confirm intended.
        self.sort_order = os.getenv('PROFILE_TASKS_SORT_ORDER', True)
        # PROFILE_TASKS_TASK_OUTPUT_LIMIT: integer, or 'all' for no limit.
        self.task_output_limit = os.getenv('PROFILE_TASKS_TASK_OUTPUT_LIMIT', 20)

        if self.sort_order == 'ascending':
            self.sort_order = False

        if self.task_output_limit == 'all':
            self.task_output_limit = None
        else:
            self.task_output_limit = int(self.task_output_limit)

        super(CallbackModule, self).__init__()

    def _record_task(self, task):
        """
        Logs the start of each task
        """
        self._display.display(tasktime())
        # Close out the timing of the previous task before starting a new one.
        timestamp(self)

        # Record the start time of the current task
        self.current = task._uuid
        self.stats[self.current] = {'time': time.time(), 'name': task.get_name()}
        if self._display.verbosity >= 2:
            self.stats[self.current]['path'] = task.get_path()

    def v2_playbook_on_task_start(self, task, is_conditional):
        self._record_task(task)

    def v2_playbook_on_handler_task_start(self, task):
        self._record_task(task)

    def playbook_on_setup(self):
        self._display.display(tasktime())

    def playbook_on_stats(self, stats):
        self._display.display(tasktime())
        self._display.display(filled("", fchar="="))

        # Convert the in-flight start timestamp of the last task to a duration.
        timestamp(self)

        results = self.stats.items()

        # Sort the tasks by the specified sort
        if self.sort_order != 'none':
            results = sorted(
                self.stats.items(),
                key=lambda x: x[1]['time'],
                reverse=self.sort_order,
            )

        # Display the number of tasks specified or the default of 20
        results = results[:self.task_output_limit]

        # Print the timings
        for uuid, result in results:
            msg = u"{0:-<70}{1:->9}".format(result['name'] + u' ', u' {0:.02f}s'.format(result['time']))
            if 'path' in result:
                msg += u"\n{0:-<79}".format(result['path'] + u' ')
            self._display.display(msg)
|
burnash/skype4py | refs/heads/master | examples/callfriend.py | 18 | #!python
# ---------------------------------------------------------------------------------------------
# Python / Skype4Py example that takes a skypename from command line parameter,
# checks if that skypename is in contact list and if yes then starts a call to that skypename.
#
# Tested with Skype4Py version 0.9.28.2 and Skype verson 3.5.0.214
import sys
import Skype4Py
# This variable will get its actual value in OnCall handler
CallStatus = 0

# Here we define a set of call statuses that indicate a call has been either aborted or finished
CallIsFinished = set([Skype4Py.clsFailed, Skype4Py.clsFinished, Skype4Py.clsMissed, Skype4Py.clsRefused, Skype4Py.clsBusy, Skype4Py.clsCancelled]);

def AttachmentStatusText(status):
    # Human-readable name for an API attachment status code.
    return skype.Convert.AttachmentStatusToText(status)

def CallStatusText(status):
    # Human-readable name for a call status code.
    return skype.Convert.CallStatusToText(status)

# This handler is fired when status of Call object has changed
def OnCall(call, status):
    # Publish the latest status so the main loop can poll it.
    global CallStatus
    CallStatus = status
    print 'Call status: ' + CallStatusText(status)

# This handler is fired when Skype attachment status changes
def OnAttach(status):
    print 'API attachment status: ' + AttachmentStatusText(status)
    if status == Skype4Py.apiAttachAvailable:
        # The API became available -- (re)attach immediately.
        skype.Attach()
# Let's see if we were started with a command line parameter..
try:
    CmdLine = sys.argv[1]
except:
    # NOTE(review): bare except; catching IndexError would be more precise.
    print 'Missing command line parameter'
    sys.exit()

# Creating Skype object and assigning event handlers..
skype = Skype4Py.Skype()
skype.OnAttachmentStatus = OnAttach
skype.OnCallStatus = OnCall

# Starting Skype if it's not running already..
if not skype.Client.IsRunning:
    print 'Starting Skype..'
    skype.Client.Start()

# Attaching to Skype..
print 'Connecting to Skype..'
skype.Attach()

# Checking if what we got from command line parameter is present in our contact list
Found = False
for F in skype.Friends:
    if F.Handle == CmdLine:
        Found = True
        print 'Calling ' + F.Handle + '..'
        skype.PlaceCall(CmdLine)
        break

if not Found:
    print 'Call target not found in contact list'
    sys.exit()

# Loop until CallStatus gets one of "call terminated" values in OnCall handler
while not CallStatus in CallIsFinished:
    # NOTE(review): busy-wait burns a CPU core; a short time.sleep() in the
    # body would be kinder to the system.
    pass
|
rabipanda/tensorflow | refs/heads/master | tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble_test.py | 92 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the structural state space ensembles."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.timeseries.python.timeseries import estimators
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import structural_ensemble
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class StructuralEnsembleEstimatorTests(test.TestCase):
    """Smoke tests for StructuralEnsemble-based time series estimators."""

    def simple_data(self, sample_every, dtype, period, num_samples, num_features):
        """Build a noisy multi-feature sinusoid in TrainEvalFeatures format.

        Each feature is the same sinusoid phase-shifted by its index, with
        small Gaussian noise; TIMES is [1, num_samples] and VALUES is
        [1, num_samples, num_features].
        """
        time = sample_every * numpy.arange(num_samples)
        noise = numpy.random.normal(
            scale=0.01, size=[num_samples, num_features])
        values = noise + numpy.sin(
            numpy.arange(num_features)[None, ...]
            + time[..., None] / float(period) * 2.0 * numpy.pi).astype(
                dtype.as_numpy_dtype)
        return {TrainEvalFeatures.TIMES: numpy.reshape(time, [1, -1]),
                TrainEvalFeatures.VALUES: numpy.reshape(
                    values, [1, -1, num_features])}

    def dry_run_train_helper(
        self, sample_every, period, num_samples, model_type, model_args,
        num_features=1):
        """Train for 1 then 3 steps and assert the eval loss decreased."""
        numpy.random.seed(1)
        dtype = dtypes.float32
        features = self.simple_data(
            sample_every, dtype=dtype, period=period, num_samples=num_samples,
            num_features=num_features)
        model = model_type(
            configuration=(
                state_space_model.StateSpaceModelConfiguration(
                    num_features=num_features,
                    dtype=dtype,
                    covariance_prior_fn=lambda _: 0.)),
            **model_args)

        class _RunConfig(estimator_lib.RunConfig):
            # Fixed graph-level seed so both train/eval rounds are comparable.
            @property
            def tf_random_seed(self):
                return 4

        estimator = estimators.StateSpaceRegressor(model, config=_RunConfig())
        train_input_fn = input_pipeline.RandomWindowInputFn(
            input_pipeline.NumpyReader(features), num_threads=1, shuffle_seed=1,
            batch_size=16, window_size=16)
        eval_input_fn = input_pipeline.WholeDatasetInputFn(
            input_pipeline.NumpyReader(features))
        estimator.train(input_fn=train_input_fn, max_steps=1)
        first_evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
        estimator.train(input_fn=train_input_fn, max_steps=3)
        second_evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
        # More training steps should reduce the loss on the same data.
        self.assertLess(second_evaluation["loss"], first_evaluation["loss"])

    def test_structural_multivariate(self):
        """End-to-end dry run with three features."""
        self.dry_run_train_helper(
            sample_every=3,
            period=5,
            num_samples=100,
            num_features=3,
            model_type=structural_ensemble.StructuralEnsemble,
            model_args={
                "periodicities": 2,
                "moving_average_order": 2,
                "autoregressive_order": 1
            })

    def test_exogenous_input(self):
        """Test that no errors are raised when using exogenous features."""
        dtype = dtypes.float64
        times = [1, 2, 3, 4, 5, 6]
        values = [[0.01], [5.10], [5.21], [0.30], [5.41], [0.50]]
        feature_a = [["off"], ["on"], ["on"], ["off"], ["on"], ["off"]]
        sparse_column_a = feature_column.sparse_column_with_keys(
            column_name="feature_a", keys=["on", "off"])
        one_hot_a = layers.one_hot_column(sparse_id_column=sparse_column_a)
        regressor = estimators.StructuralEnsembleRegressor(
            periodicities=[],
            num_features=1,
            moving_average_order=0,
            exogenous_feature_columns=[one_hot_a],
            dtype=dtype)
        features = {TrainEvalFeatures.TIMES: times,
                    TrainEvalFeatures.VALUES: values,
                    "feature_a": feature_a}
        train_input_fn = input_pipeline.RandomWindowInputFn(
            input_pipeline.NumpyReader(features),
            window_size=6, batch_size=1)
        regressor.train(input_fn=train_input_fn, steps=1)
        eval_input_fn = input_pipeline.WholeDatasetInputFn(
            input_pipeline.NumpyReader(features))
        evaluation = regressor.evaluate(input_fn=eval_input_fn, steps=1)
        # Prediction must also accept continuation exogenous values.
        predict_input_fn = input_pipeline.predict_continuation_input_fn(
            evaluation, times=[[7, 8, 9]],
            exogenous_features={"feature_a": [[["on"], ["off"], ["on"]]]})
        regressor.predict(input_fn=predict_input_fn)

    def test_no_periodicity(self):
        """Test that no errors are raised when periodicites is None."""
        dtype = dtypes.float64
        times = [1, 2, 3, 4, 5, 6]
        values = [[0.01], [5.10], [5.21], [0.30], [5.41], [0.50]]
        regressor = estimators.StructuralEnsembleRegressor(
            periodicities=None,
            num_features=1,
            moving_average_order=0,
            dtype=dtype)
        features = {TrainEvalFeatures.TIMES: times,
                    TrainEvalFeatures.VALUES: values}
        train_input_fn = input_pipeline.RandomWindowInputFn(
            input_pipeline.NumpyReader(features),
            window_size=6, batch_size=1)
        regressor.train(input_fn=train_input_fn, steps=1)
        eval_input_fn = input_pipeline.WholeDatasetInputFn(
            input_pipeline.NumpyReader(features))
        evaluation = regressor.evaluate(input_fn=eval_input_fn, steps=1)
        predict_input_fn = input_pipeline.predict_continuation_input_fn(
            evaluation, times=[[7, 8, 9]])
        regressor.predict(input_fn=predict_input_fn)
# Run the test suite when executed directly.
if __name__ == "__main__":
    test.main()
|
stanta/darfchain | refs/heads/master | ICO/ico/state.py | 1 | from enum import IntEnum
class CrowdsaleState(IntEnum):
    """Match Crowdsale.State in the contract.

    Member values must stay in sync with the Solidity enum ordering;
    do not reorder or renumber.
    """
    Unknown = 0
    Preparing = 1
    PreFunding = 2
    Funding = 3
    Success = 4
    Failure = 5
    Finalized = 6
    Refunding = 7
class UpgradeState(IntEnum):
    """Match UpgradeAgentEnabledToken.State in the contract.

    Member values must stay in sync with the Solidity enum ordering.
    """
    Unknown = 0
    NotAllowed = 1
    WaitingForAgent = 2
    ReadyToUpgrade = 3
    Upgrading = 4
|
telminov/ansible-manager | refs/heads/master | core/views/rest.py | 1 | from django.db.models import Count
from django.http import HttpResponse
from rest_framework.generics import ListAPIView
from rest_framework.views import APIView
from rest_framework.authentication import TokenAuthentication
from prometheus_client import generate_latest
from core import models
from core import serializers
from core import consts
# Token authentication that expects "Authorization: Bearer <token>".
#
# NOTE(review): the original did ``tokenBearer = TokenAuthentication`` and
# then assigned ``keyword`` on the alias, which mutates the shared DRF
# TokenAuthentication class for *every* consumer in the process.
# Subclassing keeps the Bearer keyword local to this module while the
# ``tokenBearer`` name used in authentication_classes below is unchanged.
# Confirm no other module relied on the global mutation.
class tokenBearer(TokenAuthentication):
    keyword = 'Bearer'
class TaskLogs(ListAPIView):
    """List log entries for one task, for incremental polling.

    The ``task_id`` URL kwarg selects the task; the optional
    ``last_log_id`` query parameter limits results to logs with a
    greater id (0 means "everything").
    """
    model = models.TaskLog
    serializer_class = serializers.TaskLogSerializer

    def get_queryset(self):
        last_log_id = self.request.GET.get('last_log_id', 0)
        return self.model.objects.filter(task_id=self.kwargs['task_id'], id__gt=last_log_id)

task_logs = TaskLogs.as_view()
class DjangoMetrics(APIView):
    """Expose prometheus_client's default registry in text format."""
    authentication_classes = (tokenBearer,)

    def get(self, request):
        # generate_latest() returns bytes; decode for the text response.
        result = generate_latest().decode()
        return HttpResponse(result, content_type='text/plain; charset=utf-8')
class AnsibleManagerMetrics(APIView):
    """Hand-rolled Prometheus exposition of task-template metrics."""
    authentication_classes = (tokenBearer,)

    def get(self, request):
        result = '# HELP ansible_manager_template_last_task_success show success or fail last task\n'
        result += '# TYPE ansible_manager_template_last_task_success gauge\n'
        # Only templates with a cron schedule are reported.
        for template in models.TaskTemplate.objects.exclude(cron=''):
            # NOTE(review): variable is named completed_tasks but filters on
            # NOT_RUN_STATUSES -- confirm the consts semantics match intent.
            completed_tasks = template.tasks.filter(status__in=consts.NOT_RUN_STATUSES)
            if not completed_tasks:
                continue
            success = int(completed_tasks.last().status == consts.COMPLETED)
            result += 'ansible_manager_template_last_task_success{id="%s", name="%s"} %s\n' % (
                template.pk, template.name, success)
        result += '# HELP ansible_manager_tasks_completed_total show number of completed tasks\n'
        result += '# TYPE ansible_manager_tasks_completed_total gauge\n'
        # values_list + annotate yields 4-tuples: the three fields plus count.
        tasks = models.Task.objects.values_list('template__id', 'template__name', 'status').annotate(count=Count('id'))
        for template_id, template_name, status, count in tasks:
            result += 'ansible_manager_tasks_completed_total{id="%s", name="%s", status="%s"} %s\n' % (
                template_id, template_name, status, count
            )
        return HttpResponse(result, content_type='text/plain; charset=utf-8')
|
amyshi188/osf.io | refs/heads/develop | admin/pre_reg/utils.py | 21 |
SORT_BY = {
    'initiator': 'initiator',
    'n_initiator': '-initiator',
    'title': 'title',
    'n_title': '-title',
    'date': 'date',
    'n_date': '-date',
    'state': 'state',
    'n_state': '-state',
}


def sort_drafts(query_set, order_by):
    """Return the drafts sorted by approval date or approval state.

    Only the 'date', 'state' and 'n_state' orderings are honoured
    explicitly; every other value (including 'n_date', 'title' and
    'initiator') falls back to newest-first by initiation date.
    """
    if order_by == SORT_BY['date']:
        key, descending = (lambda d: d.approval.initiation_date), False
    elif order_by == SORT_BY['state']:
        key, descending = (lambda d: d.approval.state), False
    elif order_by == SORT_BY['n_state']:
        key, descending = (lambda d: d.approval.state), True
    else:
        key, descending = (lambda d: d.approval.initiation_date), True
    return sorted(query_set, key=key, reverse=descending)
|
ZyqGitHub1/lms | refs/heads/master | myapp/admin.py | 13848 | from django.contrib import admin
# Register your models here.
|
denis-pitul/django | refs/heads/master | django/dispatch/__init__.py | 758 | """Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) http://pypi.python.org/pypi/PyDispatcher/2.0.1
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from django.dispatch.dispatcher import Signal, receiver # NOQA
|
mgyenik/micropython | refs/heads/master | tests/bench/funcall-3-funcall-local.py | 101 | # Function call overhead test
# Perform the same trivial operation as calling function, cached in a
# local variable. This is commonly known optimization for overly dynamic
# languages (the idea is to cut on symbolic look up overhead, as local
# variables are accessed by offset, not by name)
import bench
def f(x):
    # Deliberately trivial: the benchmark measures call overhead, not work.
    return x + 1
def test(num):
    # Cache the global function in a local variable: locals are accessed
    # by offset rather than by name, so this removes the per-call global
    # lookup that the sibling benchmarks pay (see module header comment).
    f_ = f
    for i in iter(range(num)):
        a = f_(i)

bench.run(test)
|
moutai/scikit-learn | refs/heads/master | examples/linear_model/plot_omp.py | 385 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal

# Dictionary size (atoms) and signal length.
n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
###################

# y = Xw
# |x|_0 = n_nonzero_coefs

y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)

# Indices of the true non-zero coefficients.
idx, = w.nonzero()

# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()
|
siggame/server | refs/heads/master | util.py | 1 | import re
def command(function):
    """Decorator: flag *function* as a command and hand it back unchanged."""
    setattr(function, 'is_command', True)
    return function
def is_command(function):
    """Return the function's ``is_command`` flag, or False when unset."""
    try:
        return function.is_command
    except AttributeError:
        return False
#Copied from http://stackoverflow.com/a/1176023/1430838
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')


def uncamel(name):
    """Convert a CamelCase identifier to snake_case (acronym-aware)."""
    return all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', name)).lower()
|
asshatter/community-detect | refs/heads/master | community_detect/__init__.py | 1 | from community_detect.community_detect import Community |
sushant354/judis-re | refs/heads/master | utils.py | 2 | import os
import datetime
import string
import sys
import urllib
import urlparse
import subprocess
import types
from xml.sax import saxutils
from xml.parsers.expat import ExpatError
from xml.dom import minidom, Node
import re
import tempfile
import logging
import codecs
import magic
import calendar
import math
import random
import time
from HTMLParser import HTMLParser, HTMLParseError
from bs4 import BeautifulSoup, NavigableString, Tag
# Month-name alternation, including misspellings present in the scraped data.
monthre = 'january|february|march|april|may|june|july|august|september|october|november|december|frbruary|februay'
# Descriptive dates such as '12 th March, 2011'.
descriptiveDateRe = re.compile('(?P<day>\d+)\s*(st|nd|rd|th)?\s*(?P<month>%s)[\s.,]+(?P<year>\d+)' % monthre, flags=re.IGNORECASE)
# Numeric dates such as '12/03/2011', '12.03.2011' or '12-03-2011'.
digitsDateRe = re.compile('(?P<day>\d+)\s*[/. -]\s*(?P<month>\d+)\s*[/. -]\s*(?P<year>\d+)')
def month_to_num(month):
    """Return the 1-based month number for an English month name.

    Matching is case-insensitive; the misspellings 'frbruary' and
    'februay' (seen in the scraped data) are treated as February.
    Returns None when the name is not recognised.
    """
    month = month.lower()
    if month in ('frbruary', 'februay'):
        month = 'february'
    for num, name in enumerate(calendar.month_name):
        if name.lower() == month:
            return num
    return None
def datestr_to_obj(text):
    """Extract the first recognisable date in *text* as a datetime.

    Tries the descriptive form ('12 March, 2011') first, then the numeric
    form ('12/03/2011').  Returns None when nothing parses or the parts do
    not form a valid date.

    NOTE(review): Python 2 only -- on Python 3 encode() returns bytes and
    the str-pattern regexes below would raise TypeError.
    """
    text = text.encode('ascii', 'ignore')
    reobj = descriptiveDateRe.search(text)
    dateobj = None
    day = month = year = None
    if reobj:
        groups = reobj.groupdict()
        year = int(groups['year'])
        month = month_to_num(groups['month'])
        day = int(groups['day'])
    else:
        reobj = digitsDateRe.search(text)
        if reobj:
            groups = reobj.groupdict()
            year = int(groups['year'])
            month = int(groups['month'])
            day = int(groups['day'])
    if day and month and year:
        # Hard-coded corrections for year typos observed in the source data.
        if year in [20111, 20141, 20110]:
            year = 2011
        try:
            dateobj = datetime.datetime(year, month, day)
        except ValueError:
            dateobj = None
    return dateobj
def parse_xml(xmlpage):
    """Parse an XML string into a minidom document, or None if malformed."""
    try:
        return minidom.parseString(xmlpage)
    except ExpatError:
        return None
def get_node_value(xmlNodes):
    """Join the text content of the given DOM nodes, skipping bare newlines."""
    pieces = [node.data for node in xmlNodes
              if node.nodeType == Node.TEXT_NODE and node.data != '\n']
    return u''.join(pieces)
def check_next_page(tr, pagenum):
    """Inspect a table row of pagination links.

    Returns (pageblock, nextlink): pageblock is True when every link label
    in the row is numeric (i.e. the row is a page-number block), and
    nextlink is {'href': ..., 'title': ...} for page ``pagenum + 1`` when
    such a link exists, else None.
    """
    links = tr.findAll('a')
    if len(links) <= 0:
        return False, None

    # Qualify the row: every non-empty link label must be numeric.
    for link in links:
        contents = get_tag_contents(link)
        if not contents:
            continue
        contents = contents.strip()
        if not re.match('[\d.]+$', contents):
            return False, None

    pageblock = True
    nextlink = None
    for link in links:
        contents = get_tag_contents(link)
        try:
            # string.atoi is Python 2 only (matches the rest of this file).
            val = string.atoi(contents)
        except ValueError:
            continue
        if val == pagenum + 1 and link.get('href'):
            nextlink = {'href': link.get('href'), 'title': '%d' % val}
            break
    return pageblock, nextlink
def parse_webpage(webpage):
    """Parse HTML into a BeautifulSoup tree, or None on parser failure.

    The original used a bare ``except:``, which also swallows
    KeyboardInterrupt and SystemExit; catch only genuine errors.
    """
    try:
        return BeautifulSoup(webpage, 'html5lib')
    except Exception:
        return None
def date_to_xml(dateobj):
    """Map a date-like object to a {'day', 'month', 'year'} dict."""
    return {
        'day': dateobj.day,
        'month': dateobj.month,
        'year': dateobj.year,
    }
def print_tag_file(filepath, feature):
    """Write *feature* as a UTF-8 XML document rooted at <document>."""
    filehandle = codecs.open(filepath, 'w', 'utf8')
    filehandle.write(u'<?xml version="1.0" encoding="utf-8"?>\n')
    filehandle.write(obj_to_xml('document', feature))
    filehandle.close()
def obj_to_xml(tagName, obj):
    """Serialise a nested dict/list/scalar structure to an XML string.

    Dicts become nested elements (keys emitted in sorted order), lists
    repeat the same tag per element, and scalars go through
    get_xml_tag().  Python 2 only (relies on types.StringTypes etc.).
    """
    if type(obj) in types.StringTypes:
        return get_xml_tag(tagName, obj)

    tags = ['<%s>' % tagName]
    ks = obj.keys()
    ks.sort()
    for k in ks:
        newobj = obj[k]
        if type(newobj) == types.DictType:
            tags.append(obj_to_xml(k, newobj))
        elif type(newobj) == types.ListType:
            for o in newobj:
                tags.append(obj_to_xml(k, o))
        else:
            tags.append(get_xml_tag(k, obj[k]))
    tags.append(u'</%s>' % tagName)
    xmltags = u'\n'.join(tags)
    return xmltags
def get_xml_tag(tagName, tagValue, escape = True):
    """Render one <tag>value</tag> element.

    Ints and floats are formatted directly; other values are escaped
    unless *escape* is False.  Python 2 only (types.IntType/FloatType).
    """
    if type(tagValue) == types.IntType:
        xmltag = u'<%s>%d</%s>' % (tagName, tagValue, tagName)
    elif type(tagValue) == types.FloatType:
        xmltag = u'<%s>%f</%s>' % (tagName, tagValue, tagName)
    else:
        if escape:
            tagValue = escape_xml(tagValue)
        xmltag = u'<%s>%s</%s>' % (tagName, tagValue, tagName)
    return xmltag
def escape_xml(tagvalue):
    """Escape &, < and > so *tagvalue* is safe as XML text."""
    return saxutils.escape(tagvalue)
def url_to_filename(url, catchpath, catchquery):
    """Derive a flat filename from a URL.

    catchpath:  when truthy, include the URL path segments.
    catchquery: iterable of query-parameter names whose values to include.
    Returns the '_'-joined name (with '/' replaced by '_'), or None when
    nothing was captured.

    Replaces the deprecated ``string`` module functions and
    ``dict.has_key`` with str methods / ``in`` (works on Python 2 and 3;
    ``urlparse`` itself remains the Python 2 module imported above).
    """
    htuple = urlparse.urlparse(url)
    path = htuple[2]
    words = []

    if catchpath:
        words.extend(path.split('/'))

    if catchquery:
        qdict = {}
        for q in htuple[4].split('&'):
            x = q.split('=')
            if len(x) == 2:
                qdict[x[0]] = x[1]
        for q in catchquery:
            if q in qdict:
                words.append(qdict[q])

    if words:
        # Captured values may still contain '/'; keep the name flat.
        return '_'.join(word.replace('/', '_') for word in words)
    return None
def get_tag_contents(tag):
    """Recursively collect the text inside a BeautifulSoup tag.

    Text from nested tags is prefixed with a single space.
    """
    retval = ''
    for content in tag.contents:
        if type(content) == NavigableString:
            retval += content
        elif type(content) == Tag:
            retval += ' ' + get_tag_contents(content)
    return retval
def tag_contents_without_recurse(tag):
    """Return only the direct text children of a tag (no descent)."""
    contents = []
    for content in tag.contents:
        if type(content) == NavigableString:
            contents.append(content)
    return contents
def mk_dir(dirname):
    """Create *dirname* if it does not already exist.

    EAFP instead of the old exists()-then-mkdir check, which was racy
    when several crawler processes share the output tree.  A
    pre-existing path is a no-op; any other OS error propagates.
    """
    import errno
    try:
        os.mkdir(dirname)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def pad_zero(t):
    """Render *t* with a leading zero when it is a single digit."""
    prefix = '0' if t < 10 else ''
    return '%s%d' % (prefix, t)


def dateobj_to_str(dateobj, sep, reverse = False):
    """Format a date as DD<sep>MM<sep>YYYY, or YYYY<sep>MM<sep>DD if reverse."""
    day = pad_zero(dateobj.day)
    month = pad_zero(dateobj.month)
    year = pad_zero(dateobj.year)
    first, second, third = (year, month, day) if reverse else (day, month, year)
    return '%s%s%s%s%s' % (first, sep, second, sep, third)
class CourtParser(HTMLParser):
    """Minimal HTMLParser that collects (link text, href) pairs in .links."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []
        self.link = None        # href of the currently open <a>, if any
        self.linktitle = ''     # text accumulated inside that <a>

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for attr in attrs:
                if len(attr) > 1 and attr[0] == 'href':
                    self.link = attr[1]

    def handle_data(self, data):
        # Only record text while inside an anchor.
        if self.link != None:
            self.linktitle += data

    def handle_endtag(self, tag):
        if tag == 'a':
            linktitle = string.strip(self.linktitle)
            self.links.append((linktitle, self.link))
            self.link = None
            self.linktitle = ''

    def feed_webpage(self, webpage):
        """Feed HTML through the parser; returns False on parse errors.

        Python 2 only: uses the old except syntax and print statement.
        """
        try:
            self.feed(webpage)
        except HTMLParseError, e:
            print >> sys.stderr, 'Malformed HTML: %s' % e
            return False
        return True
def get_petitioner_respondent(title):
    """Split a case title on a vs/versus separator.

    Returns (petitioner, respondent); either side is None when missing.
    Each side has its whitespace collapsed and leading/trailing
    '.', ',' and ':' characters stripped.
    """
    match = re.search(r'\b(vs|versus|v/s|v s)\b', title, re.IGNORECASE)
    petitioner = None
    respondent = None
    if match:
        if match.start() > 0:
            petitioner = u' '.join(title[:match.start()].split()).strip('.,:')
        if match.end() < len(title) - 1:
            respondent = u' '.join(title[match.end():].split()).strip('.,:')
    return petitioner, respondent
def save_file(filepath, buf):
    """Write *buf* to *filepath* in binary mode.

    Uses a context manager so the handle is closed even when write()
    raises (the original leaked the handle on failure).
    """
    with open(filepath, 'wb') as h:
        h.write(buf)
class BaseCourt:
    """Shared scaffolding for per-court judgment crawlers.

    Subclasses implement download_oneday(); this base class provides the
    directory layout, logging, wget invocation and raw/metadata saving.
    """

    def __init__(self, name, rawdir, metadir, statdir, updateMeta):
        self.hostname = None
        self.name = name
        self.rawdir = rawdir          # root directory for raw judgments
        self.metadir = metadir        # root directory for metadata XML
        self.updateMeta = updateMeta  # overwrite existing metadata files?
        # judis.nic.in is logged under the friendlier 'supremecourt' name.
        if self.name == 'judis.nic.in':
            loggername = 'supremecourt'
        else:
            loggername = self.name
        self.logger = logging.getLogger(u'crawler.%s' % loggername)
        mk_dir(self.rawdir)
        mk_dir(self.metadir)
        self.maxretries = 3
        self.wgetlog = os.path.join(statdir, '%s-wget.log' % self.name)
        self.useragent = 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.10) Gecko/2009051719 Gentoo Firefox/3.0.10'
        # Metadata field names shared by subclasses.
        self.PETITIONER = 'petitioner'
        self.RESPONDENT = 'respondent'
        self.DATE = 'date'
        self.CASENO = 'caseno'
        self.randomObj = random.Random()

    def sync(self, fromdate, todate):
        """Download judgments day by day over [fromdate, todate].

        Creates the per-court and per-date directories as it goes and
        returns the list of relative paths newly downloaded.
        """
        newdownloads = []
        dirname = os.path.join (self.rawdir, self.name)
        mk_dir(dirname)
        dirname = os.path.join (self.metadir, self.name)
        mk_dir(dirname)

        while fromdate <= todate:
            dateobj = fromdate.date()
            tmprel = os.path.join (self.name, dateobj.__str__())
            datedir = os.path.join (self.rawdir, tmprel)
            mk_dir(datedir)

            datedir = os.path.join (self.metadir, tmprel)
            mk_dir(datedir)

            self.logger.info(u'Date %s' % dateobj)
            # download_oneday() is supplied by the subclass.
            dls = self.download_oneday(tmprel, dateobj)
            newdownloads.extend(dls)
            fromdate += datetime.timedelta(days=1)
        return newdownloads

    def download_url(self, url, loadcookies = None, savecookies = None, \
                     postdata = None, referer = None, stderr = None, \
                     srvresponse = None, encodepost= True, headers = None):
        """Fetch *url* by shelling out to wget; returns the response body.

        When *stderr* is truthy, returns (stdout, stderr) instead and wget
        logging is not appended to the shared log file.
        """
        arglist = [\
            '/usr/bin/wget', '--output-document', '-', \
            '--tries=%d' % self.maxretries, \
            '--user-agent=%s' % self.useragent, \
        ]
        if srvresponse:
            # -S makes wget print server response headers (on stderr).
            arglist.append('-S')
        if not stderr:
            arglist.extend(['-a', self.wgetlog])
        if loadcookies:
            arglist.extend(['--load-cookies', loadcookies])
        elif savecookies:
            arglist.extend(['--keep-session-cookies', \
                            '--save-cookies', savecookies])
        #sleepDuration = math.fabs(self.randomObj.normalvariate(2, 2))
        #time.sleep(sleepDuration)

        if postdata:
            if encodepost:
                encodedData = urllib.urlencode(postdata)
            else:
                encodedData = postdata
            # Very large bodies go through a temp file to avoid huge argv.
            if len(encodedData) > 100*1000:
                postfile = tempfile.NamedTemporaryFile()
                postfile.write(encodedData)
                postfile.flush()
                arglist.extend(['--post-file', postfile.name])
            else:
                arglist.extend(['--post-data', encodedData])

        if referer:
            arglist.extend(['--referer', referer])
        if self.logger.getEffectiveLevel() <= logging.DEBUG:
            arglist.append('--debug')
        if headers:
            for hdr in headers:
                arglist.append('--header')
                arglist.append(hdr)
        arglist.append(url)

        if stderr:
            p = subprocess.Popen(arglist, stdout = subprocess.PIPE, \
                                 stderr = subprocess.PIPE)
            out, err = p.communicate()
            return out, err
        else:
            p = subprocess.Popen(arglist, stdout = subprocess.PIPE)
            webpage = p.communicate()[0]
            return webpage

    def save_judgment(self, relurl, judgeurl, metainfo, cookiefile = None):
        """Download one judgment and write its raw file plus metadata.

        The raw file is only fetched when missing; metadata is written
        when updateMeta is set or no metadata file exists yet.  Returns
        relurl when the raw file exists afterwards, else None.
        """
        filepath = os.path.join(self.rawdir, relurl)
        metapath = os.path.join(self.metadir, relurl)
        if not os.path.exists(filepath):
            if cookiefile:
                doc = self.download_url(judgeurl, \
                                        loadcookies = cookiefile)
            else:
                doc = self.download_url(judgeurl)
            if doc:
                save_file(filepath, doc)
                self.logger.info(u'Saved rawfile %s' % relurl)
        if metainfo and os.path.exists(filepath) and \
                (self.updateMeta or not os.path.exists(metapath)):
            print_tag_file(metapath, metainfo)
            self.logger.info(u'Saved metainfo %s' % relurl)

        if os.path.exists(filepath):
            return relurl
        else:
            return None
def get_file_type(filepath):
    """Return the MIME type of the file at *filepath* via libmagic."""
    m = magic.open(magic.MAGIC_MIME)
    #m = magic.open(magic.MIME_TYPE)
    m.load()
    mtype = m.file(filepath)
    m.close()
    return mtype
def get_buffer_type(buffer):
    """Return the MIME type of an in-memory buffer via libmagic."""
    m = magic.open(magic.MAGIC_MIME)
    #m = magic.open(magic.MIME_TYPE)
    m.load()
    mtype = m.buffer(buffer)
    m.close()
    return mtype
|
adamdeprince/python-citibike-data | refs/heads/master | citibike/download.py | 1 | """Download original citibike data.
This application downloads the original citibike data and reencodes it
from a zip to a bz2 file."""
from pipes import quote
from tempfile import TemporaryFile
from distutils.dir_util import mkpath
from os import environ, makedirs, unlink
from os.path import join, exists
import errno
import requests
from bz2file import BZ2File
from zipfile import ZipFile
import gflags
from progressbar import ProgressBar
import citibike.reader
FLAGS = gflags.FLAGS

# Defaults match the first published Citi Bike dump (2013-07).
gflags.DEFINE_integer('year', 2013, 'Year of first file to download')
gflags.DEFINE_integer('month', 7, 'Month of first file to download')
gflags.DEFINE_string(
    'filename',
    '%(year)04d%(month)02d-citibike-tripdata.zip',
    'Template for building citibike filename/url')
gflags.DEFINE_string(
    'url',
    'https://s3.amazonaws.com/tripdata/%(filename)s',
    'URL template from which to download')
gflags.DEFINE_string(
    'csv',
    '%(year)04d-%(month)02d - Citi Bike trip data.csv',
    'Name of CSV file in zip file')
gflags.DEFINE_string(
    'compressed_csv',
    '%(year)04d-%(month)02d - Citi Bike trip data.csv.bz2',
    'Name of bz2 compressed CSV file')
# 1 MiB chunk used for download and recompression.
# NOTE(review): declared with DEFINE_string but given an int default --
# confirm whether DEFINE_integer was intended.
gflags.DEFINE_string('chunk_size', 2 ** 20, 'Download chunk size')
def month_counter(year=None, month=None):
    """Yield (year, month) pairs endlessly, starting at the given month.

    Falsy arguments fall back to the --year / --month flag values.
    """
    year = year or FLAGS.year
    month = month or FLAGS.month
    while True:
        yield year, month
        month += 1
        if month > 12:
            month = 1
            year += 1
def path_csv_and_urls(year=None, month=None, filename=None, url=None):
    """Yield (bz2 cache path, csv member name, download url) per month.

    NOTE(review): the ``filename``/``url`` parameters are overwritten on
    the first loop iteration, and the flag templates are expanded against
    ``vars()``, so those parameters exist only to seed the local scope --
    fragile, but preserved as-is.  FLAGS.cache is defined outside this
    module (presumably in citibike.reader); confirm.
    """
    for year, month in month_counter(year, month):
        filename = FLAGS.filename % vars()
        url = FLAGS.url % vars()
        path = join(FLAGS.cache, FLAGS.compressed_csv % vars())
        csv = FLAGS.csv % vars()
        yield path, csv, url
def download(target=None):
    """Fetch each monthly zip and recompress its CSV to bz2 in the cache.

    Months whose bz2 file already exists are skipped; the loop stops on
    the first non-200 response (returned as the status code).  Python 2
    only: uses the print statement and generator ``.next()``.
    """
    target = target or FLAGS.cache
    mkpath(target)
    print "Starting download"
    for path, csv, url in path_csv_and_urls():
        if exists(path):
            print("Skipping %s, %s exists" % (url, path))
            continue
        req = requests.get(url, stream=True)
        if req.status_code != 200:
            return req.status_code
        print("Downloading: %(url)s " % vars())
        with TemporaryFile() as tf:
            l = 0
            size = int(req.headers['content-length'])
            pb = ProgressBar(size).start()
            for chunk in req.iter_content(FLAGS.chunk_size):
                l += len(chunk)
                pb.update(l)
                tf.write(chunk)
            tf.flush()
            tf.seek(0)
            pb.finish()
            print("Recompressing to: " + quote(path))
            # Uncompressed size of the CSV member, for the progress bar.
            size = (
                i.file_size for i in ZipFile(tf).infolist() if i.filename == csv).next()
            l = 0
            try:
                with ZipFile(tf).open(csv) as csv_file:
                    pb = ProgressBar(size).start()
                    with BZ2File(path, "w") as target_file:
                        while True:
                            chunk = csv_file.read(FLAGS.chunk_size)
                            if not chunk:
                                break
                            l += len(chunk)
                            pb.update(l)
                            target_file.write(chunk)
            except KeyboardInterrupt:
                # Remove the half-written bz2 so the next run redoes it.
                unlink(path)
                break
            finally:
                pb.finish()
def main(argv):
    """Parse gflags from *argv* and run the downloader.

    Returns 1 on flag-parsing problems, otherwise the result of
    download() is discarded and None is returned.
    """
    # The module never imports os or sys at top level (os is only pulled
    # in piecemeal via ``from os import ...``), so the error path below
    # used to crash with NameError; import both locally.
    import os
    import sys
    try:
        argv = FLAGS(argv)[1:]
    except (gflags.FlagsError, KeyError, IndexError) as e:
        sys.stderr.write("%s\nUsage: %s \n%s\n" % (
            e, os.path.basename(sys.argv[0]), FLAGS))
        return 1
    download()
|
mapzen/vector-datasource | refs/heads/master | test/test_yaml_rules.py | 2 | from unittest import TestCase
class YamlRulesTest(TestCase):
    def test_all_kinds(self):
        """
        Test that we've enumerated all the possible values for kind and
        kind_detail in the YAML files.
        """
        from vectordatasource.meta import find_yaml_path
        from vectordatasource.meta.kinds import parse_all_kinds
        import os.path

        yaml_path = find_yaml_path()
        # The sort_rank spreadsheets live beside the yaml directory.
        sort_rank_path = os.path.join(
            os.path.split(yaml_path)[0], 'spreadsheets', 'sort_rank')

        # should be able to execute this without throwing an exception.
        all_kinds = parse_all_kinds(yaml_path, sort_rank_path, True)

        # and we should get some data back
        self.assertTrue(all_kinds)
|
eyohansa/django | refs/heads/master | tests/staticfiles_tests/storage.py | 332 | from datetime import datetime
from django.contrib.staticfiles.storage import CachedStaticFilesStorage
from django.core.files import storage
class DummyStorage(storage.Storage):
    """
    A storage class that does implement modified_time() but raises
    NotImplementedError when calling
    """
    def _save(self, name, content):
        # Pretend the content was stored; tests only need a stable name.
        return 'dummy'

    def delete(self, name):
        pass

    def exists(self, name):
        pass

    def modified_time(self, name):
        # NOTE(review): with ``from datetime import datetime`` above,
        # ``datetime.date`` is the *instance method* of the datetime
        # class, so this call raises TypeError -- not NotImplementedError
        # as the class docstring claims, and not a date value.  Confirm
        # whether the tests rely on this raising before "fixing" it.
        return datetime.date(1970, 1, 1)
class SimpleCachedStaticFilesStorage(CachedStaticFilesStorage):
    """CachedStaticFilesStorage with a fixed, predictable content hash."""

    def file_hash(self, name, content=None):
        # A constant hash keeps hashed filenames deterministic in tests.
        return 'deploy12345'
|
dcelasun/thrift | refs/heads/master | test/crossrunner/test.py | 17 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import multiprocessing
import os
import sys
from .compat import path_join
from .util import merge_dict, domain_socket_path
class TestProgram(object):
def __init__(self, kind, name, protocol, transport, socket, workdir, stop_signal, command, env=None,
extra_args=[], extra_args2=[], join_args=False, **kwargs):
self.kind = kind
self.name = name
self.protocol = protocol
self.transport = transport
self.socket = socket
self.workdir = workdir
self.stop_signal = stop_signal
self.command = None
self._base_command = self._fix_cmd_path(command)
if env:
self.env = copy.copy(os.environ)
self.env.update(env)
else:
self.env = os.environ
self._extra_args = extra_args
self._extra_args2 = extra_args2
self._join_args = join_args
def _fix_cmd_path(self, cmd):
# if the arg is a file in the current directory, make it path
def abs_if_exists(arg):
p = path_join(self.workdir, arg)
return p if os.path.exists(p) else arg
if cmd[0] == 'python':
cmd[0] = sys.executable
else:
cmd[0] = abs_if_exists(cmd[0])
return cmd
def _socket_args(self, socket, port):
return {
'ip-ssl': ['--ssl'],
'domain': ['--domain-socket=%s' % domain_socket_path(port)],
'abstract': ['--abstract-namespace', '--domain-socket=%s' % domain_socket_path(port)],
}.get(socket, None)
def _transport_args(self, transport):
return {
'zlib': ['--zlib'],
}.get(transport, None)
def build_command(self, port):
cmd = copy.copy(self._base_command)
args = copy.copy(self._extra_args2)
args.append('--protocol=' + self.protocol)
args.append('--transport=' + self.transport)
transport_args = self._transport_args(self.transport)
if transport_args:
args += transport_args
socket_args = self._socket_args(self.socket, port)
if socket_args:
args += socket_args
args.append('--port=%d' % port)
if self._join_args:
cmd.append('%s' % " ".join(args))
else:
cmd.extend(args)
if self._extra_args:
cmd.extend(self._extra_args)
self.command = cmd
return self.command
class TestEntry(object):
    """One server/client pairing of a cross test, plus its run results."""

    def __init__(self, testdir, server, client, delay, timeout, **kwargs):
        self.testdir = testdir
        self._log = multiprocessing.get_logger()
        self._config = kwargs
        self.protocol = kwargs['protocol']
        self.transport = kwargs['transport']
        self.socket = kwargs['socket']
        srv_dict = self._fix_workdir(merge_dict(self._config, server))
        cli_dict = self._fix_workdir(merge_dict(self._config, client))
        # Each side's "remote_args" become the *other* side's extra args.
        cli_dict['extra_args2'] = srv_dict.pop('remote_args', [])
        srv_dict['extra_args2'] = cli_dict.pop('remote_args', [])
        self.server = TestProgram('server', **srv_dict)
        self.client = TestProgram('client', **cli_dict)
        self.delay = delay
        self.timeout = timeout
        self._name = None
        # results
        self.success = None
        self.as_expected = None
        self.returncode = None
        self.expired = False
        self.retry_count = 0

    def _fix_workdir(self, config):
        # Resolve the configured workdir relative to the test directory.
        key = 'workdir'
        path = config.get(key, None)
        if not path:
            path = self.testdir
        if os.path.isabs(path):
            path = os.path.realpath(path)
        else:
            path = os.path.realpath(path_join(self.testdir, path))
        config.update({key: path})
        return config

    @classmethod
    def get_name(cls, server, client, protocol, transport, socket, *args, **kwargs):
        """Canonical name: server-client_protocol_transport-socket."""
        return '%s-%s_%s_%s-%s' % (server, client, protocol, transport, socket)

    @property
    def name(self):
        # Built lazily and cached.
        if not self._name:
            self._name = self.get_name(
                self.server.name, self.client.name, self.protocol, self.transport, self.socket)
        return self._name

    @property
    def transport_name(self):
        return '%s-%s' % (self.transport, self.socket)
def test_name(server, client, protocol, transport, socket, **kwargs):
    """Module-level helper: build a test's canonical name from config dicts."""
    return TestEntry.get_name(server['name'], client['name'], protocol, transport, socket)
|
bharath2020/SmartHome | refs/heads/master | temperature_server.py | 1 | #!/usr/bin/env python3
from flask import Flask, request, render_template
import os
import json
import time
import datetime
from smarthomemongo import SmartHomeDB
app = Flask(__name__)
# Shared store (see smarthomemongo) for readings and current stats.
smartDB = SmartHomeDB()

@app.route('/')
def index():
    """Dashboard: render the latest stats with a human-readable timestamp."""
    records = smartDB.getCurrentStats('raspberry')
    if( 'timestamp' in records.keys() ):
        ts = datetime.datetime.fromtimestamp(records['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
        records['timestamp_string'] = ts
    return render_template('index.html',records=records)

@app.route('/add', methods=['POST'])
def add():
    """Store one temperature reading posted as JSON."""
    recordJson = request.get_json()
    smartDB.insertTemperaturePoint(recordJson)
    return 'Success', 200

@app.route('/update_stats', methods=['POST'])
def update_stats():
    """Update the current-stats document from the posted JSON body."""
    recordJson = request.get_json()
    smartDB.updateCurrentStats(recordJson)
    return 'Success', 200

@app.route('/get_current_stats',methods=['GET'])
def get_current_stats():
    """Return the current stats for the 'raspberry' node as JSON."""
    record = smartDB.getCurrentStats('raspberry')
    return json.dumps(record)

@app.route('/line_graph')
def get_line_graph():
    """Serve the graph page; its data comes from /data.csv."""
    return render_template('graph.html')

@app.route('/data.csv')
def get_data_csv():
    # Despite the .csv path, the payload is JSON.
    records = smartDB.getTemperaturePoints()
    return json.dumps(records)

@app.route('/upload_test1', methods=['POST'])
def upload_test():
    """Test hook: bulk-upload a collection posted as JSON."""
    recordJson = request.get_json()
    smartDB.upload_collection(recordJson)
    return 'Success', 200

if __name__ == '__main__':
    # debug=True: development server only; do not expose in production.
    app.run(host='0.0.0.0',debug=True)
|
tanayseven/Voix | refs/heads/master | flask/lib/python2.7/site-packages/whoosh/support/levenshtein.py | 64 | """
Contains functions implementing edit distance algorithms.
"""
from whoosh.compat import xrange
def levenshtein(seq1, seq2, limit=None):
    """Returns the Levenshtein edit distance between two strings.

    When *limit* is given and the distance provably exceeds it, returns
    ``limit + 1`` early instead of the exact distance.
    """
    oneago = None
    # list(range(...)) so the + [0] concatenation also works on Python 3
    # (plain range(...) + [0] raises TypeError there); this matches
    # damerau_levenshtein below and is equivalent on Python 2.
    thisrow = list(range(1, len(seq2) + 1)) + [0]
    for x in range(len(seq1)):
        # Python lists wrap around for negative indices, so put the
        # leftmost column at the *end* of the list. This matches with
        # the zero-indexed strings and saves extra calculation.
        oneago, thisrow = thisrow, [0] * len(seq2) + [x + 1]
        for y in range(len(seq2)):
            delcost = oneago[y] + 1
            addcost = thisrow[y - 1] + 1
            subcost = oneago[y - 1] + (seq1[x] != seq2[y])
            thisrow[y] = min(delcost, addcost, subcost)
        if limit and x > limit and min(thisrow) > limit:
            return limit + 1
    return thisrow[len(seq2) - 1]
def damerau_levenshtein(seq1, seq2, limit=None):
    """Returns the Damerau-Levenshtein edit distance between two strings.

    Like levenshtein() but counts a transposition of two adjacent
    characters as a single edit.  *limit* behaves as in levenshtein().
    Uses the builtin ``range`` (works on Python 2 and 3) instead of the
    compat ``xrange`` shim.
    """
    oneago = None
    thisrow = list(range(1, len(seq2) + 1)) + [0]
    for x in range(len(seq1)):
        # Python lists wrap around for negative indices, so put the
        # leftmost column at the *end* of the list. This matches with
        # the zero-indexed strings and saves extra calculation.
        twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]
        for y in range(len(seq2)):
            delcost = oneago[y] + 1
            addcost = thisrow[y - 1] + 1
            subcost = oneago[y - 1] + (seq1[x] != seq2[y])
            thisrow[y] = min(delcost, addcost, subcost)
            # This block deals with transpositions
            if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]
                    and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):
                thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)
        if limit and x > limit and min(thisrow) > limit:
            return limit + 1
    return thisrow[len(seq2) - 1]
def relative(a, b):
    """Returns the relative distance between two strings, in the range
    [0-1] where 1 means total equality.
    """
    # Two empty strings are trivially equal; without this guard the
    # formula below divides by zero (longer == 0).
    if not a and not b:
        return 1.0
    d = distance(a, b)
    longer = float(max((len(a), len(b))))
    shorter = float(min((len(a), len(b))))
    r = ((longer - d) / longer) * (shorter / longer)
    return r
# Default edit distance used by relative(): Damerau-Levenshtein.
distance = damerau_levenshtein
|
SOKP/external_chromium_org | refs/heads/sokp-l5.1 | tools/deep_memory_profiler/graph.py | 55 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import sys
from string import Template
_HTML_TEMPLATE = """<!DOCTYPE html>
<script src="https://www.google.com/jsapi"></script>
<script>
var all_data = $ALL_DATA;
google.load('visualization', '1', {packages:['corechart', 'table']});
google.setOnLoadCallback(drawVisualization);
function drawVisualization() {
// Apply policy 'l2' by default.
var default_policy = '$DEF_POLICY';
document.getElementById(default_policy).style.fontWeight = 'bold';
turnOn(default_policy);
}
function turnOn(policy) {
var data = google.visualization.arrayToDataTable(all_data[policy]);
var charOptions = {
title: 'DMP Graph (Policy: ' + policy + ')',
hAxis: {title: 'Timestamp', titleTextStyle: {color: 'red'}},
isStacked : true
};
var chart = new google.visualization.AreaChart(
document.getElementById('chart_div'));
chart.draw(data, charOptions);
var table = new google.visualization.Table(
document.getElementById('table_div'));
table.draw(data);
}
window.onload = function() {
var ul = document.getElementById('policies');
for (var i = 0; i < ul.children.length; ++i) {
var li = ul.children[i];
li.onclick = function() {
for (var j = 0; j < ul.children.length; ++j) {
var my_li = ul.children[j];
my_li.style.fontWeight = 'normal';
}
this.style.fontWeight = 'bold';
turnOn(this.id);
}
}
};
</script>
<style>
#policies li {
display: inline-block;
padding: 5px 10px;
}
</style>
Click to change an applied policy.
<ul id="policies">$POLICIES</ul>
<div id="chart_div" style="width: 1024px; height: 640px;"></div>
<div id="table_div" style="width: 1024px; height: 640px;"></div>
"""
def _GenerateGraph(json_data):
    """Print the HTML chart page for a dmprof JSON dump to stdout.

    Python 2 only: uses iteritems() and the print statement.
    """
    policies = list(json_data['policies'])
    # Policy 'l2' is the preferred default when present.
    default_policy = "l2"
    if default_policy not in policies:
        default_policy = policies[0]

    policies = "".join(map(lambda x: '<li id="'+x+'">'+x+'</li>', policies))

    all_data = {}
    for policy in json_data['policies']:
        legends = list(json_data['policies'][policy]['legends'])
        # Keep only the columns between the *_FOR_TOTAL markers.
        legends = ['second'] + legends[legends.index('FROM_HERE_FOR_TOTAL') + 1:
                                       legends.index('UNTIL_HERE_FOR_TOTAL')]
        data = []
        for snapshot in json_data['policies'][policy]['snapshots']:
            data.append([0] * len(legends))
            for k, v in snapshot.iteritems():
                if k in legends:
                    data[-1][legends.index(k)] = v
        all_data[policy] = [legends] + data

    print Template(_HTML_TEMPLATE).safe_substitute(
        {'POLICIES': policies,
         'DEF_POLICY': default_policy,
         'ALL_DATA': json.dumps(all_data)})
def main(argv):
  """Entry point: load the dmprof JSON file named by argv[1] and graph it.

  Args:
    argv: command-line arguments; argv[1] is the path to the JSON file.
  """
  # Use open() in a with-block instead of the deprecated file() builtin so
  # the handle is closed deterministically (the original leaked it).
  with open(argv[1], 'r') as json_file:
    _GenerateGraph(json.load(json_file))
if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
freenas/samba | refs/heads/freenas/master | python/samba/tests/kcc/kcc_utils.py | 29 | # Unix SMB/CIFS implementation. Tests for samba.kcc.kcc_utils.
# Copyright (C) Andrew Bartlett 2015
#
# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.kcc.kcc_utils"""
import samba
import samba.tests
from samba.kcc.kcc_utils import *
class ScheduleTests(samba.tests.TestCase):
    """Tests for new_connection_schedule() from samba.kcc.kcc_utils."""

    def test_new_connection_schedule(self):
        schedule = new_connection_schedule()
        self.assertIsInstance(schedule, drsblobs.schedule)
        # assertEqual instead of the deprecated assertEquals alias
        # (removed in newer unittest versions).
        self.assertEqual(schedule.size, 188)
        self.assertEqual(len(schedule.dataArray[0].slots), 168)
# OK, this is pathetic, but the rest of it looks really hard, with the
# classes all intertwingled with each other and the samdb. That is to say:
# XXX later.
|
manishpatell/erpcustomizationssaiimpex123qwe | refs/heads/master | openerp/tools/test_reports.py | 337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Helper functions for reports testing.
Please /do not/ import this file by default, but only explicitly call it
through the code of yaml tests.
"""
import openerp
import openerp.report
import openerp.tools as tools
import logging
from openerp.tools.safe_eval import safe_eval
from subprocess import Popen, PIPE
import os
import tempfile
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('openerp.tests')
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None, report_type=None):
    """ Try to render a report <rname> with contents of ids

        This function should also check for common pitfalls of reports.

        :param cr: database cursor
        :param uid: id of the user rendering the report
        :param rname: report service name, with or without the 'report.' prefix
        :param ids: ids of the records the report is rendered for
        :param data: optional datas dict passed to the report engine
        :param context: optional context dict
        :param our_module: kept for signature compatibility with
            try_report_action (unused here)
        :param report_type: unused, kept for backward compatibility
        :return: True when the report rendered and passed the sanity checks,
            False when the produced chunk format could not be examined
    """
    if data is None:
        data = {}
    if context is None:
        context = {}
    # Accept both 'report.foo' and bare 'foo' as the report name.
    if rname.startswith('report.'):
        rname_s = rname[7:]
    else:
        rname_s = rname
    _test_logger.info("  - Trying %s.create(%r)", rname, ids)
    res = openerp.report.render_report(cr, uid, ids, rname_s, data, context)
    if not isinstance(res, tuple):
        raise RuntimeError("Result of %s.create() should be a (data,format) tuple, now it is a %s" % \
                (rname, type(res)))
    (res_data, res_format) = res
    if not res_data:
        raise ValueError("Report %s produced an empty result!" % rname)
    if tools.config['test_report_directory']:
        # Use open() in a with-block (instead of the deprecated file()
        # builtin) so the handle is flushed and closed deterministically --
        # the original left the file object dangling.
        with open(os.path.join(tools.config['test_report_directory'], rname+ '.'+res_format), 'wb+') as report_file:
            report_file.write(res_data)
    _logger.debug("Have a %s report for %s, will examine it", res_format, rname)
    if res_format == 'pdf':
        if res_data[:5] != '%PDF-':
            raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
        res_text = False
        try:
            fd, rfname = tempfile.mkstemp(suffix=res_format)
            os.write(fd, res_data)
            os.close(fd)
            # Best-effort text extraction: silently skipped when pdftotext
            # is not installed on the test machine.
            proc = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE)
            stdout, stderr = proc.communicate()
            res_text = tools.ustr(stdout)
            os.unlink(rfname)
        except Exception:
            _logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
        if res_text is not False:
            for line in res_text.split('\n'):
                # '[[' or '[ [' in the rendered text usually means an
                # unevaluated report expression leaked into the output.
                if ('[[' in line) or ('[ [' in line):
                    _logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
            # TODO more checks, what else can be a sign of a faulty report?
    elif res_format == 'html':
        pass
    else:
        _logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
        return False
    _test_logger.info("  + Report %s produced correctly.", rname)
    return True
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
                wiz_data=None, wiz_buttons=None,
                context=None, our_module=None):
    """Take an ir.action.act_window and follow it until a report is produced

        :param action_id: the integer id of an action, or a reference to xml id
                of the act_window (can search [our_module.]+xml_id
        :param active_model, active_ids: call the action as if it had been launched
                from that model+ids (tree/form view action)
        :param wiz_data: a dictionary of values to use in the wizard, if needed.
                They will override (or complete) the default values of the
                wizard form.
        :param wiz_buttons: a list of button names, or button icon strings, which
                should be preferred to press during the wizard.
                Eg. 'OK' or 'gtk-print'
        :param our_module: the name of the calling module (string), like 'account'
        :return: True when a report was eventually produced
    """
    # Infer the module name from a dotted xml id when it was not given.
    if not our_module and isinstance(action_id, basestring):
        if '.' in action_id:
            our_module = action_id.split('.', 1)[0]
    if context is None:
        context = {}
    else:
        context = context.copy() # keep it local
    # TODO context fill-up
    registry = openerp.registry(cr.dbname)
    def log_test(msg, *args):
        # Uniform "  - " prefix for wizard-progress lines in the test log.
        _test_logger.info("  - " + msg, *args)
    datas = {}
    if active_model:
        datas['model'] = active_model
    if active_ids:
        datas['ids'] = active_ids
    if not wiz_buttons:
        wiz_buttons = []
    # Resolve action_id (xml id string or plain integer) to model + db id.
    if isinstance(action_id, basestring):
        if '.' in action_id:
            act_module, act_xmlid = action_id.split('.', 1)
        else:
            if not our_module:
                raise ValueError('You cannot only specify action_id "%s" without a module name' % action_id)
            act_module = our_module
            act_xmlid = action_id
        act_model, act_id = registry['ir.model.data'].get_object_reference(cr, uid, act_module, act_xmlid)
    else:
        assert isinstance(action_id, (long, int))
        act_model = 'ir.action.act_window' # assume that
        act_id = action_id
        act_xmlid = '<%s>' % act_id
    def _exec_action(action, datas, context):
        # Executes one action dict; returns the next action dict (to keep
        # following the chain) or a non-dict result (to stop).
        # taken from client/modules/action/main.py:84 _exec_action()
        if isinstance(action, bool) or 'type' not in action:
            return
        # Updating the context : Adding the context of action in order to use it on Views called from buttons
        if datas.get('id',False):
            context.update( {'active_id': datas.get('id',False), 'active_ids': datas.get('ids',[]), 'active_model': datas.get('model',False)})
        context1 = action.get('context', {})
        if isinstance(context1, basestring):
            context1 = safe_eval(context1, context.copy())
        context.update(context1)
        if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
            for key in ('res_id', 'res_model', 'view_type', 'view_mode',
                        'limit', 'auto_refresh', 'search_view', 'auto_search', 'search_view_id'):
                datas[key] = action.get(key, datas.get(key, None))
            # Pick the view id: prefer the 'views' list, else 'view_id'.
            view_id = False
            if action.get('views', []):
                if isinstance(action['views'],list):
                    view_id = action['views'][0][0]
                    datas['view_mode']= action['views'][0][1]
                else:
                    if action.get('view_id', False):
                        view_id = action['view_id'][0]
            elif action.get('view_id', False):
                view_id = action['view_id'][0]
            assert datas['res_model'], "Cannot use the view without a model"
            # Here, we have a view that we need to emulate
            log_test("will emulate a %s view: %s#%s",
                     action['view_type'], datas['res_model'], view_id or '?')
            view_res = registry[datas['res_model']].fields_view_get(cr, uid, view_id, action['view_type'], context)
            assert view_res and view_res.get('arch'), "Did not return any arch for the view"
            # Seed the form values with the model defaults, then overlay
            # any 'form' datas and the caller-supplied wiz_data.
            view_data = {}
            if view_res.get('fields',{}).keys():
                view_data = registry[datas['res_model']].default_get(cr, uid, view_res['fields'].keys(), context)
            if datas.get('form'):
                view_data.update(datas.get('form'))
            if wiz_data:
                view_data.update(wiz_data)
            _logger.debug("View data is: %r", view_data)
            for fk, field in view_res.get('fields',{}).items():
                # Default fields returns list of int, while at create()
                # we need to send a [(6,0,[int,..])]
                if field['type'] in ('one2many', 'many2many') \
                        and view_data.get(fk, False) \
                        and isinstance(view_data[fk], list) \
                        and not isinstance(view_data[fk][0], tuple) :
                    view_data[fk] = [(6, 0, view_data[fk])]
            action_name = action.get('name')
            try:
                from xml.dom import minidom
                cancel_found = False
                buttons = []
                dom_doc = minidom.parseString(view_res['arch'])
                if not action_name:
                    action_name = dom_doc.documentElement.getAttribute('name')
                # Score each non-cancel button; higher weight = preferred.
                for button in dom_doc.getElementsByTagName('button'):
                    button_weight = 0
                    if button.getAttribute('special') == 'cancel':
                        cancel_found = True
                        continue
                    if button.getAttribute('icon') == 'gtk-cancel':
                        cancel_found = True
                        continue
                    if button.getAttribute('default_focus') == '1':
                        button_weight += 20
                    if button.getAttribute('string') in wiz_buttons:
                        button_weight += 30
                    elif button.getAttribute('icon') in wiz_buttons:
                        button_weight += 10
                    string = button.getAttribute('string') or '?%s' % len(buttons)
                    buttons.append( { 'name': button.getAttribute('name'),
                                     'string': string,
                                     'type': button.getAttribute('type'),
                                     'weight': button_weight,
                                    })
            except Exception, e:
                _logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
                raise AssertionError(e.args[0])
            if not datas['res_id']:
                # it is probably an orm_memory object, we need to create
                # an instance
                datas['res_id'] = registry[datas['res_model']].create(cr, uid, view_data, context)
            if not buttons:
                raise AssertionError("view form doesn't have any buttons to press!")
            buttons.sort(key=lambda b: b['weight'])
            _logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
            # Press buttons from highest weight down until one yields a
            # result; only 'object'-typed buttons can be invoked.
            res = None
            while buttons and not res:
                b = buttons.pop()
                log_test("in the \"%s\" form, I will press the \"%s\" button.", action_name, b['string'])
                if not b['type']:
                    log_test("the \"%s\" button has no type, cannot use it", b['string'])
                    continue
                if b['type'] == 'object':
                    #there we are! press the button!
                    fn = getattr(registry[datas['res_model']], b['name'])
                    if not fn:
                        _logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
                        continue
                    res = fn(cr, uid, [datas['res_id'],], context)
                    break
                else:
                    _logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
                                    action_name, b['string'], b['type'])
            return res
        elif action['type']=='ir.actions.report.xml':
            if 'window' in datas:
                del datas['window']
            # Fall back to the datas embedded in the action itself.
            if not datas:
                datas = action.get('datas')
                if not datas:
                    datas = action.get('data')
            datas = datas.copy()
            ids = datas.get('ids')
            if 'ids' in datas:
                del datas['ids']
            res = try_report(cr, uid, 'report.'+action['report_name'], ids, datas, context, our_module=our_module)
            return res
        else:
            raise Exception("Cannot handle action of type %s" % act_model)
    log_test("will be using %s action %s #%d", act_model, act_xmlid, act_id)
    action = registry[act_model].read(cr, uid, [act_id], context=context)[0]
    assert action, "Could not read action %s[%s]" %(act_model, act_id)
    # Follow chained actions (wizard -> wizard -> report ...) until a
    # non-dict result terminates the chain; hard cap of 100 hops.
    loop = 0
    while action:
        loop += 1
        # This part tries to emulate the loop of the Gtk client
        if loop > 100:
            _logger.error("Passed %d loops, giving up", loop)
            raise Exception("Too many loops at action")
        log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
        result = _exec_action(action, datas, context)
        if not isinstance(result, dict):
            break
        datas = result.get('datas', {})
        if datas:
            del result['datas']
        action = result
    return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
IndonesiaX/configuration | refs/heads/master | util/vpc-tools/vpc_dns.py | 71 | #!/usr/bin/env python -u
#
# Updates DNS records for a stack
#
# Example usage:
#
# # update route53 entries for ec2 and rds instances
# # in the vpc with stack-name "stage-stack" and
# # create DNS entries in the example.com hosted
# # zone
#
# python vpc_dns.py -s stage-stack -z example.com
#
# # same thing but just print what will be done without
# # making any changes
#
# python vpc_dns.py -n -s stage-stack -z example.com
#
# # Create a new zone "vpc.example.com", update the parent
# # zone "example.com"
#
# python vpc_dns.py -s stage-stack -z vpc.example.com
#
import argparse
import boto
import datetime
from vpcutil import vpc_for_stack_name
import xml.dom.minidom
import sys
# These are ELBs that we do not want to create dns entries
# for because the instances attached to them are also in
# other ELBs and we want the env-deploy-play tuple which makes
# up the dns name to be unique.
# Matching is by substring of the ELB name (see update_elb_rds_dns).
ELB_BAN_LIST = [
    'Apros',
]
# If the ELB name has the key in its name these plays
# will be used for the DNS CNAME tuple. This is used for
# commoncluster.
# Key matching is also by substring of the ELB name.
ELB_PLAY_MAPPINGS = {
    'RabbitMQ': 'rabbitmq',
    'Xqueue': 'xqueue',
    'Elastic': 'elasticsearch',
}
class DNSRecord():
    """Plain value holder describing one route53 resource record set.

    Instances are stored in sets by the callers; note there is no
    __eq__/__hash__, so set membership is by object identity.
    """

    def __init__(self, zone, record_name, record_type,
                 record_ttl, record_values):
        self.zone = zone
        self.record_name = record_name
        self.record_type = record_type
        self.record_ttl = record_ttl
        self.record_values = record_values
def add_or_update_record(dns_records):
    """
    Creates or updates a DNS record in a hosted route53
    zone

    Reads the module-level ``args`` namespace (for --noop/--force) and the
    ``r53`` route53 connection created in the __main__ block.
    Exits the process when two records share the same name.
    """
    change_set = boto.route53.record.ResourceRecordSets()
    record_names = set()
    for record in dns_records:
        status_msg = """
        record_name: {}
        record_type: {}
        record_ttl: {}
        record_values: {}
        """.format(record.record_name, record.record_type,
                   record.record_ttl, record.record_values)
        if args.noop:
            print("Would have updated DNS record:\n{}".format(status_msg))
        else:
            print("Updating DNS record:\n{}".format(status_msg))
        # Refuse to emit two records with the same name in one run.
        if record.record_name in record_names:
            print("Unable to create record for {} with value {} because one already exists!".format(
                record.record_values, record.record_name))
            sys.exit(1)
        record_names.add(record.record_name)
        zone_id = record.zone.Id.replace("/hostedzone/", "")
        records = r53.get_all_rrsets(zone_id)
        # Map existing record names (minus the trailing dot) to rrsets.
        old_records = {r.name[:-1]: r for r in records}
        # If the record name already points to something.
        # Delete the existing connection. If the record has
        # the same type and name skip it.
        if record.record_name in old_records.keys():
            if record.record_name + "." == old_records[record.record_name].name and \
                    record.record_type == old_records[record.record_name].type:
                print("Record for {} already exists and is identical, skipping.\n".format(
                    record.record_name))
                continue
            if args.force:
                print("Deleting record:\n{}".format(status_msg))
                change = change_set.add_change(
                    'DELETE',
                    record.record_name,
                    record.record_type,
                    record.record_ttl)
            else:
                raise RuntimeError(
                    "DNS record exists for {} and force was not specified.".
                    format(record.record_name))
            # A DELETE change must list the record's current values exactly.
            for value in old_records[record.record_name].resource_records:
                change.add_value(value)
        change = change_set.add_change(
            'CREATE',
            record.record_name,
            record.record_type,
            record.record_ttl)
        for value in record.record_values:
            change.add_value(value)
    if args.noop:
        print("Would have submitted the following change set:\n")
    else:
        print("Submitting the following change set:\n")
    xml_doc = xml.dom.minidom.parseString(change_set.to_xml())
    print(xml_doc.toprettyxml(newl=''))  # newl='' to remove extra newlines
    if not args.noop:
        r53.change_rrsets(zone_id, change_set.to_xml())
def get_or_create_hosted_zone(zone_name):
    """
    Creates the zone and updates the parent
    with the NS information in the zone

    returns: created zone

    Reads the module-level ``args`` namespace and ``r53`` connection.
    """
    zone = r53.get_hosted_zone_by_name(zone_name)
    # The parent zone is everything after the first label, e.g.
    # "vpc.example.com" -> "example.com".
    parent_zone_name = ".".join(zone_name.split('.')[1:])
    parent_zone = r53.get_hosted_zone_by_name(parent_zone_name)
    if args.noop:
        if parent_zone:
            print("Would have created/updated zone: {} parent: {}".format(
                zone_name, parent_zone_name))
        else:
            # NOTE(review): this format string has one placeholder but two
            # arguments; the extra parent_zone_name is ignored by str.format.
            print("Would have created/updated zone: {}".format(
                zone_name, parent_zone_name))
        return zone
    if not zone:
        print("zone {} does not exist, creating".format(zone_name))
        ts = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%SUTC')
        zone = r53.create_hosted_zone(
            zone_name, comment="Created by vpc_dns script - {}".format(ts))
    # Delegate to the new zone by adding its NS records to the parent.
    if parent_zone:
        print("Updating parent zone {}".format(parent_zone_name))
        dns_records = set()
        dns_records.add(DNSRecord(parent_zone, zone_name, 'NS', 900, zone.NameServers))
        add_or_update_record(dns_records)
    return zone
def get_security_group_dns(group_name):
    """Derive (environment, deployment, play) from an ELB security group name.

    Expects exactly four dash-separated fields, e.g.
    'stage-edx-RabbitMQELBSecurityGroup-YB8ZKIZYN1EN'; the trailing salt
    is discarded and the play is the group field lower-cased with the
    'ELBSecurityGroup' suffix stripped.
    """
    environment, deployment, sec_group, _salt = group_name.split('-')
    return environment, deployment, sec_group.replace("ELBSecurityGroup", "").lower()
def get_dns_from_instances(elb):
    """Return (environment, deployment, play) tags from an ELB's instances.

    Looks up each instance attached to the ELB via the module-level
    ``ec2_con`` connection and returns the tags of the first instance that
    has them; exits the process when an instance cannot be resolved or is
    missing the expected tags.

    NOTE(review): if the ELB has no registered instances, the loop body
    never runs and the return references unbound locals (NameError) --
    confirm whether empty ELBs can occur for these stacks.
    """
    for inst in elb.instances:
        try:
            instance = ec2_con.get_all_instances(
                instance_ids=[inst.id])[0].instances[0]
        except IndexError:
            # Instance id was not found in EC2.
            print("instance {} attached to elb {}".format(inst, elb))
            sys.exit(1)
        try:
            env_tag = instance.tags['environment']
            deployment_tag = instance.tags['deployment']
            if 'play' in instance.tags:
                play_tag = instance.tags['play']
            else:
                # deprecated, for backwards compatibility
                play_tag = instance.tags['role']
            break  # only need the first instance for tag info
        except KeyError:
            print("Instance {}, attached to elb {} does not "
                  "have a tag for environment, play or deployment".format(inst, elb))
            sys.exit(1)
    return env_tag, deployment_tag, play_tag
def update_elb_rds_dns(zone):
    """
    Creates elb and rds CNAME records
    in a zone for args.stack_name.
    Uses the tags of the instances attached
    to the ELBs to create the dns name

    Reads the module-level ``args`` namespace and the boto connections
    (``elb_con``, ``rds_con``) created in the __main__ block.
    """
    dns_records = set()
    vpc_id = vpc_for_stack_name(args.stack_name, args.aws_id, args.aws_secret)
    if not zone and args.noop:
        # use a placeholder for zone name
        # if it doesn't exist
        zone_name = "<zone name>"
    else:
        # Strip the trailing dot from the fully qualified zone name.
        zone_name = zone.Name[:-1]
    stack_elbs = [elb for elb in elb_con.get_all_load_balancers()
                  if elb.vpc_id == vpc_id]
    for elb in stack_elbs:
        env_tag, deployment_tag, play_tag = get_dns_from_instances(elb)
        # Override the play tag if a substring of the elb name
        # is in ELB_PLAY_MAPPINGS
        for key in ELB_PLAY_MAPPINGS.keys():
            if key in elb.name:
                play_tag = ELB_PLAY_MAPPINGS[key]
                break
        fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, play_tag, zone_name)
        # Skip over ELBs if a substring of the ELB name is in
        # the ELB_BAN_LIST
        if any(name in elb.name for name in ELB_BAN_LIST):
            print("Skipping {} because it is on the ELB ban list".format(elb.name))
            continue
        dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [elb.dns_name]))
    stack_rdss = [rds for rds in rds_con.get_all_dbinstances()
                  if hasattr(rds.subnet_group, 'vpc_id') and
                  rds.subnet_group.vpc_id == vpc_id]
    # TODO the current version of the RDS API doesn't support
    # looking up RDS instance tags.  Hence, we are using the
    # env_tag and deployment_tag that was set via the loop over instances above.
    # NOTE(review): env_tag/deployment_tag are unbound if the stack has RDS
    # instances but no ELBs -- confirm that cannot happen for these stacks.
    rds_endpoints = set()
    for rds in stack_rdss:
        # BUG FIX: this previously read stack_rdss[0].endpoint[0], always
        # using the FIRST instance's endpoint for every iteration, which
        # defeated the multi-AZ de-duplication below. Use the loop variable.
        endpoint = rds.endpoint[0]
        fqdn = "{}-{}-{}.{}".format(env_tag, deployment_tag, 'rds', zone_name)
        # filter out rds instances with the same endpoints (multi-AZ)
        if endpoint not in rds_endpoints:
            dns_records.add(DNSRecord(zone, fqdn, 'CNAME', 600, [endpoint]))
        rds_endpoints.add(endpoint)
    add_or_update_record(dns_records)
if __name__ == "__main__":
    description = """
    Give a cloudformation stack name, for an edx stack, setup
    DNS names for the ELBs in the stack
    DNS entries will be created with the following format
    <environment>-<deployment>-<play>.edx.org
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-s', '--stack-name', required=True,
                        help="The name of the cloudformation stack.")
    parser.add_argument('-n', '--noop',
                        help="Don't make any changes.", action="store_true",
                        default=False)
    parser.add_argument('-z', '--zone-name', default="edx.org",
                        help="The name of the zone under which to "
                        "create the dns entries.")
    parser.add_argument('-f', '--force',
                        help="Force reuse of an existing name in a zone",
                        action="store_true", default=False)
    parser.add_argument('--aws-id', default=None,
                        help="read only aws key for fetching instance information"
                        "the account you wish add entries for")
    parser.add_argument('--aws-secret', default=None,
                        help="read only aws id for fetching instance information for"
                        "the account you wish add entries for")
    # The names bound below (args, ec2_con, elb_con, rds_con, r53) are
    # module-level globals read directly by the functions above.
    args = parser.parse_args()
    # Connect to ec2 using the provided credentials on the commandline
    ec2_con = boto.connect_ec2(args.aws_id, args.aws_secret)
    elb_con = boto.connect_elb(args.aws_id, args.aws_secret)
    rds_con = boto.connect_rds(args.aws_id, args.aws_secret)
    # Connect to route53 using the user's .boto file
    r53 = boto.connect_route53()
    zone = get_or_create_hosted_zone(args.zone_name)
    update_elb_rds_dns(zone)
|
Bachaco-ve/odoo | refs/heads/8.0 | addons/account/wizard/account_chart.py | 271 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_chart(osv.osv_memory):
    """
    For Chart of Accounts

    Transient wizard: lets the user pick a fiscal year / period range and
    a move-state filter, then opens the chart-of-accounts tree with that
    selection encoded in the action context.
    """
    _name = "account.chart"
    _description = "Account chart"
    _columns = {
        'fiscalyear': fields.many2one('account.fiscalyear', \
                                    'Fiscal year',  \
                                    help='Keep empty for all open fiscal years'),
        'period_from': fields.many2one('account.period', 'Start period'),
        'period_to': fields.many2one('account.period', 'End period'),
        'target_move': fields.selection([('posted', 'All Posted Entries'),
                                         ('all', 'All Entries'),
                                        ], 'Target Moves', required=True),
    }

    def _get_fiscalyear(self, cr, uid, context=None):
        """Return default Fiscalyear value"""
        return self.pool.get('account.fiscalyear').find(cr, uid, context=context)

    def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
        # When the fiscal year changes, preselect the year's first period
        # (special/opening periods preferred via "p.special DESC") and the
        # latest period that has already started ("p.date_start < NOW()").
        res = {}
        if fiscalyear_id:
            start_period = end_period = False
            cr.execute('''
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %s
                               ORDER BY p.date_start ASC, p.special DESC
                               LIMIT 1) AS period_start
                UNION ALL
                SELECT * FROM (SELECT p.id
                               FROM account_period p
                               LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
                               WHERE f.id = %s
                               AND p.date_start < NOW()
                               ORDER BY p.date_stop DESC
                               LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
            periods = [i[0] for i in cr.fetchall()]
            if periods:
                start_period = periods[0]
                # The UNION may return one row when no started period exists.
                if len(periods) > 1:
                    end_period = periods[1]
            res['value'] = {'period_from': start_period, 'period_to': end_period}
        else:
            res['value'] = {'period_from': False, 'period_to': False}
        return res

    def account_chart_open_window(self, cr, uid, ids, context=None):
        """
        Opens chart of Accounts
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of account chart’s IDs
        @return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries
        """
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        period_obj = self.pool.get('account.period')
        fy_obj = self.pool.get('account.fiscalyear')
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        # Start from the generic chart action and specialize its context.
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_tree')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        fiscalyear_id = data.get('fiscalyear', False) and data['fiscalyear'][0] or False
        result['periods'] = []
        if data['period_from'] and data['period_to']:
            period_from = data.get('period_from', False) and data['period_from'][0] or False
            period_to = data.get('period_to', False) and data['period_to'][0] or False
            result['periods'] = period_obj.build_ctx_periods(cr, uid, period_from, period_to)
        result['context'] = str({'fiscalyear': fiscalyear_id, 'periods': result['periods'], \
                                    'state': data['target_move']})
        # Suffix the window title with the fiscal year code, when chosen.
        if fiscalyear_id:
            result['name'] += ':' + fy_obj.read(cr, uid, [fiscalyear_id], context=context)[0]['code']
        return result

    _defaults = {
        'target_move': 'posted',
        'fiscalyear': _get_fiscalyear,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
joyxu/kernelci-backend | refs/heads/master | app/tests/__init__.py | 1 | # Copyright (C) 2014 Linaro Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests suite for kernel-ci-backend."""
import unittest
def test_modules():
    """Return the dotted names of all unit-test modules in the suite."""
    handler_tests = [
        "handlers.tests.test_batch_handler",
        "handlers.tests.test_bisect_handler",
        "handlers.tests.test_boot_handler",
        "handlers.tests.test_boot_trigger_handler",
        "handlers.tests.test_count_handler",
        "handlers.tests.test_defconf_handler",
        "handlers.tests.test_handler_response",
        "handlers.tests.test_handlers_common",
        "handlers.tests.test_job_handler",
        "handlers.tests.test_lab_handler",
        "handlers.tests.test_report_handler",
        "handlers.tests.test_send_handler",
        "handlers.tests.test_test_case_handler",
        "handlers.tests.test_test_set_handler",
        "handlers.tests.test_test_suite_handler",
        "handlers.tests.test_token_handler",
        "handlers.tests.test_upload_handler",
        "handlers.tests.test_version_handler",
    ]
    model_tests = [
        "models.tests.test_bisect_model",
        "models.tests.test_boot_model",
        "models.tests.test_defconfig_model",
        "models.tests.test_error_log_model",
        "models.tests.test_error_summary_model",
        "models.tests.test_job_model",
        "models.tests.test_lab_model",
        "models.tests.test_report_model",
        "models.tests.test_test_case_model",
        "models.tests.test_test_set_model",
        "models.tests.test_test_suite_model",
        "models.tests.test_token_model",
    ]
    util_tests = [
        "utils.batch.tests.test_batch_common",
        "utils.bisect.tests.test_bisect",
        "utils.report.tests.test_boot_report",
        "utils.report.tests.test_build_report",
        "utils.report.tests.test_report_common",
        "utils.tests.test_base",
        "utils.tests.test_bootimport",
        "utils.tests.test_docimport",
        "utils.tests.test_log_parser",
        "utils.tests.test_tests_import",
        "utils.tests.test_upload",
        "utils.tests.test_validator",
    ]
    # Concatenation preserves the original flat ordering.
    return handler_tests + model_tests + util_tests
def test_suite():
    """Create a unittest.TestSuite object."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    # Load each module listed by test_modules() and collect its tests.
    for module_name in test_modules():
        suite.addTests(loader.loadTestsFromName(module_name))
    return suite
|
OpenNetworkingFoundation/Snowmass-ONFOpenTransport | refs/heads/develop | RI/flask_server/tapi_server/models/tapi_oam_pm_threshold_data.py | 4 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_local_class import TapiCommonLocalClass # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_time_period import TapiCommonTimePeriod # noqa: F401,E501
from tapi_server import util
class TapiOamPmThresholdData(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, name=None, local_id=None, is_transient=False, granularity_period=None):  # noqa: E501
        """TapiOamPmThresholdData - a model defined in OpenAPI

        :param name: The name of this TapiOamPmThresholdData.  # noqa: E501
        :type name: List[TapiCommonNameAndValue]
        :param local_id: The local_id of this TapiOamPmThresholdData.  # noqa: E501
        :type local_id: str
        :param is_transient: The is_transient of this TapiOamPmThresholdData.  # noqa: E501
        :type is_transient: bool
        :param granularity_period: The granularity_period of this TapiOamPmThresholdData.  # noqa: E501
        :type granularity_period: TapiCommonTimePeriod
        """
        # Declared attribute types used by the base Model for (de)serialization.
        self.openapi_types = {
            'name': List[TapiCommonNameAndValue],
            'local_id': str,
            'is_transient': bool,
            'granularity_period': TapiCommonTimePeriod
        }
        # Python attribute name -> JSON/YAML key mapping.
        self.attribute_map = {
            'name': 'name',
            'local_id': 'local-id',
            'is_transient': 'is-transient',
            'granularity_period': 'granularity-period'
        }
        self._name = name
        self._local_id = local_id
        self._is_transient = is_transient
        self._granularity_period = granularity_period

    @classmethod
    def from_dict(cls, dikt) -> 'TapiOamPmThresholdData':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.oam.PmThresholdData of this TapiOamPmThresholdData.  # noqa: E501
        :rtype: TapiOamPmThresholdData
        """
        return util.deserialize_model(dikt, cls)

    @property
    def name(self):
        """Gets the name of this TapiOamPmThresholdData.

        List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity.  # noqa: E501

        :return: The name of this TapiOamPmThresholdData.
        :rtype: List[TapiCommonNameAndValue]
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this TapiOamPmThresholdData.

        List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity.  # noqa: E501

        :param name: The name of this TapiOamPmThresholdData.
        :type name: List[TapiCommonNameAndValue]
        """
        self._name = name

    @property
    def local_id(self):
        """Gets the local_id of this TapiOamPmThresholdData.

        none  # noqa: E501

        :return: The local_id of this TapiOamPmThresholdData.
        :rtype: str
        """
        return self._local_id

    @local_id.setter
    def local_id(self, local_id):
        """Sets the local_id of this TapiOamPmThresholdData.

        none  # noqa: E501

        :param local_id: The local_id of this TapiOamPmThresholdData.
        :type local_id: str
        """
        self._local_id = local_id

    @property
    def is_transient(self):
        """Gets the is_transient of this TapiOamPmThresholdData.

        none  # noqa: E501

        :return: The is_transient of this TapiOamPmThresholdData.
        :rtype: bool
        """
        return self._is_transient

    @is_transient.setter
    def is_transient(self, is_transient):
        """Sets the is_transient of this TapiOamPmThresholdData.

        none  # noqa: E501

        :param is_transient: The is_transient of this TapiOamPmThresholdData.
        :type is_transient: bool
        """
        self._is_transient = is_transient

    @property
    def granularity_period(self):
        """Gets the granularity_period of this TapiOamPmThresholdData.


        :return: The granularity_period of this TapiOamPmThresholdData.
        :rtype: TapiCommonTimePeriod
        """
        return self._granularity_period

    @granularity_period.setter
    def granularity_period(self, granularity_period):
        """Sets the granularity_period of this TapiOamPmThresholdData.


        :param granularity_period: The granularity_period of this TapiOamPmThresholdData.
        :type granularity_period: TapiCommonTimePeriod
        """
        self._granularity_period = granularity_period
|
kynikos/wiki-scripts | refs/heads/master | tests/with_api/test_site.py | 2 | #! /usr/bin/env python3
import pytest
from ws.client.api import LoginFailed
# TODO: pytest attribute
#@attr(speed="slow")
@pytest.mark.skip(reason="The api fixture was removed.")
class test_site:
"""
Tests intended mostly for detecting changes in the ArchWiki configuration.
"""
props_data = {
"general": {
"mainpage": "Main page",
"base": "https://wiki.archlinux.org/index.php/Main_page",
"sitename": "ArchWiki",
"logo": "https://wiki.archlinux.org/skins/ArchLinux/archlogo.png",
"generator": "MediaWiki 1.29.0",
"phpversion": "7.1.8",
"phpsapi": "fpm-fcgi",
"dbtype": "mysql",
"dbversion": "5.7.18-16-log",
"imagewhitelistenabled": "",
"langconversion": "",
"titleconversion": "",
"linkprefixcharset": "",
"linkprefix": "",
"linktrail": "/^([a-z]+)(.*)$/sD",
"legaltitlechars": " %!\"$&'()*,\\-.\\/0-9:;=?@A-Z\\\\^_`a-z~\\x80-\\xFF+",
"invalidusernamechars": "@:",
"case": "first-letter",
"allcentralidlookupproviders": ["local"],
"centralidlookupprovider": "local",
"lang": "en",
"fallback": [],
"fallback8bitEncoding": "windows-1252",
"fixarabicunicode": "",
"fixmalayalamunicode": "",
"writeapi": "",
"timezone": "UTC",
"timeoffset": 0,
"articlepath": "/index.php/$1",
"scriptpath": "",
"script": "/index.php",
"variantarticlepath": False,
"server": "https://wiki.archlinux.org",
"servername": "wiki.archlinux.org",
"wikiid": "archwiki",
"maxarticlesize": 2097152,
"magiclinks": [],
"interwikimagic": "",
"uploadsenabled": "",
"maxuploadsize": 104857600,
"minuploadchunksize": 1024,
"thumblimits": [
120,
150,
180,
200,
250,
300
],
"imagelimits": [
{
"width": 320,
"height": 240
},
{
"width": 640,
"height": 480
},
{
"width": 800,
"height": 600
},
{
"width": 1024,
"height": 768
},
{
"width": 1280,
"height": 1024
}
],
"favicon": "https://wiki.archlinux.org/favicon.ico",
"galleryoptions": { # MW 1.29
"captionLength": "",
"imageHeight": 120,
"imageWidth": 120,
"imagesPerRow": 0,
"mode": "traditional",
"showBytes": "",
},
},
"usergroups": [
{
"name": "*",
"rights": [
"createaccount",
"read",
"createpage",
"createtalk",
"editmyusercss",
"editmyuserjs",
"viewmywatchlist",
"editmywatchlist",
"viewmyprivateinfo",
"editmyprivateinfo",
"editmyoptions",
"abusefilter-log-detail",
"abusefilter-view",
"abusefilter-log"
]
},
{
"name": "user",
"rights": [
"move",
"move-subpages",
"move-rootuserpages",
"move-categorypages",
"movefile",
"read",
"edit",
"createpage",
"createtalk",
"minoredit",
"purge",
"sendemail",
"applychangetags",
"changetags",
"editcontentmodel",
]
},
{
"name": "autoconfirmed",
"rights": [
"autoconfirmed",
"editsemiprotected",
"writeapi"
]
},
{
"name": "bot",
"rights": [
"bot",
"autoconfirmed",
"editsemiprotected",
"nominornewtalk",
"autopatrol",
"suppressredirect",
"apihighlimits",
"writeapi"
]
},
{
"name": "sysop",
"rights": [
"block",
"createaccount",
"delete",
"bigdelete",
"deletedhistory",
"deletedtext",
"undelete",
"editinterface",
"editusercss",
"edituserjs",
"import",
"importupload",
"move",
"move-subpages",
"move-rootuserpages",
"move-categorypages",
"patrol",
"autopatrol",
"protect",
"editprotected",
"rollback",
"upload",
"reupload",
"reupload-shared",
"unwatchedpages",
"autoconfirmed",
"editsemiprotected",
"ipblock-exempt",
"blockemail",
"markbotedits",
"apihighlimits",
"browsearchive",
"noratelimit",
"movefile",
"unblockself",
"suppressredirect",
"mergehistory",
"managechangetags",
"deletechangetags",
"deleterevision",
"writeapi",
"abusefilter-modify",
"abusefilter-private",
"abusefilter-modify-restricted",
"abusefilter-revert",
"checkuser",
"checkuser-log",
"interwiki",
"nuke",
]
},
{
"name": "bureaucrat",
"rights": [
"userrights",
"noratelimit"
]
},
{
"name": "maintainer",
"rights": [
"autopatrol",
"patrol",
"noratelimit",
"suppressredirect",
"rollback",
"browsearchive",
"apihighlimits",
"unwatchedpages",
"deletedhistory",
"deletedtext",
"writeapi"
]
},
{
"name": "checkuser",
"rights": [
"checkuser",
"checkuser-log"
]
}
],
"extensions": [
{
"type": "other",
"name": "FunnyQuestion",
"description": "Challenge-response authentication",
"author": "Pierre Schmitz",
"url": "https://pierre-schmitz.com/",
"version": "2.4"
},
{
"type": "other",
"name": "MobileFrontend",
"descriptionmsg": "mobile-frontend-desc",
"author": "Patrick Reilly, Max Semenik, Jon Robson, Arthur Richards, Brion Vibber, Juliusz Gonera, Ryan Kaldari, Florian Schmidt, Rob Moen, Sam Smith",
"url": "https://www.mediawiki.org/wiki/Extension:MobileFrontend",
"version": "1.0.0",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/MobileFrontend",
"credits": "/index.php/Special:Version/Credits/MobileFrontend"
},
{
"type": "skin",
"name": "ArchLinux",
"description": "MediaWiki skin based on MonoBook",
"author": "Pierre Schmitz",
"url": "https://www.archlinux.org",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/ArchLinux"
},
{
"type": "specialpage",
"name": "Nuke",
"descriptionmsg": "nuke-desc",
"author": "Brion Vibber, Jeroen De Dauw",
"url": "https://www.mediawiki.org/wiki/Extension:Nuke",
"version": "1.3.0",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/Nuke"
},
{
"type": "specialpage",
"name": "CheckUser",
"descriptionmsg": "checkuser-desc",
"author": "Tim Starling, Aaron Schulz",
"url": "https://www.mediawiki.org/wiki/Extension:CheckUser",
"version": "2.4",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/CheckUser"
},
{
"type": "specialpage",
"name": "Interwiki",
"descriptionmsg": "interwiki-desc",
"author": "Stephanie Amanda Stevens, Alexandre Emsenhuber, Robin Pepermans, Siebrand Mazeland, Platonides, Raimond Spekking, Sam Reed, Jack Phoenix, Calimonius the Estrange, ...",
"url": "https://www.mediawiki.org/wiki/Extension:Interwiki",
"version": "3.1 20160307",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/Interwiki"
},
{
"type": "antispam",
"name": "Abuse Filter",
"descriptionmsg": "abusefilter-desc",
"author": "Andrew Garrett, River Tarnell, Victor Vasiliev, Marius Hoch",
"url": "https://www.mediawiki.org/wiki/Extension:AbuseFilter",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/Abuse_Filter"
},
{
"type": "parserhook",
"name": "ParserFunctions",
"descriptionmsg": "pfunc_desc",
"author": "Tim Starling, Robert Rohde, Ross McClure, Juraj Simlovic",
"url": "https://www.mediawiki.org/wiki/Extension:ParserFunctions",
"version": "1.6.0",
"license-name": "GPL-2.0+",
"license": "/index.php/Special:Version/License/ParserFunctions"
}
],
"fileextensions": [
{
"ext": "png"
},
{
"ext": "gif"
},
{
"ext": "jpg"
},
{
"ext": "jpeg"
},
{
"ext": "webp"
},
],
"rightsinfo": {
"url": "",
"text": "GNU Free Documentation License 1.3 or later"
},
"restrictions": {
"types": [
"create",
"edit",
"move",
"upload"
],
"levels": [
"",
"autoconfirmed",
"sysop"
],
"cascadinglevels": [
"sysop"
],
"semiprotectedlevels": [
"autoconfirmed"
]
},
"skins": [
{
"code": "archlinux",
"default": "",
"*": "ArchLinux"
},
{
"code": "minerva",
"*": "Minerva"
},
{
"code": "fallback",
"unusable": "",
"*": "Fallback"
},
{
"code": "apioutput",
"unusable": "",
"*": "ApiOutput"
}
],
"extensiontags": [
"<pre>",
"<nowiki>",
"<gallery>",
"<indicator>"
],
"protocols": [
"bitcoin:",
"ftp://",
"ftps://",
"geo:",
"git://",
"gopher://",
"http://",
"https://",
"irc://",
"ircs://",
"magnet:",
"mailto:",
"mms://",
"news:",
"nntp://",
"redis://",
"sftp://",
"sip:",
"sips:",
"sms:",
"ssh://",
"svn://",
"tel:",
"telnet://",
"urn:",
"worldwind://",
"xmpp:",
"//"
],
"defaultoptions": {
"ccmeonemails": 0,
"cols": 80,
"date": "default",
"diffonly": 0,
"disablemail": 0,
"editfont": "default",
"editondblclick": 0,
"editsectiononrightclick": 0,
"enotifminoredits": 0,
"enotifrevealaddr": 0,
"enotifusertalkpages": 1,
"enotifwatchlistpages": 1,
"extendwatchlist": 1,
"fancysig": 0,
"forceeditsummary": 1,
"gender": "unknown",
"hideminor": 0,
"hidecategorization": 1, # MW 1.28
"hidepatrolled": 0,
"imagesize": 2,
"math": 1,
"minordefault": 0,
"newpageshidepatrolled": 0,
"nickname": "",
"norollbackdiff": 0,
"numberheadings": 0,
"previewonfirst": 0,
"previewontop": 1,
"rcdays": 7,
"rcenhancedfilters": 0, # MW 1.29
"rclimit": 50,
"rows": 25,
"showhiddencats": 0,
"shownumberswatching": 1,
"showtoolbar": 1,
"skin": "archlinux",
"stubthreshold": 0,
"thumbsize": 5,
"timecorrection": "System|0", # MW 1.28
"underline": 2,
"uselivepreview": 0,
"usenewrc": 1,
"watchcreations": 1,
"watchdefault": 1,
"watchdeletion": 0,
"watchlistdays": 3,
"watchlisthideanons": 0,
"watchlisthidebots": 0,
"watchlisthidecategorization": 1, # MW 1.28
"watchlisthideliu": 0,
"watchlisthideminor": 0,
"watchlisthideown": 0,
"watchlisthidepatrolled": 0,
"watchlistreloadautomatically": 0, # MW 1.28
"watchmoves": 0,
"watchrollback": 0,
"watchuploads": 1, # MW 1.28
"wllimit": 250,
"useeditwarning": 1,
"prefershttps": 1,
"language": "en",
"variant-gan": "gan",
"variant-iu": "iu",
"variant-kk": "kk",
"variant-ku": "ku",
"variant-shi": "shi",
"variant-sr": "sr",
"variant-tg": "tg",
"variant-uz": "uz",
"variant-zh": "zh",
"searchNs0": True,
},
"namespaces": {
-2: {'*': 'Media', 'canonical': 'Media', 'case': 'first-letter', 'id': -2},
-1: {'*': 'Special', 'canonical': 'Special', 'case': 'first-letter', 'id': -1},
0: {'*': '',
'case': 'first-letter',
'content': '',
'id': 0,
'subpages': ''},
1: {'*': 'Talk',
'canonical': 'Talk',
'case': 'first-letter',
'id': 1,
'subpages': ''},
2: {'*': 'User',
'canonical': 'User',
'case': 'first-letter',
'id': 2,
'subpages': ''},
3: {'*': 'User talk',
'canonical': 'User talk',
'case': 'first-letter',
'id': 3,
'subpages': ''},
4: {'*': 'ArchWiki',
'canonical': 'Project',
'case': 'first-letter',
'id': 4,
'subpages': ''},
5: {'*': 'ArchWiki talk',
'canonical': 'Project talk',
'case': 'first-letter',
'id': 5,
'subpages': ''},
6: {'*': 'File', 'canonical': 'File', 'case': 'first-letter', 'id': 6},
7: {'*': 'File talk',
'canonical': 'File talk',
'case': 'first-letter',
'id': 7,
'subpages': ''},
8: {'*': 'MediaWiki',
'canonical': 'MediaWiki',
'case': 'first-letter',
'id': 8,
'subpages': ''},
9: {'*': 'MediaWiki talk',
'canonical': 'MediaWiki talk',
'case': 'first-letter',
'id': 9,
'subpages': ''},
10: {'*': 'Template',
'canonical': 'Template',
'case': 'first-letter',
'id': 10,
'subpages': ''},
11: {'*': 'Template talk',
'canonical': 'Template talk',
'case': 'first-letter',
'id': 11,
'subpages': ''},
12: {'*': 'Help',
'canonical': 'Help',
'case': 'first-letter',
'id': 12,
'subpages': ''},
13: {'*': 'Help talk',
'canonical': 'Help talk',
'case': 'first-letter',
'id': 13,
'subpages': ''},
14: {'*': 'Category',
'canonical': 'Category',
'case': 'first-letter',
'id': 14},
15: {'*': 'Category talk',
'canonical': 'Category talk',
'case': 'first-letter',
'id': 15,
'subpages': ''}
},
"interwikimap": {
'ar': {'language': 'العربية',
'local': '',
'prefix': 'ar',
'url': 'https://wiki.archlinux.org/index.php/$1_(%D8%A7%D9%84%D8%B9%D8%B1%D8%A8%D9%8A%D8%A9)'},
'arxiv': {'prefix': 'arxiv', 'url': 'http://www.arxiv.org/abs/$1'},
'bg': {'language': 'български',
'local': '',
'prefix': 'bg',
'url': 'https://wiki.archlinux.org/index.php/$1_(%D0%91%D1%8A%D0%BB%D0%B3%D0%B0%D1%80%D1%81%D0%BA%D0%B8)'},
'commons': {'api': 'https://commons.wikimedia.org/w/api.php',
'prefix': 'commons',
'url': 'https://commons.wikimedia.org/wiki/$1'},
'cs': {'language': 'čeština',
'local': '',
'prefix': 'cs',
'url': 'https://wiki.archlinux.org/index.php/$1_(%C4%8Cesky)'},
'da': {'language': 'dansk',
'local': '',
'prefix': 'da',
'url': 'https://wiki.archlinux.org/index.php/$1_(Dansk)'},
'de': {'language': 'Deutsch',
'local': '',
'prefix': 'de',
'url': 'https://wiki.archlinux.de/title/$1'},
'debian': {'prefix': 'debian', 'url': 'https://wiki.debian.org/$1'},
'doi': {'prefix': 'doi', 'url': 'http://dx.doi.org/$1'},
'el': {'language': 'Ελληνικά',
'local': '',
'prefix': 'el',
'url': 'https://wiki.archlinux.org/index.php/$1_(%CE%95%CE%BB%CE%BB%CE%B7%CE%BD%CE%B9%CE%BA%CE%AC)'},
'emacswiki': {'prefix': 'emacswiki',
'url': 'http://www.emacswiki.org/cgi-bin/wiki.pl?$1'},
'en': {'language': 'English',
'local': '',
'prefix': 'en',
'url': 'https://wiki.archlinux.org/index.php/$1'},
'es': {'language': 'español',
'local': '',
'prefix': 'es',
'url': 'https://wiki.archlinux.org/index.php/$1_(Espa%C3%B1ol)'},
'fa': {'language': 'فارسی',
'local': '',
'prefix': 'fa',
'url': 'http://wiki.archusers.ir/index.php/$1'},
'fi': {'language': 'suomi',
'local': '',
'prefix': 'fi',
'url': 'https://wiki.archlinux.org/index.php/$1_(Suomi)'},
'foldoc': {'prefix': 'foldoc', 'url': 'http://foldoc.org/?$1'},
'fr': {'language': 'français',
'local': '',
'prefix': 'fr',
'url': 'http://wiki.archlinux.fr/$1'},
'freebsdman': {'prefix': 'freebsdman',
'url': 'http://www.freebsd.org/cgi/man.cgi?query=$1'},
'funtoo': {'api': 'http://www.funtoo.org/api.php',
'prefix': 'funtoo',
'url': 'http://www.funtoo.org/$1'},
'gentoo': {'api': 'https://wiki.gentoo.org/api.php',
'prefix': 'gentoo',
'url': 'https://wiki.gentoo.org/wiki/$1'},
'gregswiki': {'prefix': 'gregswiki', 'url': 'http://mywiki.wooledge.org/$1'},
'he': {'language': 'עברית',
'local': '',
'prefix': 'he',
'url': 'https://wiki.archlinux.org/index.php/$1_(%D7%A2%D7%91%D7%A8%D7%99%D7%AA)'},
'hr': {'language': 'hrvatski',
'local': '',
'prefix': 'hr',
'url': 'https://wiki.archlinux.org/index.php/$1_(Hrvatski)'},
'hu': {'language': 'magyar',
'local': '',
'prefix': 'hu',
'url': 'https://wiki.archlinux.org/index.php/$1_(Magyar)'},
'id': {'language': 'Bahasa Indonesia',
'local': '',
'prefix': 'id',
'url': 'https://wiki.archlinux.org/index.php/$1_(Indonesia)'},
'it': {'language': 'italiano',
'local': '',
'prefix': 'it',
'url': 'https://wiki.archlinux.org/index.php/$1_(Italiano)'},
'ja': {'language': '日本語',
'local': '',
'prefix': 'ja',
'url': 'https://wiki.archlinuxjp.org/index.php/$1'},
'ko': {'language': '한국어',
'local': '',
'prefix': 'ko',
'url': 'https://wiki.archlinux.org/index.php/$1_(%ED%95%9C%EA%B5%AD%EC%96%B4)'},
'linuxwiki': {'prefix': 'linuxwiki', 'url': 'http://linuxwiki.de/$1'},
'lqwiki': {'prefix': 'lqwiki',
'url': 'http://wiki.linuxquestions.org/wiki/$1'},
'lt': {'language': 'lietuvių',
'local': '',
'prefix': 'lt',
'url': 'https://wiki.archlinux.org/index.php/$1_(Lietuvi%C5%A1kai)'},
'meta': {'api': 'https://meta.wikimedia.org/w/api.php',
'prefix': 'meta',
'url': 'https://meta.wikimedia.org/wiki/$1'},
'metawikimedia': {'api': 'https://meta.wikimedia.org/w/api.php',
'prefix': 'metawikimedia',
'url': 'https://meta.wikimedia.org/wiki/$1'},
'mozillawiki': {'api': 'https://wiki.mozilla.org/api.php',
'prefix': 'mozillawiki',
'url': 'http://wiki.mozilla.org/$1'},
'mw': {'api': 'https://www.mediawiki.org/w/api.php',
'prefix': 'mw',
'url': 'https://www.mediawiki.org/wiki/$1'},
'nl': {'language': 'Nederlands',
'local': '',
'prefix': 'nl',
'url': 'https://wiki.archlinux.org/index.php/$1_(Nederlands)'},
'phab': {'prefix': 'phab', 'url': 'https://phabricator.wikimedia.org/$1'},
'phabricator': {'prefix': 'phabricator',
'url': 'https://phabricator.wikimedia.org/$1'},
'pl': {'language': 'polski',
'local': '',
'prefix': 'pl',
'url': 'https://wiki.archlinux.org/index.php/$1_(Polski)'},
'pmid': {'prefix': 'pmid', 'url': 'https://www.ncbi.nlm.nih.gov/pubmed/$1?dopt=Abstract'},
'pt': {'language': 'português',
'local': '',
'prefix': 'pt',
'url': 'https://wiki.archlinux.org/index.php/$1_(Portugu%C3%AAs)'},
'rfc': {'prefix': 'rfc', 'url': 'https://tools.ietf.org/html/rfc$1'},
'ro': {'language': 'română',
'local': '',
'prefix': 'ro',
'url': 'http://wiki.archlinux.ro/index.php/$1'},
'ru': {'language': 'русский',
'local': '',
'prefix': 'ru',
'url': 'https://wiki.archlinux.org/index.php/$1_(%D0%A0%D1%83%D1%81%D1%81%D0%BA%D0%B8%D0%B9)'},
'sk': {'language': 'slovenčina',
'local': '',
'prefix': 'sk',
'url': 'https://wiki.archlinux.org/index.php/$1_(Slovensk%C3%BD)'},
'sourceforge': {'prefix': 'sourceforge', 'url': 'http://sourceforge.net/$1'},
'sr': {'language': 'српски / srpski',
'local': '',
'prefix': 'sr',
'url': 'https://wiki.archlinux.org/index.php/$1_(%D0%A1%D1%80%D0%BF%D1%81%D0%BA%D0%B8)'},
'sv': {'language': 'svenska',
'local': '',
'prefix': 'sv',
'url': 'https://wiki.archlinux.org/index.php/$1_(Svenska)'},
'th': {'language': 'ไทย',
'local': '',
'prefix': 'th',
'url': 'https://wiki.archlinux.org/index.php/$1_(%E0%B9%84%E0%B8%97%E0%B8%A2)'},
'tr': {'language': 'Türkçe',
'local': '',
'prefix': 'tr',
'url': 'https://wiki.archlinux.org/index.php/$1_(T%C3%BCrk%C3%A7e)'},
'uk': {'language': 'українська',
'local': '',
'prefix': 'uk',
'url': 'https://wiki.archlinux.org/index.php/$1_(%D0%A3%D0%BA%D1%80%D0%B0%D1%97%D0%BD%D1%81%D1%8C%D0%BA%D0%B0)'},
'w': {'api': 'https://en.wikipedia.org/w/api.php',
'prefix': 'w',
'url': 'https://en.wikipedia.org/wiki/$1'},
'wikia': {'prefix': 'wikia', 'url': 'http://www.wikia.com/wiki/$1'},
'wikibooks': {'api': 'https://en.wikibooks.org/w/api.php',
'prefix': 'wikibooks',
'url': 'https://en.wikibooks.org/wiki/$1'},
'wikimedia': {'api': 'https://wikimediafoundation.org/w/api.php',
'prefix': 'wikimedia',
'url': 'https://wikimediafoundation.org/wiki/$1'},
'wikinews': {'api': 'https://en.wikinews.org/w/api.php',
'prefix': 'wikinews',
'url': 'https://en.wikinews.org/wiki/$1'},
'wikipedia': {'api': 'https://en.wikipedia.org/w/api.php',
'prefix': 'wikipedia',
'url': 'https://en.wikipedia.org/wiki/$1'},
'wikiquote': {'api': 'https://en.wikiquote.org/w/api.php',
'prefix': 'wikiquote',
'url': 'https://en.wikiquote.org/wiki/$1'},
'wikisource': {'api': 'https://wikisource.org/w/api.php',
'prefix': 'wikisource',
'url': 'https://wikisource.org/wiki/$1'},
'wikispecies': {'api': 'https://species.wikimedia.org/w/api.php',
'prefix': 'wikispecies',
'url': 'https://species.wikimedia.org/wiki/$1'},
'wikiversity': {'api': 'https://en.wikiversity.org/w/api.php',
'prefix': 'wikiversity',
'url': 'https://en.wikiversity.org/wiki/$1'},
'wikivoyage': {'api': 'https://en.wikivoyage.org/w/api.php',
'prefix': 'wikivoyage',
'url': 'https://en.wikivoyage.org/wiki/$1'},
'wikt': {'api': 'https://en.wiktionary.org/w/api.php',
'prefix': 'wikt',
'url': 'https://en.wiktionary.org/wiki/$1'},
'wiktionary': {'api': 'https://en.wiktionary.org/w/api.php',
'prefix': 'wiktionary',
'url': 'https://en.wiktionary.org/wiki/$1'},
'wmf': {'api': 'https://wikimediafoundation.org/w/api.php',
'prefix': 'wmf',
'url': 'https://wikimediafoundation.org/wiki/$1'},
'zh-hans': {'language': '中文(简体)\u200e',
'local': '',
'prefix': 'zh-hans',
'url': 'https://wiki.archlinux.org/index.php/$1_(%E7%AE%80%E4%BD%93%E4%B8%AD%E6%96%87)'},
'zh-hant': {'language': '中文(繁體)\u200e',
'local': '',
'prefix': 'zh-hant',
'url': 'https://wiki.archlinux.org/index.php/$1_(%E6%AD%A3%E9%AB%94%E4%B8%AD%E6%96%87)'}
},
}
def test_coverage(self, api):
    """Check that api.site tracks every siteinfo property the API exposes."""
    info = api.call_api(action="paraminfo", modules="query+siteinfo")
    exposed = set(info["modules"][0]["parameters"][0]["type"])
    assert exposed == api.site.properties
@pytest.fixture(scope="class")
def api(self, api):
    """Class-scoped override of the api fixture that pre-fetches every
    siteinfo property exercised by this test class."""
    wanted = list(self.props_data)
    api.site.fetch(wanted)
    return api
@pytest.mark.parametrize("propname, expected", props_data.items())
def test_props(self, api, propname, expected):
    """Compare one fetched siteinfo property against the stored snapshot,
    after stripping fields that change on every request/deployment."""
    actual = getattr(api.site, propname).copy()
    # FIXME: ugly hack...
    if isinstance(actual, dict) and "time" in actual:
        del actual["time"]
    if propname == "general":
        # Build metadata varies per deployment; ignore it.
        for volatile in ("git-branch", "git-hash"):
            if volatile in actual:
                del actual[volatile]
    assert actual == expected
def test_invalid(self, api):
    """Accessing an unknown attribute on api.site must raise AttributeError."""
    with pytest.raises(AttributeError):
        getattr(api.site, "invalid_property")
def test_interlanguagemap(self, api):
    """The interlanguage map should contain exactly the known language tags
    (both externally-hosted wikis and internal ArchWiki languages)."""
    external_tags = {"de", "fa", "fi", "fr", "ja", "ro", "sv", "tr"}
    internal_tags = {"ar", "bg", "cs", "da", "el", "en", "es", "he", "hr",
                     "hu", "id", "it", "ko", "lt", "nl", "pl", "pt", "ru",
                     "sk", "sr", "th", "uk", "zh-hans", "zh-hant"}
    assert set(api.site.interlanguagemap) == external_tags | internal_tags
|
orekyuu/intellij-community | refs/heads/master | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py | 321 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from . import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
    """Concrete Grammar subclass produced by ParserGenerator.make_grammar()."""
class ParserGenerator(object):
def __init__(self, filename, stream=None):
    """Parse the grammar in *filename* (or *stream*) and compute FIRST sets.

    If *stream* is None the file is opened here and closed again as soon
    as parsing finishes; a caller-supplied stream is left open.
    """
    close_stream = None
    if stream is None:
        stream = open(filename)
        close_stream = stream.close
    self.filename = filename
    self.stream = stream
    # Tokenize the grammar file itself with Python's tokenizer.
    self.generator = tokenize.generate_tokens(stream.readline)
    self.gettoken() # Initialize lookahead
    self.dfas, self.startsymbol = self.parse()
    if close_stream is not None:
        close_stream()
    self.first = {} # map from symbol name to set of tokens
    self.addfirstsets()
def make_grammar(self):
    """Convert the parsed DFAs into a PgenGrammar object.

    Nonterminals are numbered from 256 upward (lower numbers are token
    codes), with the start symbol forced to the front of the list so it
    is numbered first.
    """
    c = PgenGrammar()
    names = self.dfas.keys()
    names.sort()
    names.remove(self.startsymbol)
    names.insert(0, self.startsymbol)
    for name in names:
        i = 256 + len(c.symbol2number)
        c.symbol2number[name] = i
        c.number2symbol[i] = name
    # Translate each DFA state's arcs into (ilabel, state-index) pairs.
    for name in names:
        dfa = self.dfas[name]
        states = []
        for state in dfa:
            arcs = []
            for label, next in state.arcs.iteritems():
                arcs.append((self.make_label(c, label), dfa.index(next)))
            if state.isfinal:
                # Accepting states are marked by a (0, self) arc.
                arcs.append((0, dfa.index(state)))
            states.append(arcs)
        c.states.append(states)
        c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
    c.start = c.symbol2number[self.startsymbol]
    return c
def make_first(self, c, name):
    """Return {ilabel: 1} for every token in FIRST(name), registering
    each label in grammar *c* via make_label()."""
    first = {}
    for label in self.first[name]:
        first[self.make_label(c, label)] = 1
    return first
def make_label(self, c, label):
    """Return the ilabel for *label*, registering it in grammar *c* if new.

    A label is one of: a nonterminal name, a named token type (NAME,
    NUMBER, STRING, ...), or a quoted keyword/operator string taken
    verbatim from the grammar file.
    """
    # XXX Maybe this should be a method on a subclass of converter?
    ilabel = len(c.labels)
    if label[0].isalpha():
        # Either a symbol name or a named token
        if label in c.symbol2number:
            # A symbol name (a non-terminal)
            if label in c.symbol2label:
                return c.symbol2label[label]
            else:
                c.labels.append((c.symbol2number[label], None))
                c.symbol2label[label] = ilabel
                return ilabel
        else:
            # A named token (NAME, NUMBER, STRING)
            itoken = getattr(token, label, None)
            assert isinstance(itoken, int), label
            assert itoken in token.tok_name, label
            if itoken in c.tokens:
                return c.tokens[itoken]
            else:
                c.labels.append((itoken, None))
                c.tokens[itoken] = ilabel
                return ilabel
    else:
        # Either a keyword or an operator
        assert label[0] in ('"', "'"), label
        # NOTE: eval() just unquotes the grammar-file string literal;
        # the grammar file is trusted input shipped with the package.
        value = eval(label)
        if value[0].isalpha():
            # A keyword
            if value in c.keywords:
                return c.keywords[value]
            else:
                c.labels.append((token.NAME, value))
                c.keywords[value] = ilabel
                return ilabel
        else:
            # An operator (any non-numeric token)
            itoken = grammar.opmap[value] # Fails if unknown token
            if itoken in c.tokens:
                return c.tokens[itoken]
            else:
                c.labels.append((itoken, None))
                c.tokens[itoken] = ilabel
                return ilabel
def addfirstsets(self):
    """Compute the FIRST set of every rule, in sorted name order."""
    for name in sorted(self.dfas.keys()):
        if name in self.first:
            continue
        self.calcfirst(name)
def calcfirst(self, name):
    """Compute self.first[name], recursing into referenced rules.

    Raises ValueError on (left-)recursive rules and on alternatives
    whose FIRST sets overlap (the grammar must stay unambiguous here).
    """
    dfa = self.dfas[name]
    self.first[name] = None # dummy to detect left recursion
    state = dfa[0]
    totalset = {}
    overlapcheck = {}
    for label, next in state.arcs.iteritems():
        if label in self.dfas:
            # Nonterminal: fold in its FIRST set, computing on demand.
            if label in self.first:
                fset = self.first[label]
                if fset is None:
                    raise ValueError("recursion for rule %r" % name)
            else:
                self.calcfirst(label)
                fset = self.first[label]
            totalset.update(fset)
            overlapcheck[label] = fset
        else:
            # Terminal: contributes only itself.
            totalset[label] = 1
            overlapcheck[label] = {label: 1}
    # A token reachable via two different arcs makes the rule ambiguous.
    inverse = {}
    for label, itsfirst in overlapcheck.iteritems():
        for symbol in itsfirst:
            if symbol in inverse:
                raise ValueError("rule %s is ambiguous; %s is in the"
                                 " first sets of %s as well as %s" %
                                 (name, symbol, label, inverse[symbol]))
            inverse[symbol] = label
    self.first[name] = totalset
def parse(self):
    """Parse the whole grammar file; return ({rule name: DFA}, startsymbol)."""
    dfas = {}
    startsymbol = None
    # MSTART: (NEWLINE | RULE)* ENDMARKER
    while self.type != token.ENDMARKER:
        while self.type == token.NEWLINE:
            self.gettoken()
        # RULE: NAME ':' RHS NEWLINE
        name = self.expect(token.NAME)
        self.expect(token.OP, ":")
        a, z = self.parse_rhs()
        self.expect(token.NEWLINE)
        #self.dump_nfa(name, a, z)
        dfa = self.make_dfa(a, z)
        #self.dump_dfa(name, dfa)
        oldlen = len(dfa)
        self.simplify_dfa(dfa)
        newlen = len(dfa)
        dfas[name] = dfa
        #print name, oldlen, newlen
        # The first rule in the file becomes the grammar's start symbol.
        if startsymbol is None:
            startsymbol = name
    return dfas, startsymbol
def make_dfa(self, start, finish):
    """Subset-construct a DFA (list of DFAState, start state first)
    from the NFA delimited by *start* and *finish*."""
    # To turn an NFA into a DFA, we define the states of the DFA
    # to correspond to *sets* of states of the NFA. Then do some
    # state reduction. Let's represent sets as dicts with 1 for
    # values.
    assert isinstance(start, NFAState)
    assert isinstance(finish, NFAState)
    def closure(state):
        # Epsilon-closure of a single NFA state, as a {state: 1} dict.
        base = {}
        addclosure(state, base)
        return base
    def addclosure(state, base):
        assert isinstance(state, NFAState)
        if state in base:
            return
        base[state] = 1
        # Follow only epsilon (label None) arcs.
        for label, next in state.arcs:
            if label is None:
                addclosure(next, base)
    states = [DFAState(closure(start), finish)]
    for state in states: # NB states grows while we're iterating
        # Group the NFA states reachable from this subset by arc label.
        arcs = {}
        for nfastate in state.nfaset:
            for label, next in nfastate.arcs:
                if label is not None:
                    addclosure(next, arcs.setdefault(label, {}))
        # Reuse an existing DFA state for an identical NFA set, else add one.
        for label, nfaset in arcs.iteritems():
            for st in states:
                if st.nfaset == nfaset:
                    break
            else:
                st = DFAState(nfaset, finish)
                states.append(st)
            state.addarc(st, label)
    return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
    """Debugging aid: print the NFA for rule *name* (Python 2 print
    statements; normally only called from commented-out code in parse())."""
    print "Dump of NFA for", name
    todo = [start]
    for i, state in enumerate(todo):
        print "  State", i, state is finish and "(final)" or ""
        for label, next in state.arcs:
            if next in todo:
                j = todo.index(next)
            else:
                # First visit: assign the next free index.
                j = len(todo)
                todo.append(next)
            if label is None:
                print "    -> %d" % j
            else:
                print "    %s -> %d" % (label, j)
def dump_dfa(self, name, dfa):
    """Debugging aid: print the DFA for rule *name* (Python 2 print
    statements; normally only called from commented-out code in parse())."""
    print "Dump of DFA for", name
    for i, state in enumerate(dfa):
        print "  State", i, state.isfinal and "(final)" or ""
        for label, next in state.arcs.iteritems():
            print "    %s -> %d" % (label, dfa.index(next))
def simplify_dfa(self, dfa):
    """Merge equivalent DFA states in place until a fixed point is reached."""
    # This is not theoretically optimal, but works well enough.
    # Algorithm: repeatedly look for two states that have the same
    # set of arcs (same labels pointing to the same nodes) and
    # unify them, until things stop changing.
    # dfa is a list of DFAState instances
    changes = True
    while changes:
        changes = False
        for i, state_i in enumerate(dfa):
            for j in range(i+1, len(dfa)):
                state_j = dfa[j]
                if state_i == state_j:
                    #print " unify", i, j
                    # Drop the duplicate and redirect all arcs to it.
                    del dfa[j]
                    for state in dfa:
                        state.unifystate(state_j, state_i)
                    changes = True
                    # dfa was mutated; restart the inner scan.
                    break
def parse_rhs(self):
    """RHS: ALT ('|' ALT)*

    Returns (start, end) NFA states.  A single alternative is returned
    as-is; multiple alternatives are joined through fresh start/end
    states with epsilon arcs.
    """
    a, z = self.parse_alt()
    if self.value != "|":
        return a, z
    aa = NFAState()
    zz = NFAState()
    aa.addarc(a)
    z.addarc(zz)
    while self.value == "|":
        self.gettoken()
        a, z = self.parse_alt()
        aa.addarc(a)
        z.addarc(zz)
    return aa, zz
def parse_alt(self):
    """ALT: ITEM+ -- chain the items' NFAs end to end; return (start, end)."""
    a, b = self.parse_item()
    # An item can start with '(', '[', a NAME, or a STRING.
    while (self.value in ("(", "[") or
           self.type in (token.NAME, token.STRING)):
        c, d = self.parse_item()
        b.addarc(c)
        b = d
    return a, b
def parse_item(self):
    """ITEM: '[' RHS ']' | ATOM ['+' | '*'] -- return (start, end) NFA states."""
    if self.value == "[":
        self.gettoken()
        a, z = self.parse_rhs()
        self.expect(token.OP, "]")
        # Epsilon arc start->end makes the bracketed part optional.
        a.addarc(z)
        return a, z
    else:
        a, z = self.parse_atom()
        value = self.value
        if value not in ("+", "*"):
            return a, z
        self.gettoken()
        # Loop back for repetition.
        z.addarc(a)
        if value == "+":
            # One or more: must pass through the atom at least once.
            return a, z
        else:
            # Zero or more: the atom may be skipped entirely.
            return a, a
def parse_atom(self):
    """ATOM: '(' RHS ')' | NAME | STRING -- return (start, end) NFA states."""
    if self.value == "(":
        self.gettoken()
        a, z = self.parse_rhs()
        self.expect(token.OP, ")")
        return a, z
    elif self.type in (token.NAME, token.STRING):
        # Single symbol: one arc labeled with the symbol's text.
        a = NFAState()
        z = NFAState()
        a.addarc(z, self.value)
        self.gettoken()
        return a, z
    else:
        self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                         self.type, self.value)
def expect(self, type, value=None):
    """Consume and return the current token's value, requiring its type
    (and, if given, its exact value) to match; error out otherwise."""
    wrong_type = self.type != type
    wrong_value = value is not None and self.value != value
    if wrong_type or wrong_value:
        self.raise_error("expected %s/%s, got %s/%s",
                         type, value, self.type, self.value)
    got = self.value
    self.gettoken()
    return got
def gettoken(self):
    """Advance the lookahead to the next significant token, skipping
    COMMENT and NL (non-logical newline) tokens."""
    tup = self.generator.next()
    while tup[0] in (tokenize.COMMENT, tokenize.NL):
        tup = self.generator.next()
    self.type, self.value, self.begin, self.end, self.line = tup
    #print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg, *args):
    """Raise SyntaxError at the current token location, with msg % args.

    If the %-formatting itself fails, fall back to space-joining the
    message and its arguments so the error is never masked.
    """
    if args:
        try:
            msg = msg % args
        except:
            msg = " ".join([msg] + map(str, args))
    raise SyntaxError(msg, (self.filename, self.end[0],
                            self.end[1], self.line))
class NFAState(object):
    """A state in a nondeterministic finite automaton.

    Holds only its outgoing transitions; a label of None denotes an
    epsilon (free) transition.
    """

    def __init__(self):
        # Outgoing transitions as (label, NFAState) pairs.
        self.arcs = [] # list of (label, NFAState) pairs

    def addarc(self, next, label=None):
        """Append a transition to *next* under *label* (None = epsilon)."""
        assert isinstance(next, NFAState)
        assert label is None or isinstance(label, str)
        self.arcs.append((label, next))
class DFAState(object):
    """A DFA state: a *set* of NFA states plus outgoing labeled arcs."""
    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        # Python 2 iterator protocol (.next); verifies the set holds NFAStates.
        assert isinstance(iter(nfaset).next(), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        # Accepting iff the NFA's final state is in this subset.
        self.isfinal = final in nfaset
        self.arcs = {} # map from label to DFAState
    def addarc(self, next, label):
        """Add the unique transition for *label* to DFA state *next*."""
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next
    def unifystate(self, old, new):
        """Redirect any arc pointing at *old* to *new* (used by simplify_dfa)."""
        for label, next in self.arcs.iteritems():
            if next is old:
                self.arcs[label] = new
    def __eq__(self, other):
        """State equivalence for simplify_dfa: same finality, same arcs."""
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.iteritems():
            if next is not other.arcs.get(label):
                return False
        return True
    __hash__ = None # For Py3 compatibility.
def generate_grammar(filename="Grammar.txt"):
    """Parse *filename* and return the corresponding PgenGrammar."""
    return ParserGenerator(filename).make_grammar()
|
pnichols104/python-koans | refs/heads/master | python2/koans/about_scope.py | 100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import jims
import joes
counter = 0 # Global -- mutated by AboutScope's ``global counter`` tests below
class AboutScope(Koan):
    """Koan exercises on Python scoping: module scope, class-nested names,
    pseudo-constants, and the ``global`` keyword.

    The ``__`` / ``____`` / ``_____`` blanks come from runner.koan and are
    intentionally left for the student to fill in -- do not "fix" them.
    """
    #
    # NOTE:
    # Look in jims.py and joes.py to see definitions of Dog used
    # for this set of tests
    #
    def test_dog_is_not_available_in_the_current_scope(self):
        try:
            fido = Dog()
        except Exception as ex:
            # Python 2 exceptions support indexing; ex[0] is the message.
            self.assertMatch(__, ex[0])
    def test_you_can_reference_nested_classes_using_the_scope_operator(self):
        fido = jims.Dog()
        # name 'jims' module name is taken from jims.py filename
        rover = joes.Dog()
        self.assertEqual(__, fido.identify())
        self.assertEqual(__, rover.identify())
        self.assertEqual(____, type(fido) == type(rover))
        self.assertEqual(____, jims.Dog == joes.Dog)
    # ------------------------------------------------------------------
    class str(object):
        # Deliberately shadows the builtin ``str`` inside this class body.
        pass
    def test_bare_bones_class_names_do_not_assume_the_current_scope(self):
        self.assertEqual(____, AboutScope.str == str)
    def test_nested_string_is_not_the_same_as_the_system_string(self):
        self.assertEqual(____, self.str == type("HI"))
    def test_str_without_self_prefix_stays_in_the_global_scope(self):
        self.assertEqual(____, str == type("HI"))
    # ------------------------------------------------------------------
    PI = 3.1416
    def test_constants_are_defined_with_an_initial_uppercase_letter(self):
        self.assertAlmostEqual(_____, self.PI)
        # Note, floating point numbers in python are not precise.
        # assertAlmostEqual will check that it is 'close enough'
    def test_constants_are_assumed_by_convention_only(self):
        self.PI = "rhubarb"
        self.assertEqual(_____, self.PI)
        # There aren't any real constants in python. Its up to the developer
        # to keep to the convention and not modify them.
    # ------------------------------------------------------------------
    def increment_using_local_counter(self, counter):
        # Rebinds only the local parameter; the module-level counter is untouched.
        counter = counter + 1
    def increment_using_global_counter(self):
        # ``global`` makes the assignment target the module-level counter.
        global counter
        counter = counter + 1
    def test_incrementing_with_local_counter(self):
        global counter
        start = counter
        self.increment_using_local_counter(start)
        self.assertEqual(____, counter == start + 1)
    def test_incrementing_with_global_counter(self):
        global counter
        start = counter
        self.increment_using_global_counter()
        self.assertEqual(____, counter == start + 1)
    # ------------------------------------------------------------------
    global deadly_bingo
    deadly_bingo = [4, 8, 15, 16, 23, 42]
    def test_global_attributes_can_be_created_in_the_middle_of_a_class(self):
        self.assertEqual(__, deadly_bingo[5])
|
cfbraun/django-admintools-bootstrap | refs/heads/master | admintools_bootstrap/views.py | 6027 | # Create your views here.
|
jjiko/PokemonGo-Map | refs/heads/develop | pogom/__init__.py | 27 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Package-wide configuration dictionary.  Entries set to ``None`` are
# presumably filled in by the application at startup — TODO confirm
# against the callers that import ``pogom.config``.
config = {
    'LOCALE': 'en',
    'LOCALES_DIR': 'static/locales',
    'ROOT_PATH': None,
    'ORIGINAL_LATITUDE': None,
    'ORIGINAL_LONGITUDE': None,
    'GMAPS_KEY': None,
    'REQ_SLEEP': 1,          # pacing between requests (seconds, per names)
    'REQ_HEAVY_SLEEP': 30,   # longer back-off value
    'REQ_MAX_FAILED': 5      # failure threshold before the heavy sleep, presumably
}
|
rrwen/search_google | refs/heads/master | search_google/tests/test_api_results.py | 1 | # -*- coding: utf-8 -*-
from os import listdir, remove
from os.path import isdir, isfile
from pkg_resources import resource_filename, Requirement
from shutil import rmtree
from tempfile import TemporaryFile, TemporaryDirectory
from unittest import TestCase
import json
import search_google.api
class resultsTest(TestCase):
    """Integration tests for ``search_google.api.results``.

    NOTE(review): these tests call the live Google Custom Search API
    using credentials read from the packaged ``config.json`` — they need
    network access and valid keys to pass.
    """

    def setUp(self):
        # Load default API credentials shipped with the package.
        file_path = resource_filename(Requirement.parse('search_google'), 'search_google/config.json')
        with open(file_path, 'r') as in_file:
            defaults = json.load(in_file)
        buildargs = {
            'serviceName': 'customsearch',
            'version': 'v1',
            'developerKey': defaults['build_developerKey']
        }
        # One PNG image result for the query "google".
        cseargs = {
            'q': 'google',
            'num': 1,
            'fileType': 'png',
            'cx': defaults['cx']
        }
        self.results = search_google.api.results(buildargs, cseargs)
        # Reserve temp file/dir *names* for the tests to (re)create; the
        # temporary objects themselves are discarded immediately.
        tempfile = TemporaryFile()
        self.tempfile = str(tempfile.name)
        tempfile.close()
        self.tempdir = str(TemporaryDirectory().name)

    def test_preview(self):
        # preview() is expected to return None.
        results = self.results
        expected = None
        self.assertTrue(expected == results.preview())

    def test_get_values(self):
        results = self.results
        values = results.get_values('items', 'link')
        self.assertTrue(isinstance(values, list))

    def test_links(self):
        # The ``links`` attribute should mirror get_values('items', 'link').
        results = self.results
        expected = results.get_values('items', 'link')
        self.assertTrue(expected == results.links)

    def test_save_links(self):
        # setUp requested num=1, so exactly one link line should be saved.
        results = self.results
        open(self.tempfile, 'w').close()
        results.save_links(self.tempfile)
        with open(self.tempfile) as f:
            nlinks = len(f.readlines())
        self.assertTrue(nlinks == 1)

    def test_save_metadata(self):
        # Saved metadata must round-trip through JSON unchanged.
        results = self.results
        open(self.tempfile, 'w').close()
        results.save_metadata(self.tempfile)
        with open(self.tempfile, 'r') as f:
            metadata = json.load(f)
        self.assertTrue(metadata == results.metadata)

    def test_download_links(self):
        # num=1 result -> one downloaded file.
        results = self.results
        results.download_links(self.tempdir)
        nfiles = len(listdir(self.tempdir))
        rmtree(self.tempdir)
        self.assertTrue(nfiles == 1)

    def tearDown(self):
        # Best-effort cleanup of whatever each individual test created.
        if isfile(self.tempfile):
            remove(self.tempfile)
        if isdir(self.tempdir):
            rmtree(self.tempdir)
|
odoomrp/odoomrp-wip | refs/heads/8.0 | quality_control_version/models/qc_test.py | 11 | # -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import api, fields, models
class QcTest(models.Model):
    """Add simple revision control to quality-control tests.

    Creating a new version archives the current record as an inactive
    copy (linked back through ``parent_test``), bumps ``version`` and
    renames the test to ``<unrevisioned_name>-NN``.
    """
    _inherit = 'qc.test'

    version = fields.Integer(
        string='Version Number', default=0, readonly=True, copy=False)
    deactivate_date = fields.Date(string='Deactivated date', readonly=True)
    parent_test = fields.Many2one(
        comodel_name='qc.test', string='Parent Test', copy=False)
    old_versions = fields.One2many(
        comodel_name='qc.test', string='Old Versions',
        inverse_name='parent_test', context={'active_test': False})
    unrevisioned_name = fields.Char(
        string='Test Name', copy=True, readonly=True)

    @api.model
    def create(self, values):
        """Default ``unrevisioned_name`` to the initial ``name``.

        NOTE(review): assumes ``name`` is always present in ``values``
        (it is presumably a required field) — confirm, else this raises
        KeyError.
        """
        if 'unrevisioned_name' not in values:
            values['unrevisioned_name'] = values['name']
        return super(QcTest, self).create(values)

    @api.multi
    def write(self, values):
        """Keep ``unrevisioned_name`` in sync when a test is renamed.

        Bug fix: the original returned ``super(QcTest, test).write(...)``
        using the loop variable, so only the first record of a
        multi-record write was ever written, and an empty recordset
        raised NameError.  All records are now written; the last super()
        result is returned (True for an empty recordset).
        """
        result = True
        for test in self:
            # Per-record copy: records may differ in ``version``, and we
            # must not leak one record's adjustment into the next.
            vals = dict(values)
            if 'name' in vals and not vals.get('version', test.version):
                vals['unrevisioned_name'] = vals['name']
            result = super(QcTest, test).write(vals)
        return result

    def _copy_test(self):
        """Archive the current state as an inactive historical copy."""
        new_test = self.copy({
            'version': self.version,
            'active': False,
            'deactivate_date': fields.Date.today(),
            'parent_test': self.id,
        })
        return new_test

    @api.multi
    def button_new_version(self):
        """Create a new revision of this (single) test.

        Archives the current state, then bumps the version number and
        renames the active record to ``<unrevisioned_name>-NN``.
        """
        self.ensure_one()
        self._copy_test()
        revno = self.version
        self.write({
            'version': revno + 1,
            'name': '%s-%02d' % (self.unrevisioned_name, revno + 1)
        })

    @api.multi
    def action_open_older_versions(self):
        """Open the standard qc.test action filtered to this test's
        archived versions (inactive records included)."""
        result = self.env.ref('quality_control.action_qc_test').read()[0]
        result['domain'] = [('id', 'in', self.old_versions.ids)]
        result['context'] = {'active_test': False}
        return result
|
Rouslan/NTracer | refs/heads/master | support/arg_helper.py | 1 | """Generate a header file containing a data-type for all interned strings used
in a given C++ file"""
import sys
import re
import os.path
from distutils import log
from distutils.dep_util import newer
# Matches P(name) uses that are not part of a longer identifier; the
# parenthesised group captures the name.
RE_P = re.compile(r'(?<![a-zA-Z0-9_])P\(([^\)]+)\)')
# A whole captured name must be a valid C identifier fragment.
RE_ACCEPTABLE = re.compile('[a-zA-Z0-9_]+')


def generate(input_file, output_file):
    """Scan *input_file* for ``P(name)`` occurrences and write a C++
    header to *output_file* declaring interned-string storage plus an
    enum index for every distinct name found.

    Raises ValueError if any captured name is not a valid identifier.
    """
    found = set()
    with open(input_file) as src:
        for text_line in src:
            found.update(RE_P.findall(text_line))
    found = sorted(found)

    for name in found:
        if RE_ACCEPTABLE.fullmatch(name) is None:
            raise ValueError('"{}" contains a character that can\'t be part of an indentifier'.format(name))

    # Enum entries parallel the raw-string array; anchor the first at 0.
    entries = ['_interned_string_' + name for name in found]
    if entries:
        entries[0] += ' = 0'

    with open(output_file, 'w') as dest:
        dest.write('''
const char *_interned_raw_strings[] = {{
{raw}}};
enum {{
{enum}}};
struct interned_strings {{
PyObject *values[{size}];
bool init() {{
for(int i=0; i<{size}; ++i) {{
values[i] = PyUnicode_InternFromString(_interned_raw_strings[i]);
if(!values[i]) {{
for(; i>=0; --i) Py_DECREF(values[i]);
return false;
}}
}}
return true;
}}
~interned_strings() {{
for(int i=0; i<{size}; ++i) Py_DECREF(values[i]);
}}
}};
#define P(X) idata->istrings.values[_interned_string_##X]
'''.format(
            size=len(found),
            raw=',\n'.join('"' + name + '"' for name in found),
            enum=',\n'.join(entries)))
def create_strings_hpp(base_dir, build_temp, ifile, force, dry_run):
    """Regenerate ``<ifile>_strings.hpp`` under *build_temp* from the
    source file ``<base_dir>/src/<ifile>`` when it is out of date.

    Regeneration happens when *force* is set, or when either the source
    file or this script is newer than the existing output.  With
    *dry_run* the action is logged but not performed.
    """
    out_path = os.path.join(build_temp, os.path.splitext(ifile)[0] + '_strings.hpp')
    in_path = os.path.join(base_dir, 'src', ifile)
    stale = force or newer(in_path, out_path) or newer(__file__, out_path)
    if stale:
        log.info('creating {0} from {1}'.format(out_path, in_path))
        if not dry_run:
            generate(in_path, out_path)
if __name__ == '__main__':
    # Command-line entry point: arg_helper.py <input file> <output header>
    if len(sys.argv) != 3:
        print('usage: arg_helper.py input output',file=sys.stderr)
        exit(1)
    generate(*sys.argv[1:])
|
rogerwang/chromium | refs/heads/node | third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_x86_asm.py | 33 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
    """Emits x86 assembly dispatch stubs for every GL API entry point.

    The output is one GL_STUB (and possibly GL_STUB_ALIAS / HIDDEN)
    macro invocation per function, preceded by a large preamble of
    C-preprocessor macro definitions that select the dispatch-table
    lookup strategy (TLS, pthreads, generic threads, or single-threaded).
    NOTE: Python 2 code (print statements).
    """

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)

        self.name = "gl_x86_asm.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004, 2005""", "BRIAN PAUL, IBM")
        return

    def get_stack_size(self, f):
        # Total bytes of stack used by f's parameters (stdcall name
        # decoration needs this); padding parameters do not count.
        size = 0
        for p in f.parameterIterator():
            if p.is_padding:
                continue

            size += p.get_stack_size()

        return size

    def printRealHeader(self):
        # Emit the fixed assembly preamble: GL_PREFIX naming, the
        # GL_STUB/GL_STUB_ALIAS macro family for each threading model,
        # and the _x86_get_dispatch helper.
        print '#include "x86/assyntax.h"'
        print '#include "glapi/glapioffsets.h"'
        print ''
        print '#if defined(STDCALL_API)'
        print '# if defined(USE_MGL_NAMESPACE)'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n2))'
        print '# else'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n2))'
        print '# endif'
        print '#else'
        print '# if defined(USE_MGL_NAMESPACE)'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n))'
        print '# define _glapi_Dispatch _mglapi_Dispatch'
        print '# else'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n))'
        print '# endif'
        print '#endif'
        print ''
        print '#define GL_OFFSET(x) CODEPTR(REGOFF(4 * x, EAX))'
        print ''
        print '#if defined(GNU_ASSEMBLER) && !defined(__DJGPP__) && !defined(__MINGW32__) && !defined(__APPLE__)'
        print '#define GLOBL_FN(x) GLOBL x ; .type x, function'
        print '#else'
        print '#define GLOBL_FN(x) GLOBL x'
        print '#endif'
        print ''
        print '#if defined(PTHREADS) || defined(WIN32_THREADS) || defined(BEOS_THREADS)'
        print '# define THREADS'
        print '#endif'
        print ''
        print '#ifdef GLX_USE_TLS'
        print ''
        print '#ifdef GLX_X86_READONLY_TEXT'
        print '# define CTX_INSNS MOV_L(GS:(EAX), EAX)'
        print '#else'
        print '# define CTX_INSNS NOP /* Pad for init_glapi_relocs() */'
        print '#endif'
        print ''
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tCALL(_x86_get_dispatch) ;\t\t\t\\'
        print '\tCTX_INSNS ; \\'
        print '\tJMP(GL_OFFSET(off))'
        print ''
        print '#elif defined(PTHREADS)'
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
        print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
        print '\tJE(1f) ;\t\t\t\t\t\\'
        print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
        print '1:\tCALL(_x86_get_dispatch) ;\t\t\t\\'
        print '\tJMP(GL_OFFSET(off))'
        print '#elif defined(THREADS)'
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
        print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
        print '\tJE(1f) ;\t\t\t\t\t\\'
        print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
        print '1:\tCALL(_glapi_get_dispatch) ;\t\t\t\\'
        print '\tJMP(GL_OFFSET(off))'
        print '#else /* Non-threaded version. */'
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
        print '\tJMP(GL_OFFSET(off))'
        print '#endif'
        print ''
        print '#ifdef HAVE_ALIAS'
        print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
        print '\t.globl\tGL_PREFIX(fn, fn_alt) ;\t\t\t\\'
        print '\t.set\tGL_PREFIX(fn, fn_alt), GL_PREFIX(alias, alias_alt)'
        print '#else'
        print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
        print ' GL_STUB(fn, off, fn_alt)'
        print '#endif'
        print ''
        print 'SEG_TEXT'
        print ''
        print '#ifdef GLX_USE_TLS'
        print ''
        print '\tGLOBL\tGLNAME(_x86_get_dispatch)'
        print '\tHIDDEN(GLNAME(_x86_get_dispatch))'
        print 'ALIGNTEXT16'
        print 'GLNAME(_x86_get_dispatch):'
        print '\tcall 1f'
        print '1:\tpopl %eax'
        print '\taddl $_GLOBAL_OFFSET_TABLE_+[.-1b], %eax'
        print '\tmovl _glapi_tls_Dispatch@GOTNTPOFF(%eax), %eax'
        print '\tret'
        print ''
        print '#elif defined(PTHREADS)'
        print 'EXTERN GLNAME(_glapi_Dispatch)'
        print 'EXTERN GLNAME(_gl_DispatchTSD)'
        print 'EXTERN GLNAME(pthread_getspecific)'
        print ''
        print 'ALIGNTEXT16'
        print 'GLNAME(_x86_get_dispatch):'
        print '\tSUB_L(CONST(24), ESP)'
        print '\tPUSH_L(GLNAME(_gl_DispatchTSD))'
        print '\tCALL(GLNAME(pthread_getspecific))'
        print '\tADD_L(CONST(28), ESP)'
        print '\tRET'
        print '#elif defined(THREADS)'
        print 'EXTERN GLNAME(_glapi_get_dispatch)'
        print '#endif'
        print ''
        print '#if defined( GLX_USE_TLS ) && !defined( GLX_X86_READONLY_TEXT )'
        print '\t\t.section\twtext, "awx", @progbits'
        print '#endif /* defined( GLX_USE_TLS ) */'
        print ''
        print '\t\tALIGNTEXT16'
        print '\t\tGLOBL GLNAME(gl_dispatch_functions_start)'
        print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_start))'
        print 'GLNAME(gl_dispatch_functions_start):'
        print ''
        return

    def printRealFooter(self):
        # Close the dispatch section and, for TLS builds on Linux, emit
        # the ABI-tag note declaring the minimum kernel version.
        print ''
        print '\t\tGLOBL\tGLNAME(gl_dispatch_functions_end)'
        print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_end))'
        print '\t\tALIGNTEXT16'
        print 'GLNAME(gl_dispatch_functions_end):'
        print ''
        print '#if defined(GLX_USE_TLS) && defined(__linux__)'
        print ' .section ".note.ABI-tag", "a"'
        print ' .p2align 2'
        print ' .long 1f - 0f /* name length */'
        print ' .long 3f - 2f /* data length */'
        print ' .long 1 /* note length */'
        print '0: .asciz "GNU" /* vendor name */'
        print '1: .p2align 2'
        print '2: .long 0 /* note data: the ABI tag */'
        print ' .long 2,4,20 /* Minimum kernel version w/TLS */'
        print '3: .p2align 2 /* pad out section */'
        print '#endif /* GLX_USE_TLS */'
        print ''
        print '#if defined (__ELF__) && defined (__linux__)'
        print ' .section .note.GNU-stack,"",%progbits'
        print '#endif'
        return

    def printBody(self, api):
        # First pass: a GL_STUB per dispatch offset, HIDDEN when the
        # canonical name is not a static entry point.
        for f in api.functionIterateByOffset():
            name = f.dispatch_name()
            stack = self.get_stack_size(f)
            alt = "%s@%u" % (name, stack)

            print '\tGL_STUB(%s, _gloffset_%s, %s)' % (name, f.name, alt)

            if not f.is_static_entry_point(f.name):
                print '\tHIDDEN(GL_PREFIX(%s, %s))' % (name, alt)

        # Second pass: aliases for every additional static entry point,
        # guarded when the alias speaks a different GLX protocol.
        for f in api.functionIterateByOffset():
            name = f.dispatch_name()
            stack = self.get_stack_size(f)
            alt = "%s@%u" % (name, stack)

            for n in f.entry_points:
                if f.is_static_entry_point(n):
                    if n != f.name:
                        alt2 = "%s@%u" % (n, stack)
                        text = '\tGL_STUB_ALIAS(%s, _gloffset_%s, %s, %s, %s)' % (n, f.name, alt2, name, alt)

                        if f.has_different_protocol(n):
                            print '#ifndef GLX_INDIRECT_RENDERING'
                            print text
                            print '#endif'
                        else:
                            print text

        return
def show_usage():
    # Print usage and terminate with a non-zero exit status.
    # NOTE: Python 2 print statement.
    print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
    sys.exit(1)
if __name__ == '__main__':
    # Command line: -f selects the GL API XML file, -m the output mode
    # ("generic" is the only supported mode).
    file_name = "gl_API.xml"
    mode = "generic"

    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
    except Exception,e:
        # Bad options: report usage and exit (Python 2 except syntax).
        show_usage()

    for (arg,val) in args:
        if arg == '-m':
            mode = val
        elif arg == "-f":
            file_name = val

    if mode == "generic":
        printer = PrintGenericStubs()
    else:
        print "ERROR: Invalid mode \"%s\" specified." % mode
        show_usage()

    # Parse the API description and emit the assembly stubs to stdout.
    api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
    printer.Print(api)
|
junhuac/MQUIC | refs/heads/master | depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/apitools/apitools/base/py/transfer_test.py | 11 | """Tests for transfer.py."""
import string
import mock
import six
from six.moves import http_client
import unittest2
from apitools.base.py import base_api
from apitools.base.py import http_wrapper
from apitools.base.py import transfer
class TransferTest(unittest2.TestCase):
    """Tests for transfer.Download / transfer.Upload.

    Covers end-byte computation, range headers, chunked and non-chunked
    downloads (with mocked HTTP), and multipart upload encoding.
    """

    def assertRangeAndContentRangeCompatible(self, request, response):
        # The response's content-range must begin exactly where the
        # request's Range header asked.
        request_prefix = 'bytes='
        self.assertIn('range', request.headers)
        self.assertTrue(request.headers['range'].startswith(request_prefix))
        request_range = request.headers['range'][len(request_prefix):]

        response_prefix = 'bytes '
        self.assertIn('content-range', response.info)
        response_header = response.info['content-range']
        self.assertTrue(response_header.startswith(response_prefix))
        response_range = (
            response_header[len(response_prefix):].partition('/')[0])

        msg = ('Request range ({0}) not a prefix of '
               'response_range ({1})').format(
                   request_range, response_range)
        self.assertTrue(response_range.startswith(request_range), msg=msg)

    def testComputeEndByte(self):
        # The chunk size caps the end byte even when a larger end is given.
        total_size = 100
        chunksize = 10
        download = transfer.Download.FromStream(
            six.StringIO(), chunksize=chunksize, total_size=total_size)
        self.assertEqual(chunksize - 1,
                         download._Download__ComputeEndByte(0, end=50))

    def testComputeEndByteReturnNone(self):
        # No chunking and no known size/end -> nothing to compute.
        download = transfer.Download.FromStream(six.StringIO())
        self.assertIsNone(
            download._Download__ComputeEndByte(0, use_chunks=False))

    def testComputeEndByteNoChunks(self):
        # With chunking disabled the end byte is always the last byte.
        total_size = 100
        download = transfer.Download.FromStream(
            six.StringIO(), chunksize=10, total_size=total_size)
        for end in (None, 1000):
            self.assertEqual(
                total_size - 1,
                download._Download__ComputeEndByte(0, end=end,
                                                   use_chunks=False),
                msg='Failed on end={0}'.format(end))

    def testComputeEndByteNoTotal(self):
        # Unknown total size: the end byte is start + chunksize - 1.
        download = transfer.Download.FromStream(six.StringIO())
        default_chunksize = download.chunksize
        for chunksize in (100, default_chunksize):
            download.chunksize = chunksize
            for start in (0, 10):
                self.assertEqual(
                    download.chunksize + start - 1,
                    download._Download__ComputeEndByte(start),
                    msg='Failed on start={0}, chunksize={1}'.format(
                        start, chunksize))

    def testComputeEndByteSmallTotal(self):
        # A total smaller than the chunk size caps the end byte.
        total_size = 100
        download = transfer.Download.FromStream(six.StringIO(),
                                                total_size=total_size)
        for start in (0, 10):
            self.assertEqual(total_size - 1,
                             download._Download__ComputeEndByte(start),
                             msg='Failed on start={0}'.format(start))

    def testGetRange(self):
        # GetRange must issue exactly one request whose Range header
        # matches the mocked response's content-range.
        for (start_byte, end_byte) in [(0, 25), (5, 15), (0, 0), (25, 25)]:
            bytes_http = object()
            http = object()
            download_stream = six.StringIO()
            download = transfer.Download.FromStream(download_stream,
                                                    total_size=26,
                                                    auto_transfer=False)
            download.bytes_http = bytes_http
            base_url = 'https://part.one/'
            with mock.patch.object(http_wrapper, 'MakeRequest',
                                   autospec=True) as make_request:
                make_request.return_value = http_wrapper.Response(
                    info={
                        'content-range': 'bytes %d-%d/26' %
                                         (start_byte, end_byte),
                        'status': http_client.OK,
                    },
                    content=string.ascii_lowercase[start_byte:end_byte+1],
                    request_url=base_url,
                )
                request = http_wrapper.Request(url='https://part.one/')
                download.InitializeDownload(request, http=http)
                download.GetRange(start_byte, end_byte)
                self.assertEqual(1, make_request.call_count)
                received_request = make_request.call_args[0][1]
                self.assertEqual(base_url, received_request.url)
                self.assertRangeAndContentRangeCompatible(
                    received_request, make_request.return_value)

    def testNonChunkedDownload(self):
        # A download that fits in one chunk is fetched with one request.
        bytes_http = object()
        http = object()
        download_stream = six.StringIO()
        download = transfer.Download.FromStream(download_stream, total_size=52)
        download.bytes_http = bytes_http
        base_url = 'https://part.one/'
        with mock.patch.object(http_wrapper, 'MakeRequest',
                               autospec=True) as make_request:
            make_request.return_value = http_wrapper.Response(
                info={
                    'content-range': 'bytes 0-51/52',
                    'status': http_client.OK,
                },
                content=string.ascii_lowercase * 2,
                request_url=base_url,
            )
            request = http_wrapper.Request(url='https://part.one/')
            download.InitializeDownload(request, http=http)
            self.assertEqual(1, make_request.call_count)
            received_request = make_request.call_args[0][1]
            self.assertEqual(base_url, received_request.url)
            self.assertRangeAndContentRangeCompatible(
                received_request, make_request.return_value)
            download_stream.seek(0)
            self.assertEqual(string.ascii_lowercase * 2,
                             download_stream.getvalue())

    def testChunkedDownload(self):
        # Two chunks -> two requests; the second goes to the URL given in
        # the first response's content-location.
        bytes_http = object()
        http = object()
        download_stream = six.StringIO()
        download = transfer.Download.FromStream(
            download_stream, chunksize=26, total_size=52)
        download.bytes_http = bytes_http

        # Setting autospec on a mock with an iterable side_effect is
        # currently broken (http://bugs.python.org/issue17826), so
        # instead we write a little function.
        def _ReturnBytes(unused_http, http_request,
                         *unused_args, **unused_kwds):
            url = http_request.url
            if url == 'https://part.one/':
                return http_wrapper.Response(
                    info={
                        'content-location': 'https://part.two/',
                        'content-range': 'bytes 0-25/52',
                        'status': http_client.PARTIAL_CONTENT,
                    },
                    content=string.ascii_lowercase,
                    request_url='https://part.one/',
                )
            elif url == 'https://part.two/':
                return http_wrapper.Response(
                    info={
                        'content-range': 'bytes 26-51/52',
                        'status': http_client.OK,
                    },
                    content=string.ascii_uppercase,
                    request_url='https://part.two/',
                )
            else:
                self.fail('Unknown URL requested: %s' % url)

        with mock.patch.object(http_wrapper, 'MakeRequest',
                               autospec=True) as make_request:
            make_request.side_effect = _ReturnBytes
            request = http_wrapper.Request(url='https://part.one/')
            download.InitializeDownload(request, http=http)
            self.assertEqual(2, make_request.call_count)
            for call in make_request.call_args_list:
                self.assertRangeAndContentRangeCompatible(
                    call[0][1], _ReturnBytes(*call[0]))
            download_stream.seek(0)
            self.assertEqual(string.ascii_lowercase + string.ascii_uppercase,
                             download_stream.getvalue())

    def testFromEncoding(self):
        # Test a specific corner case in multipart encoding.
        # Python's mime module by default encodes lines that start with
        # "From " as ">From ", which we need to make sure we don't run afoul
        # of when sending content that isn't intended to be so encoded. This
        # test calls out that we get this right. We test for both the
        # multipart and non-multipart case.
        multipart_body = '{"body_field_one": 7}'
        upload_contents = 'line one\nFrom \nline two'
        upload_config = base_api.ApiUploadInfo(
            accept=['*/*'],
            max_size=None,
            resumable_multipart=True,
            resumable_path=u'/resumable/upload',
            simple_multipart=True,
            simple_path=u'/upload',
        )
        url_builder = base_api._UrlBuilder('http://www.uploads.com')

        # Test multipart: having a body argument in http_request forces
        # multipart here.
        upload = transfer.Upload.FromStream(
            six.StringIO(upload_contents),
            'text/plain',
            total_size=len(upload_contents))
        http_request = http_wrapper.Request(
            'http://www.uploads.com',
            headers={'content-type': 'text/plain'},
            body=multipart_body)
        upload.ConfigureRequest(upload_config, http_request, url_builder)
        self.assertEqual(url_builder.query_params['uploadType'], 'multipart')
        rewritten_upload_contents = '\n'.join(
            http_request.body.split('--')[2].splitlines()[1:])
        self.assertTrue(rewritten_upload_contents.endswith(upload_contents))

        # Test non-multipart (aka media): no body argument means this is
        # sent as media.
        upload = transfer.Upload.FromStream(
            six.StringIO(upload_contents),
            'text/plain',
            total_size=len(upload_contents))
        http_request = http_wrapper.Request(
            'http://www.uploads.com',
            headers={'content-type': 'text/plain'})
        upload.ConfigureRequest(upload_config, http_request, url_builder)
        self.assertEqual(url_builder.query_params['uploadType'], 'media')
        rewritten_upload_contents = http_request.body
        self.assertTrue(rewritten_upload_contents.endswith(upload_contents))
|
shlomihod/scenario | refs/heads/master | scenario/__init__.py | 1 | __all__ = ['run_scenario', 'parse_scenario_json', 'play_scenario',
'get_timeout_feedback_json', 'get_overflow_feedback_json']
from scenario.runner import run_scenario
from scenario.parser import parse_scenario_json
from scenario.player import play_scenario
from scenario.api import get_timeout_feedback_json, get_overflow_feedback_json
|
aaronbassett/djangocon-swampdragon | refs/heads/master | talk/todo/routers.py | 1 | # -*- coding: utf-8 -*-
# Swamp Dragon
from swampdragon import route_handler
from swampdragon.route_handler import ModelRouter
from .models import TodoList, TodoItem
from .serializers import TodoListSerializer, TodoItemSerializer
class TodoListRouter(ModelRouter):
    """SwampDragon router exposing every TodoList record."""
    route_name = 'todo-list'
    serializer_class = TodoListSerializer
    model = TodoList

    def get_object(self, **kwargs):
        # Single-object lookup by primary key.
        pk = kwargs['id']
        return self.model.objects.get(pk=pk)

    def get_query_set(self, **kwargs):
        # No filtering: every list is visible to subscribers.
        queryset = self.model.objects.all()
        return queryset
class TodoItemRouter(ModelRouter):
    """SwampDragon router exposing the items of one TodoList."""
    route_name = 'todo-item'
    serializer_class = TodoItemSerializer
    model = TodoItem

    def get_object(self, **kwargs):
        # Single-object lookup by primary key.
        pk = kwargs['id']
        return self.model.objects.get(pk=pk)

    def get_query_set(self, **kwargs):
        # Only items belonging to the requested list.
        parent_id = kwargs['list_id']
        return self.model.objects.filter(todo_list__id=parent_id)
# Make both routers reachable through SwampDragon's route table.
route_handler.register(TodoListRouter)
route_handler.register(TodoItemRouter)
|
vleo/vleo-notebook | refs/heads/master | protobuf/trunk/protobuf/python/stubout.py | 670 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
class StubOutForTesting:
    """Sample Usage:

    You want os.path.exists() to always return true during testing.

      stubs = StubOutForTesting()
      stubs.Set(os.path, 'exists', lambda x: 1)
        ...
      stubs.UnsetAll()

    The above changes os.path.exists into a lambda that returns 1.  Once
    the ... part of the code finishes, the UnsetAll() looks up the old value
    of os.path.exists and restores it.
    """

    def __init__(self):
        # (parent, old_child, child_name) tuples recorded by Set().
        self.cache = []
        # (orig_obj, attr_name, orig_attr) tuples recorded by SmartSet().
        self.stubs = []

    def __del__(self):
        # Best-effort restore in case the test forgot to unset explicitly.
        self.SmartUnsetAll()
        self.UnsetAll()

    def SmartSet(self, obj, attr_name, new_attr):
        """Replace obj.attr_name with new_attr. This method is smart and works
        at the module, class, and instance level while preserving proper
        inheritance. It will not stub out C types however unless that has been
        explicitly allowed by the type.

        This method supports the case where attr_name is a staticmethod or a
        classmethod of obj.

        Notes:
          - If obj is an instance, then it is its class that will actually be
            stubbed. Note that the method Set() does not do that: if obj is
            an instance, it (and not its class) will be stubbed.
          - The stubbing is using the builtin getattr and setattr. So, the
            __get__ and __set__ will be called when stubbing (TODO: A better
            idea would probably be to manipulate obj.__dict__ instead of
            getattr() and setattr()).

        Raises AttributeError if the attribute cannot be found.
        """
        # Bug fix: 'inspect' was used throughout this method but never
        # imported anywhere in this module, so every SmartSet() call
        # raised NameError.  Imported locally to keep the fix
        # self-contained.
        import inspect

        # Bug fix: dict.has_key() is Python-2 only; 'in' is equivalent
        # and works on both Python 2 and 3.
        if (inspect.ismodule(obj) or
            (not inspect.isclass(obj) and attr_name in obj.__dict__)):
            # A module, or an instance carrying the attribute itself:
            # stub directly on obj.
            orig_obj = obj
            orig_attr = getattr(obj, attr_name)

        else:
            if not inspect.isclass(obj):
                mro = list(inspect.getmro(obj.__class__))
            else:
                mro = list(inspect.getmro(obj))

            mro.reverse()

            orig_attr = None

            # Walk base-to-derived so orig_obj ends up as the most
            # derived class that resolves the attribute.
            for cls in mro:
                try:
                    orig_obj = cls
                    # NOTE(review): some upstream variants read the
                    # attribute off `cls` here; this copy reads it off
                    # `obj`.  Kept as-is to preserve behavior.
                    orig_attr = getattr(obj, attr_name)
                except AttributeError:
                    continue

        if orig_attr is None:
            raise AttributeError("Attribute not found.")

        # Calling getattr() on a staticmethod transforms it to a 'normal'
        # function.  We need to ensure that we put it back as a staticmethod.
        old_attribute = obj.__dict__.get(attr_name)
        if old_attribute is not None and isinstance(old_attribute, staticmethod):
            orig_attr = staticmethod(orig_attr)

        self.stubs.append((orig_obj, attr_name, orig_attr))
        setattr(orig_obj, attr_name, new_attr)

    def SmartUnsetAll(self):
        """Reverses all the SmartSet() calls, restoring things to their
        original definition.  Its okay to call SmartUnsetAll() repeatedly, as
        later calls have no effect if no SmartSet() calls have been made.
        """
        # Restore newest-first so repeated stubs of the same attribute
        # unwind correctly.
        self.stubs.reverse()

        for args in self.stubs:
            setattr(*args)

        self.stubs = []

    def Set(self, parent, child_name, new_child):
        """Replace child_name's old definition with new_child, in the context
        of the given parent.  The parent could be a module when the child is a
        function at module scope.  Or the parent could be a class when a class'
        method is being replaced.  The named child is set to new_child, while
        the prior definition is saved away for later, when UnsetAll() is
        called.

        This method supports the case where child_name is a staticmethod or a
        classmethod of parent.
        """
        old_child = getattr(parent, child_name)

        # getattr() unwraps staticmethods; re-wrap so restoration puts
        # back exactly what was there.
        old_attribute = parent.__dict__.get(child_name)
        if old_attribute is not None and isinstance(old_attribute, staticmethod):
            old_child = staticmethod(old_child)

        self.cache.append((parent, old_child, child_name))
        setattr(parent, child_name, new_child)

    def UnsetAll(self):
        """Reverses all the Set() calls, restoring things to their original
        definition.  Its okay to call UnsetAll() repeatedly, as later calls
        have no effect if no Set() calls have been made.
        """
        # Undo calls to Set() in reverse order, in case Set() was called on the
        # same arguments repeatedly (want the original call to be last one
        # undone)
        self.cache.reverse()

        for (parent, old_child, child_name) in self.cache:
            setattr(parent, child_name, old_child)
        self.cache = []
|
cedar101/quepy | refs/heads/master | tests/random_expression.py | 9 | # -*- coding: utf-8 -*-
import random
from quepy.expression import Expression
def random_data(only_ascii=False):
    """Build a random unicode string for fuzz-testing expressions.

    After the first character, another character is appended while
    random.random() exceeds 1/20, so the length is geometrically
    distributed.  With only_ascii the alphabet is printable ASCII;
    otherwise it mixes separators, ASCII letters, 7-bit code points and
    arbitrary BMP code points.
    NOTE(review): uses ``unichr``, so this is Python-2 only.
    """
    data = []
    first = True
    while first or 1 / 20.0 < random.random():
        first = False
        if only_ascii:
            c = unichr(random.randint(33, 126))
            data.append(c)
            continue
        x = random.random()
        if 0.1 > x:
            # Occasionally inject separators / path-ish characters.
            c = random.choice(u" ./\n")
        elif 0.50 > x:
            c = unichr(random.randint(65, 122))
        elif 0.85 > x:
            c = unichr(random.randint(0, 127))
        else:
            c = unichr(random.randint(0, 65535))
        data.append(c)
    return u"".join(data)
def random_relation(only_ascii=False):
    """Return a random relation label.

    Usually a plain (space-stripped) random string; about 5% of the time
    a dummy object whose __unicode__ yields the string, to exercise
    unicode-coercion code paths.
    """
    data = random_data(only_ascii)
    data = data.replace(" ", "")
    if random.random() > 0.05:
        return data

    class UnicodeableDummy(object):
        # Not a string, but coercible to one via unicode().
        def __unicode__(self):
            return data
    return UnicodeableDummy()
def random_expression(only_ascii=False):
    """
    operations: new node, add data, decapitate, merge
    """
    # Random walk over the four mutation operations above, starting from
    # four empty Expressions, until everything has merged into one.
    mean_size = 20
    # Relative weights for (new node, add data, decapitate, merge), each
    # jittered by a random factor, then normalized to probabilities.
    xs = [40.0, 30.0, 50.0, 20.0]
    xs = [x * (1.0 - random.random()) for x in xs]
    assert all(x != 0 for x in xs)
    new_node, add_data, decapitate, _ = [x / sum(xs) for x in xs]
    expressions = [Expression(), Expression(), Expression(), Expression()]
    while len(expressions) != 1:
        if (1.0 / mean_size) < random.random():
            # Will start to merge more and will not create new nodes
            new_node = 0.0
        # Choose action
        r = random.random()
        if r < new_node:
            # New expression
            expressions.append(Expression())
        elif r < add_data + new_node:
            # Add data
            e = random.choice(expressions)
            e.add_data(random_relation(only_ascii), random_data(only_ascii))
        elif r < decapitate + add_data + new_node:
            # Decapitate
            e = random.choice(expressions)
            e.decapitate(random_relation(only_ascii),
                         reverse=(0.25 < random.random()))
        elif len(expressions) != 1:
            # Merge
            random.shuffle(expressions)
            e2 = expressions.pop()
            e1 = expressions[-1]
            e1 += e2
    return expressions[0]
|
jazkarta/edx-platform-for-isc | refs/heads/backport-auto-certification | common/djangoapps/django_comment_common/tests.py | 34 | from django.test import TestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django_comment_common.models import Role
from student.models import CourseEnrollment, User
class RoleAssignmentTest(TestCase):
    """
    Basic checks to make sure our Roles get assigned and unassigned as students
    are enrolled and unenrolled from a course.
    """

    def setUp(self):
        # Check a staff account because those used to get the Moderator role
        self.staff_user = User.objects.create_user(
            "patty",
            "patty@fake.edx.org",
        )
        self.staff_user.is_staff = True

        self.student_user = User.objects.create_user(
            "hacky",
            "hacky@fake.edx.org"
        )
        self.course_key = SlashSeparatedCourseKey("edX", "Fake101", "2012")
        # Enrollment is expected to trigger default forum-role assignment.
        CourseEnrollment.enroll(self.staff_user, self.course_key)
        CourseEnrollment.enroll(self.student_user, self.course_key)

    def test_enrollment_auto_role_creation(self):
        # Both users — staff flag or not — should receive only the
        # "Student" forum role on enrollment.
        student_role = Role.objects.get(
            course_id=self.course_key,
            name="Student"
        )

        self.assertEqual([student_role], list(self.staff_user.roles.all()))
        self.assertEqual([student_role], list(self.student_user.roles.all()))
# The following was written on the assumption that unenrolling from a course
# should remove all forum Roles for that student for that course. This is
# not necessarily the case -- please see comments at the top of
# django_comment_client.models.assign_default_role(). Leaving it for the
# forums team to sort out.
#
# def test_unenrollment_auto_role_removal(self):
# another_student = User.objects.create_user("sol", "sol@fake.edx.org")
# CourseEnrollment.enroll(another_student, self.course_id)
#
# CourseEnrollment.unenroll(self.student_user, self.course_id)
# # Make sure we didn't delete the actual Role
# student_role = Role.objects.get(
# course_id=self.course_id,
# name="Student"
# )
# self.assertNotIn(student_role, self.student_user.roles.all())
# self.assertIn(student_role, another_student.roles.all())
|
gedakc/manuskript | refs/heads/master | manuskript/ui/helpLabel.py | 2 | #!/usr/bin/env python
# --!-- coding: utf8 --!--
from PyQt5.QtWidgets import QLabel, QSizePolicy
class helpLabel(QLabel):
    """A contextual help label rendered as a pale-yellow rounded bubble.

    Instances can be hidden globally from the application's Help menu; the
    status-bar tip reminds the user of that option.
    """

    _BUBBLE_STYLE = """
        QLabel {
            background-color:lightYellow;
            border:1px solid lightGray;
            border-radius: 10px;
            margin: 3px;
            padding:10px;
            color:gray;
        }"""

    def __init__(self, text=None, parent=None):
        """Create the label showing *text* under *parent* and style it."""
        QLabel.__init__(self, text, parent)
        # Grow horizontally as needed, but stay as short as possible.
        self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
        self.setStatusTip(self.tr("If you don't wanna see me, you can hide me in Help menu."))
        self.setStyleSheet(self._BUBBLE_STYLE)
|
mehmetkose/tornado | refs/heads/master | tornado/simple_httpclient.py | 14 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado.escape import utf8, _unicode
from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
from tornado.iostream import StreamClosedError
from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
from tornado.log import gen_log
from tornado import stack_context
from tornado.tcpclient import TCPClient
import base64
import collections
import copy
import functools
import re
import socket
import sys
from io import BytesIO
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine.
ssl = None
try:
import certifi
except ImportError:
certifi = None
def _default_ca_certs():
    """Return the path of certifi's CA bundle.

    Raises an Exception when the optional ``certifi`` package is not
    installed, since https requests need a CA list to validate against.
    """
    if certifi is not None:
        return certifi.where()
    raise Exception("The 'certifi' package is required to use https "
                    "in simple_httpclient")
class SimpleAsyncHTTPClient(AsyncHTTPClient):
    """Non-blocking HTTP client with no external dependencies.

    This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
    Some features found in the curl-based AsyncHTTPClient are not yet
    supported.  In particular, proxies are not supported, connections
    are not reused, and callers cannot select the network interface to be
    used.
    """
    def initialize(self, io_loop, max_clients=10,
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None, max_header_size=None,
                   max_body_size=None):
        """Creates a AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        ``force_instance=True`` may be used to suppress this behavior.

        Note that because of this implicit reuse, unless ``force_instance``
        is used, only the first call to the constructor actually uses
        its arguments. It is recommended to use the ``configure`` method
        instead of the constructor to ensure that arguments take effect.

        ``max_clients`` is the number of concurrent requests that can be
        in progress; when this limit is reached additional requests will be
        queued. Note that time spent waiting in this queue still counts
        against the ``request_timeout``.

        ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like ``/etc/hosts`` is not possible or desirable (e.g. in
        unittests).

        ``max_buffer_size`` (default 100MB) is the number of bytes
        that can be read into memory at once. ``max_body_size``
        (defaults to ``max_buffer_size``) is the largest response body
        that the client will accept. Without a
        ``streaming_callback``, the smaller of these two limits
        applies; with a ``streaming_callback`` only ``max_body_size``
        does.

        .. versionchanged:: 4.2
           Added the ``max_body_size`` argument.
        """
        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                      defaults=defaults)
        self.max_clients = max_clients
        # FIFO of (key, request, callback) tuples not yet started.
        self.queue = collections.deque()
        # key -> (request, callback) for requests currently in flight.
        self.active = {}
        # key -> (request, callback, timeout_handle) for queued requests
        # that have not been started yet.
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)

    def close(self):
        """Shut down the client, closing the resolver only if we created it."""
        super(SimpleAsyncHTTPClient, self).close()
        if self.own_resolver:
            self.resolver.close()
        self.tcp_client.close()

    def fetch_impl(self, request, callback):
        # Each fetch gets a fresh opaque key, so identical requests can
        # coexist in the queue/active/waiting maps without colliding.
        key = object()
        self.queue.append((key, request, callback))
        if not len(self.active) < self.max_clients:
            # All slots are busy: arm a timer so the request fails with 599
            # if it waits in the queue past its own timeout budget.
            timeout_handle = self.io_loop.add_timeout(
                self.io_loop.time() + min(request.connect_timeout,
                                          request.request_timeout),
                functools.partial(self._on_timeout, key))
        else:
            timeout_handle = None
        self.waiting[key] = (request, callback, timeout_handle)
        self._process_queue()
        if self.queue:
            gen_log.debug("max_clients limit reached, request queued. "
                          "%d active, %d queued requests." % (
                              len(self.active), len(self.queue)))

    def _process_queue(self):
        # Start as many queued requests as max_clients allows.  NullContext
        # keeps the new connections from inheriting the caller's
        # stack context.
        with stack_context.NullContext():
            while self.queue and len(self.active) < self.max_clients:
                key, request, callback = self.queue.popleft()
                if key not in self.waiting:
                    # Already timed out (or otherwise removed) while queued.
                    continue
                self._remove_timeout(key)
                self.active[key] = (request, callback)
                release_callback = functools.partial(self._release_fetch, key)
                self._handle_request(request, release_callback, callback)

    def _connection_class(self):
        # Hook allowing subclasses to substitute their own connection type.
        return _HTTPConnection

    def _handle_request(self, request, release_callback, final_callback):
        # The connection object drives itself from its constructor; no
        # reference needs to be kept here.
        self._connection_class()(
            self.io_loop, self, request, release_callback,
            final_callback, self.max_buffer_size, self.tcp_client,
            self.max_header_size, self.max_body_size)

    def _release_fetch(self, key):
        # A connection finished: free its slot and start the next queued
        # request, if any.
        del self.active[key]
        self._process_queue()

    def _remove_timeout(self, key):
        # Cancel and forget the queued-request timeout for `key`, if any.
        if key in self.waiting:
            request, callback, timeout_handle = self.waiting[key]
            if timeout_handle is not None:
                self.io_loop.remove_timeout(timeout_handle)
            del self.waiting[key]

    def _on_timeout(self, key):
        # The request timed out while still queued: synthesize a 599
        # response without ever opening a connection.
        request, callback, timeout_handle = self.waiting[key]
        self.queue.remove((key, request, callback))
        timeout_response = HTTPResponse(
            request, 599, error=HTTPError(599, "Timeout"),
            request_time=self.io_loop.time() - request.start_time)
        self.io_loop.add_callback(callback, timeout_response)
        del self.waiting[key]
class _HTTPConnection(httputil.HTTPMessageDelegate):
    """Drives a single HTTP request/response exchange over one IOStream.

    Created by SimpleAsyncHTTPClient._handle_request; runs itself from the
    constructor (resolve, connect, send, read) and reports the result via
    ``final_callback``, releasing its client slot via ``release_callback``.
    """
    _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size, tcp_client,
                 max_header_size, max_body_size):
        self.start_time = io_loop.time()
        self.io_loop = io_loop
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.max_buffer_size = max_buffer_size
        self.tcp_client = tcp_client
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        self.code = None
        self.headers = None
        # Accumulated response body chunks (unused with streaming_callback).
        self.chunks = []
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        self._sockaddr = None
        # Everything from here on (including later callbacks scheduled
        # inside this context) reports errors through _handle_exception.
        with stack_context.ExceptionStackContext(self._handle_exception):
            self.parsed = urlparse.urlsplit(_unicode(self.request.url))
            if self.parsed.scheme not in ("http", "https"):
                raise ValueError("Unsupported url scheme: %s" %
                                 self.request.url)
            # urlsplit results have hostname and port results, but they
            # didn't support ipv6 literals until python 2.7.
            netloc = self.parsed.netloc
            if "@" in netloc:
                # Strip userinfo before splitting host/port; credentials are
                # re-read from self.parsed later in _on_connect.
                userpass, _, netloc = netloc.rpartition("@")
            host, port = httputil.split_host_and_port(netloc)
            if port is None:
                port = 443 if self.parsed.scheme == "https" else 80
            if re.match(r'^\[.*\]$', host):
                # raw ipv6 addresses in urls are enclosed in brackets
                host = host[1:-1]
            self.parsed_hostname = host  # save final host for _on_connect
            if request.allow_ipv6 is False:
                af = socket.AF_INET
            else:
                af = socket.AF_UNSPEC
            ssl_options = self._get_ssl_options(self.parsed.scheme)
            # One timer covers both connect and request phases until the
            # connection is established (see _on_connect).
            timeout = min(self.request.connect_timeout, self.request.request_timeout)
            if timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + timeout,
                    stack_context.wrap(self._on_timeout))
            self.tcp_client.connect(host, port, af=af,
                                    ssl_options=ssl_options,
                                    max_buffer_size=self.max_buffer_size,
                                    callback=self._on_connect)

    def _get_ssl_options(self, scheme):
        """Return ssl options (dict or SSLContext) for https, else None."""
        if scheme == "https":
            if self.request.ssl_options is not None:
                return self.request.ssl_options
            # If we are using the defaults, don't construct a
            # new SSLContext.
            if (self.request.validate_cert and
                    self.request.ca_certs is None and
                    self.request.client_cert is None and
                    self.request.client_key is None):
                return _client_ssl_defaults
            ssl_options = {}
            if self.request.validate_cert:
                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            if self.request.ca_certs is not None:
                ssl_options["ca_certs"] = self.request.ca_certs
            elif not hasattr(ssl, 'create_default_context'):
                # When create_default_context is present,
                # we can omit the "ca_certs" parameter entirely,
                # which avoids the dependency on "certifi" for py34.
                ssl_options["ca_certs"] = _default_ca_certs()
            if self.request.client_key is not None:
                ssl_options["keyfile"] = self.request.client_key
            if self.request.client_cert is not None:
                ssl_options["certfile"] = self.request.client_cert
            # SSL interoperability is tricky. We want to disable
            # SSLv2 for security reasons; it wasn't disabled by default
            # until openssl 1.0. The best way to do this is to use
            # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
            # until 3.2. Python 2.7 adds the ciphers argument, which
            # can also be used to disable SSLv2. As a last resort
            # on python 2.6, we set ssl_version to TLSv1. This is
            # more narrow than we'd like since it also breaks
            # compatibility with servers configured for SSLv3 only,
            # but nearly all servers support both SSLv3 and TLSv1:
            # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
            if sys.version_info >= (2, 7):
                # In addition to disabling SSLv2, we also exclude certain
                # classes of insecure ciphers.
                ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
            else:
                # This is really only necessary for pre-1.0 versions
                # of openssl, but python 2.6 doesn't expose version
                # information.
                ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1
            return ssl_options
        return None

    def _on_timeout(self):
        # Raised inside the ExceptionStackContext set up in __init__, so the
        # error is routed through _handle_exception as a 599 response.
        self._timeout = None
        if self.final_callback is not None:
            raise HTTPError(599, "Timeout")

    def _remove_timeout(self):
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def _on_connect(self, stream):
        """Connected: validate the request, send headers, then the body."""
        if self.final_callback is None:
            # final_callback is cleared if we've hit our timeout.
            stream.close()
            return
        self.stream = stream
        self.stream.set_close_callback(self.on_connection_close)
        self._remove_timeout()
        if self.final_callback is None:
            return
        if self.request.request_timeout:
            # Re-arm the timer for the request phase only.
            self._timeout = self.io_loop.add_timeout(
                self.start_time + self.request.request_timeout,
                stack_context.wrap(self._on_timeout))
        if (self.request.method not in self._SUPPORTED_METHODS and
                not self.request.allow_nonstandard_methods):
            raise KeyError("unknown method %s" % self.request.method)
        # Features supported only by the curl-based client.
        for key in ('network_interface',
                    'proxy_host', 'proxy_port',
                    'proxy_username', 'proxy_password'):
            if getattr(self.request, key, None):
                raise NotImplementedError('%s not supported' % key)
        if "Connection" not in self.request.headers:
            # Connections are never reused by this client.
            self.request.headers["Connection"] = "close"
        if "Host" not in self.request.headers:
            if '@' in self.parsed.netloc:
                # The Host header must not carry userinfo credentials.
                self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1]
            else:
                self.request.headers["Host"] = self.parsed.netloc
        # Basic auth: credentials embedded in the URL take precedence over
        # the request's auth_username/auth_password.
        username, password = None, None
        if self.parsed.username is not None:
            username, password = self.parsed.username, self.parsed.password
        elif self.request.auth_username is not None:
            username = self.request.auth_username
            password = self.request.auth_password or ''
        if username is not None:
            if self.request.auth_mode not in (None, "basic"):
                raise ValueError("unsupported auth_mode %s",
                                 self.request.auth_mode)
            auth = utf8(username) + b":" + utf8(password)
            self.request.headers["Authorization"] = (b"Basic " +
                                                     base64.b64encode(auth))
        if self.request.user_agent:
            self.request.headers["User-Agent"] = self.request.user_agent
        if not self.request.allow_nonstandard_methods:
            # Some HTTP methods nearly always have bodies while others
            # almost never do. Fail in this case unless the user has
            # opted out of sanity checks with allow_nonstandard_methods.
            body_expected = self.request.method in ("POST", "PATCH", "PUT")
            body_present = (self.request.body is not None or
                            self.request.body_producer is not None)
            if ((body_expected and not body_present) or
                    (body_present and not body_expected)):
                raise ValueError(
                    'Body must %sbe None for method %s (unless '
                    'allow_nonstandard_methods is true)' %
                    ('not ' if body_expected else '', self.request.method))
        if self.request.expect_100_continue:
            self.request.headers["Expect"] = "100-continue"
        if self.request.body is not None:
            # When body_producer is used the caller is responsible for
            # setting Content-Length (or else chunked encoding will be used).
            self.request.headers["Content-Length"] = str(len(
                self.request.body))
        if (self.request.method == "POST" and
                "Content-Type" not in self.request.headers):
            self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if self.request.decompress_response:
            self.request.headers["Accept-Encoding"] = "gzip"
        req_path = ((self.parsed.path or '/') +
                    (('?' + self.parsed.query) if self.parsed.query else ''))
        self.connection = self._create_connection(stream)
        start_line = httputil.RequestStartLine(self.request.method,
                                               req_path, '')
        self.connection.write_headers(start_line, self.request.headers)
        if self.request.expect_100_continue:
            # Wait for the 100 (Continue) response before sending the body;
            # headers_received() will call _write_body(False) when it comes.
            self._read_response()
        else:
            self._write_body(True)

    def _create_connection(self, stream):
        stream.set_nodelay(True)
        connection = HTTP1Connection(
            stream, True,
            HTTP1ConnectionParameters(
                no_keep_alive=True,
                max_header_size=self.max_header_size,
                max_body_size=self.max_body_size,
                decompress=self.request.decompress_response),
            self._sockaddr)
        return connection

    def _write_body(self, start_read):
        """Send the request body (if any), then optionally start reading.

        ``start_read`` is False during the Expect: 100-continue handshake,
        where the response read is already in progress.
        """
        if self.request.body is not None:
            self.connection.write(self.request.body)
        elif self.request.body_producer is not None:
            fut = self.request.body_producer(self.connection.write)
            if fut is not None:
                # Asynchronous producer: finish and start reading only after
                # its future resolves (fut.result() re-raises its errors).
                fut = gen.convert_yielded(fut)

                def on_body_written(fut):
                    fut.result()
                    self.connection.finish()
                    if start_read:
                        self._read_response()
                self.io_loop.add_future(fut, on_body_written)
                return
        self.connection.finish()
        if start_read:
            self._read_response()

    def _read_response(self):
        # Ensure that any exception raised in read_response ends up in our
        # stack context.
        self.io_loop.add_future(
            self.connection.read_response(self),
            lambda f: f.result())

    def _release(self):
        # Give the client slot back exactly once.
        if self.release_callback is not None:
            release_callback = self.release_callback
            self.release_callback = None
            release_callback()

    def _run_callback(self, response):
        # Deliver the final response exactly once; final_callback doubles
        # as the "still pending" flag throughout this class.
        self._release()
        if self.final_callback is not None:
            final_callback = self.final_callback
            self.final_callback = None
            self.io_loop.add_callback(final_callback, response)

    def _handle_exception(self, typ, value, tb):
        """ExceptionStackContext handler: convert errors to 599 responses."""
        if self.final_callback:
            self._remove_timeout()
            if isinstance(value, StreamClosedError):
                if value.real_error is None:
                    value = HTTPError(599, "Stream closed")
                else:
                    value = value.real_error
            self._run_callback(HTTPResponse(self.request, 599, error=value,
                                            request_time=self.io_loop.time() - self.start_time,
                                            ))
            if hasattr(self, "stream"):
                # TODO: this may cause a StreamClosedError to be raised
                # by the connection's Future. Should we cancel the
                # connection more gracefully?
                self.stream.close()
            return True
        else:
            # If our callback has already been called, we are probably
            # catching an exception that is not caused by us but rather
            # some child of our callback. Rather than drop it on the floor,
            # pass it along, unless it's just the stream being closed.
            return isinstance(value, StreamClosedError)

    def on_connection_close(self):
        # Stream closed before we produced a response: surface the stream's
        # own error if it has one, otherwise a generic 599.
        if self.final_callback is not None:
            message = "Connection closed"
            if self.stream.error:
                raise self.stream.error
            try:
                raise HTTPError(599, message)
            except HTTPError:
                self._handle_exception(*sys.exc_info())

    def headers_received(self, first_line, headers):
        if self.request.expect_100_continue and first_line.code == 100:
            # Server accepted the headers; now send the body.  The real
            # response will arrive in a later headers_received call.
            self._write_body(False)
            return
        self.code = first_line.code
        self.reason = first_line.reason
        self.headers = headers
        if self._should_follow_redirect():
            # Suppress header_callback for intermediate redirect responses.
            return
        if self.request.header_callback is not None:
            # Reassemble the start line.
            self.request.header_callback('%s %s %s\r\n' % first_line)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

    def _should_follow_redirect(self):
        return (self.request.follow_redirects and
                self.request.max_redirects > 0 and
                self.code in (301, 302, 303, 307))

    def finish(self):
        """Response fully read: follow a redirect or deliver the result."""
        data = b''.join(self.chunks)
        self._remove_timeout()
        # Keep the very first request of a redirect chain as the one
        # reported in the final HTTPResponse.
        original_request = getattr(self.request, "original_request",
                                   self.request)
        if self._should_follow_redirect():
            assert isinstance(self.request, _RequestProxy)
            new_request = copy.copy(self.request.request)
            new_request.url = urlparse.urljoin(self.request.url,
                                               self.headers["Location"])
            new_request.max_redirects = self.request.max_redirects - 1
            del new_request.headers["Host"]
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # Client SHOULD make a GET request after a 303.
            # According to the spec, 302 should be followed by the same
            # method as the original request, but in practice browsers
            # treat 302 the same as 303, and many servers use 302 for
            # compatibility with pre-HTTP/1.1 user agents which don't
            # understand the 303 status.
            if self.code in (302, 303):
                new_request.method = "GET"
                new_request.body = None
                # NOTE(review): deletions go through self.request.headers, not
                # new_request.headers -- presumably they share the same headers
                # object via the shallow copy above, so both are affected;
                # verify before changing.
                for h in ["Content-Length", "Content-Type",
                          "Content-Encoding", "Transfer-Encoding"]:
                    try:
                        del self.request.headers[h]
                    except KeyError:
                        pass
            new_request.original_request = original_request
            final_callback = self.final_callback
            self.final_callback = None
            self._release()
            # Hand the redirect back to the client as a brand-new fetch.
            self.client.fetch(new_request, final_callback)
            self._on_end_request()
            return
        if self.request.streaming_callback:
            # Chunks were already delivered via streaming_callback.
            buffer = BytesIO()
        else:
            buffer = BytesIO(data)  # TODO: don't require one big string?
        response = HTTPResponse(original_request,
                                self.code, reason=getattr(self, 'reason', None),
                                headers=self.headers,
                                request_time=self.io_loop.time() - self.start_time,
                                buffer=buffer,
                                effective_url=self.request.url)
        self._run_callback(response)
        self._on_end_request()

    def _on_end_request(self):
        # Connections are never reused; always close the stream.
        self.stream.close()

    def data_received(self, chunk):
        if self._should_follow_redirect():
            # We're going to follow a redirect so just discard the body.
            return
        if self.request.streaming_callback is not None:
            self.request.streaming_callback(chunk)
        else:
            self.chunks.append(chunk)
if __name__ == "__main__":
    # Allow running this module directly: force the simple client
    # implementation, then delegate to tornado.httpclient.main()'s
    # command-line fetch helper.
    AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
    main()
|
yhfudev/docsis3ns3 | refs/heads/master | src/bridge/test/examples-to-run.py | 200 | #! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
# Each entry is (example_name, do_run, do_valgrind_run).  Note the flags are
# deliberately strings ("True"/"False"), as test.py expects.
cpp_examples = [
    ("csma-bridge", "True", "True"),
    ("csma-bridge-one-hop", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
# Each entry is (example_name, do_run); the flag is a string, as test.py expects.
python_examples = [
    ("csma-bridge.py", "True"),
]
|
JVenberg/PokemonGo-Bot-Desktop | refs/heads/development | pywin/Lib/distutils/version.py | 259 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
    """Version numbering for anal retentives and software idealists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.

    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):

        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4

    The following are examples of invalid version numbers:

        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4

    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """

    # VERBOSE regex: major '.' minor ['.' patch] [('a'|'b') prerelease_num]
    # (whitespace in the pattern is ignored under re.VERBOSE).
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            re.VERBOSE)

    def parse (self, vstring):
        # Sets self.version to a 3-tuple of ints and self.prerelease to
        # ('a'|'b', number) or None; raises ValueError on malformed input.
        # NOTE: Python 2 only ('raise X, msg' syntax, string.atoi).
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError, "invalid version number '%s'" % vstring

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            # A missing patch level is normalized to 0, so "0.4" == "0.4.0".
            self.version = tuple(map(string.atoi, [major, minor]) + [0])

        if prerelease:
            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
        else:
            self.prerelease = None

    def __str__ (self):
        # Reconstruct a canonical string: a trailing ".0" patch is dropped.
        if self.version[2] == 0:
            vstring = string.join(map(str, self.version[0:2]), '.')
        else:
            vstring = string.join(map(str, self.version), '.')

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def __cmp__ (self, other):
        # Python 2 rich-comparison hook; strings are parsed on the fly.
        if isinstance(other, StringType):
            other = StrictVersion(other)

        compare = cmp(self.version, other.version)
        if (compare == 0):              # have to compare prerelease
            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!
            if (not self.prerelease and not other.prerelease):
                return 0
            elif (self.prerelease and not other.prerelease):
                return -1
            elif (not self.prerelease and other.prerelease):
                return 1
            elif (self.prerelease and other.prerelease):
                return cmp(self.prerelease, other.prerelease)
        else:                           # numeric versions don't match --
            return compare              # prerelease stuff doesn't matter

# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
    """Version numbering for anarchists and software realists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:

        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0

    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """

    # Splits a version string into runs of digits, runs of letters, and
    # '.' separators (which are discarded below).
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse (self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        # NOTE: relies on Python 2 filter() returning a list; the in-place
        # index assignment below would fail on Python 3's lazy filter.
        components = filter(lambda x: x and x != '.',
                            self.component_re.split(vstring))
        for i in range(len(components)):
            try:
                # Numeric components compare numerically; non-numeric ones
                # stay strings and compare lexically.
                components[i] = int(components[i])
            except ValueError:
                pass

        self.version = components

    def __str__ (self):
        return self.vstring

    def __repr__ (self):
        return "LooseVersion ('%s')" % str(self)

    def __cmp__ (self, other):
        # Python 2 comparison: a plain tuple/list comparison of the parsed
        # component lists; strings are parsed on the fly.
        if isinstance(other, StringType):
            other = LooseVersion(other)

        return cmp(self.version, other.version)

# end class LooseVersion
|
Seenivasanseeni/PyDaS | refs/heads/master | PDaS/LinkedList.py | 1 | class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
class LinkedList(object):
#default constructor
def __init__(self,array=None):
self.head=None
self.length=0
if(array!=None):
self.initArray(array)
#constructor with list as argument
def initArray(self,array):
for value in array:
self.prepend(value)
self.reverse()
#method to copy a Linked List and to return the copy
def copy(self):
head2=LinkedList()
temp=self.head
while (temp!=None):
head2.prepend(temp.data)
temp=temp.next
head2.reverse()
return head2
def prepend(self, data):
self.head=Node(data,self.head)
self.length+=1
def append(self, data):
temp=self.head
parent=None
while(temp!=None):
parent=temp
temp=temp.next
temp=Node(data,None)
if(parent==None):
self.head=temp
else:
parent.next=temp
self.length+=1
def insertNth(self,data,position):
temp=self.head
index=0
parent=None
while(index!=position):
parent=temp
temp=temp.next
index+=1
temp=Node(data)
if(parent==None):
temp.next=self.head
self.head=temp
else:
temp.next=parent.next
parent.next=temp
self.length+=1
def updateNth(self,data,position):
if(position>self.length):
print("Invalid Index")
return;
temp=self.head
index=0
parent=None
while(index!=position):
parent=temp
temp=temp.next
index+=1
temp.data=data
if(parent==None):
self.head=temp
def printLinkedList(self,sep=" "):
if(self.length==0):
return None
temp=self.head
while (temp.next!=None):
print(str(temp.data),end=sep)
temp=temp.next
print(temp.data)
def getData(self,position):
if(self.length<=position):
return None
temp=self.head
index=0
while(index!=position):
temp=temp.next
index+=1
return temp.data
def remove(self,data):
temp=self.head
parent=None
while (temp!=None and temp.data!=data):
parent=temp
temp=temp.next
if(temp==None):
return -1
if(parent==None):
self.head=self.head.next
else:
parent.next=temp.next
self.length-=1
return 1
def removeAt(self,position):
if(self.length<=position):
return -1
temp=self.head
self.length-=1
index=0
if(position==0):
self.head=self.head.next
return 0
while(index!=position):
parent=temp
temp=temp.next
index+=1
parent.next=temp.next
return 1
def reverse(self):
temp=self.head
new=None
while (temp!=None):
next=temp.next
temp.next=new
new=temp
temp=next
self.head=new
def list(self):
array=[]
temp=self.head
while (temp!=None):
array.append(temp.data)
temp=temp.next
return array
def compareLists(self, head2):
    """Return True when *head2* holds exactly the same data sequence."""
    a = self.head
    b = head2.head
    # Two empty lists compare equal; one empty and one not cannot.
    if a is None and b is None:
        return True
    if a is None or b is None:
        return False
    if self.length != head2.length:
        return False
    while a is not None and b is not None:
        if a.data != b.data:
            return False
        a = a.next
        b = b.next
    return True
def reversedLinkedList(self):
    """Return a reversed copy of this list; the original is untouched."""
    flipped = self.copy()
    flipped.reverse()
    return flipped
EdwardJKim/nbgrader | refs/heads/master | nbgrader/tests/nbextensions/test_assignment_list.py | 1 | import pytest
import os
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from .. import run_nbgrader
from .conftest import notwindows
def _wait(browser):
    # Shared explicit-wait helper: every wait in this module polls for up
    # to 30 seconds before timing out.
    return WebDriverWait(browser, 30)
def _load_assignments_list(browser, port, retries=5):
    """Open the notebook tree page and switch to the Assignments tab.

    Retries the page load up to *retries* times before re-raising the
    TimeoutException.
    """
    # go to the correct page
    browser.get("http://localhost:{}/tree".format(port))

    def page_loaded(browser):
        # IPython.page being defined signals that the tree page JS has run.
        return browser.execute_script(
            'return typeof IPython !== "undefined" && IPython.page !== undefined;')

    # wait for the page to load
    try:
        _wait(browser).until(page_loaded)
    except TimeoutException:
        if retries > 0:
            print("Retrying page load...")
            # page timeout, but sometimes this happens, so try refreshing?
            _load_assignments_list(browser, port, retries=retries - 1)
            # Bug fix: the recursive call already performed the whole
            # sequence below (including clicking the Assignments tab), so
            # return here instead of falling through and doing it twice.
            return
        else:
            print("Failed to load the page too many times")
            raise

    # wait for the extension to load
    _wait(browser).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#assignments")))

    # switch to the assignments list
    element = browser.find_element_by_link_text("Assignments")
    element.click()

    # make sure released, downloaded, and submitted assignments are visible
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list")))
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list")))
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#submitted_assignments_list")))
def _expand(browser, list_id, assignment):
    # Click the assignment's name link to expand its notebook sub-list,
    # then wait until every child row is displayed.  Row 0 is the
    # assignment row itself, hence the range starting at 1.
    browser.find_element_by_link_text(assignment).click()
    rows = browser.find_elements_by_css_selector("{} .list_item".format(list_id))
    for i in range(1, len(rows)):
        # Re-query inside the lambda so stale references are re-resolved
        # on every poll.
        _wait(browser).until(lambda browser: browser.find_elements_by_css_selector("{} .list_item".format(list_id))[i].is_displayed())
    return rows
def _unexpand(browser, list_id, assignment):
    # Inverse of _expand: click the assignment name again and wait until
    # every child row (all but row 0, the assignment row) is hidden.
    browser.find_element_by_link_text(assignment).click()
    rows = browser.find_elements_by_css_selector("{} .list_item".format(list_id))
    for i in range(1, len(rows)):
        _wait(browser).until(lambda browser: not browser.find_elements_by_css_selector("{} .list_item".format(list_id))[i].is_displayed())
def _wait_for_modal(browser):
    # Block until a bootstrap modal dialog is present in the DOM.
    _wait(browser).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".modal-dialog")))
def _dismiss_modal(browser):
    """Click the modal's primary button and wait until the dialog is gone."""
    browser.find_element_by_css_selector(".modal-footer .btn-primary").click()

    def _modal_is_gone(driver):
        # Presence check by lookup: NoSuchElementException means dismissed.
        try:
            driver.find_element_by_css_selector(".modal-dialog")
        except NoSuchElementException:
            return True
        else:
            return False

    _wait(browser).until(_modal_is_gone)
def _sort_rows(x):
try:
item_name = x.find_element_by_class_name("item_name").text
except NoSuchElementException:
item_name = ""
return item_name
@pytest.mark.nbextensions
@notwindows
def test_show_assignments_list(browser, port, class_files, tempdir):
    """Placeholders show initially; a released assignment appears after refresh."""
    _load_assignments_list(browser, port)

    # make sure all the placeholders are initially showing
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list_placeholder")))
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list_placeholder")))
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#submitted_assignments_list_placeholder")))

    # release an assignment
    run_nbgrader(["assign", "Problem Set 1"])
    run_nbgrader(["release", "Problem Set 1", "--course", "abc101"])

    # click the refresh button
    browser.find_element_by_css_selector("#refresh_assignments_list").click()

    # wait for the released assignments to update
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list_placeholder")))

    rows = browser.find_elements_by_css_selector("#released_assignments_list > .list_item")
    assert len(rows) == 1
    assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
    assert rows[0].find_element_by_class_name("item_course").text == "abc101"
@pytest.mark.nbextensions
@notwindows
def test_multiple_released_assignments(browser, port, class_files, tempdir):
    """A second released assignment shows up alongside the first."""
    _load_assignments_list(browser, port)

    # release another assignment
    run_nbgrader(["assign", "ps.01"])
    run_nbgrader(["release", "ps.01", "--course", "xyz 200"])

    # click the refresh button
    browser.find_element_by_css_selector("#refresh_assignments_list").click()

    # wait for the released assignments to update
    _wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#released_assignments_list > .list_item")) == 2)

    # rows are sorted by item name so assertions are deterministic
    rows = browser.find_elements_by_css_selector("#released_assignments_list > .list_item")
    rows.sort(key=_sort_rows)
    assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
    assert rows[0].find_element_by_class_name("item_course").text == "abc101"
    assert rows[1].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[1].find_element_by_class_name("item_course").text == "xyz 200"
@pytest.mark.nbextensions
@notwindows
def test_fetch_assignment(browser, port, class_files, tempdir):
    """Fetching a released assignment moves it to the downloaded list on disk."""
    _load_assignments_list(browser, port)

    # click the "fetch" button
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list_placeholder")))
    rows = browser.find_elements_by_css_selector("#released_assignments_list > .list_item")
    # NOTE(review): index 1 assumes "ps.01" sorts after "Problem Set 1" in
    # the DOM ordering -- confirm against the list extension's sort.
    rows[1].find_element_by_css_selector(".item_status button").click()

    # wait for the downloaded assignments list to update
    _wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")) == 1)
    rows = browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")
    assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"
    assert os.path.exists(os.path.join(tempdir, "ps.01"))

    # expand the assignment to show the notebooks
    rows = _expand(browser, "#nbgrader-xyz_200-ps01", "ps.01")
    rows.sort(key=_sort_rows)
    assert len(rows) == 2
    assert rows[1].find_element_by_class_name("item_name").text == "problem 1"

    # unexpand the assignment
    _unexpand(browser, "#nbgrader-xyz_200-ps01", "ps.01")
@pytest.mark.nbextensions
@notwindows
def test_submit_assignment(browser, port, class_files, tempdir):
    """Submitting twice yields two entries with distinct timestamps/statuses."""
    _load_assignments_list(browser, port)

    # submit it
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list_placeholder")))
    rows = browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")
    rows[0].find_element_by_css_selector(".item_status button").click()

    # wait for the submitted assignments list to update
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#submitted_assignments_list_placeholder")))
    rows = browser.find_elements_by_css_selector("#submitted_assignments_list > .list_item")
    assert len(rows) == 1
    assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"

    # submit it again
    rows = browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")
    rows[0].find_element_by_css_selector(".item_status button").click()

    # wait for the submitted assignments list to update
    _wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#submitted_assignments_list > .list_item")) == 2)
    rows = browser.find_elements_by_css_selector("#submitted_assignments_list > .list_item")
    rows.sort(key=_sort_rows)
    assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"
    assert rows[1].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[1].find_element_by_class_name("item_course").text == "xyz 200"
    # the two submissions must carry different status text (timestamps)
    assert rows[0].find_element_by_class_name("item_status").text != rows[1].find_element_by_class_name("item_status").text
@pytest.mark.nbextensions
@notwindows
def test_fetch_second_assignment(browser, port, class_files, tempdir):
    """Fetching the last released assignment empties the released list."""
    _load_assignments_list(browser, port)

    # click the "fetch" button
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list_placeholder")))
    rows = browser.find_elements_by_css_selector("#released_assignments_list > .list_item")
    rows[0].find_element_by_css_selector(".item_status button").click()

    # wait for the downloaded assignments list to update; the released list
    # is now empty, so its placeholder must reappear
    _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list_placeholder")))
    _wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")) == 2)
    rows = browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")
    rows.sort(key=_sort_rows)
    assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
    assert rows[0].find_element_by_class_name("item_course").text == "abc101"
    assert rows[1].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[1].find_element_by_class_name("item_course").text == "xyz 200"
    assert os.path.exists(os.path.join(tempdir, "Problem Set 1"))

    # expand the assignment to show the notebooks
    rows = _expand(browser, "#nbgrader-abc101-Problem_Set_1", "Problem Set 1")
    rows.sort(key=_sort_rows)
    assert len(rows) == 3
    assert rows[1].find_element_by_class_name("item_name").text == "Problem 1"
    assert rows[2].find_element_by_class_name("item_name").text == "Problem 2"

    # unexpand the assignment
    # Bug fix: the selector was missing the "#nbgrader-" prefix used by
    # _expand above (and by test_fetch_assignment), so _unexpand selected
    # no rows and never verified that the list collapsed.
    _unexpand(browser, "#nbgrader-abc101-Problem_Set_1", "Problem Set 1")
@pytest.mark.nbextensions
@notwindows
def test_submit_other_assignment(browser, port, class_files, tempdir):
    """Submitting the second course's assignment adds a third submission row."""
    _load_assignments_list(browser, port)

    # submit it
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list_placeholder")))
    rows = browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")
    rows[0].find_element_by_css_selector(".item_status button").click()

    # wait for the submitted assignments list to update
    _wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#submitted_assignments_list > .list_item")) == 3)
    rows = browser.find_elements_by_css_selector("#submitted_assignments_list > .list_item")
    rows.sort(key=_sort_rows)
    assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
    assert rows[0].find_element_by_class_name("item_course").text == "abc101"
    assert rows[1].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[1].find_element_by_class_name("item_course").text == "xyz 200"
    assert rows[2].find_element_by_class_name("item_name").text == "ps.01"
    assert rows[2].find_element_by_class_name("item_course").text == "xyz 200"
    # all three submissions must have distinct status text
    assert rows[0].find_element_by_class_name("item_status").text != rows[1].find_element_by_class_name("item_status").text
    assert rows[0].find_element_by_class_name("item_status").text != rows[2].find_element_by_class_name("item_status").text
@pytest.mark.nbextensions
@notwindows
def test_validate_ok(browser, port, class_files, tempdir):
    """Validating a correct notebook shows the success modal."""
    _load_assignments_list(browser, port)

    # expand the assignment to show the notebooks
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list_placeholder")))
    rows = _expand(browser, "#nbgrader-xyz_200-ps01", "ps.01")
    rows.sort(key=_sort_rows)
    assert len(rows) == 2
    assert rows[1].find_element_by_class_name("item_name").text == "problem 1"

    # click the "validate" button
    rows[1].find_element_by_css_selector(".item_status button").click()

    # wait for the modal dialog to appear
    _wait_for_modal(browser)

    # check that it succeeded
    browser.find_element_by_css_selector(".modal-dialog .validation-success")

    # close the modal dialog
    _dismiss_modal(browser)
@pytest.mark.nbextensions
@notwindows
def test_validate_failure(browser, port, class_files, tempdir):
    """Validating a failing notebook shows the failure modal."""
    _load_assignments_list(browser, port)

    # expand the assignment to show the notebooks
    _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list_placeholder")))
    rows = _expand(browser, "#nbgrader-abc101-Problem_Set_1", "Problem Set 1")
    rows.sort(key=_sort_rows)
    assert len(rows) == 3
    assert rows[1].find_element_by_class_name("item_name").text == "Problem 1"
    assert rows[2].find_element_by_class_name("item_name").text == "Problem 2"

    # click the "validate" button
    rows[2].find_element_by_css_selector(".item_status button").click()

    # wait for the modal dialog to appear
    _wait_for_modal(browser)

    # check that validation failed (the comment previously said "succeeded")
    browser.find_element_by_css_selector(".modal-dialog .validation-failed")

    # close the modal dialog
    _dismiss_modal(browser)
|
thandang/TComponent | refs/heads/master | TComponents/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/rpm.py | 34 | """SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should a
tar.gz consisting of the source file and a specfile.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpm.py 5023 2010/06/14 22:05:46 scons"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
    """Return the rpmbuild command line for *source* (a node or node list)."""
    # A list of sources means the tarball-with-specfile is the first entry.
    node = source[0] if SCons.Util.is_List(source) else source
    return "%s %s %s" % (env['RPM'], env['RPMFLAGS'], node.abspath)
def build_rpm(target, source, env):
    """Action function: run rpmbuild in a scratch build root and copy the
    resulting packages onto the requested target nodes.

    Raises SCons.Errors.BuildError when rpmbuild exits non-zero; returns
    the (zero) exit status otherwise.
    """
    # create a temporary rpm build root.
    tmpdir = os.path.join( os.path.dirname( target[0].abspath ), 'rpmtemp' )
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)

    # now create the mandatory rpm directory structure.
    for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
        os.makedirs( os.path.join( tmpdir, d ) )

    # set the topdir as an rpmflag.
    env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir )

    # now call rpmbuild to create the rpm package.
    handle = subprocess.Popen(get_cmd(source, env),
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              shell=True)
    # NOTE(review): on Python 3 handle.stdout yields bytes, which would
    # break the str regex below -- this module appears to target Python 2.
    output = handle.stdout.read()
    status = handle.wait()

    if status:
        raise SCons.Errors.BuildError( node=target[0],
                                       errstr=output,
                                       filename=str(target[0]) )
    else:
        # XXX: assume that LC_ALL=c is set while running rpmbuild
        # Parse "Wrote: <path>" lines to locate the produced packages, then
        # copy each next to its corresponding target node.
        output_files = re.compile( 'Wrote: (.*)' ).findall( output )
        for output, input in zip( output_files, target ):
            rpm_output = os.path.basename(output)
            expected = os.path.basename(input.get_path())
            assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
            shutil.copy( output, input.abspath )

    # cleanup before leaving.
    shutil.rmtree(tmpdir)

    return status
def string_rpm(target, source, env):
    """Return the build message: $RPMCOMSTR if set, else the raw command."""
    if 'RPMCOMSTR' in env:
        return env['RPMCOMSTR']
    return get_cmd(source, env)
# Action pairing build_rpm with string_rpm as its display function; wired
# in as the default $RPMCOM in generate() below.
rpmAction = SCons.Action.Action(build_rpm, string_rpm)

# Builder for .rpm targets; scans source directories so the tarball is
# rebuilt when its contents change.
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$RPMSUFFIX')
def generate(env):
    """Add Builders and construction variables for rpm to an Environment."""
    builders = env['BUILDERS']
    if 'Rpm' not in builders:
        builders['Rpm'] = RpmBuilder

    env.SetDefault(RPM = 'LC_ALL=c rpmbuild')
    env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta'))
    env.SetDefault(RPMCOM = rpmAction)
    env.SetDefault(RPMSUFFIX = '.rpm')
def exists(env):
    # rpm support is available whenever the rpmbuild executable is found.
    return env.Detect('rpmbuild')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
kernel64/AutobahnPython | refs/heads/master | autobahn/autobahn/compress_base.py | 46 | ###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
# Public API for ``from ... import *`` -- the compression parameter and
# negotiation base classes defined below.
__all__ = ["PerMessageCompressOffer",
           "PerMessageCompressOfferAccept",
           "PerMessageCompressResponse",
           "PerMessageCompressResponseAccept",
           "PerMessageCompress"]
class PerMessageCompressOffer:
    """Base class for WebSocket compression parameter client offers."""
class PerMessageCompressOfferAccept:
    """Base class for WebSocket compression parameter client offer accepts by the server."""
class PerMessageCompressResponse:
    """Base class for WebSocket compression parameter server responses."""
class PerMessageCompressResponseAccept:
    """Base class for WebSocket compression parameter server response accepts by client."""
class PerMessageCompress:
    """Base class for WebSocket compression negotiated parameters."""
|
hoangminhitvn/flask | refs/heads/master | flask/lib/python2.7/site-packages/jinja2/bccache.py | 256 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
the compiliation of all those templates slow down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import stat
import sys
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:
    def marshal_dump(code, f):
        # Real Python 2 file objects can be passed to marshal directly;
        # file-likes (e.g. BytesIO) need the string round-trip.
        if isinstance(f, file):
            marshal.dump(code, f)
        else:
            f.write(marshal.dumps(code))

    def marshal_load(f):
        if isinstance(f, file):
            return marshal.load(f)
        return marshal.loads(f.read())
# Cache format version; bump to invalidate bytecode written by older
# revisions of this module.
bc_version = 2

# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
    pickle.dumps(bc_version, 2) + \
    pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
    """Buckets are used to store the bytecode for one template.  It's created
    and initialized by the bytecode cache and passed to the loading functions.

    The buckets get an internal checksum from the cache assigned and use this
    to automatically reject outdated cache material.  Individual bytecode
    cache subclasses don't have to care about cache invalidation.
    """

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        """Resets the bucket (unloads the bytecode)."""
        self.code = None

    def load_bytecode(self, f):
        """Loads bytecode from a file or file like object."""
        # make sure the magic header is correct
        magic = f.read(len(bc_magic))
        if magic != bc_magic:
            self.reset()
            return
        # the source code of the file changed, we need to reload
        checksum = pickle.load(f)
        if self.checksum != checksum:
            self.reset()
            return
        # payload after magic + checksum is the marshalled code object
        self.code = marshal_load(f)

    def write_bytecode(self, f):
        """Dump the bytecode into the file or file like object passed."""
        if self.code is None:
            raise TypeError('can\'t write empty bucket')
        # mirror of load_bytecode: magic, pickled checksum, marshalled code
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        """Load bytecode from a string."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        """Return the bytecode as string."""
        out = BytesIO()
        self.write_bytecode(out)
        return out.getvalue()
class BytecodeCache(object):
    """To implement your own bytecode cache you have to subclass this class
    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
    these methods are passed a :class:`~jinja2.bccache.Bucket`.

    A very basic bytecode cache that saves the bytecode on the file system::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced version of a filesystem based bytecode cache is part of
    Jinja2.
    """

    def load_bytecode(self, bucket):
        """Subclasses have to override this method to load bytecode into a
        bucket.  If they are not able to find code in the cache for the
        bucket, it must not do anything.
        """
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Subclasses have to override this method to write the bytecode
        from a bucket back to the cache.  If it unable to do so it must not
        fail silently but raise an exception.
        """
        raise NotImplementedError()

    def clear(self):
        """Clears the cache.  This method is not used by Jinja2 but should be
        implemented to allow applications to clear the bytecode cache used
        by a particular environment.
        """

    def get_cache_key(self, name, filename=None):
        """Returns the unique hash key for this template name."""
        # the key mixes the template name with an optional '|'-prefixed
        # filename so templates with the same name but different files
        # hash differently
        hash = sha1(name.encode('utf-8'))
        if filename is not None:
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            hash.update(filename)
        return hash.hexdigest()

    def get_source_checksum(self, source):
        """Returns a checksum for the source."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template.  All arguments are
        mandatory but filename may be `None`.
        """
        key = self.get_cache_key(name, filename)
        checksum = self.get_source_checksum(source)
        bucket = Bucket(environment, key, checksum)
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected.  On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self):
        tmpdir = tempfile.gettempdir()

        # On windows the temporary directory is used specific unless
        # explicitly forced otherwise.  We can just use that.
        if os.name == 'nt':
            return tmpdir
        if not hasattr(os, 'getuid'):
            raise RuntimeError('Cannot determine safe temp directory.  You '
                               'need to explicitly provide one.')

        # per-user subdirectory created with owner-only permissions (0700)
        dirname = '_jinja2-cache-%d' % os.getuid()
        actual_dir = os.path.join(tmpdir, dirname)
        try:
            os.mkdir(actual_dir, stat.S_IRWXU)  # 0o700
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # reject a pre-existing path unless it is a directory we own with
        # exactly 0700 permissions (guards against symlink/squatting attacks
        # in the shared temp directory)
        actual_dir_stat = os.lstat(actual_dir)
        if actual_dir_stat.st_uid != os.getuid() \
           or not stat.S_ISDIR(actual_dir_stat.st_mode) \
           or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
            raise RuntimeError('Temporary directory \'%s\' has an incorrect '
                               'owner, permissions, or type.' % actual_dir)

        return actual_dir

    def _get_cache_filename(self, bucket):
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        f = open(self._get_cache_filename(bucket), 'wb')
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove
        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                # best effort: ignore entries that vanish concurrently
                pass
class MemcachedBytecodeCache(BytecodeCache):
    """This class implements a bytecode cache that uses a memcache cache for
    storing the information.  It does not enforce a specific memcache library
    (tummy's memcache or cmemcache) but will accept any class that provides
    the minimal interface required.

    Libraries compatible with this class:

    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
    -   `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
    -   `cmemcache <http://gijsbert.org/cmemcache/>`_

    (Unfortunately the django cache interface is not compatible because it
    does not support storing binary data, only unicode.  You can however pass
    the underlying cache client to the bytecode cache which is available
    as `django.core.cache.cache._client`.)

    The minimal interface for the client passed to the constructor is this:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The other arguments to the constructor are the prefix for all keys that
    is added before the actual cache key and the timeout for the bytecode in
    the cache system.  We recommend a high (or no) timeout.

    This bytecode cache does not support clearing of used items in the cache.
    The clear method is a no-operation function.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            # treat a memcache outage as a cache miss unless told otherwise
            if not self.ignore_memcache_errors:
                raise
            code = None
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
        # only pass a timeout argument when one was configured
        if self.timeout is not None:
            args += (self.timeout,)
        try:
            self.client.set(*args)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
|
shail2810/nova | refs/heads/master | nova/cmd/novncproxy.py | 38 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Websocket proxy that is compatible with OpenStack Nova
noVNC consoles. Leverages websockify.py by Joel Martin
"""
import sys
from oslo_config import cfg
from nova.cmd import baseproxy
from nova import config
# CLI options controlling where the noVNC proxy listens.
opts = [
    cfg.StrOpt('novncproxy_host',
               default='0.0.0.0',
               help='Host on which to listen for incoming requests'),
    cfg.IntOpt('novncproxy_port',
               default=6080,
               help='Port on which to listen for incoming requests'),
]

CONF = cfg.CONF
CONF.register_cli_opts(opts)
def main():
    """Entry point: parse config and start the noVNC websocket proxy."""
    # set default web flag option (directory serving the noVNC client files)
    CONF.set_default('web', '/usr/share/novnc')
    config.parse_args(sys.argv)
    baseproxy.proxy(
        host=CONF.novncproxy_host,
        port=CONF.novncproxy_port)
|
EliteTK/PyBot | refs/heads/master | Commands/urban/urban.py | 1 | ###########################################################################
## PyBot ##
## Copyright (C) 2015, Kyle Repinski ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
###########################################################################
import __main__, pybotutils, requests
# Command registration metadata consumed by the bot's plugin loader:
# trigger names, required access level, and plugin API version.
info = { "names" : [ "urban", "ud", "rurban" ], "access" : 0, "version" : 2 }
def command( command, message, user, channel ):
    """Look up *message* on Urban Dictionary ("urban"/"ud") or fetch a random
    entry ("rurban") and send the definition to *channel*.

    Returns True when a definition (or a clean "isn't defined" notice) was
    sent, False on any failure.
    """
    from urllib.parse import quote  # stdlib: safely embed user input in URLs
    try:
        if command != "rurban":
            # Quote the term so '&', '#', spaces etc. cannot corrupt the query.
            link = "http://urbandictionary.com/define.php?term=" + quote( message )
        else:
            link = "http://www.urbandictionary.com/random.php"
        res = requests.get( link )
        # Scrape the first definition and the page title (the looked-up word).
        definition = __main__.fixHTMLCharsAdvanced( pybotutils.strbetween( res.text, "<div class='meaning'>\n", "\n</div>" ) )
        word = __main__.fixHTMLCharsAdvanced( pybotutils.strbetween( res.text, "<title>Urban Dictionary: ", "</title>" ) )
        if definition != "" and word != "":
            toSend = word + ": " + definition
            if len( toSend ) >= 440: # This is roughly the longest message I've been able to send.
                # Too long to send whole: append a short link to the full
                # definition so readers can see the rest.
                shortLink = pybotutils.googlshort( "http://www.urbandictionary.com/define.php?term=" + quote( message ) )
                toCutOff = len( shortLink ) # Get the length of said link to make room for it
                toSend = toSend[0:(436-toCutOff)] # Using 436 here to allow room for "... " of course
                toSend = toSend.rpartition( " " )[0] # In order to make sure it doesn't cut off in the middle of a word
                toSend = toSend + "... " + shortLink # Finally finishing it off
            __main__.sendMessage( toSend, channel )
            return True
        # No definition scraped: either the term is genuinely undefined, or
        # the request failed / page layout changed.
        if "<i>" + message + "</i> isn't defined.<br/>Can you define it?" in res.text:
            __main__.sendMessage( message + " isn't defined.", channel )
            return True
        __main__.sendMessage( "There was a problem. Fix your shit.", channel )
        return False
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate. (Also removed an unreachable trailing `return False`.)
        return False
|
valentin-krasontovitsch/ansible | refs/heads/devel | lib/ansible/__init__.py | 301 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+ Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
#
# This is for backwards compat. Code should be ported to get these from
# ansible.release instead of from here.
from ansible.release import __version__, __author__
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.