| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
ekiourk/ansible-modules-core
|
refs/heads/devel
|
packaging/language/__init__.py
|
12133432
| |
ryfeus/lambda-packs
|
refs/heads/master
|
Shapely_numpy/source/numpy/distutils/lib2def.py
|
193
|
from __future__ import division, absolute_import, print_function
import re
import sys
import os
import subprocess
__doc__ = """This module generates a DEF file from the symbols in
an MSVC-compiled DLL import library. It correctly discriminates between
data and functions. The data is collected from the output of the program
nm(1).
Usage:
python lib2def.py [libname.lib] [output.def]
or
python lib2def.py [libname.lib] > output.def
libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
Author: Robert Kern <kernr@mail.ncifcrf.gov>
Last Update: April 30, 1999
"""
__version__ = '0.1a'
py_ver = "%d%d" % tuple(sys.version_info[:2])
DEFAULT_NM = 'nm -Cs'
DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
;DATA PRELOAD SINGLE
EXPORTS
""" % py_ver
# the header of the DEF file
FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
def parse_cmd():
"""Parses the command-line arguments.
libfile, deffile = parse_cmd()"""
if len(sys.argv) == 3:
if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
libfile, deffile = sys.argv[1:]
elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
deffile, libfile = sys.argv[1:]
else:
print("I'm assuming that your first argument is the library")
print("and the second is the DEF file.")
libfile, deffile = sys.argv[1:]
elif len(sys.argv) == 2:
if sys.argv[1][-4:] == '.def':
deffile = sys.argv[1]
libfile = 'python%s.lib' % py_ver
elif sys.argv[1][-4:] == '.lib':
deffile = None
libfile = sys.argv[1]
else:
libfile = 'python%s.lib' % py_ver
deffile = None
return libfile, deffile
def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
"""Returns the output of nm_cmd via a pipe.
nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
nm_output = f.stdout.read()
f.stdout.close()
return nm_output
def parse_nm(nm_output):
"""Returns a tuple of lists: dlist for the list of data
symbols and flist for the list of function symbols.
dlist, flist = parse_nm(nm_output)"""
data = DATA_RE.findall(nm_output)
func = FUNC_RE.findall(nm_output)
flist = []
for sym in data:
if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
flist.append(sym)
dlist = []
for sym in data:
if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):
dlist.append(sym)
dlist.sort()
flist.sort()
return dlist, flist
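# Illustrative sketch (not part of the original module): how the regexes and
# parse_nm() classify symbols. The nm output lines below are hypothetical.
def _parse_nm_example():
    sample = ("_imp__PyExc_ValueError in python%s.dll\n"
              "PyNumber_Add in python%s.dll\n"
              "_imp__PyNumber_Add in python%s.dll\n") % (py_ver, py_ver, py_ver)
    dlist, flist = parse_nm(sample)
    # dlist == ['PyExc_ValueError'] (exported data), flist == ['PyNumber_Add'] (function)
    return dlist, flist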
def output_def(dlist, flist, header, file = sys.stdout):
"""Outputs the final DEF file to a file defaulting to stdout.
output_def(dlist, flist, header, file = sys.stdout)"""
for data_sym in dlist:
header = header + '\t%s DATA\n' % data_sym
header = header + '\n' # blank line
for func_sym in flist:
header = header + '\t%s\n' % func_sym
file.write(header)
if __name__ == '__main__':
libfile, deffile = parse_cmd()
if deffile is None:
deffile = sys.stdout
else:
deffile = open(deffile, 'w')
nm_cmd = [str(DEFAULT_NM), str(libfile)]
nm_output = getnm(nm_cmd)
dlist, flist = parse_nm(nm_output)
output_def(dlist, flist, DEF_HEADER, deffile)
|
valrus/mingus3
|
refs/heads/python3
|
unittest/test_MusicXML.py
|
4
|
import sys
sys.path += ["../"]
import mingus.extra.MusicXML as mxl
import unittest
class test_MusicXML(unittest.TestCase):
def setUp(self):
pass
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test_MusicXML)
|
starqiu/PythonLearn
|
refs/heads/master
|
Django-1.6.5/tests/i18n/commands/__init__.py
|
116
|
from django.utils.translation import ugettext as _, ungettext
# Translators: This comment should be extracted
dummy1 = _("This is a translatable string.")
# This comment should not be extracted
dummy2 = _("This is another translatable string.")
# This file has a literal with plural forms. When processed first, makemessages
# shouldn't create a .po file with duplicate `Plural-Forms` headers
number = 3
dummy3 = ungettext("%(number)s Foo", "%(number)s Foos", number) % {'number': number}
|
ravibhure/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_trafficcloneprofile.py
|
15
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_trafficcloneprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of TrafficCloneProfile Avi RESTful Object
description:
- This module is used to configure TrafficCloneProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
clone_servers:
description:
- Field introduced in 17.1.1.
cloud_ref:
description:
- It is a reference to an object of type cloud.
- Field introduced in 17.1.1.
name:
description:
- Name for the traffic clone profile.
- Field introduced in 17.1.1.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved to clone destination.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the traffic clone profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create TrafficCloneProfile object
avi_trafficcloneprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_trafficcloneprofile
"""
RETURN = '''
obj:
description: TrafficCloneProfile (api/trafficcloneprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
clone_servers=dict(type='list',),
cloud_ref=dict(type='str',),
name=dict(type='str', required=True),
preserve_client_ip=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'trafficcloneprofile',
set([]))
if __name__ == '__main__':
main()
|
vienin/vlaunch
|
refs/heads/2.0
|
sdk/bindings/xpcom/python/xpcom/nsError.py
|
30
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is ActiveState Tool Corp.
# Portions created by ActiveState Tool Corp. are Copyright (C) 2000, 2001
# ActiveState Tool Corp. All Rights Reserved.
#
# Contributor(s):
# Mark Hammond <MarkH@ActiveState.com> (original author)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# Generated by h2py from nsError.h
# CMD line: h2py.py -i (nsresult) nsError.h
# XXX - NOTE - some manual code at the end, and all literals moved back to ints
NS_ERROR_MODULE_XPCOM = 1
NS_ERROR_MODULE_BASE = 2
NS_ERROR_MODULE_GFX = 3
NS_ERROR_MODULE_WIDGET = 4
NS_ERROR_MODULE_CALENDAR = 5
NS_ERROR_MODULE_NETWORK = 6
NS_ERROR_MODULE_PLUGINS = 7
NS_ERROR_MODULE_LAYOUT = 8
NS_ERROR_MODULE_HTMLPARSER = 9
NS_ERROR_MODULE_RDF = 10
NS_ERROR_MODULE_UCONV = 11
NS_ERROR_MODULE_REG = 12
NS_ERROR_MODULE_FILES = 13
NS_ERROR_MODULE_DOM = 14
NS_ERROR_MODULE_IMGLIB = 15
NS_ERROR_MODULE_MAILNEWS = 16
NS_ERROR_MODULE_EDITOR = 17
NS_ERROR_MODULE_XPCONNECT = 18
NS_ERROR_MODULE_PROFILE = 19
NS_ERROR_MODULE_LDAP = 20
NS_ERROR_MODULE_SECURITY = 21
NS_ERROR_MODULE_DOM_XPATH = 22
NS_ERROR_MODULE_DOM_RANGE = 23
NS_ERROR_MODULE_URILOADER = 24
NS_ERROR_MODULE_CONTENT = 25
NS_ERROR_MODULE_PYXPCOM = 26
NS_ERROR_MODULE_XSLT = 27
NS_ERROR_MODULE_IPC = 28
NS_ERROR_MODULE_SVG = 29
NS_ERROR_MODULE_GENERAL = 51
def NS_FAILED(_nsresult): return ((_nsresult) & -2147483648)
NS_ERROR_SEVERITY_SUCCESS = 0
NS_ERROR_SEVERITY_ERROR = 1
NS_ERROR_MODULE_BASE_OFFSET = 69
def NS_ERROR_GET_CODE(err): return ((err) & 65535)
def NS_ERROR_GET_MODULE(err): return (((((err) >> 16) - NS_ERROR_MODULE_BASE_OFFSET) & 8191))
def NS_ERROR_GET_SEVERITY(err): return (((err) >> 31) & 1)
NS_OK = 0
NS_COMFALSE = 1
NS_ERROR_BASE = ( -1041039360)
NS_ERROR_NOT_INITIALIZED = (NS_ERROR_BASE + 1)
NS_ERROR_ALREADY_INITIALIZED = (NS_ERROR_BASE + 2)
NS_ERROR_NOT_IMPLEMENTED = ( -2147467263)
NS_NOINTERFACE = ( -2147467262)
NS_ERROR_NO_INTERFACE = NS_NOINTERFACE
NS_ERROR_INVALID_POINTER = ( -2147467261)
NS_ERROR_NULL_POINTER = NS_ERROR_INVALID_POINTER
NS_ERROR_ABORT = ( -2147467260)
NS_ERROR_FAILURE = ( -2147467259)
NS_ERROR_UNEXPECTED = ( -2147418113)
NS_ERROR_OUT_OF_MEMORY = ( -2147024882)
NS_ERROR_ILLEGAL_VALUE = ( -2147024809)
NS_ERROR_INVALID_ARG = NS_ERROR_ILLEGAL_VALUE
NS_ERROR_NO_AGGREGATION = ( -2147221232)
NS_ERROR_NOT_AVAILABLE = ( -2147221231)
NS_ERROR_FACTORY_NOT_REGISTERED = ( -2147221164)
NS_ERROR_FACTORY_REGISTER_AGAIN = ( -2147221163)
NS_ERROR_FACTORY_NOT_LOADED = ( -2147221000)
NS_ERROR_FACTORY_NO_SIGNATURE_SUPPORT = \
(NS_ERROR_BASE + 257)
NS_ERROR_FACTORY_EXISTS = (NS_ERROR_BASE + 256)
NS_ERROR_PROXY_INVALID_IN_PARAMETER = ( -2147418096)
NS_ERROR_PROXY_INVALID_OUT_PARAMETER = ( -2147418095)
##### END OF GENERATED CODE
#####
def NS_ERROR_GENERATE_FAILURE(module,code):
# slightly optimized, and avoids 2.3->2.4 long/int changes
# return (NS_ERROR_SEVERITY_ERROR<<31) | ((module+NS_ERROR_MODULE_BASE_OFFSET)<<16) | (code)
return -2147483648 | ((module+NS_ERROR_MODULE_BASE_OFFSET)<<16) | (code)
def NS_ERROR_GENERATE_SUCCESS(module,code):
#return (NS_ERROR_SEVERITY_SUCCESS<<31) | ((module+NS_ERROR_MODULE_BASE_OFFSET)<<16) | (code)
return ((module+NS_ERROR_MODULE_BASE_OFFSET)<<16) | (code)
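# Illustrative check (not part of the generated file): a failure code built with
# the helper above round-trips through the accessor helpers defined earlier.
def _nserror_example():
    err = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 18)
    # err equals NS_ERROR_FILE_NOT_FOUND (defined below) and is negative, so:
    assert NS_FAILED(err)
    assert NS_ERROR_GET_MODULE(err) == NS_ERROR_MODULE_FILES
    assert NS_ERROR_GET_CODE(err) == 18
    return err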
NS_BASE_STREAM_CLOSED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_BASE, 2)
NS_BASE_STREAM_OSERROR = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_BASE, 3)
NS_BASE_STREAM_ILLEGAL_ARGS = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_BASE, 4)
NS_BASE_STREAM_NO_CONVERTER = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_BASE, 5)
NS_BASE_STREAM_BAD_CONVERSION = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_BASE, 6)
NS_BASE_STREAM_WOULD_BLOCK = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_BASE, 7)
NS_ERROR_FILE_UNRECOGNIZED_PATH = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 1)
NS_ERROR_FILE_UNRESOLVABLE_SYMLINK = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 2)
NS_ERROR_FILE_EXECUTION_FAILED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 3)
NS_ERROR_FILE_UNKNOWN_TYPE = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 4)
NS_ERROR_FILE_DESTINATION_NOT_DIR = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 5)
NS_ERROR_FILE_TARGET_DOES_NOT_EXIST = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 6)
NS_ERROR_FILE_COPY_OR_MOVE_FAILED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 7)
NS_ERROR_FILE_ALREADY_EXISTS = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 8)
NS_ERROR_FILE_INVALID_PATH = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 9)
NS_ERROR_FILE_DISK_FULL = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 10)
NS_ERROR_FILE_CORRUPTED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 11)
NS_ERROR_FILE_NOT_DIRECTORY = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 12)
NS_ERROR_FILE_IS_DIRECTORY = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 13)
NS_ERROR_FILE_IS_LOCKED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 14)
NS_ERROR_FILE_TOO_BIG = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 15)
NS_ERROR_FILE_NO_DEVICE_SPACE = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 16)
NS_ERROR_FILE_NAME_TOO_LONG = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 17)
NS_ERROR_FILE_NOT_FOUND = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 18)
NS_ERROR_FILE_READ_ONLY = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 19)
NS_ERROR_FILE_DIR_NOT_EMPTY = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 20)
NS_ERROR_FILE_ACCESS_DENIED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_FILES, 21)
## from netCore.h
NS_ERROR_ALREADY_CONNECTED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 11)
NS_ERROR_NOT_CONNECTED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 12)
NS_ERROR_IN_PROGRESS = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 15)
NS_ERROR_OFFLINE = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 16)
## from nsISocketTransportService.idl
NS_ERROR_CONNECTION_REFUSED = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 13)
NS_ERROR_NET_TIMEOUT = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 14)
# Status nsresult codes: used with nsIProgressEventSink::OnStatus
NS_NET_STATUS_RESOLVING_HOST = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 3)
NS_NET_STATUS_CONNECTED_TO = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 4)
NS_NET_STATUS_SENDING_TO = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 5)
NS_NET_STATUS_RECEIVING_FROM = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 6)
NS_NET_STATUS_CONNECTING_TO = NS_ERROR_GENERATE_FAILURE(NS_ERROR_MODULE_NETWORK, 7)
|
equalitie/BotHound
|
refs/heads/master
|
src/training_set.py
|
2
|
"""
This class is used to hold the training set as well as the test set
AUTHORS:
- vmon (vmon@equaliti.e) 2013: Moved from train2ban
"""
import numpy as np
from operator import itemgetter
import pdb
import joblib  # assumed dependency (not in the original imports): needed by TrainingSet.fromPickle below
class TrainingSet:
"""
Each TrainingSet consists of data, target and ip_index. In particular,
you shouldn't add an IP to the data without also adding it to the index,
so it made sense to make a class to police that
"""
BAD_TARGET = 1
GOOD_TARGET = 0
MAX_FEATURE_INEDX = 10 #TODO: This feels embarrassingly static but it will do for now
#improving readability of normalisation
NORMALISATION_TYPE = 0
SAMPLE_MEAN = 1
SAMPLE_STD = 2
def __init__(self):
"""
Initializes the empty lists
"""
self._ip_feature_array = np.array([])
self._ip_index = [] #this keeps track of which sample row belongs to
#which IP
self._sample_index = {} #this takes an ip and returns its place in
#the sample list; of course I could use find on
#_ip_index but that's no longer O(1). On the
#other hand I could decide bad from good during
#the add-feature process but I don't want to limit
#the user.
self._target = np.array([])
self._normalisation_function = None
self._normalisation_data = None
@classmethod
def _construct_training_set(cls, ip_feature_array, target, ip_index, sample_index, normalisation_function, normalisation_data):
"""
Semi-private constructor, not meant for public use, which
initializes the object based on lists that are already initiated and
consistent
"""
self_inst = cls()
self_inst._ip_feature_array = ip_feature_array
self_inst._target = target
#indexes
self_inst._ip_index = ip_index
self_inst._sample_index = sample_index
#normalisation
self_inst._normalisation_function = normalisation_function
self_inst._normalisation_data = normalisation_data
return self_inst
def __len__(self):
"""
Defines what is commonly understood as the size of a training set
"""
#sanity check
assert(self._ip_feature_array.shape[0] == len(self._ip_index))
return self._ip_feature_array.shape[0]
def normalise_sparse(self, test_array=None):
"""
Normalises by scaling each record to a unit feature vector
INPUT:
test_array: the test array to be normalised, if None, then
normalise the self._ip_feature_array
"""
#it is a pity that numpy doesn't have something symmetrical to column-wise
#norm division, or at least I don't know of it
if test_array is None:
array_2b_normal = self._ip_feature_array
self._normalisation_function = self.normalise_sparse
else:
array_2b_normal = test_array
#if the array is empty then there is nothing to normalise
if array_2b_normal.size == 0:
return array_2b_normal
array_2b_normal = array_2b_normal / (np.repeat(np.apply_along_axis(np.linalg.norm, 1 , array_2b_normal), array_2b_normal.shape[1])).reshape(array_2b_normal.shape)
self._normalisation_data = ['sparse'] #no more info needed
#write the result back in place when we normalised our own array
if test_array is None:
self._ip_feature_array = array_2b_normal
else:
return array_2b_normal
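# Illustrative note (not in the original source): normalise_sparse() scales each
# row to unit Euclidean norm. For a hypothetical array [[3., 4.], [0., 2.]] the
# row norms are [5., 2.], so the normalised array is [[0.6, 0.8], [0., 1.]].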
def normalise_individual(self, test_array = None, redundant_feature_reduction = True):
"""
Normalises based on standardising the sample
INPUT: test_array: the test array to be normalised, if None, then
normalise the self._ip_feature_array
"""
#If we don't have normalisation data on file we have
#to generate it and store it
if test_array is None:
self._normalisation_function = self.normalise_individual
array_2b_normal = self._ip_feature_array
#We need to remember these data to use during prediction
#to keep uniformity between normalisation strategy we
#store std and mean in a list
self._normalisation_data = [ \
'individual', \
self._ip_feature_array.mean(axis=0), \
self._ip_feature_array.std(axis=0)]
else:
array_2b_normal = test_array
#if the array is empty then there is nothing to normalise
if array_2b_normal.size == 0:
return array_2b_normal
#DON'T DO THAT CARELESSLY
#because during prediction you need to kick them out and you might
#not have the info to do that. It is OK for testing but not all the time.
#we kick out features which are the same for every
#entry and hence have no effect on the training
if (redundant_feature_reduction):
dimension_reducer = [cur_feature_std != 0 for cur_feature_std in self._normalisation_data[self.SAMPLE_STD]]
else:
#dimension_reducer become only a copier
dimension_reducer = [True for cur_feature_std in self._normalisation_data[self.SAMPLE_STD]]
reduced_std = self._normalisation_data[self.SAMPLE_STD][np.where(dimension_reducer)]
reduced_mean = self._normalisation_data[self.SAMPLE_MEAN][np.where(dimension_reducer)]
array_2b_normal = array_2b_normal[:,[red_plc[0] for red_plc in enumerate(dimension_reducer) if red_plc[1]]]
array_2b_normal = (array_2b_normal - reduced_mean)/reduced_std
#write the result back in place when we normalised our own array
if test_array is None:
self._ip_feature_array = array_2b_normal
else:
return array_2b_normal
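# Illustrative note (not in the original source): normalise_individual()
# standardises each column to zero mean and unit std. For a hypothetical array
# [[1., 10.], [3., 10.]] the column means are [2., 10.] and stds are [1., 0.];
# with redundant_feature_reduction=True the constant second column (std 0) is
# dropped and the result is [[-1.], [1.]].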
@classmethod
def fromPickle(cls, filename):
classifier = joblib.load(filename)
return cls(classifier)
def add_ip(self, new_ip_session, ip_features):
"""
Insert the ip in the index as well as in the data set and
the ip dict.
IP repetition raises an error.
(Why? On one hand it could be that an IP appears in two logs; on the
other hand its behaviour might be different in the two logs, but in general we
do not consider that, because it might be that an IP is used by both a bot and a
user while we mark the IP either good or bad. The best way would be to have a
way of updating the features with new values. That is necessary when
we are live and we don't want to compute all values from the beginning,
but for now I just ignore the second repetition.)
INPUT:
new_ip: string rep of the ip to be added
ip_features: a list of features corresponding to new_ip
"""
if new_ip_session in self._sample_index:
raise ValueError, "The IP dict has an IP stored already in the trainer set"
#This way we let some features not be computed and not be part of
#model fitting. (TODO: is it worth it?)
#having this loop only makes sense when we are cherry-picking features;
#if we are using the whole feature set then you can just copy the list or
#something. So, I'm doing it this way till we implement the cherry-picking
#mechanism
if (len(self._ip_feature_array) == 0):
self._ip_feature_array = np.array([[0]*(self.MAX_FEATURE_INEDX)])
else:
self._ip_feature_array = np.append(self._ip_feature_array,[[0]*(self.MAX_FEATURE_INEDX)],axis=0)
for cur_feature in ip_features:
self._ip_feature_array[-1][cur_feature-1] = ip_features[cur_feature]
#something like this is more desirable for the sake of speed
#but that need changing the feature gathering TODO?
#np.append(self._ip_feature_array,ip_features[1:], axis=0)
#turned out doesn't work because features are gathered in dic of
#dics so the first compromise is
# self._ip_feature_array = len(self._ip_feature_array) and \
# np.append(self._ip_feature_array,[map(itemgetter(1),sorted(ip_features.items(), key = itemgetter(0)))], axis=0) or \
# np.array([map(itemgetter(1),sorted(ip_features.items(), key = itemgetter(0)))])
#this approach doesn't work because some features can't be computed
#and then we get dim error which is legit
#make it two dimensional
#if (self._ip_feature_array.ndim == 1): self._ip_feature_array = self._ip_feature_array.reshape(1,self._ip_feature_array.shape[0])
self._sample_index[new_ip_session] = len(self._ip_index)
self._ip_index.append(new_ip_session)
def dump_data(self):
"""
Just empties the whole training set and starts afresh
"""
self._ip_feature_array = np.array([])
self._ip_index = []
self._sample_index = {} #reset to the same type as in __init__
self._target = np.array([])
#forget about normalisation
self._normalisation_function = None
self._normalisation_data = None
def initiate_target(self):
"""
Indicates that we are done with adding ips and we are ready to set
targets. This will set up a target vector as long as _ip_feature_array;
if the target is already initialized it is recreated at the
desired length
"""
self._target = np.repeat(self.GOOD_TARGET,self._ip_feature_array.shape[0])
def mark_as_bad(self, bad_ip):
"""
Searches for the ip and sets its target as bad. _target has to be
initialized in advance
"""
cur_session = 0
while((bad_ip, cur_session) in self._sample_index) :
self._target[self._sample_index[(bad_ip, cur_session)]] = self.BAD_TARGET
cur_session += 1
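# Illustrative sketch (not in the original source) of the intended workflow,
# with hypothetical (ip, session) keys and 1-indexed feature dicts:
#
#   ts = TrainingSet()
#   ts.add_ip(("10.0.0.1", 0), {1: 3.2, 2: 0.5})
#   ts.add_ip(("10.0.0.2", 0), {1: 1.1, 2: 7.0})
#   ts.initiate_target()              # every sample starts as GOOD_TARGET
#   ts.mark_as_bad("10.0.0.2")        # flips every ("10.0.0.2", n) session to BAD_TARGET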
def no_culprit(self):
"""
return True if no bad target is set; such a set
shouldn't go to training
"""
return sum(self._target) == self.GOOD_TARGET * len(self._target)
def precook_to_predict(self, ip_feature_db):
ip_set = TrainingSet._construct_training_set( \
ip_feature_array = np.array([]), \
target = np.array([]), \
ip_index = [], \
sample_index = {}, \
normalisation_function = self._normalisation_function, \
normalisation_data = self._normalisation_data)
for cur_ip in ip_feature_db:
ip_set.add_ip(cur_ip, ip_feature_db[cur_ip])
ip_set._ip_feature_array = ip_set._normalisation_function(test_array = ip_set._ip_feature_array)
return ip_set
def get_training_subset(self, case_selector = None, feature_selector=None):
"""
This helps to get a subset of the data to be used for training, in the
form of a new training set.
WARNING: feature_selector and cutting features DO NOT work for now
because it messes up normalisation!!!! (TODO)
INPUT:
case_selector: a list of boolean elements, one per
row of the training set (otherwise it
raises an exception); True means consider the
record in training. None means everything
feature_selector: a list of boolean elements, one per
column of the training set (otherwise it
raises an exception); True means consider the
feature in training. None means everything
"""
#if _ip_feature_array is empty there is nothing to do;
#we may as well raise an exception
if self._ip_feature_array.size == 0:
raise ValueError("Not able to subset an empty set")
if case_selector is None:
case_selector = np.repeat(True, self._ip_feature_array.shape[0])
if feature_selector is None:
feature_selector = np.repeat(True, self._ip_feature_array.shape[1])
if ((len(case_selector) != self._ip_feature_array.shape[0]) or \
(len(feature_selector) != self._ip_feature_array.shape[1])):
raise ValueError, "The dimension of subset selector does not match the dimension of the trainig set"
map(bool, list(case_selector)) #getting of rid ambiguity
map(bool, list(feature_selector))
subset_selector = (np.repeat(feature_selector, len(case_selector))*np.repeat(case_selector, len(feature_selector))).reshape(self._ip_feature_array.shape)
training_feature_subset = self._ip_feature_array[np.where(subset_selector)].reshape(sum(list(case_selector)),sum(list(feature_selector)))
subtarget = self._target[np.where(case_selector)]
#The only problem is that the indexing to ips isn't valid anymore
#We need to build an index translation table
i = 0 #fullset index
j = 0 #subset index
subset_sample_index = {}
while(i < len(self._ip_index)):
if (case_selector[i]):
subset_sample_index[self._ip_index[i]]=j
j+=1
i+=1
#This might be severely inefficient
subset_ip_index = list(np.array(self._ip_index)[np.where(case_selector)])
return TrainingSet._construct_training_set(training_feature_subset, \
subtarget, \
subset_ip_index, \
subset_sample_index, \
self._normalisation_function, \
self._normalisation_data)
|
PathoScope/PathoScope
|
refs/heads/master
|
pathoscope/pathoreport/PathoReportA.py
|
1
|
#!/usr/bin/python
# Initial Author: Solaiappan Manimaran
# PathoMap performs the alignment through wrappers for each type of aligners.
# Pathoscope - Predicts strains of genomes in Nextgen seq alignment file (sam/bl8)
# Copyright (C) 2013 Johnson Lab - Boston University and Crandall Lab George Washington University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, re, csv, sys
from pathoscope.utils import samUtils
from pathoscope.utils import seqParse
from pathoscope.utils import pathoUtilsA
from pathoscope.pathodb import dbUtils
from pathoscope.pathoreport import xmlReport
pathoscopedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0,pathoscopedir)
# ===========================================================
class PathoReportOptions:
MIN_CONTIG_LEN = 101
verbose = False
contigFlag = False
outDir = "."
samtoolsHome = None
samFile = None
mysqlConf = None
minContigLen = MIN_CONTIG_LEN
noCutOff = False
def __init__(self, samFile):
self.samFile = samFile
# Main entry function to PathoMap that does all the processing
def processPathoReport(pathoReportOptions):
h_gisPerTi = {}
h_annoT = {}
h_ti_contig = {}
if pathoReportOptions.samFile is not None:
(_, tail) = os.path.split(pathoReportOptions.samFile)
(base, _) = os.path.splitext(tail)
tsvFile = base+'.tsv'
outTsv = pathoReportOptions.outDir + os.sep + tsvFile
xmlFile = base+'.xml'
outXml = pathoReportOptions.outDir + os.sep + xmlFile
run_param_str =''
(h_refRead, h_refScore, reads, h_readSequence, h_gisPerTi, h_tiRef, U, NU, genomes, pi, initPi) = \
samUtils.findAlignmentReadPercentage(pathoReportOptions.samFile)
(bestHitFinalReads, bestHitFinal, level1Final, level2Final) = \
computeBestHit(U, NU, genomes, reads)
for j in NU:
NU[j][2] = NU[j][1]
(bestHitInitialReads, bestHitInitial, level1Initial, level2Initial) = \
computeBestHit(U, NU, genomes, reads)
header = ['Genome', 'MapQ Guess', 'MapQ Best Hit', 'MapQ Best Hit Read Numbers', \
'MapQ High Confidence Hits', 'MapQ Low Confidence Hits', 'Alignment Guess', \
'Alignment Best Hit', 'Alignment Best Hit Read Numbers', \
'Alignment High Confidence Hits', 'Alignment Low Confidence Hits']
nR = len(reads)
nG = len(h_refRead)
(_, _, _, _, _, _, _, _, _, _, _) = write_tsv_report(outTsv, nR, nG,
pi, genomes, initPi, bestHitInitial, bestHitInitialReads, bestHitFinal,
bestHitFinalReads, level1Initial, level2Initial, level1Final, level2Final, header,
pathoReportOptions.noCutOff)
if pathoReportOptions.contigFlag:
bamFile = samUtils.sam2bam(pathoReportOptions.samFile, pathoReportOptions.samtoolsHome)
#3)for each genome, get covered fragment and also make sure that each fragment has a high quality
refConsFq = samUtils.samtools_consensus(bamFile, pathoReportOptions.samtoolsHome)
#3.1) get delimiter and send a query to mysql to retrieve annotation
(h_annoT, h_ti_contig) =get_genome_annotation_in_mysql(\
refConsFq, pathoReportOptions.minContigLen,
pathoReportOptions.mysqlConf, h_annoT, h_ti_contig)
xmlReport.writePathoXML(run_param_str, outXml, h_annoT, h_ti_contig,
h_refRead, h_refScore, h_gisPerTi, h_tiRef, reads, h_readSequence,
pathoReportOptions.samFile, pathoReportOptions.mysqlConf)
else:
h_annoT = simple_genome_annotation(h_gisPerTi, pathoReportOptions.mysqlConf, h_annoT)
xmlReport.writePathoXML(run_param_str, outXml, h_annoT, h_ti_contig,
h_refRead, h_refScore, h_gisPerTi, h_tiRef, reads, h_readSequence,
pathoReportOptions.samFile, pathoReportOptions.mysqlConf)
#===============================================
'''
objective: this is a core def in retrieve_genome_annotation_from_sam. Having the template (consensus fastq) reference covered by pileup reads, we want to retrieve gene annotations from mysql
'''
def get_genome_annotation_in_mysql(\
refConsFq, minContigLen, MySqlConf, h_annoT, h_ti_contig):
START,END = range(2)
SUBGI,GENE,LOCS_TAG,PROID,STBP,EDBP = range(6)
NAs = 'X'
useMysql=True
con = None
#(hostname,port,user,passwd,defaultDb)=range(5)
(_,_,_,passwd,_)=range(5)
if MySqlConf[passwd]==NAs: #then, we do not use mysql
useMysql=False
if useMysql:
con = dbUtils.init_mysql_innocentive(MySqlConf,0)
fp = open(refConsFq,'r')
#debugCnt = 0 #debug
for r in seqParse.parse(fp,'fastq'): # for each covered genome
covRange = selectConsensusContigs(r,minContigLen,-1) #disable checking seq complexity of contig
if not covRange:
continue
C = len(covRange)
#extract ti and gi
refName = r.id
mObj=re.search(r'ti\|(\d+)\|org\|([^|]+)\|gi\|(\d+)\|',r.id)
if mObj:
ti = mObj.group(1)
gi = mObj.group(3)
else:
mObj=re.search(r'ti\|(\d+)\|gi\|(\d+)\|',r.id)
if mObj and mObj.group(1)!="-1":
ti = mObj.group(1)
gi = mObj.group(2)
else:
mObj=re.search(r'gi\|(\d+)\|',r.id)
if mObj:
gi = mObj.group(1)
if not h_ti_contig.get(ti,[]):
h_ti_contig[ti]=[]
for c in range(C):
#contig = r[covRange[c][0]:covRange[c][1]+1]
contigSeq = str(r.seq[covRange[c][0]:covRange[c][1]+1])
#cqual = contig.letter_annotations["phred_quality"]
#cLen = len(cqual)
cLen = covRange[c][1]-covRange[c][0]+1
#cqual_ave = 1.*sum(cqual)/cLen
#h_ti_contig[ti].append([refName,cLen,str(contig.seq)])
h_ti_contig[ti].append([refName,cLen,contigSeq])
if con:
mysql_sel_cmd = 'select sub_gi, gene, locus_tag, protein_id, stbp, edbp from giDelimT where gi = %s' % gi
cur = con.cursor()
cur.execute(mysql_sel_cmd)
entr=cur.fetchall()
if entr:
#subgi2query=[]
#subgiAnnot=[]
#print r.id #debug
#print covRange #debug
for j in entr: #select which subgi sits within the covered genomic regions
aStbp=int(j[STBP]); aEdbp=int(j[EDBP])
A=aEdbp-aStbp+1
notCoveredA=A
minCoveredA2 = notCoveredA - 100
reportA=False
for i in range(C):
#print '[subgi%s:%d - %d][cov:%d-%d]' % (gi,aStbp,aEdbp,covRange[START][i],covRange[END][i])
notCoveredA -= pathoUtilsA.segments_intersect(aStbp,aEdbp,covRange[i][START],covRange[i][END])
if notCoveredA<minCoveredA2:
reportA=True
break
if reportA:
selCmd = 'select ref_name, product from giAnnoT where gi = %s' % j[SUBGI]
cur = con.cursor()
cur.execute(selCmd)
entr2 = cur.fetchone()
ref_name=NAs; product=NAs
if entr2:
ref_name = entr2[0]; product = entr2[1]
if h_annoT.get(ti,-1)==-1:
h_annoT[ti]=[]
h_annoT[ti].append([j[SUBGI],j[GENE],j[LOCS_TAG],j[PROID],ref_name,product])
fp.close()
if con:
dbUtils.mysql_close(con)
return h_annoT,h_ti_contig
#===============================================
'''
objective: simplified variant of the annotation step above: for every gi grouped under each ti in h_gisPerTi, retrieve gene annotations from mysql, without the consensus-contig coverage check
'''
def simple_genome_annotation(h_gisPerTi, mySqlConf, h_annoT):
#SUBGI,GENE,LOCS_TAG,PROID,STBP,EDBP = range(6)
SUBGI,GENE,LOCS_TAG,PROID = range(4)
NAs = 'X'
useMysql=True
con = None
#(hostname,port,user,passwd,defaultDb)=range(5)
(_,_,_,passwd,_)=range(5)
if mySqlConf[passwd]==NAs: #then, we do not use mysql
useMysql=False
if useMysql:
con = dbUtils.init_mysql_innocentive(mySqlConf,0)
if con:
for ti in h_gisPerTi:
gis = h_gisPerTi[ti]
for gi in gis:
mysql_sel_cmd = 'select sub_gi, gene, locus_tag, protein_id, stbp, edbp from giDelimT where gi = %s' % gi
cur = con.cursor()
cur.execute(mysql_sel_cmd)
entr=cur.fetchall()
if entr:
for j in entr: #select which subgi sits within the covered genomic regions
selCmd = 'select ref_name, product from giAnnoT where gi = %s' % j[SUBGI]
cur = con.cursor()
cur.execute(selCmd)
entr2 = cur.fetchone()
ref_name=NAs; product=NAs
if entr2:
ref_name = entr2[0]; product = entr2[1]
if h_annoT.get(ti,-1)==-1:
h_annoT[ti]=[]
h_annoT[ti].append([j[SUBGI],j[GENE],j[LOCS_TAG],j[PROID],ref_name,product])
if con:
dbUtils.mysql_close(con)
return h_annoT
#===========================
def selectConsensusContigs(fqRec,minContigLen,kolCompxCutoff):
covRanges2=[]
covRanges=[]
rSeq = str(fqRec.seq)
#print rSeq #debug
tmp=[match.start()+1 for match in re.finditer('^[^nN]|[nN][^nN]',rSeq)]
if not tmp:
return covRanges2
if tmp[0]==1:
tmp[0]=0
covRanges.append(tmp)
tmp=[match.start() for match in re.finditer('[^nN][nN]|[^nN]$',rSeq)]
if not tmp:
return covRanges2
covRanges.append(tmp)
C = len(covRanges[0])
for c in range(C):
if (covRanges[1][c]-covRanges[0][c]+1) <= minContigLen:
continue
if kolCompxCutoff>0:
subSeq=rSeq[covRanges[0][c]:covRanges[1][c]+1]
kx = pathoUtilsA.kolmogorov(subSeq)
if kx > kolCompxCutoff:
covRanges2.append([covRanges[0][c],covRanges[1][c]])
else:
covRanges2.append([covRanges[0][c],covRanges[1][c]])
del covRanges
return covRanges2
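# Illustrative note (not in the original source): for a consensus sequence
# "NNACGTNNNGGNN" with a hypothetical minContigLen=1 and kolCompxCutoff=-1,
# selectConsensusContigs() returns [[2, 5], [9, 10]]: the 0-based start/end of
# each run of non-N bases whose length exceeds minContigLen.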
#===========================
'''
Computes the best hit read metrics
'''
def computeBestHit(U, NU, genomes, read):
bestHitReads=[0.0 for _ in genomes]
level1Reads=[0.0 for _ in genomes]
level2Reads=[0.0 for _ in genomes]
for i in U:
bestHitReads[U[i][0]]+=1
level1Reads[U[i][0]] += 1
for j in NU:
z = NU[j]
ind = z[0]
xnorm = z[2]
bestGenome = max(xnorm)
numBestGenome = 0
for i in range(len(xnorm)):
if (xnorm[i] == bestGenome):
numBestGenome += 1
if (numBestGenome == 0):
numBestGenome = 1
for i in range(len(xnorm)):
if (xnorm[i] == bestGenome):
bestHitReads[ind[i]] += 1.0/numBestGenome
if (xnorm[i] >= 0.5):
level1Reads[ind[i]] += 1
elif (xnorm[i] >= 0.01):
level2Reads[ind[i]] += 1
nG = len(genomes)
nR = len(read)
bestHit = [bestHitReads[k]/nR for k in range(nG)]
level1 = [level1Reads[k]/nR for k in range(nG)]
level2 = [level2Reads[k]/nR for k in range(nG)]
return bestHitReads, bestHit, level1, level2
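# Illustrative note (not in the original source): each unique read adds a full
# count to its genome, while a non-unique read is split across the genomes that
# tie for its best normalised score. E.g. with two genomes and one non-unique
# read whose xnorm is [0.5, 0.5], each genome gets 0.5 in bestHitReads and a
# full count in level1Reads (since 0.5 >= 0.5).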
# ===========================================================
# Function to create the tsv file report
def write_tsv_report(finalReport, nR, nG, pi, genomes, initPi, bestHitInitial, bestHitInitialReads,
bestHitFinal, bestHitFinalReads, level1Initial, level2Initial, level1Final, level2Final,
header, noCutOff):
with open(finalReport, 'wb') as oFp:
tmp = zip(pi,genomes, initPi, bestHitInitial, bestHitInitialReads, bestHitFinal,
bestHitFinalReads, level1Initial, level2Initial, level1Final, level2Final)
tmp = sorted(tmp,reverse=True) # Sorting based on Final Guess
x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11 = zip(*tmp)
for i in range(len(x10)):
if (not(noCutOff) and x1[i] < 0.01 and x10[i] <= 0 and x11[i] <= 0):
break
if i == (len(x10)-1):
i += 1
tmp = zip (x2[:i], x1[:i], x6[:i], x7[:i], x10[:i], x11[:i], x3[:i], x4[:i], x5[:i], x8[:i], x9[:i]) # Changing the column order here
csv_writer = csv.writer(oFp, delimiter='\t')
header1 = ['Total Number of Aligned Reads:', nR, 'Total Number of Mapped Genomes:', nG]
csv_writer.writerow(header1)
csv_writer.writerow(header)
csv_writer.writerows(tmp)
return (x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11)
|
hmoco/osf.io
|
refs/heads/develop
|
api/base/content_negotiation.py
|
69
|
from rest_framework.negotiation import DefaultContentNegotiation
class JSONAPIContentNegotiation(DefaultContentNegotiation):
def select_renderer(self, request, renderers, format_suffix=None):
"""
Returns appropriate tuple (renderer, media type).
If 'application/json' is among the acceptable media types, use the first renderer in
DEFAULT_RENDERER_CLASSES, which should be 'api.base.renderers.JSONAPIRenderer'
(media type "application/vnd.api+json"). Otherwise, use the default select_renderer.
"""
accepts = self.get_accept_list(request)
if 'application/json' in accepts:
return (renderers[0], renderers[0].media_type)
return super(JSONAPIContentNegotiation, self).select_renderer(request, renderers)
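# Illustrative note (not in the original file): a request carrying
# "Accept: application/json" is answered with the first configured renderer
# (expected to be api.base.renderers.JSONAPIRenderer, media type
# "application/vnd.api+json"); any other Accept value falls through to DRF's
# default negotiation via the super() call above.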
|
40223227/2015cdbg6w0622-40223227-
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/multiprocessing/dummy/__init__.py
|
693
|
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
#brython fix me
#def Array(typecode, sequence, lock=True):
# return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
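# Illustrative usage (not in the original module), assuming the standard
# multiprocessing.dummy import path:
#
#   from multiprocessing.dummy import Pool
#   pool = Pool(4)
#   squares = pool.map(lambda x: x * x, range(10))   # threads: no pickling, so lambdas work
#   pool.close(); pool.join()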
JoinableQueue = Queue
|
jordiclariana/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/xbps.py
|
37
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 Dino Occhialini <dino.occhialini@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: xbps
short_description: Manage packages with XBPS
description:
- Manage packages with the XBPS package manager.
author:
- "Dino Occhialini (@dinoocch)"
- "Michael Aldridge (@the-maldridge)"
version_added: "2.3"
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: no
choices: ["yes", "no"]
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: yes
choices: ["yes", "no"]
upgrade:
description:
- Whether or not to upgrade whole system
required: false
default: no
choices: ["yes", "no"]
'''
EXAMPLES = '''
# Install package foo
- xbps: name=foo state=present
# Upgrade package foo
- xbps: name=foo state=latest update_cache=yes
# Remove packages foo and bar
- xbps: name=foo,bar state=absent
# Recursively remove package foo
- xbps: name=foo state=absent recurse=yes
# Update package cache
- xbps: update_cache=yes
# Upgrade packages
- xbps: upgrade=yes
'''
RETURN = '''
msg:
description: Message about results
returned: success
type: string
sample: "System Upgraded"
packages:
description: Packages that are affected/would be affected
type: list
sample: ["ansible"]
'''
import os
from ansible.module_utils.basic import AnsibleModule
def is_installed(xbps_output):
"""Returns package install state"""
return bool(len(xbps_output))
def query_package(module, xbps_path, name, state="present"):
"""Returns Package info"""
if state == "present":
lcmd = "%s %s" % (xbps_path['query'], name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if not is_installed(lstdout):
# package is not installed locally
return False, False
rcmd = "%s -Sun" % (xbps_path['install'])
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
if rrc == 0 or rrc == 17:
"""Return True to indicate that the package is installed locally,
and the result of the version number comparison to determine if the
package is up-to-date"""
return True, name not in rstdout
return False, False
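# Illustrative note (not in the original module): query_package() returns an
# (installed, up_to_date) pair -- e.g. (True, True) when the package is present
# locally and absent from the "xbps-install -Sun" pending-update list, and
# (False, False) when the package is not installed locally.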
def update_package_db(module, xbps_path):
"""Returns True if update_package_db changed"""
cmd = "%s -S" % (xbps_path['install'])
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="Could not update package db")
if "avg rate" in stdout:
return True
else:
return False
def upgrade(module, xbps_path):
"""Returns true is full upgrade succeeds"""
cmdupgrade = "%s -uy" % (xbps_path['install'])
cmdneedupgrade = "%s -un" % (xbps_path['install'])
rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
if rc == 0:
if(len(stdout.splitlines()) == 0):
module.exit_json(changed=False, msg='Nothing to upgrade')
else:
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded')
else:
module.fail_json(msg="Could not upgrade")
else:
module.fail_json(msg="Could not upgrade")
def remove_packages(module, xbps_path, packages):
"""Returns true if package removal succeeds"""
changed_packages = []
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated = query_package(module, xbps_path, package)
if not installed:
continue
cmd = "%s -y %s" % (xbps_path['remove'], package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
changed_packages.append(package)
if len(changed_packages) > 0:
module.exit_json(changed=True, msg="removed %s package(s)" %
len(changed_packages), packages=changed_packages)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, xbps_path, state, packages):
"""Returns true if package install succeeds."""
toInstall = []
for i, package in enumerate(packages):
"""If the package is installed and state == present or state == latest
and is up-to-date then skip"""
installed, updated = query_package(module, xbps_path, package)
if installed and (state == 'present' or
(state == 'latest' and updated)):
continue
toInstall.append(package)
if len(toInstall) == 0:
module.exit_json(changed=False, msg="Nothing to Install")
cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0 and not (state == 'latest' and rc == 17):
module.fail_json(msg="failed to install %s" % (package))
module.exit_json(changed=True, msg="installed %s package(s)"
% (len(toInstall)),
packages=toInstall)
module.exit_json(changed=False, msg="package(s) already installed",
packages=[])
def check_packages(module, xbps_path, packages, state):
"""Returns change status of command"""
would_be_changed = []
for package in packages:
installed, updated = query_package(module, xbps_path, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state),
packages=would_be_changed)
else:
module.exit_json(changed=False, msg="package(s) already %s" % state,
packages=[])
def main():
"""Returns, calling appropriate command"""
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, aliases=['pkg', 'package'], type='list'),
state=dict(default='present', choices=['present', 'installed',
'latest', 'absent',
'removed']),
recurse=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
upgrade=dict(default=False, type='bool'),
update_cache=dict(default=True, aliases=['update-cache'],
type='bool')
),
required_one_of=[['name', 'update_cache', 'upgrade']],
supports_check_mode=True)
xbps_path = dict()
xbps_path['install'] = module.get_bin_path('xbps-install', True)
xbps_path['query'] = module.get_bin_path('xbps-query', True)
xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
if not os.path.exists(xbps_path['install']):
module.fail_json(msg="cannot find xbps, in path %s"
% (xbps_path['install']))
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p["update_cache"] and not module.check_mode:
changed = update_package_db(module, xbps_path)
if p['name'] is None and not p['upgrade']:
if changed:
module.exit_json(changed=True,
msg='Updated the package master lists')
else:
module.exit_json(changed=False,
msg='Package list already up to date')
if (p['update_cache'] and module.check_mode and not
(p['name'] or p['upgrade'])):
module.exit_json(changed=True,
msg='Would have updated the package cache')
if p['upgrade']:
upgrade(module, xbps_path)
if p['name']:
pkgs = p['name']
if module.check_mode:
check_packages(module, xbps_path, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, xbps_path, p['state'], pkgs)
elif p['state'] == 'absent':
remove_packages(module, xbps_path, pkgs)
if __name__ == "__main__":
main()
|
GISPPU/GrenadaLandInformation
|
refs/heads/master
|
geonode/search/tests.py
|
3
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test.client import Client
from django.test import TestCase
from django.core.urlresolvers import reverse
from geonode.security.enumerations import AUTHENTICATED_USERS, ANONYMOUS_USERS
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.people.models import Profile
from geonode.search import search
from geonode.search import util
from geonode.search.populate_search_test_data import create_models
from geonode.search.query import query_from_request
from agon_ratings.models import OverallRating
import json
import logging
# quack
MockRequest = lambda **kw: type('xyz',(object,),{'REQUEST':kw,'user':None})
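# MockRequest builds a minimal duck-typed stand-in for a Django request: a
# throwaway class whose REQUEST dict holds the supplied keyword arguments and
# whose user attribute is None.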
def all_public():
'''ensure all layers, maps and documents are publicly viewable'''
for l in Layer.objects.all():
l.set_default_permissions()
for m in Map.objects.all():
m.set_default_permissions()
for d in Document.objects.all():
d.set_default_permissions()
class searchTest(TestCase):
c = Client()
fixtures = ['initial_data.json', 'bobby']
@classmethod
def setUpClass(cls):
"""
Hook method for setting up class fixture before running tests in the class.
"""
from django.core.cache import cache
cache.clear()
searchTest('_fixture_setup')._fixture_setup(True)
create_models()
all_public()
@classmethod
def tearDownClass(cls):
"""
Hook method for deconstructing the class fixture after running all tests in the class.
"""
searchTest('_fixture_teardown')._fixture_teardown(True)
logging.getLogger('south').setLevel(logging.DEBUG)
def _fixture_setup(self, a=False):
if a:
super(searchTest, self)._fixture_setup()
def _fixture_teardown(self, a=False):
if a:
super(searchTest, self)._fixture_teardown()
def request(self, query=None, **options):
query_dict = dict(q=query) if query else {}
get_params = dict(query_dict, **options)
return self.c.get('/search/api', get_params)
def assert_results_contain_title(self, jsonvalue, title, _type=None):
matcher = ((lambda doc: doc['title'] == title) if _type is None else
(lambda doc: doc['title'] == title and doc['_type'] == _type))
matches = filter(matcher, jsonvalue['results'])
self.assertTrue(matches, "No results match %s" % title)
def search_assert(self, response, **options):
jsonvalue = json.loads(response.content)
facets = jsonvalue['facets']
if 'layer' in facets:
self.assertEquals(facets['raster'] + facets['vector'], facets['layer'])
#import pprint; pprint.pprint(jsonvalue)
self.assertFalse(jsonvalue.get('errors'))
self.assertTrue(jsonvalue.get('success'))
contains_maptitle = options.pop('contains_maptitle', None)
if contains_maptitle:
self.assert_results_contain_title(jsonvalue, contains_maptitle, 'map')
contains_layertitle = options.pop('contains_layertitle', None)
if contains_layertitle:
self.assert_results_contain_title(jsonvalue, contains_layertitle, 'layer')
contains_username = options.pop('contains_username', None)
if contains_username:
self.assert_results_contain_title(jsonvalue, contains_username, 'user')
n_results = options.pop('n_results', None)
if n_results:
self.assertEquals(n_results, len(jsonvalue['results']))
n_total = options.pop('n_total', None)
if n_total:
self.assertEquals(n_total, jsonvalue['total'])
first_title = options.pop('first_title', None)
if first_title:
self.assertTrue(len(jsonvalue['results']) > 0, 'No results found')
doc = jsonvalue['results'][0]
self.assertEquals(first_title, doc['title'])
sorted_by = options.pop('sorted_by', None)
if sorted_by:
reversed = sorted_by[0] == '-'
sorted_by = sorted_by.replace('-','')
sorted_fields = [ jv[sorted_by] for jv in jsonvalue['results'] ]
expected = list(sorted_fields)
expected.sort(reverse = reversed)
self.assertEquals(sorted_fields, expected)
def test_limit(self):
self.search_assert(self.request(limit=1), n_results=1)
def test_query_map_title(self):
self.search_assert(self.request('unique'), contains_maptitle='map one')
def test_query_layer_title(self):
self.search_assert(self.request('uniquetitle'),
contains_layertitle='uniquetitle')
def test_username(self):
self.search_assert(self.request('jblaze'), contains_username='jblaze')
def test_profile(self):
self.search_assert(self.request("some other information"),
contains_username='jblaze')
def test_text_across_types(self):
self.search_assert(self.request('foo'), n_results=8, n_total=8)
self.search_assert(self.request('common'), n_results=10, n_total=22)
def test_pagination(self):
self.search_assert(self.request('common', start=0), n_results=10, n_total=22)
self.search_assert(self.request('common', start=10), n_results=10, n_total=22)
self.search_assert(self.request('common', start=20), n_results=2, n_total=22)
def test_bbox_query(self):
# @todo since maps and users are excluded at the moment, this will have
# to be revisited
self.search_assert(self.request(extent='-180,180,-90,90', limit=None), n_results=26, n_total=26)
self.search_assert(self.request(extent='0,10,0,10', limit=None), n_results=7)
self.search_assert(self.request(extent='0,1,0,1', limit=None), n_results=2)
def test_bbox_result(self):
# grab one and set the bounds
lyr = Layer.objects.all()[0]
lyr.bbox_x0 = -100
lyr.bbox_x1 = -90
lyr.bbox_y0 = 38
lyr.bbox_y1 = 40
lyr.save()
response = json.loads(self.request(lyr.title,type='layer').content)
self.assertEquals({u'minx': u'-100', u'miny': u'38', u'maxx': u'-90', u'maxy': u'40'},
response['results'][0]['bbox'])
def test_date_query(self):
self.search_assert(self.request(period='1980-01-01T00:00:00Z,1995-01-01T00:00:00Z'),
n_results=3)
self.search_assert(self.request(period=',1995-01-01T00:00:00Z'),
n_results=7)
self.search_assert(self.request(period='1980-01-01T00:00:00Z,'),
n_results=10, n_total=22)
def test_errors(self):
self.assert_error(self.request(sort='foo'),
"valid sorting values are: ['alphaaz', 'newest', 'popularity', 'alphaza', 'none', 'rel', 'oldest']")
self.assert_error(self.request(extent='1,2,3'),
'extent filter must contain x0,x1,y0,y1 comma separated')
self.assert_error(self.request(extent='a,b,c,d'),
'extent filter must contain x0,x1,y0,y1 comma separated')
self.assert_error(self.request(start='x'),
'startIndex must be valid number')
self.assert_error(self.request(limit='x'),
'limit must be valid number')
self.assert_error(self.request(added='x'),
'valid added filter values are: today,week,month')
def assert_error(self, resp, msg):
obj = json.loads(resp.content)
self.assertTrue(obj['success'] == False)
self.assertEquals(msg, obj['errors'][0])
def test_sort(self):
self.search_assert(self.request('foo', sort='newest',type='layer'),
first_title='common blar', sorted_by='-last_modified')
self.search_assert(self.request('foo', sort='oldest',type='layer'),
first_title='common double time', sorted_by='last_modified')
self.search_assert(self.request('foo', sort='alphaaz'),
first_title='bar baz', sorted_by='title')
self.search_assert(self.request('foo', sort='alphaza'),
first_title='uniquefirst foo', sorted_by='-title')
# apply some ratings
ct = ContentType.objects.get_for_model(Layer)
for l in Layer.objects.all():
OverallRating.objects.create(content_type=ct, object_id=l.pk, rating=l.pk, category=3)
ct = ContentType.objects.get_for_model(Map)
for l in Map.objects.all():
OverallRating.objects.create(content_type=ct, object_id=l.pk, rating=l.pk, category=1)
# clear any cached ratings
from django.core.cache import cache
cache.clear()
self.search_assert(self.request('foo', sort='popularity'),
first_title='common double time', sorted_by='-rating')
def test_keywords(self):
# this tests the matching of the general query to keywords
self.search_assert(self.request('populartag'), n_results=10, n_total=26)
self.search_assert(self.request('maptagunique'), n_results=1, n_total=1)
self.search_assert(self.request('layertagunique'), n_results=1, n_total=1)
# verify little chunks must entirely match keywords
# po ma la are the prefixes to the former keywords :)
self.search_assert(self.request('po ma la'), n_results=0, n_total=0)
def test_type_query(self):
self.search_assert(self.request('common', type='map'), n_results=8, n_total=8)
self.search_assert(self.request('common', type='layer'), n_results=5, n_total=5)
self.search_assert(self.request('common', type='document'), n_results=9, n_total=9)
self.search_assert(self.request('foo', type='user'), n_results=4, n_total=4)
# there are 8 total layers, half vector, half raster
self.search_assert(self.request('', type='raster'), n_results=4, n_total=4)
self.search_assert(self.request('', type='vector'), n_results=4, n_total=4)
def test_kw_query(self):
# a kw-only query should filter out those not matching the keyword
self.search_assert(self.request('', kw='here', type='layer'), n_results=1, n_total=1)
# no matches
self.search_assert(self.request('', kw='foobar', type='layer'), n_results=0, n_total=0)
def test_exclude_query(self):
# exclude one layer
self.search_assert(self.request('', exclude='CA'), n_results=10, n_total=32)
# exclude one general word
self.search_assert(self.request('', exclude='common'), n_results=10, n_total=28)
# exclude more than one word
self.search_assert(self.request('', exclude='common,something'), n_results=10, n_total=24)
# exclude almost everything
self.search_assert(self.request('', exclude='common,something,ipsum,quux,morx,one'), n_results=10, n_total=11)
def test_category_search(self):
#search no categories
self.search_assert(self.request('', category=''), n_results=10, n_total=33)
#search, one category
self.search_assert(self.request('', category='location'), n_results=9, n_total=9)
# search two categories
self.search_assert(self.request('', category='location,biota'), n_results=10, n_total=17)
# search with all three categories
self.search_assert(self.request('', category='location,biota,elevation'), n_results=10, n_total=26)
def test_author_endpoint(self):
resp = self.c.get('/search/api/authors')
jsobj = json.loads(resp.content)
self.assertEquals(7, jsobj['total'])
def test_search_page(self):
from django.core.cache import cache
cache.clear()
resp = self.c.get(reverse('search'))
self.assertEquals(200, resp.status_code)
def test_util(self):
jdate = util.iso_str_to_jdate('-5000-01-01T12:00:00Z')
self.assertEquals(jdate, -105192)
roundtripped = util.jdate_to_approx_iso_str(jdate)
self.assertEquals(roundtripped, '-4999-01-03')
def test_security_trimming(self):
try:
self.run_security_trimming()
finally:
all_public()
def run_security_trimming(self):
# remove permissions on all jblaze layers
jblaze_layers = Layer.objects.filter(owner__username='jblaze')
hiding = jblaze_layers.count()
for l in jblaze_layers:
l.set_gen_level(ANONYMOUS_USERS, l.LEVEL_NONE)
l.set_gen_level(AUTHENTICATED_USERS, l.LEVEL_NONE)
# give user1 edit permission on these, too
user1 = User.objects.get(username='user1')
for l in jblaze_layers:
l.set_user_level(user1, Layer.LEVEL_WRITE)
# an (anonymous) layer query should exclude the hidden layers
self.search_assert(self.request(type='layer'), n_results=8 - hiding, n_total=8 - hiding)
# admin sees all
self.assertTrue(self.c.login(username='admin', password='admin'))
self.search_assert(self.request(type='layer'), n_results=8, n_total=8)
self.c.logout()
# a logged in jblaze will see his, too
jblaze = User.objects.get(username='jblaze')
jblaze.set_password('passwd')
jblaze.save()
self.assertTrue(self.c.login(username='jblaze', password='passwd'))
self.search_assert(self.request(type='layer'), n_results=8, n_total=8)
self.c.logout()
# a logged in user1 will see these, too
user1.set_password('passwd')
user1.save()
self.assertTrue(self.c.login(username='user1', password='passwd'))
self.search_assert(self.request(type='layer'), n_results=8, n_total=8)
self.c.logout()
def test_relevance(self):
query = query_from_request(MockRequest(q='foo'), {})
def assert_rules(rules):
rank_rules = []
for model, model_rules in rules:
rank_rules.extend(search._rank_rules(model, *model_rules))
sql = search._add_relevance(query, rank_rules)
for _, model_rules in rules:
for attr, rank1, rank2 in model_rules:
self.assertTrue(('THEN %d ELSE 0' % rank1) in sql)
self.assertTrue(('THEN %d ELSE 0' % rank2) in sql)
self.assertTrue(attr in sql)
assert_rules([(Map, [('title', 10, 5), ('abstract', 5, 2)])])
assert_rules([(Layer,
[('name', 10, 1), ('title', 10, 5), ('abstract', 5, 2)])])
assert_rules([(User, [('username', 10, 5)]),
(Profile, [('organization', 5, 2)])])
|
ratnania/pyccel
|
refs/heads/master
|
doc/scripts/scripts/complex_numbers.py
|
2
|
from numpy import array
from numpy import zeros
from numpy import ones
x1 = 1+3j
x2 = complex(1,3)
x3 = [complex(1,1)]*10
x4 = [1+2j]*10
x5 = array([1+2j,2+3j])
x6 = zeros((100,100),'complex')
x7 = ones((100,100),'complex')
|
etos/django
|
refs/heads/master
|
django/db/migrations/exceptions.py
|
39
|
from django.db.utils import DatabaseError
class AmbiguityError(Exception):
"""More than one migration matches a name prefix."""
pass
class BadMigrationError(Exception):
"""There's a bad migration (unreadable/bad format/etc.)."""
pass
class CircularDependencyError(Exception):
"""There's an impossible-to-resolve circular dependency."""
pass
class InconsistentMigrationHistory(Exception):
"""An applied migration has some of its dependencies not applied."""
pass
class InvalidBasesError(ValueError):
"""A model's base classes can't be resolved."""
pass
class IrreversibleError(RuntimeError):
"""An irreversible migration is about to be reversed."""
pass
class NodeNotFoundError(LookupError):
"""An attempt on a node is made that is not available in the graph."""
def __init__(self, message, node, origin=None):
self.message = message
self.origin = origin
self.node = node
def __str__(self):
return self.message
def __repr__(self):
return "NodeNotFoundError(%r)" % (self.node, )
class MigrationSchemaMissing(DatabaseError):
pass
class InvalidMigrationPlan(ValueError):
pass
|
bobquest33/peach
|
refs/heads/master
|
peach/nn/kmeans.py
|
6
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/kmeans.py
# Clustering for use in radial basis functions
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
K-Means clustering algorithm
This sub-package implements the K-Means clustering algorithm. This algorithm,
given a set of points, finds a set of vectors that best represents a partition
for these points. These vectors represent the center of a cloud of points that
are nearest to them.
This algorithm is one that can be used with radial basis function (RBF) networks
to find the centers of the RBFs. Usually, an RBFN is trained in two passes -- first
positioning the centers, and then computing their variances.
"""
################################################################################
from numpy import sum, argmin, array, mean, reshape
from numpy.random import standard_normal
################################################################################
# Functions
################################################################################
################################################################################
# Classifiers
# These functions classify a set of points associating them to centers according
# to a given metric. To create a classifier, the first parameter must be the set
# of points, and the second parameter must be the list of centers. No other
# parameters are needed.
def ClassByDistance(xs, c):
'''
Given a set of points and a list of centers, classify the points according
to their Euclidean distance to the centers.
:Parameters:
xs
Set of points to be classified. They must be given as a list or array of
one-dimensional vectors, one per line.
c
Set of centers. Must also be given as a list or array of
one-dimensional vectors, one per line.
:Returns:
A list of indices giving the classification. Each index is the position of the
corresponding cluster in the given parameter ``c``.
'''
res = [ ]
for x in xs:
dists = sum((x - c)**2, axis=1)
res.append(argmin(dists))
return res
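# Illustrative sketch, not part of the original module: with centers at
# (0, 0) and (10, 10), a point near the origin maps to index 0 and a point
# near (10, 10) maps to index 1, e.g.
#   ClassByDistance([array([1., 1.]), array([9., 9.])],
#                   array([[0., 0.], [10., 10.]]))   # -> [0, 1]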
################################################################################
# Clusterers
# These functions compute, from a set of points, a single vector that represents
# the cluster. To create a clusterer, the function needs only one parameter, the
# set of points to be clustered. This is given in form of a list. The function
# must return a single vector representing the cluster.
def ClusterByMean(x):
'''
This function computes the center of a cluster from the vectors in
the input set by simply averaging each component.
:Parameters:
x
Set of points to be clustered. They must be given in the form of a list
or array of one-dimensional points.
:Returns:
A one-dimensional array representing the center of the cluster.
'''
return mean(x, axis=0)
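# Illustrative sketch, not part of the original module: the component-wise
# mean is the cluster center, e.g.
#   ClusterByMean(array([[0., 0.], [2., 4.]]))   # -> array([1., 2.])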
################################################################################
# Classes
################################################################################
class KMeans(object):
'''
K-Means clustering algorithm
This class implements the known and very used K-Means clustering algorithm.
In this algorithm, the centers of the clusters are selected randomly. The
points in the training set are classified according to their closeness to
the cluster centers. This changes the positions of the centers, which
changes the classification of the points. This iteration is repeated until
no changes occur.
Traditional K-Means implementations classify the points in the training set
according to the Euclidean distance to the centers, and centers are computed
as the average of the points associated with it. This is the default behaviour
of this implementation, but it is configurable. Please, read below for more
detail.
'''
def __init__(self, training_set, nclusters, classifier=ClassByDistance,
clusterer=ClusterByMean):
'''
Initializes the algorithm.
:Parameters:
training_set
A list or array of vectors containing the data to be classified.
Each of the vectors in this list *must* have the same dimension, or
the algorithm won't behave correctly. Notice that each vector can be
given as a tuple -- internally, everything is converted to arrays.
nclusters
The number of clusters to be found. This must be, of course, bigger
than 1. These represent the number of centers found once the
algorithm terminates.
classifier
A function that classifies each of the points in the training set.
This function receives the training set and a list of centers, and
classify each of the points according to the given metric. Please,
look at the documentation on these functions for more information.
Its default value is ``ClassByDistance``, which uses Euclidean
distance as the metric.
clusterer
A function that computes the center of the cluster, given a set of
points. This function receives a list of points and returns the
vector representing the cluster. For more information, look at the
documentation for these functions. Its default value is
``ClusterByMean``, in which the cluster is represented by the mean
value of the vectors.
'''
self.__nclusters = nclusters
self.__x = array(training_set)
self.__c = standard_normal((nclusters, len(self.__x[0])))
self.classify = classifier
self.cluster = clusterer
self.__xc = self.classify(self.__x, self.__c)
def __getc(self):
return self.__c
def __setc(self, c):
self.__c = array(reshape(c, self.__c.shape))
c = property(__getc, __setc)
'''A ``numpy`` array containing the centers of the classes in the algorithm.
Each line represents a center, and the number of lines is the number of
classes. This property is read and write, but care must be taken when
setting new centers: if the dimensions are not exactly the same as given in
the instantiation of the class (*i.e.*, *C* centers of dimension *N*), an
exception will be raised.'''
def step(self):
'''
This method runs one step of the algorithm. It might be useful to track
the changes in the parameters.
:Returns:
The computed centers for this iteration.
'''
x = self.__x
c = self.__c
xc = self.classify(x, c)
self.__xc = xc
cnew = [ ]
for i in range(self.__nclusters):
xi = [ xij for xij, clij in zip(x, xc) if clij == i ]
if xi:
cnew.append(self.cluster(array(xi)))
else:
cnew.append(standard_normal(c[i,:].shape))
return array(cnew)
def __call__(self, imax=20):
'''
The ``__call__`` interface is used to run the algorithm until
convergence is found.
:Parameters:
imax
Specifies the maximum number of iterations admitted in the execution
of the algorithm. It defaults to 20.
:Returns:
An array containing, at each line, the vectors representing the
centers of the clustered regions.
'''
i = 0
xc = [ ]
while i < imax and xc != self.__xc:
xc = self.__xc
self.__c = self.step()
i = i + 1
return self.__c
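# Hedged usage sketch based on the class above (the local variable names are
# illustrative only):
#   km = KMeans(points, 3)                  # points: list/array of equal-length vectors
#   centers = km(imax=50)                   # iterate until convergence or 50 steps
#   labels = km.classify(points, centers)   # defaults to ClassByDistance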
if __name__ == "__main__":
from random import shuffle
from basic import *
xs = [ ]
for i in range(7):
xs.append(array([ -1., -1. ] + 0.1*standard_normal((2,))))
for i in range(7):
xs.append(array([ 1., -1. ] + 0.1*standard_normal((2,))))
for i in range(7):
xs.append(array([ 0., 1. ] + 0.1*standard_normal((2,))))
#shuffle(xs)
k = KMeans(xs, 3)
c = k()
print c
xc = k.classify(xs, c)
for xx, xxc in zip(xs, xc):
print xx, xxc, c[xxc,:]
xs = array(xs)
a1 = start_square()
a1.hold(True)
a1.grid(True)
for xx in xs:
a1.scatter(xx[0], xx[1], c='black', marker='x')
a1.scatter(c[:,0], c[:,1], c='red', marker='o')
savefig('kmeans.png')
|
kaksmet/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/echo-query_v13_wsh.py
|
266
|
#!/usr/bin/python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
while True:
msgutil.send_message(request, request.unparsed_uri.split('?')[1] or '')
return
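# Behaviour sketch, not part of the original handler: a client that opens
# e.g. ws://host/.../echo-query?foo=bar receives one message containing the
# raw query string 'foo=bar' (or an empty message if the query string is empty).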
|
mropert/conan
|
refs/heads/develop
|
conans/client/command_profile_args.py
|
1
|
from conans.model.profile import Profile
from conans.errors import ConanException
from collections import defaultdict, OrderedDict
from conans.model.env_info import EnvValues
from conans.model.options import OptionsValues
from conans.model.scope import Scopes
def profile_from_args(args, cwd, default_folder):
""" Return a Profile object, as the result of merging a potentially existing Profile
file and the args command-line arguments
"""
file_profile = Profile.read_file(args.profile, cwd, default_folder)
args_profile = _profile_parse_args(args.settings, args.options, args.env, args.scope)
if file_profile:
file_profile.update(args_profile)
return file_profile
else:
return args_profile
def _profile_parse_args(settings, options, envs, scopes):
""" return a Profile object result of parsing raw data
"""
def _get_tuples_list_from_extender_arg(items):
if not items:
return []
# Validate the pairs
for item in items:
chunks = item.split("=")
if len(chunks) != 2:
raise ConanException("Invalid input '%s', use 'name=value'" % item)
return [(item[0], item[1]) for item in [item.split("=") for item in items]]
def _get_simple_and_package_tuples(items):
"""Parse items like "thing:item=value or item2=value2 and returns a tuple list for
the simple items (name, value) and a dict for the package items
{package: [(item, value)...)], ...}
"""
simple_items = []
package_items = defaultdict(list)
tuples = _get_tuples_list_from_extender_arg(items)
for name, value in tuples:
if ":" in name: # Scoped items
tmp = name.split(":", 1)
ref_name = tmp[0]
name = tmp[1]
package_items[ref_name].append((name, value))
else:
simple_items.append((name, value))
return simple_items, package_items
def _get_env_values(env, package_env):
env_values = EnvValues()
for name, value in env:
env_values.add(name, EnvValues.load_value(value))
for package, data in package_env.items():
for name, value in data:
env_values.add(name, EnvValues.load_value(value), package)
return env_values
result = Profile()
options = _get_tuples_list_from_extender_arg(options)
result.options = OptionsValues(options)
env, package_env = _get_simple_and_package_tuples(envs)
env_values = _get_env_values(env, package_env)
result.env_values = env_values
settings, package_settings = _get_simple_and_package_tuples(settings)
result.settings = OrderedDict(settings)
for pkg, values in package_settings.items():
result.package_settings[pkg] = OrderedDict(values)
result.scopes = Scopes.from_list(scopes) if scopes else None
return result
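# Illustrative sketch, not part of the original module: given raw args such as
#   settings = ["os=Linux", "zlib:arch=x86"]
#   options  = ["shared=True"]
#   envs     = ["CC=clang", "zlib:CFLAGS=-O2"]
# _profile_parse_args places ('os', 'Linux') in result.settings, ('arch', 'x86')
# under result.package_settings['zlib'], the option in result.options, and the
# env entries split between global and per-package values in result.env_values.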
|
cnoviello/micropython
|
refs/heads/master
|
tests/basics/class_getattr.py
|
100
|
# test that __getattr__, __getattrribute__ and instance members don't override builtins
class C:
def __init__(self):
self.__add__ = lambda: print('member __add__')
def __add__(self, x):
print('__add__')
def __getattr__(self, attr):
print('__getattr__', attr)
return None
def __getattrribute__(self, attr):
print('__getattrribute__', attr)
return None
c = C()
c.__add__
c + 1 # should call __add__
|
BenTheElder/test-infra
|
refs/heads/master
|
gubernator/gcs_async_test.py
|
20
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import urlparse
import cloudstorage as gcs
import webtest
import gcs_async
app = webtest.TestApp(None)
def write(path, data):
if not isinstance(data, basestring):
data = json.dumps(data)
with gcs.open(path, 'w') as f:
f.write(data)
def install_handler_dispatcher(stub, matches, dispatch):
def fetch_stub(url, payload, method, headers, request, response,
follow_redirects=False, deadline=None,
validate_certificate=None):
# pylint: disable=too-many-arguments,unused-argument
result, code = dispatch(method, url, payload, headers)
response.set_statuscode(code)
response.set_content(result)
header = response.add_header()
header.set_key('content-length')
header.set_value(str(len(result)))
# this is gross, but there doesn't appear to be a better way
# pylint: disable=protected-access
stub._urlmatchers_to_fetch_functions.append((matches, fetch_stub))
def install_handler(stub, structure, base='pr-logs/pull/'):
"""
Add a stub to mock out GCS JSON API ListObject requests -- with
just enough detail for our code.
This is based on google.appengine.ext.cloudstorage.stub_dispatcher.
Args:
stub: a URLFetch stub, to register our new handler against.
structure: a dictionary of {paths: subdirectory names}.
This will be transformed into the (more verbose) form
that the ListObject API returns.
"""
prefixes_for_paths = {}
for path, subdirs in structure.iteritems():
path = base + path
prefixes_for_paths[path] = ['%s%s/' % (path, d) for d in subdirs]
def matches(url):
return url.startswith(gcs_async.STORAGE_API_URL)
def dispatch(method, url, _payload, _headers):
if method != 'GET':
raise ValueError('unhandled method %s' % method)
parsed = urlparse.urlparse(url)
param_dict = urlparse.parse_qs(parsed.query, True)
prefix = param_dict['prefix'][0]
return json.dumps({'prefixes': prefixes_for_paths[prefix]}), 200
install_handler_dispatcher(stub, matches, dispatch)
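# Illustrative sketch, not part of the original file: install_handler(stub,
# {'123/': ['1', '2']}) registers a fake ListObject handler so that a GET with
# prefix 'pr-logs/pull/123/' returns
#   {'prefixes': ['pr-logs/pull/123/1/', 'pr-logs/pull/123/2/']}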
class GCSAsyncTest(unittest.TestCase):
def setUp(self):
self.testbed.init_memcache_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_app_identity_stub()
# redirect GCS calls to the local proxy
gcs_async.GCS_API_URL = gcs.common.local_api_url()
def test_read(self):
write('/foo/bar', 'test data')
self.assertEqual(gcs_async.read('/foo/bar').get_result(), 'test data')
self.assertEqual(gcs_async.read('/foo/quux').get_result(), None)
def test_listdirs(self):
install_handler(self.testbed.get_stub('urlfetch'),
{'foo/': ['bar', 'baz']}, base='base/')
self.assertEqual(gcs_async.listdirs('buck/base/foo/').get_result(),
['buck/base/foo/bar/', 'buck/base/foo/baz/'])
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/doc/core/howto/tutorial/listings/finger/fingerPBclient.py
|
22
|
# test the PB finger on port 8889
# this code is essentially the same as
# the first example in howto/pb-usage
from twisted.spread import pb
from twisted.internet import reactor
def gotObject(object):
print "got object:", object
object.callRemote("getUser","moshez").addCallback(gotData)
# or
# object.callRemote("getUsers").addCallback(gotData)
def gotData(data):
print 'server sent:', data
reactor.stop()
def gotNoObject(reason):
print "no object:",reason
reactor.stop()
factory = pb.PBClientFactory()
reactor.connectTCP("127.0.0.1",8889, factory)
factory.getRootObject().addCallbacks(gotObject,gotNoObject)
reactor.run()
|
micjerry/groupservice
|
refs/heads/master
|
handlers/removemember.py
|
1
|
import tornado.web
import tornado.gen
import json
import io
import logging
import motor
from bson.objectid import ObjectId
from mickey.basehandler import BaseHandler
import mickey.tp
import mickey.maps
import mickey.userfetcher
class RemoveMemberHandler(BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
coll = self.application.db.groups
usercoll = self.application.userdb.users
publish = self.application.publish
data = json.loads(self.request.body.decode("utf-8"))
groupid = data.get("groupid", "")
userid = data.get("userid", "")
logging.info("begin to remove member %s from group %s" % (userid, groupid))
if not userid or not groupid:
logging.error("invalid request")
self.set_status(403)
self.finish()
return
group = yield coll.find_one({"_id":ObjectId(groupid)})
groupname = ""
receivers = []
if group:
groupname = group.get("name", "")
owner = group.get("owner", "")
if self.p_userid != owner and self.p_userid != userid:
logging.error("no right")
self.set_status(403)
self.finish()
return
# the owner can not quit
if self.p_userid == owner and self.p_userid == userid:
logging.error("the owner cannot quit, you can dismiss")
self.set_status(403)
self.finish()
return
receivers = [x.get("id", "") for x in group.get("members", "")]
else:
self.set_status(404)
self.finish()
return
result = yield coll.find_and_modify({"_id":ObjectId(groupid)},
{
"$pull":{"members":{"id":userid}},
"$unset": {"garbage": 1}
})
# remove group from the user's group list
yield usercoll.find_and_modify({"id":userid},
{
"$pull":{"groups":{"id":groupid}},
"$pull":{"realgroups":groupid},
"$unset": {"garbage": 1}
})
#remove users from conference
mickey.userfetcher.remove_users_from_conf(groupid, [userid])
if result:
self.set_status(200)
mickey.tp.removegroupmember(groupid, userid, "")
#send notify to deleted user
notify = {}
notify["name"] = "mx.group.group_kick"
notify["pub_type"] = "any"
notify["nty_type"] = "app"
notify["groupid"] = groupid
notify["groupname"] = groupname
notify["userid"] = userid
if self.p_userid == userid:
notify["quit"] = "true"
if userid in receivers:
receivers.remove(userid)
else:
notify["quit"] = "false"
publish.publish_multi(receivers, notify)
#notify self
if self.p_userid == userid:
self_notify = {
"name":"mx.group.self_group_quit",
"groupid":groupid,
"pub_type":"any",
"nty_type":"app"
}
publish.publish_one(userid, self_notify)
#remove maps
mickey.maps.removemembers(groupid, [userid])
else:
logging.error("remove member failed groupid = %s, member = %s" % (groupid, userid))
self.set_status(500)
self.finish()
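# Hedged request/behaviour sketch derived from the handler above (field names
# taken from the code, example values are hypothetical):
#   POST body: {"groupid": "<ObjectId hex>", "userid": "<member id>"}
# A member may remove himself (quit) and the owner may remove other members,
# but the owner cannot quit his own group; on success the member is pulled
# from the group document and an "mx.group.group_kick" notification is
# published to the remaining members.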
|
draugiskisprendimai/odoo
|
refs/heads/8.0
|
addons/point_of_sale/__init__.py
|
378
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import controllers
import point_of_sale
import report
import res_users
import res_partner
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
helloTC/ATT
|
refs/heads/master
|
algorithm/__init__.py
|
2
|
__all__ = ['tools', 'surf_tools', 'vol_tools']
|
motion2015/a3
|
refs/heads/a3
|
common/lib/xmodule/xmodule/modulestore/mongo/base.py
|
7
|
"""
Modulestore backed by Mongodb.
Stores individual XModules as single documents with the following
structure:
{
'_id': <location.as_dict>,
'metadata': <dict containing all Scope.settings fields>
'definition': <dict containing all Scope.content fields>
'definition.children': <list of all child location.to_deprecated_string()s>
}
"""
import pymongo
import sys
import logging
import copy
import re
from uuid import uuid4
from bson.son import SON
from datetime import datetime
from fs.osfs import OSFS
from mongodb_proxy import MongoProxy, autoretry_read
from path import path
from pytz import UTC
from contracts import contract, new_contract
from importlib import import_module
from opaque_keys.edx.keys import UsageKey, CourseKey, AssetKey
from opaque_keys.edx.locations import Location, BlockUsageLocator
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator, LibraryLocator
from xblock.core import XBlock
from xblock.exceptions import InvalidScopeError
from xblock.fields import Scope, ScopeIds, Reference, ReferenceList, ReferenceValueDict
from xblock.runtime import KvsFieldData
from xmodule.assetstore import AssetMetadata, CourseAssetsFromStorage
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import null_error_tracker, exc_info_to_str
from xmodule.exceptions import HeartbeatFailure
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.modulestore import ModuleStoreWriteBase, ModuleStoreEnum, BulkOperationsMixin, BulkOpsRecord
from xmodule.modulestore.draft_and_published import ModuleStoreDraftAndPublished, DIRECT_ONLY_CATEGORIES
from xmodule.modulestore.edit_info import EditInfoRuntimeMixin
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError, ReferentialIntegrityError
from xmodule.modulestore.inheritance import InheritanceMixin, inherit_metadata, InheritanceKeyValueStore
from xmodule.modulestore.xml import CourseLocationManager
log = logging.getLogger(__name__)
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('long', long)
new_contract('BlockUsageLocator', BlockUsageLocator)
# sort order that returns DRAFT items first
SORT_REVISION_FAVOR_DRAFT = ('_id.revision', pymongo.DESCENDING)
# sort order that returns PUBLISHED items first
SORT_REVISION_FAVOR_PUBLISHED = ('_id.revision', pymongo.ASCENDING)
BLOCK_TYPES_WITH_CHILDREN = list(set(
name for name, class_ in XBlock.load_classes() if getattr(class_, 'has_children', False)
))
# Allow us to call _from_deprecated_(son|string) throughout the file
# pylint: disable=protected-access
# at module level, cache one instance of OSFS per filesystem root.
_OSFS_INSTANCE = {}
_DETACHED_CATEGORIES = [name for name, __ in XBlock.load_tagged_classes("detached")]
class MongoRevisionKey(object):
"""
Key Revision constants to use for Location and Usage Keys in the Mongo modulestore
Note: These values are persisted in the database, so should not be changed without migrations
"""
draft = 'draft'
published = None
class InvalidWriteError(Exception):
"""
Raised to indicate that writing to a particular key
in the KeyValueStore is disabled
"""
pass
class MongoKeyValueStore(InheritanceKeyValueStore):
"""
A KeyValueStore that maps keyed data access to one of the 3 data areas
known to the MongoModuleStore (data, children, and metadata)
"""
def __init__(self, data, parent, children, metadata):
super(MongoKeyValueStore, self).__init__()
if not isinstance(data, dict):
self._data = {'data': data}
else:
self._data = data
self._parent = parent
self._children = children
self._metadata = metadata
def get(self, key):
if key.scope == Scope.children:
return self._children
elif key.scope == Scope.parent:
return self._parent
elif key.scope == Scope.settings:
return self._metadata[key.field_name]
elif key.scope == Scope.content:
return self._data[key.field_name]
else:
raise InvalidScopeError(key)
def set(self, key, value):
if key.scope == Scope.children:
self._children = value
elif key.scope == Scope.settings:
self._metadata[key.field_name] = value
elif key.scope == Scope.content:
self._data[key.field_name] = value
else:
raise InvalidScopeError(key)
def delete(self, key):
if key.scope == Scope.children:
self._children = []
elif key.scope == Scope.settings:
if key.field_name in self._metadata:
del self._metadata[key.field_name]
elif key.scope == Scope.content:
if key.field_name in self._data:
del self._data[key.field_name]
else:
raise InvalidScopeError(key)
def has(self, key):
if key.scope in (Scope.children, Scope.parent):
return True
elif key.scope == Scope.settings:
return key.field_name in self._metadata
elif key.scope == Scope.content:
return key.field_name in self._data
else:
return False
def __repr__(self):
return "MongoKeyValueStore{!r}<{!r}, {!r}>".format(
(self._data, self._parent, self._children, self._metadata),
self._fields,
self.inherited_settings
)
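# Illustrative sketch with hypothetical field names: a MongoKeyValueStore built
# from data={'data': '<html/>'}, parent=None, children=[], and
# metadata={'display_name': 'Intro'} answers
#   get(<Scope.content, 'data'>)          -> '<html/>'
#   get(<Scope.settings, 'display_name'>) -> 'Intro'
#   get(<Scope.children, ...>)            -> []
# and raises InvalidScopeError for any other scope.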
class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
"""
A system that has a cache of module json that it will use to load modules
from, with a backup of calling to the underlying modulestore for more data
"""
def __repr__(self):
return "CachingDescriptorSystem{!r}".format((
self.modulestore,
unicode(self.course_id),
[unicode(key) for key in self.module_data.keys()],
self.default_class,
[unicode(key) for key in self.cached_metadata.keys()],
))
def __init__(self, modulestore, course_key, module_data, default_class, cached_metadata, **kwargs):
"""
modulestore: the module store that can be used to retrieve additional modules
course_key: the course for which everything in this runtime will be relative
module_data: a dict mapping Location -> json that was cached from the
underlying modulestore
default_class: The default_class to use when loading an
XModuleDescriptor from the module_data
cached_metadata: the cache for handling inheritance computation. internal use only
resources_fs: a filesystem, as per MakoDescriptorSystem
error_tracker: a function that logs errors for later display to users
render_template: a function for rendering templates, as per
MakoDescriptorSystem
"""
id_manager = CourseLocationManager(course_key)
kwargs.setdefault('id_reader', id_manager)
kwargs.setdefault('id_generator', id_manager)
super(CachingDescriptorSystem, self).__init__(
field_data=None,
load_item=self.load_item,
**kwargs
)
self.modulestore = modulestore
self.module_data = module_data
self.default_class = default_class
# cdodge: other Systems have a course_id attribute defined. To keep things consistent, let's
# define an attribute here as well, even though it's None
self.course_id = course_key
self.cached_metadata = cached_metadata
def load_item(self, location):
"""
Return an XModule instance for the specified location
"""
assert isinstance(location, UsageKey)
json_data = self.module_data.get(location)
if json_data is None:
module = self.modulestore.get_item(location, using_descriptor_system=self)
return module
else:
# load the module and apply the inherited metadata
try:
category = json_data['location']['category']
class_ = self.load_block_type(category)
definition = json_data.get('definition', {})
metadata = json_data.get('metadata', {})
for old_name, new_name in getattr(class_, 'metadata_translations', {}).items():
if old_name in metadata:
metadata[new_name] = metadata[old_name]
del metadata[old_name]
children = [
self._convert_reference_to_key(childloc)
for childloc in definition.get('children', [])
]
parent = None
if self.cached_metadata is not None:
# fish the parent out of here if it's available
parent_url = self.cached_metadata.get(unicode(location), {}).get('parent', {}).get(
ModuleStoreEnum.Branch.published_only if location.revision is None
else ModuleStoreEnum.Branch.draft_preferred
)
if parent_url:
parent = BlockUsageLocator.from_string(parent_url)
if not parent and category != 'course':
# try looking it up just-in-time (but not if we're working with a root node (course).
parent = self.modulestore.get_parent_location(
as_published(location),
ModuleStoreEnum.RevisionOption.published_only if location.revision is None
else ModuleStoreEnum.RevisionOption.draft_preferred
)
data = definition.get('data', {})
if isinstance(data, basestring):
data = {'data': data}
mixed_class = self.mixologist.mix(class_)
if data: # empty or None means no work
data = self._convert_reference_fields_to_keys(mixed_class, location.course_key, data)
metadata = self._convert_reference_fields_to_keys(mixed_class, location.course_key, metadata)
kvs = MongoKeyValueStore(
data,
parent,
children,
metadata,
)
field_data = KvsFieldData(kvs)
scope_ids = ScopeIds(None, category, location, location)
module = self.construct_xblock_from_class(class_, scope_ids, field_data)
if self.cached_metadata is not None:
# parent container pointers don't differentiate between draft and non-draft
# so when we do the lookup, we should do so with a non-draft location
non_draft_loc = as_published(location)
# Convert the serialized fields values in self.cached_metadata
# to python values
metadata_to_inherit = self.cached_metadata.get(unicode(non_draft_loc), {})
inherit_metadata(module, metadata_to_inherit)
module._edit_info = json_data.get('edit_info')
# migrate published_by and published_on if edit_info isn't present
if module._edit_info is None:
module._edit_info = {}
raw_metadata = json_data.get('metadata', {})
# published_on was previously stored as a list of time components instead of a datetime
if raw_metadata.get('published_date'):
module._edit_info['published_date'] = datetime(
*raw_metadata.get('published_date')[0:6]
).replace(tzinfo=UTC)
module._edit_info['published_by'] = raw_metadata.get('published_by')
# decache any computed pending field settings
module.save()
return module
except Exception: # pylint: disable=broad-except
log.warning("Failed to load descriptor from %s", json_data, exc_info=True)
return ErrorDescriptor.from_json(
json_data,
self,
location,
error_msg=exc_info_to_str(sys.exc_info())
)
def _convert_reference_to_key(self, ref_string):
"""
Convert a single serialized UsageKey string in a ReferenceField into a UsageKey.
"""
key = Location.from_string(ref_string)
return key.replace(run=self.modulestore.fill_in_run(key.course_key).run)
def __setattr__(self, name, value):
return super(CachingDescriptorSystem, self).__setattr__(name, value)
def _convert_reference_fields_to_keys(self, class_, course_key, jsonfields):
"""
Find all fields of type reference and convert the payload into UsageKeys
:param class_: the XBlock class
:param course_key: a CourseKey object for the given course
:param jsonfields: a dict of the jsonified version of the fields
"""
result = {}
for field_name, value in jsonfields.iteritems():
field = class_.fields.get(field_name)
if field is None:
continue
elif value is None:
result[field_name] = value
elif isinstance(field, Reference):
result[field_name] = self._convert_reference_to_key(value)
elif isinstance(field, ReferenceList):
result[field_name] = [
self._convert_reference_to_key(ele) for ele in value
]
elif isinstance(field, ReferenceValueDict):
result[field_name] = {
key: self._convert_reference_to_key(subvalue) for key, subvalue in value.iteritems()
}
else:
result[field_name] = value
return result
def lookup_item(self, location):
"""
Returns the JSON payload of the xblock at location.
"""
try:
json = self.module_data[location]
except KeyError:
json = self.modulestore._find_one(location)
self.module_data[location] = json
return json
def get_edited_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('edited_by')
def get_edited_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('edited_on')
def get_subtree_edited_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('subtree_edited_by')
def get_subtree_edited_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('subtree_edited_on')
def get_published_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('published_by')
def get_published_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('published_date')
def applicable_aside_types(self, block):
# "old" mongo does support asides yet
return []
new_contract('CachingDescriptorSystem', CachingDescriptorSystem)
# The only thing using this w/ wildcards is contentstore.mongo for asset retrieval
def location_to_query(location, wildcard=True, tag='i4x'):
"""
Takes a Location and returns a SON object that will query for that location by subfields
rather than subdoc.
Fields in location that are None are ignored in the query.
If `wildcard` is True, then a None in a location is treated as a wildcard
query. Otherwise, it is searched for literally
"""
query = location.to_deprecated_son(prefix='_id.', tag=tag)
if wildcard:
for key, value in query.items():
# don't allow wildcards on revision, since public is set as None, so
# its ambiguous between None as a real value versus None=wildcard
if value is None and key != '_id.revision':
del query[key]
return query
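# Rough sketch of the query shape (exact keys and ordering come from
# to_deprecated_son): a location in org 'Org', course 'C101', category 'html',
# name 'intro' with revision None becomes roughly
#   {'_id.tag': 'i4x', '_id.org': 'Org', '_id.course': 'C101',
#    '_id.category': 'html', '_id.name': 'intro', '_id.revision': None}
# and, with wildcard=True, any other None-valued key would be dropped while
# '_id.revision' is kept so draft vs published stays unambiguous.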
def as_draft(location):
"""
Returns the Location that is the draft for `location`
If the location is in the DIRECT_ONLY_CATEGORIES, returns itself
"""
if location.category in DIRECT_ONLY_CATEGORIES:
return location
return location.replace(revision=MongoRevisionKey.draft)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return location.replace(revision=MongoRevisionKey.published)
class MongoBulkOpsRecord(BulkOpsRecord):
"""
Tracks whether there've been any writes per course and disables inheritance generation
"""
def __init__(self):
super(MongoBulkOpsRecord, self).__init__()
self.dirty = False
class MongoBulkOpsMixin(BulkOperationsMixin):
"""
Mongo bulk operation support
"""
_bulk_ops_record_type = MongoBulkOpsRecord
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key):
"""
Prevent updating the meta-data inheritance cache for the given course
"""
# ensure it starts clean
bulk_ops_record.dirty = False
def _end_outermost_bulk_operation(self, bulk_ops_record, course_id, emit_signals=True):
"""
Restart updating the meta-data inheritance cache for the given course.
Refresh the meta-data inheritance cache now since it was temporarily disabled.
"""
if bulk_ops_record.dirty:
self.refresh_cached_metadata_inheritance_tree(course_id)
if emit_signals:
self.send_bulk_published_signal(bulk_ops_record, course_id)
bulk_ops_record.dirty = False # brand spanking clean now
def _is_in_bulk_operation(self, course_id, ignore_case=False):
"""
Returns whether a bulk operation is in progress for the given course.
"""
return super(MongoBulkOpsMixin, self)._is_in_bulk_operation(
course_id.for_branch(None), ignore_case
)
class ParentLocationCache(dict):
"""
Dict-based object augmented with a more cache-like interface, for internal use.
"""
# pylint: disable=missing-docstring
@contract(key=unicode)
def has(self, key):
return key in self
@contract(key=unicode, value="BlockUsageLocator | None")
def set(self, key, value):
self[key] = value
@contract(value="BlockUsageLocator")
def delete_by_value(self, value):
keys_to_delete = [k for k, v in self.iteritems() if v == value]
for key in keys_to_delete:
del self[key]
class MongoModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase, MongoBulkOpsMixin):
"""
A Mongodb backed ModuleStore
"""
# If no name is specified for the asset metadata collection, this name is used.
DEFAULT_ASSET_COLLECTION_NAME = 'assetstore'
# TODO (cpennington): Enable non-filesystem filestores
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
def __init__(self, contentstore, doc_store_config, fs_root, render_template,
default_class=None,
error_tracker=null_error_tracker,
i18n_service=None,
fs_service=None,
user_service=None,
signal_handler=None,
retry_wait_time=0.1,
**kwargs):
"""
:param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
"""
super(MongoModuleStore, self).__init__(contentstore=contentstore, **kwargs)
def do_connection(
db, collection, host, port=27017, tz_aware=True, user=None, password=None, asset_collection=None, **kwargs
):
"""
Create & open the connection, authenticate, and provide pointers to the collection
"""
self.database = MongoProxy(
pymongo.database.Database(
pymongo.MongoClient(
host=host,
port=port,
tz_aware=tz_aware,
document_class=dict,
**kwargs
),
db
),
wait_time=retry_wait_time
)
self.collection = self.database[collection]
# Collection which stores asset metadata.
if asset_collection is None:
asset_collection = self.DEFAULT_ASSET_COLLECTION_NAME
self.asset_collection = self.database[asset_collection]
if user is not None and password is not None:
self.database.authenticate(user, password)
do_connection(**doc_store_config)
# Force mongo to report errors, at the expense of performance
self.collection.write_concern = {'w': 1}
if default_class is not None:
module_path, _, class_name = default_class.rpartition('.')
class_ = getattr(import_module(module_path), class_name)
self.default_class = class_
else:
self.default_class = None
self.fs_root = path(fs_root)
self.error_tracker = error_tracker
self.render_template = render_template
self.i18n_service = i18n_service
self.fs_service = fs_service
self.user_service = user_service
self._course_run_cache = {}
self.signal_handler = signal_handler
def close_connections(self):
"""
Closes any open connections to the underlying database
"""
self.collection.database.connection.close()
def mongo_wire_version(self):
"""
Returns the wire version for mongo. Only used by unit tests which instrument the connection.
"""
self.database.connection._ensure_connected()
return self.database.connection.max_wire_version
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
# drop the assets
super(MongoModuleStore, self)._drop_database()
connection = self.collection.database.connection
connection.drop_database(self.collection.database.proxied_object)
connection.close()
@autoretry_read()
def fill_in_run(self, course_key):
"""
In mongo some course_keys are used without runs. This helper function returns
a course_key with the run filled in, if the course does actually exist.
"""
if course_key.run is not None:
return course_key
cache_key = (course_key.org, course_key.course)
if cache_key not in self._course_run_cache:
matching_courses = list(self.collection.find(SON([
('_id.tag', 'i4x'),
('_id.org', course_key.org),
('_id.course', course_key.course),
('_id.category', 'course'),
])).limit(1))
if not matching_courses:
return course_key
self._course_run_cache[cache_key] = matching_courses[0]['_id']['name']
return course_key.replace(run=self._course_run_cache[cache_key])
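# Illustrative sketch (hypothetical ids): a course key for org 'Org' and
# course 'C101' with run=None triggers one find() for the matching 'course'
# document and comes back as e.g. 'Org/C101/2014_Spring', the run stored in
# '_id.name'; repeat calls for the same org/course are served from
# self._course_run_cache instead of Mongo.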
def for_branch_setting(self, location):
"""
Returns the Location that is for the current branch setting.
"""
if location.category in DIRECT_ONLY_CATEGORIES:
return location.replace(revision=MongoRevisionKey.published)
if self.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred:
return location.replace(revision=MongoRevisionKey.draft)
return location.replace(revision=MongoRevisionKey.published)
def _get_parent_cache(self, branch):
"""
Provides a reference to one of the two branch-specific
ParentLocationCaches associated with the current request (if any).
"""
if self.request_cache is not None:
return self.request_cache.data.setdefault('parent-location-{}'.format(branch), ParentLocationCache())
else:
return ParentLocationCache()
def _compute_metadata_inheritance_tree(self, course_id):
'''
Find all inheritable fields from all xblocks in the course which may define inheritable data
'''
# get all collections in the course, this query should not return any leaf nodes
course_id = self.fill_in_run(course_id)
query = SON([
('_id.tag', 'i4x'),
('_id.org', course_id.org),
('_id.course', course_id.course),
('_id.category', {'$in': BLOCK_TYPES_WITH_CHILDREN})
])
# if we're only dealing in the published branch, then only get published containers
if self.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
query['_id.revision'] = None
# we just want the Location, children, and inheritable metadata
record_filter = {'_id': 1, 'definition.children': 1}
# just get the inheritable metadata since that is all we need for the computation
# this minimizes the data pushed over the wire
for field_name in InheritanceMixin.fields:
record_filter['metadata.{0}'.format(field_name)] = 1
# call out to the DB
resultset = self.collection.find(query, record_filter)
# it's ok to keep these as deprecated strings b/c the overall cache is indexed by course_key and this
# is a dictionary relative to that course
results_by_url = {}
root = None
# now go through the results and order them by the location url
for result in resultset:
# manually pick it apart b/c the db has tag and we want as_published revision regardless
location = as_published(Location._from_deprecated_son(result['_id'], course_id.run))
location_url = unicode(location)
if location_url in results_by_url:
# found either draft or live to complement the other revision
# FIXME this is wrong. If the child was moved in draft from one parent to the other, it will
# show up under both in this logic: https://openedx.atlassian.net/browse/TNL-1075
existing_children = results_by_url[location_url].get('definition', {}).get('children', [])
additional_children = result.get('definition', {}).get('children', [])
total_children = existing_children + additional_children
# use set to get rid of duplicates. We don't care about order; so, it shouldn't matter.
results_by_url[location_url].setdefault('definition', {})['children'] = set(total_children)
else:
results_by_url[location_url] = result
if location.category == 'course':
root = location_url
# now traverse the tree and compute down the inherited metadata
metadata_to_inherit = {}
def _compute_inherited_metadata(url):
"""
Helper method for computing inherited metadata for a specific location url
"""
my_metadata = results_by_url[url].get('metadata', {})
# go through all the children and recurse, but only if we have them
# in the result set. Remember results will not contain leaf nodes
for child in results_by_url[url].get('definition', {}).get('children', []):
if child in results_by_url:
new_child_metadata = copy.deepcopy(my_metadata)
new_child_metadata.update(results_by_url[child].get('metadata', {}))
results_by_url[child]['metadata'] = new_child_metadata
metadata_to_inherit[child] = new_child_metadata
_compute_inherited_metadata(child)
else:
# this is likely a leaf node, so let's record what metadata we need to inherit
metadata_to_inherit[child] = my_metadata.copy()
# WARNING: 'parent' is not part of inherited metadata, but
# we're piggybacking on this recursive traversal to grab
# and cache the child's parent, as a performance optimization.
# The 'parent' key will be popped out of the dictionary during
# CachingDescriptorSystem.load_item
metadata_to_inherit[child].setdefault('parent', {})[self.get_branch_setting()] = url
if root is not None:
_compute_inherited_metadata(root)
return metadata_to_inherit
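# Illustrative sketch of the returned mapping (urls and values are
# hypothetical): child location urls map to the metadata dict they inherit, e.g.
#   {'i4x://Org/C101/vertical/v1': {'graded': True, 'parent': {...}},
#    'i4x://Org/C101/problem/p1':  {'graded': True, 'parent': {...}}}
# where 'parent' is the extra non-inherited key cached per branch, as the
# WARNING above explains.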
def _get_cached_metadata_inheritance_tree(self, course_id, force_refresh=False):
'''
Compute the metadata inheritance for the course.
'''
tree = {}
course_id = self.fill_in_run(course_id)
if not force_refresh:
# see if we are first in the request cache (if present)
if self.request_cache is not None and unicode(course_id) in self.request_cache.data.get('metadata_inheritance', {}):
return self.request_cache.data['metadata_inheritance'][unicode(course_id)]
# then look in any caching subsystem (e.g. memcached)
if self.metadata_inheritance_cache_subsystem is not None:
tree = self.metadata_inheritance_cache_subsystem.get(unicode(course_id), {})
else:
logging.warning(
'Running MongoModuleStore without a metadata_inheritance_cache_subsystem. This is \
OK in localdev and testing environment. Not OK in production.'
)
if not tree:
# if not in subsystem, or we are on force refresh, then we have to compute
tree = self._compute_metadata_inheritance_tree(course_id)
# now write out computed tree to caching subsystem (e.g. memcached), if available
if self.metadata_inheritance_cache_subsystem is not None:
self.metadata_inheritance_cache_subsystem.set(unicode(course_id), tree)
# now populate a request_cache, if available. NOTE, we are outside of the
# scope of the above if: statement so that after a memcache hit, it'll get
# put into the request_cache
if self.request_cache is not None:
# we can't assume the 'metadata_inheritance' part of the request cache dict has been
# defined
if 'metadata_inheritance' not in self.request_cache.data:
self.request_cache.data['metadata_inheritance'] = {}
self.request_cache.data['metadata_inheritance'][unicode(course_id)] = tree
return tree
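# Sketch of the lookup order implemented above: per-request cache ->
# metadata_inheritance_cache_subsystem (e.g. memcached) ->
# _compute_metadata_inheritance_tree (Mongo); whatever is found or computed is
# written back to the outer layers so later lookups stay cheap.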
def refresh_cached_metadata_inheritance_tree(self, course_id, runtime=None):
"""
Refresh the cached metadata inheritance tree for the org/course combination
for location
If given a runtime, it replaces the cached_metadata in that runtime. NOTE: failure to provide
a runtime may mean that some objects report old values for inherited data.
"""
course_id = course_id.for_branch(None)
if not self._is_in_bulk_operation(course_id):
# below is done for side effects when runtime is None
cached_metadata = self._get_cached_metadata_inheritance_tree(course_id, force_refresh=True)
if runtime:
runtime.cached_metadata = cached_metadata
def _clean_item_data(self, item):
"""
Renames the '_id' field in item to 'location'
"""
item['location'] = item['_id']
del item['_id']
@autoretry_read()
def _query_children_for_cache_children(self, course_key, items):
"""
Generate a pymongo in query for finding the items and return the payloads
"""
# first get non-draft in a round-trip
query = {
'_id': {'$in': [
course_key.make_usage_key_from_deprecated_string(item).to_deprecated_son() for item in items
]}
}
return list(self.collection.find(query))
def _cache_children(self, course_key, items, depth=0):
"""
Returns a dictionary mapping Location -> item data, populated with json data
for all descendents of items up to the specified depth.
(0 = no descendents, 1 = children, 2 = grandchildren, etc)
If depth is None, will load all the children.
This will make a number of queries that is linear in the depth.
"""
data = {}
to_process = list(items)
course_key = self.fill_in_run(course_key)
parent_cache = self._get_parent_cache(self.get_branch_setting())
while to_process and depth is None or depth >= 0:
children = []
for item in to_process:
self._clean_item_data(item)
item_location = Location._from_deprecated_son(item['location'], course_key.run)
item_children = item.get('definition', {}).get('children', [])
children.extend(item_children)
for item_child in item_children:
parent_cache.set(item_child, item_location)
data[item_location] = item
if depth == 0:
break
# Load all children by id. See
# http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or
# for or-query syntax
to_process = []
if children:
to_process = self._query_children_for_cache_children(course_key, children)
# If depth is None, then we just recurse until we hit all the descendents
if depth is not None:
depth -= 1
return data
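# Illustrative sketch: with depth=1 the loop makes two passes -- the requested
# items themselves, then their immediate children -- so the returned dict holds
# json for the items plus one level of descendants; depth=None keeps querying
# until no children remain.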
@contract(
course_key=CourseKey,
item=dict,
apply_cached_metadata=bool,
using_descriptor_system="None|CachingDescriptorSystem"
)
def _load_item(self, course_key, item, data_cache, apply_cached_metadata=True, using_descriptor_system=None):
"""
Load an XModuleDescriptor from item, using the children stored in data_cache
Arguments:
course_key (CourseKey): which course to load from
item (dict): A dictionary with the following keys:
location: The serialized UsageKey for the item to load
data_dir (optional): The directory name to use as the root data directory for this XModule
data_cache (dict): A dictionary mapping from UsageKeys to xblock field data
(this is the xblock data loaded from the database)
apply_cached_metadata (bool): Whether to use the cached metadata for inheritance
purposes.
using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
to add data to, and to load the XBlocks from.
"""
course_key = self.fill_in_run(course_key)
location = Location._from_deprecated_son(item['location'], course_key.run)
data_dir = getattr(item, 'data_dir', location.course)
root = self.fs_root / data_dir
resource_fs = _OSFS_INSTANCE.setdefault(root, OSFS(root, create=True))
cached_metadata = {}
if apply_cached_metadata:
cached_metadata = self._get_cached_metadata_inheritance_tree(course_key)
if using_descriptor_system is None:
services = {}
if self.i18n_service:
services["i18n"] = self.i18n_service
if self.fs_service:
services["fs"] = self.fs_service
if self.user_service:
services["user"] = self.user_service
system = CachingDescriptorSystem(
modulestore=self,
course_key=course_key,
module_data=data_cache,
default_class=self.default_class,
resources_fs=resource_fs,
error_tracker=self.error_tracker,
render_template=self.render_template,
cached_metadata=cached_metadata,
mixins=self.xblock_mixins,
select=self.xblock_select,
services=services,
)
else:
system = using_descriptor_system
system.module_data.update(data_cache)
system.cached_metadata.update(cached_metadata)
return system.load_item(location)
def _load_items(self, course_key, items, depth=0, using_descriptor_system=None):
"""
Load a list of xmodules from the data in items, with children cached up
to specified depth
"""
course_key = self.fill_in_run(course_key)
data_cache = self._cache_children(course_key, items, depth)
        # when loading a course object without prefetching children (depth == 0), don't
        # bother with the metadata inheritance
return [
self._load_item(
course_key,
item,
data_cache,
using_descriptor_system=using_descriptor_system,
apply_cached_metadata=self._should_apply_cached_metadata(item, depth)
)
for item in items
]
def _should_apply_cached_metadata(self, item, depth):
"""
        Return whether inherited metadata should be applied to the item
        retrieved by this query
"""
category = item['location']['category']
apply_cached_metadata = category not in _DETACHED_CATEGORIES and \
not (category == 'course' and depth == 0)
return apply_cached_metadata
@autoretry_read()
def get_courses(self, **kwargs):
'''
Returns a list of course descriptors. This accepts an optional parameter of 'org' which
will apply an efficient filter to only get courses with the specified ORG
'''
course_org_filter = kwargs.get('org')
if course_org_filter:
course_records = self.collection.find({'_id.category': 'course', '_id.org': course_org_filter})
else:
course_records = self.collection.find({'_id.category': 'course'})
base_list = sum(
[
self._load_items(
SlashSeparatedCourseKey(course['_id']['org'], course['_id']['course'], course['_id']['name']),
[course]
)
for course
# I tried to add '$and': [{'_id.org': {'$ne': 'edx'}}, {'_id.course': {'$ne': 'templates'}}]
# but it didn't do the right thing (it filtered all edx and all templates out)
in course_records
if not ( # TODO kill this
course['_id']['org'] == 'edx' and
course['_id']['course'] == 'templates'
)
],
[]
)
return [course for course in base_list if not isinstance(course, ErrorDescriptor)]
def _find_one(self, location):
'''Look for a given location in the collection. If the item is not present, raise
ItemNotFoundError.
'''
assert isinstance(location, UsageKey)
item = self.collection.find_one(
{'_id': location.to_deprecated_son()}
)
if item is None:
raise ItemNotFoundError(location)
return item
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run, deprecated=True)
def get_course(self, course_key, depth=0, **kwargs):
"""
Get the course with the given courseid (org/course/run)
"""
assert isinstance(course_key, CourseKey)
course_key = self.fill_in_run(course_key)
location = course_key.make_usage_key('course', course_key.run)
try:
return self.get_item(location, depth=depth)
except ItemNotFoundError:
return None
def has_course(self, course_key, ignore_case=False, **kwargs):
"""
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
If ignore_case is True, do a case insensitive search,
otherwise, do a case sensitive search
"""
assert isinstance(course_key, CourseKey)
if isinstance(course_key, LibraryLocator):
return None # Libraries require split mongo
course_key = self.fill_in_run(course_key)
location = course_key.make_usage_key('course', course_key.run)
if ignore_case:
course_query = location.to_deprecated_son('_id.')
for key in course_query.iterkeys():
if isinstance(course_query[key], basestring):
course_query[key] = re.compile(r"(?i)^{}$".format(course_query[key]))
else:
course_query = {'_id': location.to_deprecated_son()}
course = self.collection.find_one(course_query, fields={'_id': True})
if course:
return SlashSeparatedCourseKey(course['_id']['org'], course['_id']['course'], course['_id']['name'])
else:
return None
def has_item(self, usage_key):
"""
Returns True if location exists in this ModuleStore.
"""
try:
self._find_one(usage_key)
return True
except ItemNotFoundError:
return False
def get_item(self, usage_key, depth=0, using_descriptor_system=None):
"""
Returns an XModuleDescriptor instance for the item at location.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
Arguments:
usage_key: a :class:`.UsageKey` instance
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all descendents.
using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
to add data to, and to load the XBlocks from.
"""
item = self._find_one(usage_key)
module = self._load_items(
usage_key.course_key,
[item],
depth,
using_descriptor_system=using_descriptor_system
)[0]
return module
@staticmethod
def _course_key_to_son(course_id, tag='i4x'):
"""
Generate the partial key to look up items relative to a given course
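        For example (illustrative values), org='edX' and course='DemoX' produce:
            SON([('_id.tag', 'i4x'), ('_id.org', 'edX'), ('_id.course', 'DemoX')])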
"""
return SON([
('_id.tag', tag),
('_id.org', course_id.org),
('_id.course', course_id.course),
])
@staticmethod
def _id_dict_to_son(id_dict):
"""
        Convert the given id dict into a SON with its keys in the canonical _id ordering
"""
return SON([
(key, id_dict[key])
for key in ('tag', 'org', 'course', 'category', 'name', 'revision')
])
@autoretry_read()
def get_items(
self,
course_id,
settings=None,
content=None,
key_revision=MongoRevisionKey.published,
qualifiers=None,
using_descriptor_system=None,
**kwargs
):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_id
NOTE: don't use this to look for courses
as the course_id is required. Use get_courses which is a lot faster anyway.
If you don't provide a value for revision, this limits the result to only ones in the
published course. Call this method on draft mongo store if you want to include drafts.
Args:
course_id (CourseKey): the course identifier
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as qualifiers below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as qualifiers below.
key_revision (str): the revision of the items you're looking for.
MongoRevisionKey.draft - only returns drafts
MongoRevisionKey.published (equates to None) - only returns published
If you want one of each matching xblock but preferring draft to published, call this same method
on the draft modulestore with ModuleStoreEnum.RevisionOption.draft_preferred.
qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. If the target field is a list,
                the value is matched against the list's members rather than requiring list equality.
                For substring matching, pass a regex object.
For this modulestore, ``name`` is a commonly provided key (Location based stores)
This modulestore does not allow searching dates by comparison or edited_by, previous_version,
update_version info.
using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
to add data to, and to load the XBlocks from.
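        Example (illustrative sketch only; ``store`` and ``course_id`` are assumed to exist):
            # all published problem blocks in the course
            problems = store.get_items(course_id, qualifiers={'category': 'problem'})
            # blocks whose settings-scoped ``graded`` field is set to True
            graded = store.get_items(course_id, settings={'graded': True})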
"""
qualifiers = qualifiers.copy() if qualifiers else {} # copy the qualifiers (destructively manipulated here)
query = self._course_key_to_son(course_id)
query['_id.revision'] = key_revision
for field in ['category', 'name']:
if field in qualifiers:
query['_id.' + field] = qualifiers.pop(field)
for key, value in (settings or {}).iteritems():
query['metadata.' + key] = value
for key, value in (content or {}).iteritems():
query['definition.data.' + key] = value
if 'children' in qualifiers:
query['definition.children'] = qualifiers.pop('children')
query.update(qualifiers)
items = self.collection.find(
query,
sort=[SORT_REVISION_FAVOR_DRAFT],
)
modules = self._load_items(
course_id,
list(items),
using_descriptor_system=using_descriptor_system
)
return modules
def create_course(self, org, course, run, user_id, fields=None, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
Raises:
            DuplicateCourseError: If a course with the same org, course, and run already exists
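        Example (illustrative sketch; ``store`` and ``user_id`` are assumed to exist):
            course = store.create_course('edX', 'DemoX', '2014', user_id)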
"""
course_id = SlashSeparatedCourseKey(org, course, run)
# Check if a course with this org/course has been defined before (case-insensitive)
course_search_location = SON([
('_id.tag', 'i4x'),
('_id.org', re.compile(u'^{}$'.format(course_id.org), re.IGNORECASE)),
('_id.course', re.compile(u'^{}$'.format(course_id.course), re.IGNORECASE)),
('_id.category', 'course'),
])
courses = self.collection.find(course_search_location, fields=('_id'))
if courses.count() > 0:
raise DuplicateCourseError(course_id, courses[0]['_id'])
with self.bulk_operations(course_id):
xblock = self.create_item(user_id, course_id, 'course', course_id.run, fields=fields, **kwargs)
# create any other necessary things as a side effect
super(MongoModuleStore, self).create_course(
org, course, run, user_id, runtime=xblock.runtime, **kwargs
)
return xblock
def create_xblock(
self, runtime, course_key, block_type, block_id=None, fields=None,
metadata=None, definition_data=None, **kwargs
):
"""
Create the new xblock but don't save it. Returns the new module.
:param runtime: if you already have an xblock from the course, the xblock.runtime value
:param fields: a dictionary of field names and values for the new xmodule
"""
if metadata is None:
metadata = {}
if definition_data is None:
definition_data = {}
# @Cale, should this use LocalId like we do in split?
if block_id is None:
if block_type == 'course':
block_id = course_key.run
else:
block_id = u'{}_{}'.format(block_type, uuid4().hex[:5])
if runtime is None:
services = {}
if self.i18n_service:
services["i18n"] = self.i18n_service
if self.fs_service:
services["fs"] = self.fs_service
if self.user_service:
services["user"] = self.user_service
runtime = CachingDescriptorSystem(
modulestore=self,
module_data={},
course_key=course_key,
default_class=self.default_class,
resources_fs=None,
error_tracker=self.error_tracker,
render_template=self.render_template,
cached_metadata={},
mixins=self.xblock_mixins,
select=self.xblock_select,
services=services,
)
xblock_class = runtime.load_block_type(block_type)
location = course_key.make_usage_key(block_type, block_id)
dbmodel = self._create_new_field_data(block_type, location, definition_data, metadata)
xmodule = runtime.construct_xblock_from_class(
xblock_class,
# We're loading a descriptor, so student_id is meaningless
# We also don't have separate notions of definition and usage ids yet,
# so we use the location for both.
ScopeIds(None, block_type, location, location),
dbmodel,
)
if fields is not None:
for key, value in fields.iteritems():
setattr(xmodule, key, value)
# decache any pending field settings from init
xmodule.save()
return xmodule
def create_item(self, user_id, course_key, block_type, block_id=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
"""
if block_id is None:
if block_type == 'course':
block_id = course_key.run
else:
block_id = u'{}_{}'.format(block_type, uuid4().hex[:5])
runtime = kwargs.pop('runtime', None)
xblock = self.create_xblock(runtime, course_key, block_type, block_id, **kwargs)
xblock = self.update_item(xblock, user_id, allow_not_found=True)
return xblock
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, **kwargs):
"""
        Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_keys.edx.UsageKey` identifying the
block that this item should be parented under
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
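        Example (illustrative sketch; ``store``, ``user_id`` and ``chapter_key`` are assumed to exist):
            sequential = store.create_child(user_id, chapter_key, 'sequential')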
"""
xblock = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, **kwargs)
# attach to parent if given
if 'detached' not in xblock._class_tags:
parent = self.get_item(parent_usage_key)
# Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
if kwargs.get('position') is None:
parent.children.append(xblock.location)
else:
parent.children.insert(kwargs.get('position'), xblock.location)
self.update_item(parent, user_id, child_update=True)
return xblock
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
Simple implementation of overwriting any existing xblock
"""
if block_type == 'course':
block_id = course_key.run
xblock = self.create_xblock(runtime, course_key, block_type, block_id, fields)
return self.update_item(xblock, user_id, allow_not_found=True)
def _get_course_for_item(self, location, depth=0):
'''
for a given Xmodule, return the course that it belongs to
Also we have to assert that this module maps to only one course item - it'll throw an
assert if not
'''
return self.get_course(location.course_key, depth)
def _update_single_item(self, location, update, allow_not_found=False):
"""
Set update on the specified item, and raises ItemNotFoundError
if the location doesn't exist
"""
bulk_record = self._get_bulk_ops_record(location.course_key)
bulk_record.dirty = True
# See http://www.mongodb.org/display/DOCS/Updating for
# atomic update syntax
result = self.collection.update(
{'_id': location.to_deprecated_son()},
{'$set': update},
multi=False,
upsert=allow_not_found,
w=1, # wait until primary commits
)
if result['n'] == 0:
raise ItemNotFoundError(location)
def _update_ancestors(self, location, update):
"""
Recursively applies update to all the ancestors of location
"""
parent = self._get_raw_parent_location(as_published(location), ModuleStoreEnum.RevisionOption.draft_preferred)
if parent:
self._update_single_item(parent, update)
self._update_ancestors(parent, update)
def update_item(self, xblock, user_id, allow_not_found=False, force=False, isPublish=False,
is_publish_root=True):
"""
Update the persisted version of xblock to reflect its current values.
xblock: which xblock to persist
user_id: who made the change (ignored for now by this modulestore)
        allow_not_found: whether to create a new object if one doesn't already exist, rather than raising an error
force: force is meaningless for this modulestore
isPublish: an internal parameter that indicates whether this update is due to a Publish operation, and
thus whether the item's published information should be updated.
is_publish_root: when publishing, this indicates whether xblock is the root of the publish and should
therefore propagate subtree edit info up the tree
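        Example (illustrative sketch; ``store``, ``block`` and ``user_id`` are assumed to exist):
            block.display_name = 'Week 1'
            store.update_item(block, user_id)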
"""
course_key = xblock.location.course_key
try:
definition_data = self._serialize_scope(xblock, Scope.content)
now = datetime.now(UTC)
payload = {
'definition.data': definition_data,
'metadata': self._serialize_scope(xblock, Scope.settings),
'edit_info': {
'edited_on': now,
'edited_by': user_id,
'subtree_edited_on': now,
'subtree_edited_by': user_id,
}
}
if isPublish:
payload['edit_info']['published_date'] = now
payload['edit_info']['published_by'] = user_id
elif 'published_date' in getattr(xblock, '_edit_info', {}):
payload['edit_info']['published_date'] = xblock._edit_info['published_date']
payload['edit_info']['published_by'] = xblock._edit_info['published_by']
if xblock.has_children:
children = self._serialize_scope(xblock, Scope.children)
payload.update({'definition.children': children['children']})
# Remove all old pointers to me, then add my current children back
parent_cache = self._get_parent_cache(self.get_branch_setting())
parent_cache.delete_by_value(xblock.location)
for child in xblock.children:
parent_cache.set(unicode(child), xblock.location)
self._update_single_item(xblock.scope_ids.usage_id, payload, allow_not_found=allow_not_found)
# update subtree edited info for ancestors
# don't update the subtree info for descendants of the publish root for efficiency
if not isPublish or (isPublish and is_publish_root):
ancestor_payload = {
'edit_info.subtree_edited_on': now,
'edit_info.subtree_edited_by': user_id
}
self._update_ancestors(xblock.scope_ids.usage_id, ancestor_payload)
# update the edit info of the instantiated xblock
xblock._edit_info = payload['edit_info']
# recompute (and update) the metadata inheritance tree which is cached
self.refresh_cached_metadata_inheritance_tree(xblock.scope_ids.usage_id.course_key, xblock.runtime)
# fire signal that we've written to DB
except ItemNotFoundError:
if not allow_not_found:
raise
elif not self.has_course(course_key):
raise ItemNotFoundError(course_key)
return xblock
def _serialize_scope(self, xblock, scope):
"""
        Serialize all set fields in the given scope, converting any reference fields
        from UsageKeys to their deprecated string form
        :param xblock: the XBlock whose fields to serialize
        :param scope: the Scope whose fields to serialize
        :return: a dict mapping field names to their jsonified values
"""
jsonfields = {}
for field_name, field in xblock.fields.iteritems():
if field.scope == scope and field.is_set_on(xblock):
if field.scope == Scope.parent:
continue
elif isinstance(field, Reference):
jsonfields[field_name] = unicode(field.read_from(xblock))
elif isinstance(field, ReferenceList):
jsonfields[field_name] = [
unicode(ele) for ele in field.read_from(xblock)
]
elif isinstance(field, ReferenceValueDict):
jsonfields[field_name] = {
key: unicode(subvalue) for key, subvalue in field.read_from(xblock).iteritems()
}
else:
jsonfields[field_name] = field.read_json(xblock)
return jsonfields
def _get_non_orphan_parents(self, location, parents, revision):
"""
        Extract the non-orphan parents by traversing the list of candidate parents, and clear the
        children of any orphan parent found, so the orphan check is not repeated next time.
"""
non_orphan_parents = []
# get bulk_record once rather than for each iteration
bulk_record = self._get_bulk_ops_record(location.course_key)
for parent in parents:
parent_loc = Location._from_deprecated_son(parent['_id'], location.course_key.run)
# travel up the tree for orphan validation
ancestor_loc = parent_loc
while ancestor_loc is not None:
current_loc = ancestor_loc
ancestor_loc = self._get_raw_parent_location(as_published(current_loc), revision)
if ancestor_loc is None:
bulk_record.dirty = True
# The parent is an orphan, so remove all the children including
# the location whose parent we are looking for from orphan parent
self.collection.update(
{'_id': parent_loc.to_deprecated_son()},
{'$set': {'definition.children': []}},
multi=False,
upsert=True,
)
elif ancestor_loc.category == 'course':
# once we reach the top location of the tree and if the location is not an orphan then the
# parent is not an orphan either
non_orphan_parents.append(parent_loc)
break
return non_orphan_parents
def _get_raw_parent_location(self, location, revision=ModuleStoreEnum.RevisionOption.published_only):
'''
Helper for get_parent_location that finds the location that is the parent of this location in this course,
but does NOT return a version agnostic location.
'''
assert location.revision is None
assert revision == ModuleStoreEnum.RevisionOption.published_only \
or revision == ModuleStoreEnum.RevisionOption.draft_preferred
parent_cache = self._get_parent_cache(self.get_branch_setting())
if parent_cache.has(unicode(location)):
return parent_cache.get(unicode(location))
# create a query with tag, org, course, and the children field set to the given location
query = self._course_key_to_son(location.course_key)
query['definition.children'] = unicode(location)
# if only looking for the PUBLISHED parent, set the revision in the query to None
if revision == ModuleStoreEnum.RevisionOption.published_only:
query['_id.revision'] = MongoRevisionKey.published
def cache_and_return(parent_loc): # pylint:disable=missing-docstring
parent_cache.set(unicode(location), parent_loc)
return parent_loc
# query the collection, sorting by DRAFT first
parents = list(
self.collection.find(query, {'_id': True}, sort=[SORT_REVISION_FAVOR_DRAFT])
)
if len(parents) == 0:
# no parents were found
return cache_and_return(None)
if revision == ModuleStoreEnum.RevisionOption.published_only:
if len(parents) > 1:
non_orphan_parents = self._get_non_orphan_parents(location, parents, revision)
if len(non_orphan_parents) == 0:
# no actual parent found
return cache_and_return(None)
if len(non_orphan_parents) > 1:
# should never have multiple PUBLISHED parents
raise ReferentialIntegrityError(
u"{} parents claim {}".format(len(parents), location)
)
else:
return cache_and_return(non_orphan_parents[0].replace(run=location.course_key.run))
else:
# return the single PUBLISHED parent
return cache_and_return(Location._from_deprecated_son(parents[0]['_id'], location.course_key.run))
else:
# there could be 2 different parents if
# (1) the draft item was moved or
# (2) the parent itself has 2 versions: DRAFT and PUBLISHED
# if there are multiple parents with version PUBLISHED then choose from non-orphan parents
all_parents = []
published_parents = 0
for parent in parents:
if parent['_id']['revision'] is None:
published_parents += 1
all_parents.append(parent)
# since we sorted by SORT_REVISION_FAVOR_DRAFT, the 0'th parent is the one we want
if published_parents > 1:
non_orphan_parents = self._get_non_orphan_parents(location, all_parents, revision)
return cache_and_return(non_orphan_parents[0].replace(run=location.course_key.run))
found_id = all_parents[0]['_id']
# don't disclose revision outside modulestore
return cache_and_return(Location._from_deprecated_son(found_id, location.course_key.run))
def get_parent_location(self, location, revision=ModuleStoreEnum.RevisionOption.published_only, **kwargs):
'''
Find the location that is the parent of this location in this course.
Returns: version agnostic location (revision always None) as per the rest of mongo.
Args:
revision:
ModuleStoreEnum.RevisionOption.published_only
- return only the PUBLISHED parent if it exists, else returns None
ModuleStoreEnum.RevisionOption.draft_preferred
- return either the DRAFT or PUBLISHED parent,
preferring DRAFT, if parent(s) exists,
else returns None
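        Example (illustrative sketch; ``store`` and ``block_key`` are assumed to exist):
            parent = store.get_parent_location(block_key, ModuleStoreEnum.RevisionOption.draft_preferred)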
'''
parent = self._get_raw_parent_location(location, revision)
if parent:
return parent
return None
def get_modulestore_type(self, course_key=None):
"""
Returns an enumeration-like type reflecting the type of this modulestore per ModuleStoreEnum.Type
Args:
course_key: just for signature compatibility
"""
return ModuleStoreEnum.Type.mongo
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the locations for orphans in the course.
"""
course_key = self.fill_in_run(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
query = self._course_key_to_son(course_key)
query['_id.category'] = {'$nin': detached_categories}
all_items = self.collection.find(query)
all_reachable = set()
item_locs = set()
for item in all_items:
if item['_id']['category'] != 'course':
# It would be nice to change this method to return UsageKeys instead of the deprecated string.
item_locs.add(
unicode(as_published(Location._from_deprecated_son(item['_id'], course_key.run)))
)
all_reachable = all_reachable.union(item.get('definition', {}).get('children', []))
item_locs -= all_reachable
return [course_key.make_usage_key_from_deprecated_string(item_loc) for item_loc in item_locs]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = self.collection.find(
{'_id.category': 'course', 'definition.data.wiki_slug': wiki_slug},
{'_id': True}
)
# the course's run == its name. It's the only xblock for which that's necessarily true.
return [
Location._from_deprecated_son(course['_id'], course['_id']['name']).course_key
for course in courses
]
def _create_new_field_data(self, _category, _location, definition_data, metadata):
"""
To instantiate a new xmodule which will be saved later, set up the dbModel and kvs
"""
kvs = MongoKeyValueStore(
definition_data,
None,
[],
metadata,
)
field_data = KvsFieldData(kvs)
return field_data
def _find_course_assets(self, course_key):
"""
Internal; finds (or creates) course asset info about all assets for a particular course
Arguments:
course_key (CourseKey): course identifier
Returns:
CourseAssetsFromStorage object, wrapping the relevant Mongo doc. If asset metadata
exists, other keys will be the other asset types with values as lists of asset metadata.
"""
# Using the course_key, find or insert the course asset metadata document.
# A single document exists per course to store the course asset metadata.
course_key = self.fill_in_run(course_key)
if course_key.run is None:
log.warning(u'No run found for combo org "{}" course "{}" on asset request.'.format(
course_key.org, course_key.course
))
course_assets = None
else:
# Complete course key, so query for asset metadata.
course_assets = self.asset_collection.find_one(
{'course_id': unicode(course_key)},
)
doc_id = None if course_assets is None else course_assets['_id']
if course_assets is None:
# Check to see if the course is created in the course collection.
if self.get_course(course_key) is None:
raise ItemNotFoundError(course_key)
else:
# Course exists, so create matching assets document.
course_assets = {'course_id': unicode(course_key), 'assets': {}}
doc_id = self.asset_collection.insert(course_assets)
elif isinstance(course_assets['assets'], list):
# This record is in the old course assets format.
# Ensure that no data exists before updating the format.
assert len(course_assets['assets']) == 0
# Update the format to a dict.
self.asset_collection.update(
{'_id': doc_id},
{'$set': {'assets': {}}}
)
# Pass back wrapped 'assets' dict with the '_id' key added to it for document update purposes.
return CourseAssetsFromStorage(course_key, doc_id, course_assets['assets'])
def _make_mongo_asset_key(self, asset_type):
"""
        Given an asset type, form the key needed to update the proper embedded field in the Mongo doc.
"""
return 'assets.{}'.format(asset_type)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long')
def _save_asset_metadata_list(self, asset_metadata_list, user_id, import_only):
"""
Internal; saves the info for a particular course's asset.
Arguments:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if edited_on/by data should remain unchanged.
"""
course_key = asset_metadata_list[0].asset_id.course_key
course_assets = self._find_course_assets(course_key)
assets_by_type = self._save_assets_by_type(course_key, asset_metadata_list, course_assets, user_id, import_only)
# Build an update set with potentially multiple embedded fields.
updates_by_type = {}
for asset_type, assets in assets_by_type.iteritems():
updates_by_type[self._make_mongo_asset_key(asset_type)] = assets.as_list()
# Update the document.
self.asset_collection.update(
{'_id': course_assets.doc_id},
{'$set': updates_by_type}
)
return True
@contract(asset_metadata='AssetMetadata', user_id='int|long')
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the info for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
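        Example (illustrative sketch; ``store``, ``asset_md`` and ``user_id`` are assumed to exist):
            store.save_asset_metadata(asset_md, user_id)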
"""
return self._save_asset_metadata_list([asset_metadata, ], user_id, import_only)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long')
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
            asset_metadata_list (list(AssetMetadata)): list of asset metadata to save
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
return self._save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
If dest_course already has assets, this removes the previous value.
It doesn't combine the assets in dest.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
"""
source_assets = self._find_course_assets(source_course_key)
dest_assets = {'assets': source_assets.asset_md.copy(), 'course_id': unicode(dest_course_key)}
self.asset_collection.remove({'course_id': unicode(dest_course_key)})
# Update the document.
self.asset_collection.insert(dest_assets)
@contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long')
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute: value pairs to set
Raises:
ItemNotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
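        Example (illustrative sketch; ``store``, ``asset_key`` and ``user_id`` are assumed to exist):
            store.set_asset_metadata_attrs(asset_key, {'locked': True}, user_id)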
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
raise ItemNotFoundError(asset_key)
# Form an AssetMetadata.
all_assets = course_assets[asset_key.asset_type]
md = AssetMetadata(asset_key, asset_key.path)
md.from_storable(all_assets[asset_idx])
md.update(attr_dict)
# Generate a Mongo doc from the metadata and update the course asset info.
all_assets[asset_idx] = md.to_storable()
self.asset_collection.update(
{'_id': course_assets.doc_id},
{"$set": {self._make_mongo_asset_key(asset_key.asset_type): all_assets}}
)
@contract(asset_key='AssetKey', user_id='int|long')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return 0
all_asset_info = course_assets[asset_key.asset_type]
all_asset_info.pop(asset_idx)
# Update the document.
self.asset_collection.update(
{'_id': course_assets.doc_id},
{'$set': {self._make_mongo_asset_key(asset_key.asset_type): all_asset_info}}
)
return 1
# pylint: disable=unused-argument
@contract(course_key='CourseKey', user_id='int|long')
def delete_all_asset_metadata(self, course_key, user_id):
"""
Delete all of the assets which use this course_key as an identifier.
Arguments:
course_key (CourseKey): course_identifier
"""
# Using the course_id, find the course asset metadata document.
# A single document exists per course to store the course asset metadata.
try:
course_assets = self._find_course_assets(course_key)
self.asset_collection.remove(course_assets.doc_id)
except ItemNotFoundError:
# When deleting asset metadata, if a course's asset metadata is not present, no big deal.
pass
def heartbeat(self):
"""
Check that the db is reachable.
"""
if self.database.connection.alive():
return {ModuleStoreEnum.Type.mongo: True}
else:
raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo')
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
# Because we often query for some subset of the id, we define this index:
self.collection.create_index([
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('_id.category', pymongo.ASCENDING),
('_id.name', pymongo.ASCENDING),
])
# Because we often scan for all category='course' regardless of the value of the other fields:
self.collection.create_index('_id.category')
# Because lms calls get_parent_locations frequently (for path generation):
self.collection.create_index('definition.children', sparse=True)
# To allow prioritizing draft vs published material
self.collection.create_index('_id.revision')
# Some overrides that still need to be implemented by subclasses
def convert_to_draft(self, location, user_id):
raise NotImplementedError()
def delete_item(self, location, user_id, **kwargs):
raise NotImplementedError()
def has_changes(self, xblock):
raise NotImplementedError()
def has_published_version(self, xblock):
raise NotImplementedError()
def publish(self, location, user_id):
raise NotImplementedError()
def revert_to_published(self, location, user_id):
raise NotImplementedError()
def unpublish(self, location, user_id):
raise NotImplementedError()
|
diverted247/signer
|
refs/heads/master
|
libs/pdfminer/pdfminer/pdfparser.py
|
9
|
#!/usr/bin/env python
import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from psparser import PSStackParser
from psparser import PSSyntaxError, PSEOF
from psparser import KWD, STRICT
from pdftypes import PDFException
from pdftypes import PDFStream, PDFObjRef
from pdftypes import int_value
from pdftypes import dict_value
## Exceptions
##
class PDFSyntaxError(PDFException):
pass
## PDFParser
##
class PDFParser(PSStackParser):
"""
    PDFParser fetches PDF objects from a file stream.
    It can handle indirect references by referring to
    a PDF document set by the set_document method.
It also reads XRefs at the end of every PDF file.
Typical usage:
parser = PDFParser(fp)
parser.read_xref()
parser.read_xref(fallback=True) # optional
parser.set_document(doc)
parser.seek(offset)
parser.nextobject()
"""
def __init__(self, fp):
PSStackParser.__init__(self, fp)
self.doc = None
self.fallback = False
return
def set_document(self, doc):
"""Associates the parser with a PDFDocument object."""
self.doc = doc
return
KEYWORD_R = KWD('R')
KEYWORD_NULL = KWD('null')
KEYWORD_ENDOBJ = KWD('endobj')
KEYWORD_STREAM = KWD('stream')
KEYWORD_XREF = KWD('xref')
KEYWORD_STARTXREF = KWD('startxref')
def do_keyword(self, pos, token):
"""Handles PDF-related keywords."""
if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF):
self.add_results(*self.pop(1))
elif token is self.KEYWORD_ENDOBJ:
self.add_results(*self.pop(4))
elif token is self.KEYWORD_NULL:
# null object
self.push((pos, None))
elif token is self.KEYWORD_R:
# reference to indirect object
try:
((_, objid), (_, genno)) = self.pop(2)
(objid, genno) = (int(objid), int(genno))
obj = PDFObjRef(self.doc, objid, genno)
self.push((pos, obj))
except PSSyntaxError:
pass
elif token is self.KEYWORD_STREAM:
# stream object
((_, dic),) = self.pop(1)
dic = dict_value(dic)
objlen = 0
if not self.fallback:
try:
objlen = int_value(dic['Length'])
except KeyError:
if STRICT:
raise PDFSyntaxError('/Length is undefined: %r' % dic)
self.seek(pos)
try:
(_, line) = self.nextline() # 'stream'
except PSEOF:
if STRICT:
raise PDFSyntaxError('Unexpected EOF')
return
pos += len(line)
self.fp.seek(pos)
data = self.fp.read(objlen)
self.seek(pos+objlen)
while 1:
try:
(linepos, line) = self.nextline()
except PSEOF:
if STRICT:
raise PDFSyntaxError('Unexpected EOF')
break
if 'endstream' in line:
i = line.index('endstream')
objlen += i
data += line[:i]
break
objlen += len(line)
data += line
self.seek(pos+objlen)
# XXX limit objlen not to exceed object boundary
if 2 <= self.debug:
print >>sys.stderr, 'Stream: pos=%d, objlen=%d, dic=%r, data=%r...' % \
(pos, objlen, dic, data[:10])
obj = PDFStream(dic, data, self.doc.decipher)
self.push((pos, obj))
else:
# others
self.push((pos, token))
return
## PDFStreamParser
##
class PDFStreamParser(PDFParser):
"""
PDFStreamParser is used to parse PDF content streams
    that are contained in each page and carry the instructions
for rendering the page. A reference to a PDF document is
needed because a PDF content stream can also have
indirect references to other objects in the same document.
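    Typical usage (illustrative sketch; ``doc`` is assumed to be an already-initialised
    PDF document object and ``data`` the raw bytes of a content stream):
        parser = PDFStreamParser(data)
        parser.set_document(doc)
        parser.nextobject()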
"""
def __init__(self, data):
PDFParser.__init__(self, StringIO(data))
return
def flush(self):
self.add_results(*self.popall())
return
def do_keyword(self, pos, token):
if token is self.KEYWORD_R:
# reference to indirect object
try:
((_, objid), (_, genno)) = self.pop(2)
(objid, genno) = (int(objid), int(genno))
obj = PDFObjRef(self.doc, objid, genno)
self.push((pos, obj))
except PSSyntaxError:
pass
return
# others
self.push((pos, token))
return
|
bratsche/Neutron-Drive
|
refs/heads/master
|
neutron-drive/django/http/multipartparser.py
|
65
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
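    Example (illustrative sketch; ``request`` is assumed to be a request object whose
    raw body can be read like a file):
        parser = MultiPartParser(request.META, request, request.upload_handlers)
        post, files = parser.parse()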
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
        # Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(MultiValueDict(), encoding=self._encoding), MultiValueDict()
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
try:
charset = meta_data.get('content-type', (0,{}))[1].get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
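    Example (illustrative):
        stream = LazyStream(iter(['abcdef']))
        stream.read(3)       # -> 'abc'
        stream.unget('abc')  # push the bytes back onto the front of the stream
        stream.read()        # -> 'abcdef' again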
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever chunk is conveniently returned
        from the iterator. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
class ChunkIter(object):
"""
    An iterable that will yield chunks of data. Given a file-like object in its
    constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to .next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we dont treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof = False):
"""
Finds a multipart boundary in data.
        Should no boundary exist in the data, None is returned. Otherwise
a tuple containing the indices of the following are returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
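# Illustrative example (not part of the original module): parse_header() splits a raw
# header value into its main value and a dict of parameters, e.g.
#   parse_header('form-data; name="avatar"; filename="me.png"')
#   == ('form-data', {'name': 'avatar', 'filename': 'me.png'})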
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
|
tcporco/SageBoxModels
|
refs/heads/master
|
boxmodel/code_generation.py
|
1
|
#*****************************************************************************
# Copyright (C) 2017 Lee Worden <worden dot lee at gmail dot com>
#
# Distributed under the terms of the GNU General Public License (GPL) v.2
# http://www.gnu.org/licenses/
#*****************************************************************************
#from sage.all import *
from product import *
from dynamicalsystems import *
## code evaluating categories of compartments in R
def R_inclusions_fn( self, name='observations', inclusions=None, extras=Bindings() ):
"""R_inclusions_fn: emit definition of an R function that constructs
aggregate quantities from the compartments of a product model.
inclusions: which quantities to define, if not the ones generated in
the process of the product operation by tracking the division of
factor compartments into product compartments.
extras: quantities to include in addition to the above."""
code = '#!/usr/bin/R\n'
code += name + ' <- function( state ) {\n'
code += ' with(state, {\n'
code += ' obs <- list(c(\n'
if inclusions is None:
code += ',\n'.join(
' ' + str(v) + ' = ' + ' + '.join( str(vt) for vt in ll )
for v, ll in self._inclusion_variables.iteritems()
if ll != [v]
) + '\n'
else:
code += ',\n'.join(
' ' + str(k) + ' = ' + str(v)
for k, v in inclusions._dict.iteritems()
) + '\n'
if len(extras._dict) > 0:
code += ',\n'.join(
' ' + str(k) + ' = ' + str(v)
for k, v in extras._dict.iteritems()
) + '\n'
code += ' ))\n'
code += ' })\n'
code += ' return(obs)\n'
code += '}\n'
return code
BoxModelProduct.R_inclusions_fn = R_inclusions_fn
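# Illustrative usage sketch (not part of the original module): given a product model
# ``m`` constructed via a BoxModelProduct operation, the generated R snippet can be
# obtained and written out with, e.g.
#   r_code = m.R_inclusions_fn( name='observations' )
#   open( 'observations.R', 'w' ).write( r_code )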
def R_marginal_names( self, name='marginals' ):
"""R_marginal_names_fn: provide for R the names of compartments
indexed by compartments of the factor models"""
code = '#!/usr/bin/R\n'
code += name + ' <- c(\n'
if len(self._variable_marginals):
code += ',\n'.join(
' ' + str(v) + ' = c("' + '", "'.join( str(vt) for vt in ll ) + '")'
for v, ll in self._variable_marginals.iteritems()
if ll != [v]
) + '\n'
if len(self._parameter_marginals):
code += ',\n'.join(
' ' + str(v) + ' = c("' + '", "'.join( str(vt) for vt in ll ) + '")'
for v, ll in self._parameter_marginals.iteritems()
if ll != [v]
) + '\n'
code += ')\n'
return code
BoxModelProduct.R_marginal_names = R_marginal_names
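# Illustrative sketch (compartment names hypothetical) of the R code that
# R_inclusions_fn emits for a product model where I_A and I_B roll up into I:
#   observations <- function( state ) {
#     with(state, {
#       obs <- list(c(
#         I = I_A + I_B
#       ))
#     })
#     return(obs)
#   }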
|
Gustry/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/gdal/roughness.py
|
12
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
roughness.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class roughness(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
OPTIONS = 'OPTIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
1,
parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'),
defaultValue=False))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Roughness')))
def name(self):
return 'roughness'
def displayName(self):
return self.tr('Roughness')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def commandName(self):
return 'gdaldem'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments = [
'roughness',
inLayer.source(),
out,
'-of',
QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]),
'-b',
str(self.parameterAsInt(parameters, self.BAND, context))
]
if self.parameterAsBoolean(parameters, self.COMPUTE_EDGES, context):
arguments.append('-compute_edges')
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
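# Illustrative sketch: for a GeoTIFF output with band 1 and edge computation
# enabled, getConsoleCommands() above assembles roughly
#   gdaldem roughness input.tif output.tif -of GTiff -b 1 -compute_edges
# returned as [commandName(), GdalUtils.escapeAndJoin(arguments)].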
|
uraniumanchor/django-smart-selects
|
refs/heads/master
|
smart_selects/urls.py
|
9
|
try:
from django.conf.urls.defaults import patterns, url
except ImportError:
from django.conf.urls import patterns, url
urlpatterns = patterns(
'smart_selects.views',
url(r'^all/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$',
'filterchain_all', name='chained_filter_all'),
url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$',
'filterchain', name='chained_filter'),
url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<manager>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$',
'filterchain', name='chained_filter'),
)
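# Example (sketch; URL prefix hypothetical): with this urlconf included under
# /chaining/, a request to /chaining/filter/myapp/Country/continent/1/ resolves
# to the 'filterchain' view with app='myapp', model='Country',
# field='continent', value='1'.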
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnboundLocalVariableInspection/PositiveIteration.py
|
21
|
def test1():
for i in "abc":
j = 1
print(j)
def test2():
for i in (1, 2):
j = 1
print(j)
def test3():
for i in [1, 2]:
j = 1
print(j)
|
qutip/qutip-benchmark
|
refs/heads/master
|
benchmark/tests/test_15.py
|
1
|
from qutip import *
from numpy import *
from time import time
def test_15(runs=1):
"""
mcsolve_f90 evolution of 8-spin chain
"""
test_name='8-spin MC_F90 [256]'
    N = 8  # number of spins
# uniform parameters
h = 1.0 * 2 * pi * ones(N)
Jz = 0.1 * 2 * pi * ones(N)
Jx = 0.1 * 2 * pi * ones(N)
Jy = 0.1 * 2 * pi * ones(N)
# dephasing rate
gamma = 0.01 * ones(N)
    # initial state: first spin in state |1>, the rest in state |0>
psi_list = []
psi_list.append(basis(2,1))
for n in range(N-1):
psi_list.append(basis(2,0))
psi0 = tensor(psi_list)
tlist = linspace(0, 10, 200)
# Hamiltonian
si = qeye(2)
sx = sigmax()
sy = sigmay()
sz = sigmaz()
sx_list = []
sy_list = []
sz_list = []
for n in range(N):
op_list = []
for m in range(N):
op_list.append(si)
op_list[n] = sx
sx_list.append(tensor(op_list))
op_list[n] = sy
sy_list.append(tensor(op_list))
op_list[n] = sz
sz_list.append(tensor(op_list))
# construct the hamiltonian
H = 0
# energy splitting terms
for n in range(N):
H += - 0.5 * h[n] * sz_list[n]
# interaction terms
for n in range(N-1):
H += - 0.5 * Jx[n] * sx_list[n] * sx_list[n+1]
H += - 0.5 * Jy[n] * sy_list[n] * sy_list[n+1]
H += - 0.5 * Jz[n] * sz_list[n] * sz_list[n+1]
# collapse operators
c_op_list = []
# spin dephasing
for n in range(N):
c_op_list.append(sqrt(gamma[n]) * sz_list[n])
# evolve and calculate expectation values
opts=Odeoptions(gui=False)
tot_elapsed = 0
for n in range(runs):
tic=time()
mcsolve_f90(H, psi0, tlist, c_op_list, sz_list,options=opts)
toc=time()
tot_elapsed += toc - tic
return [test_name], [tot_elapsed / runs]
if __name__=='__main__':
test_15()
|
sbussetti/django-nested-admin
|
refs/heads/master
|
nested_admin/tests/one_deep/tests.py
|
1
|
try:
from distutils.spawn import find_executable
except:
find_executable = lambda f: None
import django
import inspect
import logging
import os
import shutil
import subprocess
import tempfile
from unittest import SkipTest
from django.contrib.admin.sites import site as admin_site
from nested_admin.tests.base import BaseNestedAdminTestCase, get_model_name
from .models import (
PlainStackedRoot, PlainTabularRoot, NestedStackedRoot, NestedTabularRoot)
logger = logging.getLogger(__name__)
class VisualComparisonTestCase(BaseNestedAdminTestCase):
root_model = None
root_models = [PlainStackedRoot, PlainTabularRoot, NestedStackedRoot, NestedTabularRoot]
@classmethod
def setUpClass(cls):
cls.blinkdiff_bin = os.environ.get('BLINKDIFF_BIN')
if not cls.blinkdiff_bin:
cls.blinkdiff_bin = find_executable('blink-diff')
if not cls.blinkdiff_bin or not os.path.exists(cls.blinkdiff_bin):
raise SkipTest("blink-diff not installed")
cls.screenshot_output_dir = os.environ.get('SCREENSHOT_OUTPUT_DIR')
super(BaseNestedAdminTestCase, cls).setUpClass()
cls.root_temp_dir = tempfile.mkdtemp()
cls.all_models = {}
cls.all_model_names = {}
for root_model in cls.root_models:
root_admin = admin_site._registry[root_model]
def descend_admin_inlines(admin):
data = [admin.model, []]
for inline in (getattr(admin, 'inlines', None) or []):
data[1].append(descend_admin_inlines(inline))
return data
cls.all_models[root_model] = models = descend_admin_inlines(root_admin)
def recursive_map_model_names(data):
if isinstance(data, list):
return [m for m in map(recursive_map_model_names, data)]
else:
return get_model_name(data)
cls.all_model_names[root_model] = recursive_map_model_names(models)
@classmethod
def tearDownClass(cls):
super(VisualComparisonTestCase, cls).tearDownClass()
shutil.rmtree(cls.root_temp_dir)
def setUp(self):
super(VisualComparisonTestCase, self).setUp()
self.temp_dir = tempfile.mkdtemp(dir=self.root_temp_dir)
self.selenium.set_window_size(780, 600)
@property
def models(self):
return self.all_models[self.root_model]
@property
def model_names(self):
return self.all_model_names[self.root_model]
def assertSameScreenshot(self, a, b, extra_args=None):
diff_output_path = a.replace('_a.png', '_diff.png')
args = [
self.blinkdiff_bin, "--verbose", "--threshold", "1", "--delta", "0",
"--output", diff_output_path]
if self.has_suit:
suit_left = self.selenium.find_element_by_css_selector('#suit-left')
args += ['--block-out', "%(x)s,%(y)s,%(w)s,%(h)s" % {
'x': suit_left.location['x'],
'y': suit_left.location['y'],
'w': suit_left.size['width'],
'h': suit_left.size['height'],
}]
if extra_args:
args += extra_args
args += [a, b]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = p.communicate()
if p.returncode == 0:
# No differences found
if self.screenshot_output_dir:
os.unlink(a)
os.unlink(b)
os.unlink(diff_output_path)
return
else:
logger.info(stdout)
msg = "Screenshots do not match"
if self.screenshot_output_dir:
msg = "%s (See %s)" % (msg, diff_output_path)
raise AssertionError(msg)
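    # The assertion above shells out to blink-diff roughly as (sketch):
    #   blink-diff --verbose --threshold 1 --delta 0 --output <name>_diff.png \
    #       [--block-out x,y,w,h] <name>_a.png <name>_b.png
    # and treats a zero exit status as "no visual differences".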
def get_admin_screenshot(self):
name = inspect.stack()[1][3]
prefix = "dj%s%s" % django.VERSION[:2]
if self.has_grappelli:
prefix += "_grp"
output_dir = self.screenshot_output_dir or self.temp_dir
suffix = ('a' if self.root_model.__name__.startswith('Plain') else 'b')
image_path = os.path.join(output_dir, "%s_%s_%s.png" % (prefix, name, suffix))
self.selenium.save_screenshot(image_path)
return image_path
def add_inline(self):
child_model = self.models[1][0][0]
verbose_name = child_model._meta.verbose_name.title()
with self.clickable_xpath('//a[contains(string(.), "Add another %s")]' % verbose_name) as el:
el.click()
def test_stacked_empty(self):
screenshots = []
for model in [PlainStackedRoot, NestedStackedRoot]:
self.root_model = model
self.load_admin()
screenshots.append(self.get_admin_screenshot())
self.assertSameScreenshot(*screenshots)
def test_tabular_empty(self):
screenshots = []
for model in [PlainTabularRoot, NestedTabularRoot]:
self.root_model = model
self.load_admin()
screenshots.append(self.get_admin_screenshot())
self.assertSameScreenshot(*screenshots)
def test_tabular_one_item(self):
screenshots = []
for model in [PlainTabularRoot, NestedTabularRoot]:
self.root_model = model
child_model = self.models[1][0][0]
root = model.objects.create(slug='a')
child_model.objects.create(slug='b', root=root, position=0)
self.load_admin(obj=root)
screenshots.append(self.get_admin_screenshot())
self.assertSameScreenshot(*screenshots)
def test_stacked_one_item(self):
screenshots = []
for model in [PlainStackedRoot, NestedStackedRoot]:
self.root_model = model
child_model = self.models[1][0][0]
root = model.objects.create(slug='a')
child_model.objects.create(slug='b', root=root, position=0)
self.load_admin(obj=root)
screenshots.append(self.get_admin_screenshot())
self.assertSameScreenshot(*screenshots)
def test_tabular_added_item(self):
screenshots = []
for model in [PlainTabularRoot, NestedTabularRoot]:
self.root_model = model
self.load_admin()
self.add_inline()
screenshots.append(self.get_admin_screenshot())
self.assertSameScreenshot(*screenshots)
def test_stacked_added_item(self):
screenshots = []
for model in [PlainStackedRoot, NestedStackedRoot]:
self.root_model = model
self.load_admin()
self.add_inline()
screenshots.append(self.get_admin_screenshot())
self.assertSameScreenshot(*screenshots)
def test_tabular_validation_error(self):
screenshots = []
for model in [PlainTabularRoot, NestedTabularRoot]:
self.root_model = model
self.load_admin()
if self.has_suit:
self.selenium.set_window_size(1400, 800)
self.add_inline()
with self.clickable_selector('#id_slug') as el:
el.send_keys('a')
with self.clickable_selector('#id_children-0-slug') as el:
el.send_keys('b')
self.save_form()
screenshots.append(self.get_admin_screenshot())
extra_args = []
if not self.has_grappelli:
                # Django has a bug where it doesn't show the 'Remove' link
                # if there is a validation error on a newly added inline;
                # see <https://code.djangoproject.com/ticket/15910>
delete_col = self.selenium.find_element_by_css_selector('#children0 .delete')
extra_args += ['--block-out', "%(x)s,%(y)s,%(w)s,%(h)s" % {
'x': delete_col.location['x'],
'y': delete_col.location['y'],
'w': delete_col.size['width'],
'h': delete_col.size['height'],
}]
self.assertSameScreenshot(*screenshots, extra_args=extra_args)
def test_stacked_validation_error(self):
screenshots = []
for model in [PlainStackedRoot, NestedStackedRoot]:
self.root_model = model
self.load_admin()
self.add_inline()
with self.clickable_selector('#id_slug') as el:
el.send_keys('a')
with self.clickable_selector('#id_children-0-slug') as el:
el.send_keys('b')
self.save_form()
screenshots.append(self.get_admin_screenshot())
extra_args = []
if not self.has_grappelli:
                # Django has a bug where it doesn't show the 'Remove' link
                # if there is a validation error on a newly added inline;
                # see <https://code.djangoproject.com/ticket/15910>
delete_col = self.selenium.find_element_by_css_selector('#children0 .inline-deletelink')
extra_args += ['--block-out', "%(x)s,%(y)s,%(w)s,%(h)s" % {
'x': delete_col.location['x'],
'y': delete_col.location['y'],
'w': delete_col.size['width'],
'h': delete_col.size['height'],
}]
self.assertSameScreenshot(*screenshots, extra_args=extra_args)
|
Morgan-Stanley/treadmill
|
refs/heads/master
|
lib/python/treadmill/sproc/appcfgmgr.py
|
2
|
"""Treadmill app configurator daemon, subscribes to eventmgr events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import appcfgmgr
from treadmill import cli
def init():
"""Top level command handler."""
@click.command()
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.option('--runtime', envvar='TREADMILL_RUNTIME', required=True)
@click.option('--runtime-param', type=cli.LIST, required=False)
def run(approot, runtime, runtime_param=None):
"""Starts appcfgmgr process."""
mgr = appcfgmgr.AppCfgMgr(approot, runtime, runtime_param)
mgr.run()
return run
|
gangadhar-kadam/verve_live_erp
|
refs/heads/v5.0
|
erpnext/selling/doctype/campaign/campaign.py
|
100
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.naming import make_autoname
class Campaign(Document):
def autoname(self):
if frappe.defaults.get_global_default('campaign_naming_by') != 'Naming Series':
self.name = self.campaign_name
else:
self.name = make_autoname(self.naming_series+'.#####')
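# Illustrative sketch (series name hypothetical): with campaign_naming_by set
# to 'Naming Series' and naming_series 'SAL-CAM-', make_autoname yields names
# like 'SAL-CAM-00001'; otherwise the document is named after campaign_name.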
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/NameCollisionFile.before.py
|
83
|
def hello():
pass
<selection>print("Hello")</selection>
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_OpenCV_Nightly/source/numpy/fft/helper.py
|
15
|
"""
Discrete Fourier Transforms - helper.py
"""
from __future__ import division, absolute_import, print_function
import collections
try:
import threading
except ImportError:
import dummy_threading as threading
from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
# Created by Pearu Peterson, September 2002
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
integer_types = integer_types + (integer,)
def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to shift. Default is None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
ifftshift : The inverse of `fftshift`.
Examples
--------
>>> freqs = np.fft.fftfreq(10, 0.1)
>>> freqs
array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
>>> np.fft.fftshift(freqs)
array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
Shift the zero-frequency component only along the second axis:
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.fftshift(freqs, axes=(1,))
array([[ 2., 0., 1.],
[-4., 3., 4.],
[-1., -3., -2.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [dim // 2 for dim in x.shape]
elif isinstance(axes, integer_types):
shift = x.shape[axes] // 2
else:
shift = [x.shape[ax] // 2 for ax in axes]
return roll(x, shift, axes)
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
functions differ by one sample for odd-length `x`.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to calculate. Defaults to None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
fftshift : Shift zero-frequency component to the center of the spectrum.
Examples
--------
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.ifftshift(np.fft.fftshift(freqs))
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [-(dim // 2) for dim in x.shape]
elif isinstance(axes, integer_types):
shift = -(x.shape[axes] // 2)
else:
shift = [-(x.shape[ax] // 2) for ax in axes]
return roll(x, shift, axes)
def fftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies.
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
f : ndarray
Array of length `n` containing the sample frequencies.
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
>>> fourier = np.fft.fft(signal)
>>> n = signal.size
>>> timestep = 0.1
>>> freq = np.fft.fftfreq(n, d=timestep)
>>> freq
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
"""
if not isinstance(n, integer_types):
raise ValueError("n should be an integer")
val = 1.0 / (n * d)
results = empty(n, int)
N = (n-1)//2 + 1
p1 = arange(0, N, dtype=int)
results[:N] = p1
p2 = arange(-(n//2), 0, dtype=int)
results[N:] = p2
return results * val
#return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)
def rfftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies
(for usage with rfft, irfft).
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
the Nyquist frequency component is considered to be positive.
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
f : ndarray
Array of length ``n//2 + 1`` containing the sample frequencies.
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
>>> fourier = np.fft.rfft(signal)
>>> n = signal.size
>>> sample_rate = 100
>>> freq = np.fft.fftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.])
>>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., 30., 40., 50.])
"""
if not isinstance(n, integer_types):
raise ValueError("n should be an integer")
val = 1.0/(n*d)
N = n//2 + 1
results = arange(0, N, dtype=int)
return results * val
class _FFTCache(object):
"""
Cache for the FFT twiddle factors as an LRU (least recently used) cache.
Parameters
----------
max_size_in_mb : int
Maximum memory usage of the cache before items are being evicted.
max_item_count : int
Maximum item count of the cache before items are being evicted.
Notes
-----
Items will be evicted if either limit has been reached upon getting and
    setting. The maximum memory usage is not strictly the given
``max_size_in_mb`` but rather
``max(max_size_in_mb, 1.5 * size_of_largest_item)``. Thus the cache will
never be completely cleared - at least one item will remain and a single
large item can cause the cache to retain several smaller items even if the
given maximum cache size has been exceeded.
"""
def __init__(self, max_size_in_mb, max_item_count):
self._max_size_in_bytes = max_size_in_mb * 1024 ** 2
self._max_item_count = max_item_count
self._dict = collections.OrderedDict()
self._lock = threading.Lock()
def put_twiddle_factors(self, n, factors):
"""
Store twiddle factors for an FFT of length n in the cache.
Putting multiple twiddle factors for a certain n will store it multiple
times.
Parameters
----------
n : int
Data length for the FFT.
factors : ndarray
The actual twiddle values.
"""
with self._lock:
# Pop + later add to move it to the end for LRU behavior.
# Internally everything is stored in a dictionary whose values are
# lists.
try:
value = self._dict.pop(n)
except KeyError:
value = []
value.append(factors)
self._dict[n] = value
self._prune_cache()
def pop_twiddle_factors(self, n):
"""
Pop twiddle factors for an FFT of length n from the cache.
Will return None if the requested twiddle factors are not available in
the cache.
Parameters
----------
n : int
Data length for the FFT.
Returns
-------
out : ndarray or None
The retrieved twiddle factors if available, else None.
"""
with self._lock:
if n not in self._dict or not self._dict[n]:
return None
# Pop + later add to move it to the end for LRU behavior.
all_values = self._dict.pop(n)
value = all_values.pop()
            # Only put it back if there are still some arrays left in the list.
if all_values:
self._dict[n] = all_values
return value
def _prune_cache(self):
# Always keep at least one item.
while len(self._dict) > 1 and (
len(self._dict) > self._max_item_count or self._check_size()):
self._dict.popitem(last=False)
def _check_size(self):
item_sizes = [sum(_j.nbytes for _j in _i)
for _i in self._dict.values() if _i]
if not item_sizes:
return False
max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))
return sum(item_sizes) > max_size
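if __name__ == '__main__':
    # Minimal usage sketch of the LRU-style cache above (internal helper, not
    # part of the public FFT API): store and retrieve twiddle factors by length.
    cache = _FFTCache(max_size_in_mb=1, max_item_count=4)
    cache.put_twiddle_factors(16, empty(16, complex))
    factors = cache.pop_twiddle_factors(16)       # returns the stored array
    print(factors is not None)                    # True
    print(cache.pop_twiddle_factors(16) is None)  # True: entry was consumed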
|
georgepinca1/Iron
|
refs/heads/master
|
contrib/linearize/linearize-hashes.py
|
33
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
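# Example CONFIG-FILE contents (sketch; credentials are placeholders), matching
# the key=value parsing and defaults above:
#
#   host=127.0.0.1
#   port=9332
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000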
|
40223226/2015cdb_g8
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/dom/xmlbuilder.py
|
873
|
"""Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
"""Features object that has variables set for each DOMBuilder feature.
The DOMBuilder class uses an instance of this class to pass settings to
the ExpatBuilder class.
"""
# Note that the DOMBuilder class in LoadSave constrains which of these
# values can be set using the DOM Level 3 LoadSave feature.
namespaces = 1
namespace_declarations = True
validation = False
external_parameter_entities = True
external_general_entities = True
external_dtd_subset = True
validate_if_schema = False
validate = False
datatype_normalization = False
create_entity_ref_nodes = True
entities = True
whitespace_in_element_content = True
cdata_sections = True
comments = True
charset_overrides_xml_encoding = True
infoset = False
supported_mediatypes_only = False
errorHandler = None
filter = None
class DOMBuilder:
entityResolver = None
errorHandler = None
filter = None
ACTION_REPLACE = 1
ACTION_APPEND_AS_CHILDREN = 2
ACTION_INSERT_AFTER = 3
ACTION_INSERT_BEFORE = 4
_legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
def __init__(self):
self._options = Options()
def _get_entityResolver(self):
return self.entityResolver
def _set_entityResolver(self, entityResolver):
self.entityResolver = entityResolver
def _get_errorHandler(self):
return self.errorHandler
def _set_errorHandler(self, errorHandler):
self.errorHandler = errorHandler
def _get_filter(self):
return self.filter
def _set_filter(self, filter):
self.filter = filter
def setFeature(self, name, state):
if self.supportsFeature(name):
state = state and 1 or 0
try:
settings = self._settings[(_name_xform(name), state)]
except KeyError:
raise xml.dom.NotSupportedErr(
"unsupported feature: %r" % (name,))
else:
for name, value in settings:
setattr(self._options, name, value)
else:
raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
def supportsFeature(self, name):
return hasattr(self._options, _name_xform(name))
def canSetFeature(self, name, state):
key = (_name_xform(name), state and 1 or 0)
return key in self._settings
# This dictionary maps from (feature,value) to a list of
# (option,value) pairs that should be set on the Options object.
# If a (feature,value) setting is not in this dictionary, it is
# not supported by the DOMBuilder.
#
_settings = {
("namespace_declarations", 0): [
("namespace_declarations", 0)],
("namespace_declarations", 1): [
("namespace_declarations", 1)],
("validation", 0): [
("validation", 0)],
("external_general_entities", 0): [
("external_general_entities", 0)],
("external_general_entities", 1): [
("external_general_entities", 1)],
("external_parameter_entities", 0): [
("external_parameter_entities", 0)],
("external_parameter_entities", 1): [
("external_parameter_entities", 1)],
("validate_if_schema", 0): [
("validate_if_schema", 0)],
("create_entity_ref_nodes", 0): [
("create_entity_ref_nodes", 0)],
("create_entity_ref_nodes", 1): [
("create_entity_ref_nodes", 1)],
("entities", 0): [
("create_entity_ref_nodes", 0),
("entities", 0)],
("entities", 1): [
("entities", 1)],
("whitespace_in_element_content", 0): [
("whitespace_in_element_content", 0)],
("whitespace_in_element_content", 1): [
("whitespace_in_element_content", 1)],
("cdata_sections", 0): [
("cdata_sections", 0)],
("cdata_sections", 1): [
("cdata_sections", 1)],
("comments", 0): [
("comments", 0)],
("comments", 1): [
("comments", 1)],
("charset_overrides_xml_encoding", 0): [
("charset_overrides_xml_encoding", 0)],
("charset_overrides_xml_encoding", 1): [
("charset_overrides_xml_encoding", 1)],
("infoset", 0): [],
("infoset", 1): [
("namespace_declarations", 0),
("validate_if_schema", 0),
("create_entity_ref_nodes", 0),
("entities", 0),
("cdata_sections", 0),
("datatype_normalization", 1),
("whitespace_in_element_content", 1),
("comments", 1),
("charset_overrides_xml_encoding", 1)],
("supported_mediatypes_only", 0): [
("supported_mediatypes_only", 0)],
("namespaces", 0): [
("namespaces", 0)],
("namespaces", 1): [
("namespaces", 1)],
}
def getFeature(self, name):
xname = _name_xform(name)
try:
return getattr(self._options, xname)
except AttributeError:
if name == "infoset":
options = self._options
return (options.datatype_normalization
and options.whitespace_in_element_content
and options.comments
and options.charset_overrides_xml_encoding
and not (options.namespace_declarations
or options.validate_if_schema
or options.create_entity_ref_nodes
or options.entities
or options.cdata_sections))
raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
def parseURI(self, uri):
if self.entityResolver:
input = self.entityResolver.resolveEntity(None, uri)
else:
input = DOMEntityResolver().resolveEntity(None, uri)
return self.parse(input)
def parse(self, input):
options = copy.copy(self._options)
options.filter = self.filter
options.errorHandler = self.errorHandler
fp = input.byteStream
if fp is None and options.systemId:
import urllib.request
fp = urllib.request.urlopen(input.systemId)
return self._parse_bytestream(fp, options)
def parseWithContext(self, input, cnode, action):
if action not in self._legal_actions:
raise ValueError("not a legal action")
raise NotImplementedError("Haven't written this yet...")
def _parse_bytestream(self, stream, options):
import xml.dom.expatbuilder
builder = xml.dom.expatbuilder.makeBuilder(options)
return builder.parseFile(stream)
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
__slots__ = '_opener',
def resolveEntity(self, publicId, systemId):
assert systemId is not None
source = DOMInputSource()
source.publicId = publicId
source.systemId = systemId
source.byteStream = self._get_opener().open(systemId)
# determine the encoding if the transport provided it
source.encoding = self._guess_media_encoding(source)
        # determine the base URI if we can
import posixpath, urllib.parse
parts = urllib.parse.urlparse(systemId)
scheme, netloc, path, params, query, fragment = parts
# XXX should we check the scheme here as well?
if path and not path.endswith("/"):
path = posixpath.dirname(path) + "/"
parts = scheme, netloc, path, params, query, fragment
source.baseURI = urllib.parse.urlunparse(parts)
return source
def _get_opener(self):
try:
return self._opener
except AttributeError:
self._opener = self._create_opener()
return self._opener
def _create_opener(self):
import urllib.request
return urllib.request.build_opener()
def _guess_media_encoding(self, source):
info = source.byteStream.info()
if "Content-Type" in info:
for param in info.getplist():
if param.startswith("charset="):
return param.split("=", 1)[1].lower()
class DOMInputSource(object):
__slots__ = ('byteStream', 'characterStream', 'stringData',
'encoding', 'publicId', 'systemId', 'baseURI')
def __init__(self):
self.byteStream = None
self.characterStream = None
self.stringData = None
self.encoding = None
self.publicId = None
self.systemId = None
self.baseURI = None
def _get_byteStream(self):
return self.byteStream
def _set_byteStream(self, byteStream):
self.byteStream = byteStream
def _get_characterStream(self):
return self.characterStream
def _set_characterStream(self, characterStream):
self.characterStream = characterStream
def _get_stringData(self):
return self.stringData
def _set_stringData(self, data):
self.stringData = data
def _get_encoding(self):
return self.encoding
def _set_encoding(self, encoding):
self.encoding = encoding
def _get_publicId(self):
return self.publicId
def _set_publicId(self, publicId):
self.publicId = publicId
def _get_systemId(self):
return self.systemId
def _set_systemId(self, systemId):
self.systemId = systemId
def _get_baseURI(self):
return self.baseURI
def _set_baseURI(self, uri):
self.baseURI = uri
class DOMBuilderFilter:
"""Element filter which can be used to tailor construction of
a DOM instance.
"""
# There's really no need for this class; concrete implementations
# should just implement the endElement() and startElement()
# methods as appropriate. Using this makes it easy to only
# implement one of them.
FILTER_ACCEPT = 1
FILTER_REJECT = 2
FILTER_SKIP = 3
FILTER_INTERRUPT = 4
whatToShow = NodeFilter.SHOW_ALL
def _get_whatToShow(self):
return self.whatToShow
def acceptNode(self, element):
return self.FILTER_ACCEPT
def startContainer(self, element):
return self.FILTER_ACCEPT
del NodeFilter
class DocumentLS:
"""Mixin to create documents that conform to the load/save spec."""
async = False
def _get_async(self):
return False
def _set_async(self, async):
if async:
raise xml.dom.NotSupportedErr(
"asynchronous document loading is not supported")
def abort(self):
# What does it mean to "clear" a document? Does the
# documentElement disappear?
raise NotImplementedError(
"haven't figured out what this means yet")
def load(self, uri):
raise NotImplementedError("haven't written this yet")
def loadXML(self, source):
raise NotImplementedError("haven't written this yet")
def saveXML(self, snode):
if snode is None:
snode = self
elif snode.ownerDocument is not self:
raise xml.dom.WrongDocumentErr()
return snode.toxml()
class DOMImplementationLS:
MODE_SYNCHRONOUS = 1
MODE_ASYNCHRONOUS = 2
def createDOMBuilder(self, mode, schemaType):
if schemaType is not None:
raise xml.dom.NotSupportedErr(
"schemaType not yet supported")
if mode == self.MODE_SYNCHRONOUS:
return DOMBuilder()
if mode == self.MODE_ASYNCHRONOUS:
raise xml.dom.NotSupportedErr(
"asynchronous builders are not supported")
raise ValueError("unknown value for mode")
def createDOMWriter(self):
raise NotImplementedError(
"the writer interface hasn't been written yet!")
def createDOMInputSource(self):
return DOMInputSource()
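# Illustrative sketch of the feature table above (comments only: ``async`` is
# used as an attribute name in DocumentLS, so this Brython snapshot only parses
# on Python versions before 3.7):
#   builder = DOMBuilder()
#   builder.setFeature("infoset", 1)   # expands to several option settings
#   builder.getFeature("entities")     # -> 0
#   builder.getFeature("comments")     # -> 1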
|
shashank971/edx-platform
|
refs/heads/master
|
common/djangoapps/util/tests/test_sandboxing.py
|
162
|
"""
Tests for sandboxing.py in util app
"""
from django.test import TestCase
from opaque_keys.edx.locator import LibraryLocator
from util.sandboxing import can_execute_unsafe_code
from django.test.utils import override_settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class SandboxingTest(TestCase):
"""
Test sandbox whitelisting
"""
@override_settings(COURSES_WITH_UNSAFE_CODE=['edX/full/.*', 'library:v1-edX+.*'])
def test_sandbox_exclusion(self):
"""
Test to make sure that a non-match returns false
"""
self.assertFalse(can_execute_unsafe_code(SlashSeparatedCourseKey('edX', 'notful', 'empty')))
self.assertFalse(can_execute_unsafe_code(LibraryLocator('edY', 'test_bank')))
@override_settings(COURSES_WITH_UNSAFE_CODE=['edX/full/.*'])
def test_sandbox_inclusion(self):
"""
Test to make sure that a match works across course runs
"""
self.assertTrue(can_execute_unsafe_code(SlashSeparatedCourseKey('edX', 'full', '2012_Fall')))
self.assertTrue(can_execute_unsafe_code(SlashSeparatedCourseKey('edX', 'full', '2013_Spring')))
self.assertFalse(can_execute_unsafe_code(LibraryLocator('edX', 'test_bank')))
def test_courselikes_with_unsafe_code_default(self):
"""
        Test that with the default (empty) COURSES_WITH_UNSAFE_CODE setting, i.e. without @override_settings, no course or library can execute unsafe code
"""
self.assertFalse(can_execute_unsafe_code(SlashSeparatedCourseKey('edX', 'full', '2012_Fall')))
self.assertFalse(can_execute_unsafe_code(SlashSeparatedCourseKey('edX', 'full', '2013_Spring')))
self.assertFalse(can_execute_unsafe_code(LibraryLocator('edX', 'test_bank')))
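# Sketch of the whitelist semantics exercised above: with
# COURSES_WITH_UNSAFE_CODE=['edX/full/.*'], any course key whose string form
# matches the regex (e.g. 'edX/full/2012_Fall') may run unsafe code; anything
# else, including libraries, may not.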
|
johan--/tornado
|
refs/heads/master
|
maint/vm/windows/bootstrap.py
|
99
|
r"""Installs files needed for tornado testing on windows.
These instructions are compatible with the VMs provided by http://modern.ie.
The bootstrapping script works on the WinXP/IE6 and Win8/IE10 configurations,
although tornado's tests do not pass on XP.
1) Install virtualbox guest additions (from the device menu in virtualbox)
2) Set up a shared folder to the root of your tornado repo. It must be a
read-write mount to use tox, although the tests can be run directly
in a read-only mount. This will probably assign drive letter E:.
3) Install Python 2.7 from python.org.
4) Run this script by double-clicking it, or running
"c:\python27\python.exe bootstrap.py" in a shell.
To run the tests by hand, cd to e:\ and run
c:\python27\python.exe -m tornado.test.runtests
To run the tests with tox, cd to e:\maint\vm\windows and run
c:\python27\scripts\tox
To run under cygwin (which must be installed separately), run
cd /cygdrive/e; python -m tornado.test.runtests
"""
import os
import subprocess
import sys
import urllib
TMPDIR = r'c:\tornado_bootstrap'
PYTHON_VERSIONS = [
(r'c:\python26\python.exe', 'http://www.python.org/ftp/python/2.6.6/python-2.6.6.msi'),
(r'c:\python27\python.exe', 'http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi'),
(r'c:\python32\python.exe', 'http://www.python.org/ftp/python/3.2.3/python-3.2.3.msi'),
(r'c:\python33\python.exe', 'http://www.python.org/ftp/python/3.3.0/python-3.3.0.msi'),
]
SCRIPTS_DIR = r'c:\python27\scripts'
EASY_INSTALL = os.path.join(SCRIPTS_DIR, 'easy_install.exe')
PY_PACKAGES = ['tox', 'virtualenv', 'pip']
def download_to_cache(url, local_name=None):
if local_name is None:
local_name = url.split('/')[-1]
filename = os.path.join(TMPDIR, local_name)
if not os.path.exists(filename):
data = urllib.urlopen(url).read()
with open(filename, 'wb') as f:
f.write(data)
return filename
def main():
if not os.path.exists(TMPDIR):
os.mkdir(TMPDIR)
os.chdir(TMPDIR)
for exe, url in PYTHON_VERSIONS:
if os.path.exists(exe):
print "%s already exists, skipping" % exe
continue
print "Installing %s" % url
filename = download_to_cache(url)
# http://blog.jaraco.com/2012/01/how-i-install-python-on-windows.html
subprocess.check_call(['msiexec', '/i', filename,
'ALLUSERS=1', '/passive'])
if not os.path.exists(EASY_INSTALL):
filename = download_to_cache('http://python-distribute.org/distribute_setup.py')
subprocess.check_call([sys.executable, filename])
subprocess.check_call([EASY_INSTALL] + PY_PACKAGES)
# cygwin's setup.exe doesn't like being run from a script (looks
# UAC-related). If it did, something like this might install it.
# (install python, python-setuptools, python3, and easy_install
# unittest2 (cygwin's python 2 is 2.6))
#filename = download_to_cache('http://cygwin.com/setup.exe')
#CYGTMPDIR = os.path.join(TMPDIR, 'cygwin')
#if not os.path.exists(CYGTMPDIR):
# os.mkdir(CYGTMPDIR)
## http://www.jbmurphy.com/2011/06/16/powershell-script-to-install-cygwin/
#CYGWIN_ARGS = [filename, '-q', '-l', CYGTMPDIR,
# '-s', 'http://mirror.nyi.net/cygwin/', '-R', r'c:\cygwin']
#subprocess.check_call(CYGWIN_ARGS)
if __name__ == '__main__':
main()
|
pyamg/pyamg
|
refs/heads/lloyd-unify
|
pyamg/classical/split.py
|
1
|
"""Functions to compute C/F splittings for use in Classical AMG.
Overview
--------
A C/F splitting is a partitioning of the nodes in the graph of a connection
matrix (denoted S for strength) into sets of C (coarse) and F (fine) nodes.
The C-nodes are promoted to the coarser grid while the F-nodes are retained
on the finer grid. Ideally, the C-nodes, which represent the coarse-level
unknowns, should be far fewer in number than the F-nodes. Furthermore,
algebraically smooth error must be well-approximated by the coarse level
degrees of freedom.
Representation
--------------
C/F splitting is represented by an array with ones for all the C-nodes
and zeros for the F-nodes.
C/F Splitting Methods
---------------------
RS : Original Ruge-Stuben method
- Produces good C/F splittings.
- May produce AMG hierarchies with relatively high operator complexities.
- See References [1] and [4]
PMIS: Parallel Modified Independent Set
- Very fast construction with low operator complexity.
- Convergence can deteriorate with increasing problem
size on structured meshes.
    - Uses a method similar to Luby's Maximal Independent Set algorithm.
- See References [1] and [3]
PMISc: Parallel Modified Independent Set in Color
- Fast construction with low operator complexity.
- Better scalability than PMIS on structured meshes.
- Augments random weights with a (graph) vertex coloring
- See References [1]
CLJP: Cleary-Luby-Jones-Plassmann
- Parallel method with cost and complexity comparable to Ruge-Stuben.
- Convergence can deteriorate with increasing problem
size on structured meshes.
- See References [1] and [2]
CLJP-c: Cleary-Luby-Jones-Plassmann in Color
- Parallel method with cost and complexity comparable to Ruge-Stuben.
- Better scalability than CLJP on structured meshes.
- See References [1]
Summary
-------
In general, methods that use a graph coloring perform better on structured
meshes [1]. Unstructured meshes do not appear to benefit substantially
from coloring.
======== ======== ======== ==========
method parallel in color cost
======== ======== ======== ==========
RS no no moderate
PMIS yes no very low
PMISc yes yes low
CLJP yes no moderate
CLJPc yes yes moderate
======== ======== ======== ==========
References
----------
.. [1] Cleary AJ, Falgout RD, Henson VE, Jones JE.
"Coarse-grid selection for parallel algebraic multigrid"
Proceedings of the 5th International Symposium on Solving Irregularly
Structured Problems in Parallel. Springer: Berlin, 1998; 104-115.
.. [2] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
.. [3] Hans De Sterck, Ulrike M Yang, and Jeffrey J Heys
"Reducing complexity in parallel algebraic multigrid preconditioners"
SIAM Journal on Matrix Analysis and Applications 2006; 27:1019-1039.
.. [4] Ruge JW, Stuben K.
"Algebraic multigrid (AMG)"
In Multigrid Methods, McCormick SF (ed.),
Frontiers in Applied Mathematics, vol. 3.
SIAM: Philadelphia, PA, 1987; 73-130.
"""
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix, isspmatrix_csr
from pyamg.graph import vertex_coloring
from pyamg import amg_core
from pyamg.util.utils import remove_diagonal
__all__ = ['RS', 'PMIS', 'PMISc', 'CLJP', 'CLJPc', 'MIS']
def RS(S, second_pass=False):
"""Compute a C/F splitting using Ruge-Stuben coarsening
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
second_pass : bool, default False
Perform second pass of classical AMG coarsening. Can be important for
classical AMG interpolation. Typically not done in parallel (e.g. Hypre).
Returns
-------
splitting : ndarray
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import RS
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = RS(S)
See Also
--------
amg_core.rs_cf_splitting
References
----------
.. [1] Ruge JW, Stuben K. "Algebraic multigrid (AMG)"
In Multigrid Methods, McCormick SF (ed.),
Frontiers in Applied Mathematics, vol. 3.
SIAM: Philadelphia, PA, 1987; 73-130.
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
S = remove_diagonal(S)
T = S.T.tocsr() # transpose S for efficient column access
splitting = np.empty(S.shape[0], dtype='intc')
influence = np.zeros((S.shape[0],), dtype='intc')
amg_core.rs_cf_splitting(S.shape[0],
S.indptr, S.indices,
T.indptr, T.indices,
influence,
splitting)
if second_pass:
amg_core.rs_cf_splitting_pass2(S.shape[0], S.indptr,
S.indices, splitting)
return splitting
def PMIS(S):
"""C/F splitting using the Parallel Modified Independent Set method.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
Returns
-------
splitting : ndarray
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import PMIS
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = PMIS(S)
See Also
--------
MIS
References
----------
.. [6] Hans De Sterck, Ulrike M Yang, and Jeffrey J Heys
"Reducing complexity in parallel algebraic multigrid preconditioners"
SIAM Journal on Matrix Analysis and Applications 2006; 27:1019-1039.
"""
S = remove_diagonal(S)
weights, G, S, T = preprocess(S)
return MIS(G, weights)
def PMISc(S, method='JP'):
"""C/F splitting using Parallel Modified Independent Set (in color).
PMIS-c, or PMIS in color, improves PMIS by perturbing the initial
random weights with weights determined by a vertex coloring.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
method : string
Algorithm used to compute the initial vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import PMISc
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = PMISc(S)
See Also
--------
MIS
References
----------
.. [7] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
S = remove_diagonal(S)
weights, G, S, T = preprocess(S, coloring_method=method)
return MIS(G, weights)
def CLJP(S, color=False):
"""Compute a C/F splitting using the parallel CLJP algorithm.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
color : bool
use the CLJP coloring approach
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJP
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJP(S)
See Also
--------
MIS, PMIS, CLJPc
References
----------
.. [8] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
S = remove_diagonal(S)
colorid = 0
if color:
colorid = 1
T = S.T.tocsr() # transpose S for efficient column access
splitting = np.empty(S.shape[0], dtype='intc')
amg_core.cljp_naive_splitting(S.shape[0],
S.indptr, S.indices,
T.indptr, T.indices,
splitting,
colorid)
return splitting
def CLJPc(S):
"""Compute a C/F splitting using the parallel CLJP-c algorithm.
CLJP-c, or CLJP in color, improves CLJP by perturbing the initial
random weights with weights determined by a vertex coloring.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJPc
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJPc(S)
See Also
--------
MIS, PMIS, CLJP
References
----------
.. [1] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
S = remove_diagonal(S)
return CLJP(S, color=True)
def MIS(G, weights, maxiter=None):
"""Compute a maximal independent set of a graph in parallel.
Parameters
----------
G : csr_matrix
Matrix graph, G[i,j] != 0 indicates an edge
weights : ndarray
Array of weights for each vertex in the graph G
maxiter : int
Maximum number of iterations (default: None)
Returns
-------
mis : array
Array of length of G of zeros/ones indicating the independent set
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import MIS
>>> import numpy as np
>>> G = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> w = np.ones((G.shape[0],1)).ravel()
>>> mis = MIS(G,w)
See Also
--------
    amg_core.maximal_independent_set_parallel
"""
if not isspmatrix_csr(G):
raise TypeError('expected csr_matrix')
G = remove_diagonal(G)
mis = np.empty(G.shape[0], dtype='intc')
mis[:] = -1
fn = amg_core.maximal_independent_set_parallel
if maxiter is None:
fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, -1)
else:
if maxiter < 0:
raise ValueError('maxiter must be >= 0')
fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, maxiter)
return mis
# internal function
def preprocess(S, coloring_method=None):
"""Preprocess splitting functions.
Parameters
----------
S : csr_matrix
Strength of connection matrix
    coloring_method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
    weights : ndarray
        Weights from a graph coloring of G
    G : csr_matrix
        Union of S and T
    S : csr_matrix
        Strength matrix with ones
    T : csr_matrix
        Transpose of S
Notes
-----
Performs the following operations:
- Checks input strength of connection matrix S
- Replaces S.data with ones
- Creates T = S.T in CSR format
- Creates G = S union T in CSR format
- Creates random weights
        - Augments weights with a graph coloring (if coloring_method is not None)
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
N = S.shape[0]
S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
shape=(N, N))
T = S.T.tocsr() # transpose S for efficient column access
G = S + T # form graph (must be symmetric)
G.data[:] = 1
weights = np.ravel(T.sum(axis=1)) # initial weights
# weights -= T.diagonal() # discount self loops
if coloring_method is None:
weights = weights + np.random.rand(len(weights))
else:
coloring = vertex_coloring(G, coloring_method)
num_colors = coloring.max() + 1
weights = (weights + (np.random.rand(len(weights)) + coloring)
/ num_colors)
return (weights, G, S, T)
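if __name__ == '__main__':
    # Quick illustrative comparison (sketch), mirroring the docstring examples
    # above: count the C-points each splitting method selects on a small 2D
    # Poisson problem used directly as the strength matrix.
    from pyamg.gallery import poisson
    A = poisson((10, 10), format='csr')
    for method in (RS, PMIS, PMISc, CLJP, CLJPc):
        splitting = method(A)
        print(method.__name__, int(splitting.sum()), 'C-points of', A.shape[0])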
|
marrow/package
|
refs/heads/develop
|
marrow/package/loader.py
|
1
|
from pkg_resources import iter_entry_points
from typeguard import check_argument_types
from typing import Sequence
nodefault = object()
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True):
"""Traverse down an object, using getattr or getitem.
    If ``executable`` is ``True``, any callable encountered is called with no arguments, and traversal
    continues on the result of that call. You can change the separator as desired, e.g. to a '/'.
By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve,
raising a LookupError.
Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute
lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup
very flexible.
"""
# TODO: Support numerical slicing, i.e. ``1:4``, or even just ``:-1`` and things.
assert check_argument_types()
value = obj
remainder = target
if not target:
return obj
while separator:
name, separator, remainder = remainder.partition(separator)
numeric = name.lstrip('-').isdigit()
try:
if numeric or (protect and name.startswith('_')):
raise AttributeError()
value = getattr(value, name)
if executable and callable(value):
value = value()
except AttributeError:
try:
value = value[int(name) if numeric else name]
except (KeyError, TypeError):
if default is nodefault:
raise LookupError("Could not resolve '" + target + "' on: " + repr(obj))
return default
return value
def load(target:str, namespace:str=None, default=nodefault, executable:bool=False, separators:Sequence[str]=('.', ':'),
protect:bool=True):
"""This helper function loads an object identified by a dotted-notation string.
For example::
# Load class Foo from example.objects
load('example.objects:Foo')
# Load the result of the class method ``new`` of the Foo object
load('example.objects:Foo.new', executable=True)
If a plugin namespace is provided simple name references are allowed. For example::
# Load the plugin named 'routing' from the 'web.dispatch' namespace
load('routing', 'web.dispatch')
The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function.
Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
"""
assert check_argument_types()
if namespace and ':' not in target:
allowable = dict((i.name, i) for i in iter_entry_points(namespace))
if target not in allowable:
raise LookupError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
return allowable[target].load()
parts, _, target = target.partition(separators[1])
try:
obj = __import__(parts)
except ImportError:
if default is not nodefault:
return default
raise
return traverse(
obj,
separators[0].join(parts.split(separators[0])[1:] + target.split(separators[0])),
default = default,
executable = executable,
protect = protect
) if target else obj
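if __name__ == '__main__':
    # Minimal sketch of the dotted / dot-colon notations documented above
    # (requires typeguard, which this module already imports).
    import re
    assert load('re:compile') is re.compile
    assert traverse({'a': [10, 20]}, 'a.1') == 20
    print('loader sketch OK')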
|
ebolyen/qiime2
|
refs/heads/master
|
qiime2/core/type/tests/test_primitive.py
|
3
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
# TODO: Write some tests.
if __name__ == '__main__':
unittest.main()
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/na_ontap_vserver_peer.py
|
38
|
#!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create/Delete vserver peer
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_vserver_peer
options:
state:
choices: ['present', 'absent']
description:
- Whether the specified vserver peer should exist or not.
default: present
vserver:
description:
- Specifies name of the source Vserver in the relationship.
applications:
choices: ['snapmirror', 'file_copy', 'lun_copy', 'flexcache']
description:
- List of applications which can make use of the peering relationship.
- FlexCache supported from ONTAP 9.5 onwards.
peer_vserver:
description:
- Specifies name of the peer Vserver in the relationship.
peer_cluster:
description:
- Specifies name of the peer Cluster.
- Required for creating the vserver peer relationship with a remote cluster
dest_hostname:
description:
- Destination hostname or IP address.
- Required for creating the vserver peer relationship with a remote cluster
dest_username:
description:
- Destination username.
- Optional if this is same as source username.
dest_password:
description:
- Destination password.
- Optional if this is same as source password.
short_description: NetApp ONTAP Vserver peering
version_added: "2.7"
'''
EXAMPLES = """
- name: Source vserver peer create
na_ontap_vserver_peer:
state: present
peer_vserver: ansible2
peer_cluster: ansibleCluster
vserver: ansible
applications: snapmirror
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
dest_hostname: "{{ netapp_dest_hostname }}"
- name: vserver peer delete
na_ontap_vserver_peer:
state: absent
peer_vserver: ansible2
vserver: ansible
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPVserverPeer(object):
"""
Class with vserver peer methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
peer_vserver=dict(required=True, type='str'),
peer_cluster=dict(required=False, type='str'),
applications=dict(required=False, type='list', choices=['snapmirror', 'file_copy', 'lun_copy', 'flexcache']),
dest_hostname=dict(required=False, type='str'),
dest_username=dict(required=False, type='str'),
dest_password=dict(required=False, type='str', no_log=True)
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
if self.parameters.get('dest_hostname'):
self.module.params['hostname'] = self.parameters['dest_hostname']
if self.parameters.get('dest_username'):
self.module.params['username'] = self.parameters['dest_username']
if self.parameters.get('dest_password'):
self.module.params['password'] = self.parameters['dest_password']
self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
# reset to source host connection for asup logs
self.module.params['hostname'] = self.parameters['hostname']
def vserver_peer_get_iter(self):
"""
Compose NaElement object to query current vserver using peer-vserver and vserver parameters
:return: NaElement object for vserver-get-iter with query
"""
vserver_peer_get = netapp_utils.zapi.NaElement('vserver-peer-get-iter')
query = netapp_utils.zapi.NaElement('query')
vserver_peer_info = netapp_utils.zapi.NaElement('vserver-peer-info')
vserver_peer_info.add_new_child('peer-vserver', self.parameters['peer_vserver'])
vserver_peer_info.add_new_child('vserver', self.parameters['vserver'])
query.add_child_elem(vserver_peer_info)
vserver_peer_get.add_child_elem(query)
return vserver_peer_get
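# (Added illustration; not part of the original module.) The NaElement tree
# built above serializes to roughly this ZAPI query:
#   <vserver-peer-get-iter>
#     <query>
#       <vserver-peer-info>
#         <peer-vserver>PEER</peer-vserver>
#         <vserver>SOURCE</vserver>
#       </vserver-peer-info>
#     </query>
#   </vserver-peer-get-iter>
# where PEER and SOURCE stand for the module's peer_vserver and vserver parameters.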
def vserver_peer_get(self):
"""
Get current vserver peer info
:return: Dictionary of current vserver peer details if query successful, else return None
"""
vserver_peer_get_iter = self.vserver_peer_get_iter()
vserver_info = dict()
try:
result = self.server.invoke_successfully(vserver_peer_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching vserver peer %s: %s'
% (self.parameters['vserver'], to_native(error)),
exception=traceback.format_exc())
# return vserver peer details
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
vserver_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('vserver-peer-info')
vserver_info['peer_vserver'] = vserver_peer_info.get_child_content('peer-vserver')
vserver_info['vserver'] = vserver_peer_info.get_child_content('vserver')
vserver_info['peer_state'] = vserver_peer_info.get_child_content('peer-state')
return vserver_info
return None
def vserver_peer_delete(self):
"""
Delete a vserver peer
"""
vserver_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-peer-delete', **{'peer-vserver': self.parameters['peer_vserver'],
'vserver': self.parameters['vserver']})
try:
self.server.invoke_successfully(vserver_peer_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting vserver peer %s: %s'
% (self.parameters['vserver'], to_native(error)),
exception=traceback.format_exc())
def get_peer_cluster_name(self):
"""
Get local cluster name
:return: cluster name
"""
cluster_info = netapp_utils.zapi.NaElement('cluster-identity-get')
try:
result = self.server.invoke_successfully(cluster_info, enable_tunneling=True)
return result.get_child_by_name('attributes').get_child_by_name(
'cluster-identity-info').get_child_content('cluster-name')
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching peer cluster name for peer vserver %s: %s'
% (self.parameters['peer_vserver'], to_native(error)),
exception=traceback.format_exc())
def vserver_peer_create(self):
"""
Create a vserver peer
"""
if self.parameters.get('applications') is None:
self.module.fail_json(msg='applications parameter is missing')
if self.parameters.get('peer_cluster') is not None and self.parameters.get('dest_hostname') is None:
self.module.fail_json(msg='dest_hostname is required for peering a vserver in remote cluster')
if self.parameters.get('peer_cluster') is None:
self.parameters['peer_cluster'] = self.get_peer_cluster_name()
vserver_peer_create = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-peer-create', **{'peer-vserver': self.parameters['peer_vserver'],
'vserver': self.parameters['vserver'],
'peer-cluster': self.parameters['peer_cluster']})
applications = netapp_utils.zapi.NaElement('applications')
for application in self.parameters['applications']:
applications.add_new_child('vserver-peer-application', application)
vserver_peer_create.add_child_elem(applications)
try:
self.server.invoke_successfully(vserver_peer_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating vserver peer %s: %s'
% (self.parameters['vserver'], to_native(error)),
exception=traceback.format_exc())
def is_remote_peer(self):
if self.parameters.get('dest_hostname') is None or \
(self.parameters['dest_hostname'] == self.parameters['hostname']):
return False
return True
def vserver_peer_accept(self):
"""
Accept a vserver peer at destination
"""
# peer-vserver -> remote (source vserver is provided)
# vserver -> local (destination vserver is provided)
vserver_peer_accept = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-peer-accept', **{'peer-vserver': self.parameters['vserver'],
'vserver': self.parameters['peer_vserver']})
try:
self.dest_server.invoke_successfully(vserver_peer_accept, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error accepting vserver peer %s: %s'
% (self.parameters['peer_vserver'], to_native(error)),
exception=traceback.format_exc())
def apply(self):
"""
Apply action to create/delete or accept vserver peer
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_vserver_peer", cserver)
current = self.vserver_peer_get()
cd_action = self.na_helper.get_cd_action(current, self.parameters)
if cd_action == 'create':
self.vserver_peer_create()
# accept only if the peer relationship is on a remote cluster
if self.is_remote_peer():
self.vserver_peer_accept()
elif cd_action == 'delete':
self.vserver_peer_delete()
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""Execute action"""
community_obj = NetAppONTAPVserverPeer()
community_obj.apply()
if __name__ == '__main__':
main()
|
chrisspen/burlap
|
refs/heads/master
|
burlap/ssh.py
|
1
|
"""
OpenSSH tasks
=============
This module provides tools to manage OpenSSH server and client.
"""
from __future__ import print_function
# from fabric.api import hide, shell_env
# from fabric.contrib.files import append, sed
# from burlap.service import is_running, restart
# from burlap.files import watch
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
# def harden(allow_root_login=False, allow_password_auth=False,
# sshd_config='/etc/ssh/sshd_config'):
# """
# Apply best practices for ssh security.
#
# See :func:`burlap.ssh.disable_password_auth` and
# :func:`burlap.ssh.disable_root_login` for a detailed
# description.
#
# ::
#
# import burlap
#
# # This will apply all hardening techniques.
# burlap.ssh.harden()
#
# # Only apply some of the techniques.
# burlap.ssh.harden(allow_password_auth=True)
#
# # Override the sshd_config file location.
# burlap.ssh.harden(sshd_config='/etc/sshd_config')
#
# """
#
# if not allow_password_auth:
# disable_password_auth(sshd_config=sshd_config)
#
# if not allow_root_login:
# disable_root_login(sshd_config=sshd_config)
#
#
# def disable_password_auth(sshd_config='/etc/ssh/sshd_config'):
# """
# Do not allow users to use passwords to login via ssh.
# """
#
# _update_ssh_setting(sshd_config, 'PasswordAuthentication', 'no')
#
#
# def enable_password_auth(sshd_config='/etc/ssh/sshd_config'):
# """
# Allow users to use passwords to login via ssh.
# """
#
# _update_ssh_setting(sshd_config, 'PasswordAuthentication', 'yes')
#
#
# def disable_root_login(sshd_config='/etc/ssh/sshd_config'):
# """
# Do not allow root to login via ssh.
# """
#
# _update_ssh_setting(sshd_config, 'PermitRootLogin', 'no')
#
#
# def enable_root_login(sshd_config='/etc/ssh/sshd_config'):
# """
# Allow root to login via ssh.
# """
#
# _update_ssh_setting(sshd_config, 'PermitRootLogin', 'yes')
#
#
# def _update_ssh_setting(sshd_config, name, value):
# """
# Update a yes/no setting in the SSH config file
# """
#
# with watch(sshd_config) as config_file:
#
# with shell_env():
#
# # First try to change existing setting
# sed(sshd_config,
# r'^(\s*#\s*)?%s\s+(yes|no)' % name,
# '%s %s' % (name, value),
# use_sudo=True)
#
# # Then append setting if it's still missing
# _append(sshd_config,
# '%s %s' % (name, value),
# use_sudo=True)
#
# if config_file.changed and is_running('ssh'):
# restart('ssh')
# def _append(filename, regex, use_sudo):
# """
# Less verbose append
# """
# with hide('stdout', 'warnings'):
# return append(filename, regex, use_sudo=use_sudo)
class SSHNiceSatchel(Satchel):
name = 'sshnice'
@property
def packager_system_packages(self):
return {
FEDORA: ['cron'],
UBUNTU: ['cron'],
DEBIAN: ['cron'],
}
def set_defaults(self):
self.env.enabled = False
self.env.cron_script_path = '/etc/cron.d/sshnice'
self.env.cron_perms = '600'
@task(precursors=['packager'])
def configure(self):
r = self.local_renderer
if self.env.enabled:
self.install_packages()
remote_path = r.env.remote_path = self.env.cron_script_path
r.put(
local_path=self.find_template('sshnice/etc_crond_sshnice'),
remote_path=remote_path, use_sudo=True)
r.sudo('chown root:root %s' % remote_path)
# Must be 600, otherwise gives INSECURE MODE error.
# http://unix.stackexchange.com/questions/91202/cron-does-not-print-to-syslog
r.sudo('chmod {cron_perms} {remote_path}')
r.sudo('service cron restart')
else:
r.sudo('rm -f {cron_script_path}')
r.sudo('service cron restart')
sshnice = SSHNiceSatchel()
|
Xeleste/namebench
|
refs/heads/master
|
libnamebench/config_test.py
|
173
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
import sys
sys.path.append('..')
import third_party
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = 'NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569', 'hostname': 'y.ns.gin.ntt.net'}
self.assertEquals(config._ParseServerValue(line), expected)
def testOpenDNSLine(self):
line = 'OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None, 'hostname': 'resolver2.opendns.com'}
self.assertEquals(config._ParseServerValue(line), expected)
def testLineWithNoRegion(self):
line = 'Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0',
'hostname': 'vnsc-bak.sys.gtei.net'}
self.assertEquals(config._ParseServerValue(line), expected)
if __name__ == '__main__':
unittest.main()
|
WCCCEDU/twitter-commons
|
refs/heads/master
|
src/python/twitter/common/app/inspection.py
|
16
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
import inspect
import os
import sys
class Inspection(object):
class InternalError(Exception): pass
# TODO(wickman)
# Remove all calls to inspect.stack(). This is just bad. Port everything over
# to iterating from currentframe => outer frames.
@staticmethod
def find_main_from_caller():
last_frame = inspect.currentframe()
while True:
inspect_frame = last_frame.f_back
if not inspect_frame:
break
if 'main' in inspect_frame.f_locals:
return inspect_frame.f_locals['main']
last_frame = inspect_frame
raise Inspection.InternalError("Unable to detect main from the stack!")
@staticmethod
def print_stack_locals(out=sys.stderr):
stack = inspect.stack()[1:]
for fr_n in range(len(stack)):
print('--- frame %s ---\n' % fr_n, file=out)
for key in stack[fr_n][0].f_locals:
print(' %s => %s' % (key, stack[fr_n][0].f_locals[key]), file=out)
@staticmethod
def find_main_module():
stack = inspect.stack()[1:]
for fr_n in range(len(stack)):
if 'main' in stack[fr_n][0].f_locals:
return stack[fr_n][0].f_locals['__name__']
return None
@staticmethod
def get_main_locals():
stack = inspect.stack()[1:]
for fr_n in range(len(stack)):
if '__name__' in stack[fr_n][0].f_locals and (
stack[fr_n][0].f_locals['__name__'] == '__main__'):
return stack[fr_n][0].f_locals
return {}
@staticmethod
def find_calling_module():
last_frame = inspect.currentframe()
while True:
inspect_frame = last_frame.f_back
if not inspect_frame:
break
if '__name__' in inspect_frame.f_locals:
return inspect_frame.f_locals['__name__']
last_frame = inspect_frame
raise Inspection.InternalError("Unable to interpret stack frame!")
@staticmethod
def find_application_name():
__entry_point__ = None
locals = Inspection.get_main_locals()
if '__file__' in locals and locals['__file__'] is not None:
__entry_point__ = locals['__file__']
elif '__loader__' in locals:
from zipimport import zipimporter
from pkgutil import ImpLoader
# TODO(wickman) The monkeypatched zipimporter should probably not be a function
# but instead a properly delegating proxy.
if hasattr(locals['__loader__'], 'archive'):
# assuming it ends in .zip or .egg, it may be of package format, so
# foo-version-py2.6-arch.egg, so split off anything after '-'.
__entry_point__ = os.path.basename(locals['__loader__'].archive)
__entry_point__ = __entry_point__.split('-')[0].split('.')[0]
elif isinstance(locals['__loader__'], ImpLoader):
__entry_point__ = locals['__loader__'].get_filename()
else:
__entry_point__ = '__interpreter__'
app_name = os.path.basename(__entry_point__)
return app_name.split('.')[0]
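# --- Illustrative sketch (added; not part of the original file). It replays the
# archive-name handling from find_application_name() above on a hypothetical egg
# path, without inspecting any real stack frames.
if __name__ == '__main__':
    fake_archive = '/tmp/myapp-1.2.3-py2.6-linux_x86_64.egg'  # hypothetical path
    name = os.path.basename(fake_archive).split('-')[0].split('.')[0]
    print(name)  # -> 'myapp'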
|
marcsans/cnn-physics-perception
|
refs/heads/master
|
phy/lib/python2.7/site-packages/numpy/distutils/command/autodist.py
|
148
|
"""This module implements additional tests ala autoconf which can be useful.
"""
from __future__ import division, absolute_import, print_function
# We put them here since they could be easily reused outside numpy.distutils
def check_inline(cmd):
"""Return the inline identifier (may be empty)."""
cmd._check_compiler()
body = """
#ifndef __cplusplus
static %(inline)s int static_func (void)
{
return 0;
}
%(inline)s int nostatic_func (void)
{
return 0;
}
#endif"""
for kw in ['inline', '__inline__', '__inline']:
st = cmd.try_compile(body % {'inline': kw}, None, None)
if st:
return kw
return ''
def check_restrict(cmd):
"""Return the restrict identifier (may be empty)."""
cmd._check_compiler()
body = """
static int static_func (char * %(restrict)s a)
{
return 0;
}
"""
for kw in ['restrict', '__restrict__', '__restrict']:
st = cmd.try_compile(body % {'restrict': kw}, None, None)
if st:
return kw
return ''
def check_compiler_gcc4(cmd):
"""Return True if the C compiler is GCC 4.x."""
cmd._check_compiler()
body = """
int
main()
{
#if (! defined __GNUC__) || (__GNUC__ < 4)
#error gcc >= 4 required
#endif
return 0;
}
"""
return cmd.try_compile(body, None, None)
def check_gcc_function_attribute(cmd, attribute, name):
"""Return True if the given function attribute is supported."""
cmd._check_compiler()
body = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s %s(void*);
int
main()
{
return 0;
}
""" % (attribute, name)
return cmd.try_compile(body, None, None) != 0
def check_gcc_variable_attribute(cmd, attribute):
"""Return True if the given variable attribute is supported."""
cmd._check_compiler()
body = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s foo;
int
main()
{
return 0;
}
""" % (attribute, )
return cmd.try_compile(body, None, None) != 0
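# --- Hedged illustration (added; not part of numpy.distutils). Each helper above
# only needs an object that behaves like the distutils config command: it must
# expose _check_compiler() and try_compile(body, headers, include_dirs). The fake
# command below satisfies that contract so the selection logic can be traced
# without invoking a real compiler.
class _FakeConfigCmd(object):
    def _check_compiler(self):
        pass
    def try_compile(self, body, headers, include_dirs):
        # Pretend that only the plain 'inline' keyword compiles.
        return 1 if 'inline' in body and '__inline' not in body else 0
if __name__ == '__main__':
    print(check_inline(_FakeConfigCmd()))  # -> 'inline' with the fake command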
|
kewu1992/test-infra
|
refs/heads/master
|
scenarios/execute.py
|
33
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Executes a command."""
import argparse
import os
import subprocess
import sys
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def main(envs, cmd):
"""Run script and verify it exits 0."""
for env in envs:
key, val = env.split('=', 1)
print >>sys.stderr, '%s=%s' % (key, val)
os.environ[key] = val
if not cmd:
raise ValueError(cmd)
check(*cmd)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--env', default=[], action='append')
PARSER.add_argument('cmd', nargs=1)
PARSER.add_argument('args', nargs='*')
ARGS = PARSER.parse_args()
main(ARGS.env, ARGS.cmd + ARGS.args)
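# --- Added usage note (illustrative; not part of the original scenario): each
# --env KEY=VALUE flag is exported before the command runs, so
#   ./execute.py --env FOO=bar echo hello
# behaves like calling main(['FOO=bar'], ['echo', 'hello']) directly, assuming an
# 'echo' binary is available on PATH.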
|
davido/buck
|
refs/heads/master
|
third-party/py/setuptools/pkg_resources/_vendor/__init__.py
|
12133432
| |
rahulsharma1991/frontera
|
refs/heads/master
|
frontera/tests/__init__.py
|
12133432
| |
fgesora/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/test/test_fields.py
|
391
|
#
# Use this module to retrieve the fields you need according to the type
# of the OpenOffice operation:
# * Insert a Field
# * Insert a RepeatIn
#
import xmlrpclib
import time
sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')
def get(object, level=3, ending=None, ending_excl=None, recur=None, root=''):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
res = sock.execute('terp', 3, 'admin', 'account.invoice', 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
print root+'/'+k
if res[k]['type'] in recur:
print root+'/'+k
if (res[k]['type'] in recur) and (level>0):
get(res[k]['relation'], level-1, ending, ending_excl, recur, root+'/'+k)
print 'Field selection for a Field', '='*40
get('account.invoice', level=0, ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
print
print 'Field selection for a repeatIn', '='*40
get('account.invoice', level=0, ending=['one2many','many2many'], recur=['many2one'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aesaae/ardupilot_str
|
refs/heads/master
|
mk/PX4/Tools/genmsg/test/test_genmsg_command_line.py
|
216
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
from genmsg.command_line import includepath_to_dict
assert {} == includepath_to_dict([])
assert {'std_msgs': [ 'foo' ]} == includepath_to_dict(['std_msgs:foo'])
assert {'std_msgs': [ 'foo' ], 'bar_msgs': [ 'baz:colon' ]} == includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon'])
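# (Added note, illustrative only.) The strings above use genmsg's
# "package:include_dir" convention: entries for the same package accumulate into
# one list, and only the first ':' is significant, which is why
# 'bar_msgs:baz:colon' keeps 'baz:colon' intact as the include path.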
|
psobot/SampleScanner
|
refs/heads/master
|
tests/readme_test.py
|
1
|
import subprocess
def readme_contentsof(name):
with open('README.md') as readme:
return (
readme.read()
.split('```contentsof<%s>' % name)[1]
.split('```')[0]
.strip()
)
def command_line_output_in_readme():
return readme_contentsof('samplescanner -h')
def license_output_in_readme():
return readme_contentsof('cat LICENSE')
def expected_command_line_output():
return subprocess.check_output(
['./samplescanner', '-h'],
stderr=subprocess.STDOUT,
).strip()
def expected_license():
with open('LICENSE') as license:
return license.read().strip()
def test_readme_contains_proper_command_line_output():
assert command_line_output_in_readme() == expected_command_line_output()
def test_readme_contains_content_of_license():
assert license_output_in_readme() == expected_license()
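# --- Added illustration (not part of the original test suite). readme_contentsof()
# expects README.md to carry fenced blocks tagged like:
#   ```contentsof<samplescanner -h>
#   ...captured --help output...
#   ```
# The hypothetical helper below replays the same split logic on an in-memory
# string instead of the real README.md.
def _demo_contentsof(text, name):
    return text.split('```contentsof<%s>' % name)[1].split('```')[0].strip()
def test_demo_contentsof_split():
    sample = "intro ```contentsof<demo>\nhello\n``` outro"
    assert _demo_contentsof(sample, 'demo') == 'hello'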
|
Peddle/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/utils_tests/models.py
|
265
|
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=100)
def next(self):
return self
class Thing(models.Model):
name = models.CharField(max_length=100)
category = models.ForeignKey(Category)
|
simontakite/sysadmin
|
refs/heads/master
|
pythonscripts/programmingpython/Gui/PIL/temp.py
|
2
|
"""
display all images in a directory as thumbnail image buttons that display
the full image when clicked; requires PIL for JPEGs and thumbnail image
creation; to do: add scrolling if too many thumbs for window!
"""
import os, sys, math
from tkinter import *
from PIL import Image # <== required for thumbs
from PIL.ImageTk import PhotoImage # <== required for JPEG display
def makeThumbs(imgdir, size=(100, 100), subdir='thumbs'):
"""
get thumbnail images for all images in a directory; for each image, create
and save a new thumb, or load and return an existing thumb; makes thumb
dir if needed; returns a list of (image filename, thumb image object);
caller can also run listdir on thumb dir to load; on bad file types may
raise IOError, or other; caveat: could also check file timestamps;
"""
thumbdir = os.path.join(imgdir, subdir)
if not os.path.exists(thumbdir):
os.mkdir(thumbdir)
thumbs = []
for imgfile in os.listdir(imgdir):
thumbpath = os.path.join(thumbdir, imgfile)
if os.path.exists(thumbpath):
thumbobj = Image.open(thumbpath) # use already created
thumbs.append((imgfile, thumbobj))
else:
print('making', thumbpath)
imgpath = os.path.join(imgdir, imgfile)
try:
imgobj = Image.open(imgpath) # make new thumb
imgobj.thumbnail(size, Image.ANTIALIAS) # best downsize filter
imgobj.save(thumbpath) # type via ext or passed
thumbs.append((imgfile, imgobj))
except: # not always IOError
print("Skipping: ", imgpath)
return thumbs
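# (Added note, illustrative only.) makeThumbs('images') returns pairs such as
# [('photo1.jpg', <PIL Image>), ...] and caches the resized copies under
# images/thumbs/; viewer() below turns each returned pair into one thumbnail Button.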
class ViewOne(Toplevel):
"""
open a single image in a pop-up window when created; photoimage
object must be saved: images are erased if object is reclaimed;
"""
def __init__(self, imgdir, imgfile):
Toplevel.__init__(self)
self.title(imgfile)
imgpath = os.path.join(imgdir, imgfile)
imgobj = PhotoImage(file=imgpath)
Label(self, image=imgobj).pack()
print(imgpath, imgobj.width(), imgobj.height()) # size in pixels
self.savephoto = imgobj # keep reference on me
def viewer(imgdir, kind=Toplevel, cols=None):
"""
make thumb links window for an image directory: one thumb button per image;
use kind=Tk to show in main app window, or Frame container (pack); imgfile
differs per loop: must save with a default; photoimage objs must be saved:
erased if reclaimed; packed row frames (versus grids, fixed-sizes, canvas);
"""
win = kind()
win.title('Viewer: ' + imgdir)
quit = Button(win, text='Quit', command=win.quit) # pack first:
quit.pack(fill=X, side=BOTTOM) # so clip last
thumbs = makeThumbs(imgdir)
if not cols:
cols = int(math.ceil(math.sqrt(len(thumbs)))) # fixed or N x N
savephotos = []
while thumbs:
thumbsrow, thumbs = thumbs[:cols], thumbs[cols:]
row = Frame(win)
row.pack(fill=BOTH, expand=YES)
for (imgfile, imgobj) in thumbsrow:
photo = PhotoImage(imgobj)
link = Button(row, image=photo)
handler = lambda savefile=imgfile: ViewOne(imgdir, savefile)
link.config(command=handler)
link.pack(side=LEFT, expand=YES, fill=X)
savephotos.append(photo)
return win, savephotos
if __name__ == '__main__':
imgdir = (len(sys.argv) > 1 and sys.argv[1]) or 'images'
main, save = viewer(imgdir, kind=Tk)
main.mainloop()
|
jandersson/website
|
refs/heads/master
|
lib/werkzeug/contrib/testtools.py
|
365
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.testtools
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements extended wrappers for simplified testing.
`TestResponse`
A response wrapper which adds various cached attributes for
simplified assertions on various content types.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.utils import cached_property, import_string
from werkzeug.wrappers import Response
from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
'will be removed with Werkzeug 1.0'))
class ContentAccessors(object):
"""
A mixin class for response objects that provides a couple of useful
accessors for unittesting.
"""
def xml(self):
"""Get an etree if possible."""
if 'xml' not in self.mimetype:
raise AttributeError(
'Not a XML response (Content-Type: %s)'
% self.mimetype)
for module in ['xml.etree.ElementTree', 'ElementTree',
'elementtree.ElementTree']:
etree = import_string(module, silent=True)
if etree is not None:
return etree.XML(self.body)
raise RuntimeError('You must have ElementTree installed '
'to use TestResponse.xml')
xml = cached_property(xml)
def lxml(self):
"""Get an lxml etree if possible."""
if ('html' not in self.mimetype and 'xml' not in self.mimetype):
raise AttributeError('Not an HTML/XML response')
from lxml import etree
try:
from lxml.html import fromstring
except ImportError:
fromstring = etree.HTML
if self.mimetype == 'text/html':
return fromstring(self.data)
return etree.XML(self.data)
lxml = cached_property(lxml)
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.mimetype:
raise AttributeError('Not a JSON response')
try:
from simplejson import loads
except ImportError:
from json import loads
return loads(self.data)
json = cached_property(json)
class TestResponse(Response, ContentAccessors):
"""Pass this to `werkzeug.test.Client` for easier unittesting."""
|
yukoba/sympy
|
refs/heads/master
|
examples/intermediate/mplot3d.py
|
93
|
#!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
|
sbussetti/django-rq
|
refs/heads/master
|
django_rq/admin.py
|
7
|
from django.contrib import admin
from django_rq import settings
if settings.SHOW_ADMIN_LINK:
admin.site.index_template = 'django_rq/index.html'
|
deot95/Tesis
|
refs/heads/master
|
Proyecto de Grado Ingeniería Electrónica/Workspace/Comparison/Full SWMM/ddpg.py
|
1
|
import linear_env
import sim_env
from actor import Actor
from critic import Critic
from replay_buffer import ReplayBuffer
import numpy as np
import tensorflow as tf
import keras.backend as kbck
import json
import time
import argparse
import matplotlib.pylab as plt
import os.path
def ou(x, mu, theta, sigma):
return theta * (mu - x) + sigma * np.random.randn(np.shape(x)[0])
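# --- Illustrative note (added; not part of the original script). ou() returns a
# single Ornstein-Uhlenbeck increment that pulls x back toward mu with rate theta
# plus Gaussian noise of scale sigma; the training loops below scale it by the
# exploration epsilon before adding it to the actor's raw action, e.g.:
#   a_noisy = a_raw + max(epsilon, 0) * ou(a_raw, 0.5, 1, 1.5)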
def simulate(control, swmm ,flows):
best_reward = -1*np.inf
BUFFER_SIZE = 100000
BATCH_SIZE = 120
GAMMA = 0.99
TAU = 0.01 #Target Network HyperParameters
LRA = 0.0001 #Learning rate for Actor
LRC = 0.001 #Lerning rate for Critic
action_dim = 8
state_dim = 10
max_steps = 6000
np.random.seed(100)
EXPLORE = 100000.
episode_count = 1000
done = False
step = 0
epsilon = 1
if swmm:
if control:
#Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
kbck.set_session(sess)
# Actor, critic and replay buffer creation
actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
buff = ReplayBuffer(BUFFER_SIZE)
# Get the linear environment
reward_hist = []
for i in range(episode_count):
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
inp_name = "swmm/modelo2.inp"
inp = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + inp_name
vref = np.zeros((state_dim,))
env = sim_env.sim_env(inp,vref)
rainfile()
s_t = np.divide(env.reset(),env.vmax)
total_reward = 0.
for j in range(max_steps):
## Noise addition for exploration
## Ornstein-Uhlenbeck process
loss = 0
epsilon -= 1.0 / EXPLORE
a_t = np.zeros([1,action_dim])
noise_t = np.zeros([1,action_dim])
a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
#noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
a_t[0] = np.minimum(np.maximum(a_t_original[0] + noise_t[0],np.zeros(np.shape(a_t_original))),np.ones(np.shape(a_t_original)))
#Act over the system and get info of the next states
s_t1 , r_t, done = env.step(list(a_t[0]))
s_t1 = np.divide(s_t1,env.vmax)
#Add replay buffer
buff.add(s_t, a_t[0], r_t, s_t1, done)
#Do the batch update
batch = buff.getBatch(BATCH_SIZE)
states = np.asarray([e[0] for e in batch])
actions = np.asarray([e[1] for e in batch])
rewards = np.asarray([e[2] for e in batch])
next_states = np.asarray([e[3] for e in batch])
dones = np.asarray([e[4] for e in batch])
# Get estimated q-values of the pair (next_state,mu(next_state))
actions_next = actor.target_munet.predict(next_states)
target_q_values = critic.target_qnet.predict([next_states, actions_next])
y_t = np.zeros(np.shape(actions))
for k in range(len(batch)):
if dones[k]:
y_t[k] = rewards[k]
else:
y_t[k] = rewards[k] + GAMMA*target_q_values[k]
loss += critic.qnet.train_on_batch([states,actions], y_t)
a_for_grad = actor.munet.predict(states)
grads = critic.gradients(states, a_for_grad)
actor.train(states, grads)
actor.target_train()
critic.target_train()
total_reward = total_reward + GAMMA*r_t
s_t = s_t1
if j%100==0:
print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
if done:
break
reward_hist.append(total_reward)
np.save("reward_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
if i%20 == 0:
print("Saving the networks...")
actor.munet.save_weights("./actors/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
critic.qnet.save_weights("./critics/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
if total_reward > best_reward:
print("Saving Best Actor...")
np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
actor.munet.save_weights("./actors/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
critic.qnet.save_weights("./critics/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
best_reward = total_reward
print("TOTAL REWARD @ " + str(i) +"-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("")
print("Finish.")
else:
inp_name = "swmm/modelo2.inp"
inp = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + inp_name
vref = np.zeros((state_dim,))
rainfile()
env = sim_env.sim_env(inp,vref)
resv = env.free_sim()
print(np.shape(resv))
vmax = env.vmax
vmax[0] += 0.002
resv_norm = np.divide(resv,np.matlib.repmat(env.vmax,np.shape(resv)[0],1))
x = np.linspace(0,1800,np.shape(resv)[0])
font_labels = 16
font_legends = 18
ticksize = 16
width = 2.5
f , axarr = plt.subplots(nrows=1, ncols=2,figsize=(14,8),sharex=True )
## Plot Volume Results
lines = axarr[0].plot(x,resv_norm[:,:5],linewidth=width)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(5))),prop ={'size':font_legends})
axarr[0].set_title("Volumes - Tanks 1 to 5",fontsize=font_labels)
axarr[0].set_xlabel("Time(s)",fontsize=font_labels)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[0].tick_params(labelsize=ticksize)
lines = axarr[1].plot(x,resv_norm[:,5:],linewidth=width)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=10 else "vT",range(5,10))),prop ={'size':font_legends})
axarr[1].set_title("Volumes - Tanks 6 to 9 and Storm Tank",fontsize=font_labels)
axarr[1].set_xlabel("Time(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[1].tick_params(labelsize=ticksize)
plt.tight_layout()
plt.show()
else:
# Constants for the linear environment
Hs = 1800
A1 = 0.0063 ; mu1 = 500; sigma1 = 150
A2 = 0.018; mu2 = 550; sigma2 = 150
dt = 1
x = np.arange(Hs)
d = np.zeros((2,Hs))
if control:
#Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
kbck.set_session(sess)
# Actor, critic and replay buffer creation
actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
buff = ReplayBuffer(BUFFER_SIZE)
# Get the linear environment
reward_hist = []
for i in range(episode_count):
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
A1 += 0.0004*np.random.rand()
mu1 += 50*np.random.rand()
sigma1 += 14*np.random.rand()
A2 += 0.00096*np.random.rand()
mu2 += 50*np.random.rand()
sigma2 += 14*np.random.rand()
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
s_t = np.divide(env.reset(),env.vmax)
total_reward = 0.
for j in range(max_steps):
## Noise addition for exploration
## Ornstein-Uhlenbeck process
loss = 0
epsilon -= 1.0 / EXPLORE
a_t = np.zeros([1,action_dim])
noise_t = np.zeros([1,action_dim])
a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
#noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
a_t[0] = a_t_original[0] + noise_t[0]
#Act over the system and get info of the next states
s_t1 , r_t, done, _ = env.step(a_t[0],flows=flows)
s_t1 = np.divide(s_t1,env.vmax)
#Add replay buffer
buff.add(s_t, a_t[0], r_t, s_t1, done)
#Do the batch update
batch = buff.getBatch(BATCH_SIZE)
states = np.asarray([e[0] for e in batch])
actions = np.asarray([e[1] for e in batch])
rewards = np.asarray([e[2] for e in batch])
next_states = np.asarray([e[3] for e in batch])
dones = np.asarray([e[4] for e in batch])
# Get estimated q-values of the pair (next_state,mu(next_state))
actions_next = actor.target_munet.predict(next_states)
target_q_values = critic.target_qnet.predict([next_states, actions_next])
y_t = np.zeros(np.shape(actions))
for k in range(len(batch)):
if dones[k]:
y_t[k] = rewards[k]
else:
y_t[k] = rewards[k] + GAMMA*target_q_values[k]
loss += critic.qnet.train_on_batch([states,actions], y_t)
a_for_grad = actor.munet.predict(states)
grads = critic.gradients(states, a_for_grad)
actor.train(states, grads)
actor.target_train()
critic.target_train()
total_reward = total_reward + GAMMA*r_t
s_t = s_t1
if j%100==0:
print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
if done:
break
reward_hist.append(total_reward)
np.save("reward_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
if i%20 == 0:
print("Saving the networks...")
actor.munet.save_weights("./actors/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
critic.qnet.save_weights("./critics/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
if total_reward > best_reward:
print("Saving Best Actor...")
np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
actor.munet.save_weights("./actors/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
critic.qnet.save_weights("./critics/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
best_reward = total_reward
print("TOTAL REWARD @ " + str(i) +"-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("")
print("Finish.")
else:
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
resv, resf, resu = env.free_sim()
f , axarr = plt.subplots(nrows=1, ncols=2,figsize = (14,8) )
resv_norm = np.divide(np.transpose(resv),np.matlib.repmat(env.vmax,Hs,1))
resu = np.transpose(np.asarray(resu))
width = 2.5
font_legends = 16
font_labels = 16
ticksize = 16
## Plot Volume Results
lines = axarr[0].plot(x,resv_norm[:,:5],linewidth=width)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(5))),prop ={'size':font_legends})
axarr[0].set_title("Volumes - Tanks 1 to 5",fontsize=font_labels)
axarr[0].set_xlabel("Time(s)",fontsize=font_labels)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[0].tick_params(labelsize=ticksize)
lines = axarr[1].plot(x,resv_norm[:,5:],linewidth=width)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=10 else "vT",range(5,10))),prop ={'size':font_legends})
axarr[1].set_title("Volumes - Tanks 6 to 9 and Storm Tank",fontsize=font_labels)
axarr[1].set_xlabel("Time(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[1].tick_params(labelsize=ticksize)
plt.tight_layout()
plt.show()
def rainfile():
from math import exp
import numpy as np
from matplotlib import pylab as plt
#Gaussian Extension
A1 = 0.0063 ; mu1 = 500; sigma1 = 150
A2 = 0.018; mu2 = 550; sigma2 = 150
dt = 1
Hs = 1800
x = np.arange(0,Hs,dt)
d = [[],[]]
# dconst = 0.5*mpc_obj.k1*mpc_obj.vmax(1);
d[0] = A1*np.exp((-(x-mu1)**2)/(2*sigma1**2)) # Node 1 - left
d[1] = A2*np.exp((-(x-mu2)**2)/(2*sigma2**2)) # Node 2 - right
def secs_to_hour(secs_convert):
hour = secs_convert//3600
mins = (secs_convert%3600)//60
secs = secs_convert%60
return '{h:02d}:{m:02d}'.format(h=hour,m=mins)
secs_hour_vec = np.vectorize(secs_to_hour)
for k in (1,2):
with open('swmm/runoff%d.dat' % k, 'w') as f:
i = 0
for (t,val) in zip(secs_hour_vec(x), d[k-1]):
if i%60 == 0:
f.write(t+" "+str(val)+"\n")
i += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c","--control", type=int, choices = [0,1], help = "Choose between control(1) or free dynamics(0)")
parser.add_argument("-s","--swmm", type=int, choices = [0,1], help = "Choose between a simulation with swmm(1) or not(0)")
parser.add_argument("-f","--flow", type=int, choices = [0,1], help = "Choose between a simulation with flows(1) or not(0)")
args = parser.parse_args()
if args.flow == 1 and args.swmm == 1:
print("Conflicting option flow 1 and swmm 1")
else:
t0 = time.process_time()
simulate(control=args.control, swmm=args.swmm, flows = args.flow)
tf = time.process_time()
print("Elapsed time: ",tf-t0)
|
aifil/odoo
|
refs/heads/8.0
|
openerp/report/render/rml2txt/rml2txt.py
|
49
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sys
import StringIO
from lxml import etree
import utils
Font_size= 10.0
def verbose(text):
sys.stderr.write(text+"\n")
class textbox(object):
"""A box containing plain text.
It can have an offset, in chars.
Lines can be either text strings, or textbox'es, recursively.
"""
def __init__(self,x=0, y=0):
self.posx = x
self.posy = y
self.lines = []
self.curline = ''
self.endspace = False
def newline(self):
if isinstance(self.curline, textbox):
self.lines.extend(self.curline.renderlines())
else:
self.lines.append(self.curline)
self.curline = ''
def fline(self):
if isinstance(self.curline, textbox):
self.lines.extend(self.curline.renderlines())
elif len(self.curline):
self.lines.append(self.curline)
self.curline = ''
def appendtxt(self,txt):
"""Append some text to the current line.
Mimic the HTML behaviour, where all whitespace evaluates to
a single space """
if not txt:
return
bs = es = False
if txt[0].isspace():
bs = True
if txt[len(txt)-1].isspace():
es = True
if bs and not self.endspace:
self.curline += " "
self.curline += txt.strip().replace("\n"," ").replace("\t"," ")
if es:
self.curline += " "
self.endspace = es
def rendertxt(self,xoffset=0):
result = ''
lineoff = ""
for i in range(self.posy):
result +="\n"
for i in range(self.posx+xoffset):
lineoff+=" "
for l in self.lines:
result+= lineoff+ l +"\n"
return result
def renderlines(self,pad=0):
"""Returns a list of lines, from the current object
pad: all lines must be at least pad characters.
"""
result = []
lineoff = ""
for i in range(self.posx):
lineoff+=" "
for l in self.lines:
lpad = ""
if pad and len(l) < pad :
for i in range(pad - len(l)):
lpad += " "
#elif pad and len(l) > pad ?
result.append(lineoff+ l+lpad)
return result
def haplines(self,arr,offset,cc= ''):
""" Horizontaly append lines
"""
while len(self.lines) < len(arr):
self.lines.append("")
for i in range(len(self.lines)):
while len(self.lines[i]) < offset:
self.lines[i] += " "
for i in range(len(arr)):
self.lines[i] += cc +arr[i]
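# (Added illustration; not part of the original Odoo module.) A minimal sketch of
# the textbox contract relied on by _flowable below: appendtxt() collapses
# whitespace onto the current line, newline() commits it, and rendertxt() applies
# the y offset as blank lines and the x offset as a left margin.
def _textbox_demo():
    tb = textbox(x=4, y=1)
    tb.appendtxt("Hello   ")
    tb.appendtxt("\n world\t!")
    tb.newline()
    return tb.rendertxt()  # "\n    Hello world !\n"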
class _flowable(object):
def __init__(self, template, doc,localcontext):
self._tags = {
'1title': self._tag_title,
'1spacer': self._tag_spacer,
'para': self._tag_para,
'font': self._tag_font,
'section': self._tag_section,
'1nextFrame': self._tag_next_frame,
'blockTable': self._tag_table,
'1pageBreak': self._tag_page_break,
'1setNextTemplate': self._tag_next_template,
}
self.template = template
self.doc = doc
self.localcontext = localcontext
self.nitags = []
self.tbox = None
def warn_nitag(self,tag):
if tag not in self.nitags:
verbose("Unknown tag \"%s\", please implement it." % tag)
self.nitags.append(tag)
def _tag_page_break(self, node):
return "\f"
def _tag_next_template(self, node):
return ''
def _tag_next_frame(self, node):
result=self.template.frame_stop()
result+='\n'
result+=self.template.frame_start()
return result
def _tag_title(self, node):
node.tagName='h1'
return node.toxml()
def _tag_spacer(self, node):
length = 1+int(utils.unit_get(node.get('length')))/35
return "\n"*length
def _tag_table(self, node):
self.tb.fline()
saved_tb = self.tb
self.tb = None
sizes = None
if node.get('colWidths'):
sizes = map(lambda x: utils.unit_get(x), node.get('colWidths').split(','))
trs = []
for n in utils._child_get(node,self):
if n.tag == 'tr':
tds = []
for m in utils._child_get(n,self):
if m.tag == 'td':
self.tb = textbox()
self.rec_render_cnodes(m)
tds.append(self.tb)
self.tb = None
if len(tds):
trs.append(tds)
if not sizes:
verbose("computing table sizes..")
for tds in trs:
trt = textbox()
off=0
for i in range(len(tds)):
p = int(sizes[i]/Font_size)
trl = tds[i].renderlines(pad=p)
trt.haplines(trl,off)
off += sizes[i]/Font_size
saved_tb.curline = trt
saved_tb.fline()
self.tb = saved_tb
return
def _tag_para(self, node):
#TODO: styles
self.rec_render_cnodes(node)
self.tb.newline()
def _tag_section(self, node):
#TODO: styles
self.rec_render_cnodes(node)
self.tb.newline()
def _tag_font(self, node):
"""We do ignore fonts.."""
self.rec_render_cnodes(node)
def rec_render_cnodes(self,node):
self.tb.appendtxt(utils._process_text(self, node.text or ''))
for n in utils._child_get(node,self):
self.rec_render(n)
self.tb.appendtxt(utils._process_text(self, node.tail or ''))
def rec_render(self,node):
""" Recursive render: fill outarr with text of current node
"""
if node.tag is not None:
if node.tag in self._tags:
self._tags[node.tag](node)
else:
self.warn_nitag(node.tag)
def render(self, node):
self.tb= textbox()
#result = self.template.start()
#result += self.template.frame_start()
self.rec_render_cnodes(node)
#result += self.template.frame_stop()
#result += self.template.end()
result = self.tb.rendertxt()
del self.tb
return result
class _rml_tmpl_tag(object):
def __init__(self, *args):
pass
def tag_start(self):
return ''
def tag_end(self):
return False
def tag_stop(self):
return ''
def tag_mergeable(self):
return True
class _rml_tmpl_frame(_rml_tmpl_tag):
def __init__(self, posx, width):
self.width = width
self.posx = posx
def tag_start(self):
return "frame start"
def tag_end(self):
return True
def tag_stop(self):
return "frame stop"
def tag_mergeable(self):
return False
# An awful workaround since I don't really understand the semantics behind merge.
def merge(self, frame):
pass
class _rml_tmpl_draw_string(_rml_tmpl_tag):
def __init__(self, node, style):
self.posx = utils.unit_get(node.get('x'))
self.posy = utils.unit_get(node.get('y'))
aligns = {
'drawString': 'left',
'drawRightString': 'right',
'drawCentredString': 'center'
}
align = aligns[node.localName]
self.pos = [(self.posx, self.posy, align, utils.text_get(node), style.get('td'), style.font_size_get('td'))]
def tag_start(self):
return "draw string \"%s\" @(%d,%d)..\n" %("txt",self.posx,self.posy)
def merge(self, ds):
self.pos+=ds.pos
class _rml_tmpl_draw_lines(_rml_tmpl_tag):
def __init__(self, node, style):
coord = [utils.unit_get(x) for x in utils.text_get(node).split(' ')]
self.ok = False
self.posx = coord[0]
self.posy = coord[1]
self.width = coord[2]-coord[0]
self.ok = coord[1]==coord[3]
self.style = style
self.style = style.get('hr')
def tag_start(self):
return "draw lines..\n"
class _rml_stylesheet(object):
def __init__(self, stylesheet, doc):
self.doc = doc
self.attrs = {}
self._tags = {
'fontSize': lambda x: ('font-size',str(utils.unit_get(x))+'px'),
'alignment': lambda x: ('text-align',str(x))
}
result = ''
for ps in stylesheet.findall('paraStyle'):
attr = {}
attrs = ps.attributes
for i in range(attrs.length):
name = attrs.item(i).localName
attr[name] = ps.get(name)
attrs = []
for a in attr:
if a in self._tags:
attrs.append("%s:%s" % self._tags[a](attr[a]))
if len(attrs):
result += "p."+attr['name']+" {"+'; '.join(attrs)+"}\n"
self.result = result
def render(self):
return ''
class _rml_draw_style(object):
def __init__(self):
self.style = {}
self._styles = {
'fill': lambda x: {'td': {'color':x.get('color')}},
'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}},
'stroke': lambda x: {'hr': {'color':x.get('color')}},
}
def update(self, node):
if node.localName in self._styles:
result = self._styles[node.localName](node)
for key in result:
if key in self.style:
self.style[key].update(result[key])
else:
self.style[key] = result[key]
def font_size_get(self,tag):
size = utils.unit_get(self.style.get('td', {}).get('font-size','16'))
return size
def get(self,tag):
if not tag in self.style:
return ""
return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()])
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
self.localcontext = localcontext
self.frame_pos = -1
self.frames = []
self.template_order = []
self.page_template = {}
self.loop = 0
self._tags = {
'drawString': _rml_tmpl_draw_string,
'drawRightString': _rml_tmpl_draw_string,
'drawCentredString': _rml_tmpl_draw_string,
'lines': _rml_tmpl_draw_lines
}
self.style = _rml_draw_style()
for pt in node.findall('pageTemplate'):
frames = {}
id = pt.get('id')
self.template_order.append(id)
for tmpl in pt.findall('frame'):
posy = int(utils.unit_get(tmpl.get('y1'))) #+utils.unit_get(tmpl.get('height')))
posx = int(utils.unit_get(tmpl.get('x1')))
frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
for tmpl in node.findall('pageGraphics'):
for n in tmpl.getchildren():
if n.nodeType==n.ELEMENT_NODE:
if n.localName in self._tags:
t = self._tags[n.localName](n, self.style)
frames[(t.posy,t.posx,n.localName)] = t
else:
self.style.update(n)
keys = frames.keys()
keys.sort()
keys.reverse()
self.page_template[id] = []
for key in range(len(keys)):
if key>0 and keys[key-1][0] == keys[key][0]:
if type(self.page_template[id][-1]) == type(frames[keys[key]]):
if self.page_template[id][-1].tag_mergeable():
self.page_template[id][-1].merge(frames[keys[key]])
continue
self.page_template[id].append(frames[keys[key]])
self.template = self.template_order[0]
def _get_style(self):
return self.style
def set_next_template(self):
self.template = self.template_order[(self.template_order.index(self.template)+1) % len(self.template_order)]
self.frame_pos = -1
def set_template(self, name):
self.template = name
self.frame_pos = -1
def frame_start(self):
result = ''
frames = self.page_template[self.template]
ok = True
while ok:
self.frame_pos += 1
if self.frame_pos>=len(frames):
self.frame_pos=0
self.loop=1
ok = False
continue
f = frames[self.frame_pos]
result+=f.tag_start()
ok = not f.tag_end()
if ok:
result+=f.tag_stop()
return result
def frame_stop(self):
frames = self.page_template[self.template]
f = frames[self.frame_pos]
result=f.tag_stop()
return result
def start(self):
return ''
def end(self):
return "template end\n"
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
self.localcontext = {} if localcontext is None else localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.result = ''
def render(self, out):
#el = self.etree.findall('docinit')
#if el:
#self.docinit(el)
#el = self.etree.findall('stylesheet')
#self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('template')
self.result =""
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self)
stories = utils._child_get(self.etree, self, 'story')
for story in stories:
if self.result:
self.result += '\f'
f = _flowable(pt_obj,story,self.localcontext)
self.result += f.render(story)
del f
else:
self.result = "<cannot render w/o template>"
self.result += '\n'
out.write( self.result)
def parseNode(rml, localcontext=None,fout=None, images=None, path='.',title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
fp = StringIO.StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None,fout=None, images=None, path='.',title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO.StringIO()
r.render(fp)
return fp.getvalue()
def trml2pdf_help():
print 'Usage: rml2txt input.rml >output.txt'
print 'Render the standard input (RML) and output a TXT file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()).encode('iso8859-7')
else:
print 'Usage: trml2txt input.rml >output.txt'
print 'Try \'trml2txt --help\' for more information.'
|
mkrupcale/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/openvswitch_bridge.py
|
16
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=C0111
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: openvswitch_bridge
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch bridges
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch bridges
options:
bridge:
required: true
description:
- Name of bridge or fake bridge to manage
parent:
version_added: "2.3"
required: false
default: None
description:
- Bridge parent of the fake bridge to manage
vlan:
version_added: "2.3"
required: false
default: None
description:
- The VLAN id of the fake bridge to manage (must be between 0 and 4095)
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the bridge should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
required: false
default: None
description:
- A dictionary of external-ids. Omitting this parameter is a No-op.
To clear all external-ids pass an empty value.
fail_mode:
version_added: 2.0
default: None
required: false
choices : [secure, standalone]
description:
- Set bridge fail-mode. The default value (None) is a No-op.
'''
EXAMPLES = '''
# Create a bridge named br-int
- openvswitch_bridge:
bridge: br-int
state: present
# Create a fake bridge named br-int within br-parent on the VLAN 405
- openvswitch_bridge:
bridge: br-int
parent: br-parent
vlan: 405
state: present
# Create an integration bridge
- openvswitch_bridge:
bridge: br-int
state: present
fail_mode: secure
args:
external_ids:
bridge-id: br-int
'''
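# A further example, illustrative only (not part of the module's EXAMPLES above):
# clearing all external-ids, as the documentation describes, by passing an empty dict.
#
#   - openvswitch_bridge:
#       bridge: br-int
#       state: present
#       external_ids: {}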
class OVSBridge(object):
""" Interface to ovs-vsctl. """
def __init__(self, module):
self.module = module
self.bridge = module.params['bridge']
self.parent = module.params['parent']
self.vlan = module.params['vlan']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.fail_mode = module.params['fail_mode']
if self.parent:
if self.vlan is None:
self.module.fail_json(msg='VLAN id must be set when parent is defined')
elif self.vlan < 0 or self.vlan > 4095:
self.module.fail_json(msg='Invalid VLAN ID (must be between 0 and 4095)')
def _vsctl(self, command):
'''Run ovs-vsctl command'''
return self.module.run_command(['ovs-vsctl', '-t',
str(self.timeout)] + command)
def exists(self):
'''Check if the bridge already exists'''
rtc, _, err = self._vsctl(['br-exists', self.bridge])
if rtc == 0: # See ovs-vsctl(8) for status codes
return True
if rtc == 2:
return False
self.module.fail_json(msg=err)
def add(self):
'''Create the bridge'''
if self.parent and self.vlan: # Add fake bridge
            rtc, _, err = self._vsctl(['add-br', self.bridge, self.parent, str(self.vlan)])
else:
rtc, _, err = self._vsctl(['add-br', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
if self.fail_mode:
self.set_fail_mode()
def delete(self):
'''Delete the bridge'''
rtc, _, err = self._vsctl(['del-br', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
def check(self):
'''Run check mode'''
changed = False
# pylint: disable=W0703
try:
if self.state == 'present' and self.exists():
if (self.fail_mode and
(self.fail_mode != self.get_fail_mode())):
changed = True
##
# Check if external ids would change.
current_external_ids = self.get_external_ids()
exp_external_ids = self.module.params['external_ids']
if exp_external_ids is not None:
                    for (key, value) in exp_external_ids.items():
if ((key in current_external_ids) and
(value != current_external_ids[key])):
changed = True
##
# Check if external ids would be removed.
for (key, value) in current_external_ids.items():
if key not in exp_external_ids:
changed = True
elif self.state == 'absent' and self.exists():
changed = True
elif self.state == 'present' and not self.exists():
changed = True
except Exception:
earg = get_exception()
self.module.fail_json(msg=str(earg))
# pylint: enable=W0703
self.module.exit_json(changed=changed)
def run(self):
'''Make the necessary changes'''
changed = False
# pylint: disable=W0703
try:
if self.state == 'absent':
if self.exists():
self.delete()
changed = True
elif self.state == 'present':
if not self.exists():
self.add()
changed = True
current_fail_mode = self.get_fail_mode()
if self.fail_mode and (self.fail_mode != current_fail_mode):
self.module.log( "changing fail mode %s to %s" % (current_fail_mode, self.fail_mode))
self.set_fail_mode()
changed = True
current_external_ids = self.get_external_ids()
##
# Change and add existing external ids.
exp_external_ids = self.module.params['external_ids']
if exp_external_ids is not None:
for (key, value) in exp_external_ids.items():
if ((value != current_external_ids.get(key, None)) and
self.set_external_id(key, value)):
changed = True
##
# Remove current external ids that are not passed in.
for (key, value) in current_external_ids.items():
if ((key not in exp_external_ids) and
self.set_external_id(key, None)):
changed = True
except Exception:
earg = get_exception()
self.module.fail_json(msg=str(earg))
# pylint: enable=W0703
self.module.exit_json(changed=changed)
def get_external_ids(self):
""" Return the bridge's external ids as a dict. """
results = {}
if self.exists():
rtc, out, err = self._vsctl(['br-get-external-id', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
lines = out.split("\n")
lines = [item.split("=") for item in lines if len(item) > 0]
for item in lines:
results[item[0]] = item[1]
return results
def set_external_id(self, key, value):
""" Set external id. """
if self.exists():
cmd = ['br-set-external-id', self.bridge, key]
if value:
cmd += [value]
(rtc, _, err) = self._vsctl(cmd)
if rtc != 0:
self.module.fail_json(msg=err)
return True
return False
def get_fail_mode(self):
""" Get failure mode. """
value = ''
if self.exists():
rtc, out, err = self._vsctl(['get-fail-mode', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
value = out.strip("\n")
return value
def set_fail_mode(self):
""" Set failure mode. """
if self.exists():
(rtc, _, err) = self._vsctl(['set-fail-mode', self.bridge,
self.fail_mode])
if rtc != 0:
self.module.fail_json(msg=err)
# pylint: disable=E0602
def main():
""" Entry point. """
module = AnsibleModule(
argument_spec={
'bridge': {'required': True},
'parent': {'default': None},
'vlan': {'default': None, 'type': 'int'},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'external_ids': {'default': None, 'type': 'dict'},
'fail_mode': {'default': None},
},
supports_check_mode=True,
)
bridge = OVSBridge(module)
if module.check_mode:
bridge.check()
else:
bridge.run()
# pylint: disable=W0614
# pylint: disable=W0401
# pylint: disable=W0622
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
|
dvliman/jaikuengine
|
refs/heads/master
|
.google_appengine/lib/django-1.4/django/contrib/markup/templatetags/markup.py
|
65
|
"""
Set of "markup" template filters for Django. These filters transform plain text
markup syntaxes to HTML; currently there is support for:
* Textile, which requires the PyTextile library available at
http://loopcore.com/python-textile/
* Markdown, which requires the Python-markdown library from
http://www.freewisdom.org/projects/python-markdown
* reStructuredText, which requires docutils from http://docutils.sf.net/
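
To use these filters, 'django.contrib.markup' must be in INSTALLED_APPS and the tag
library loaded in each template; for example (an illustrative sketch)::

    {% load markup %}
    {{ story.body|restructuredtext }}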
"""
import warnings
from django import template
from django.conf import settings
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True)
def textile(value):
try:
import textile
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'textile' filter: The Python textile library isn't installed.")
return force_unicode(value)
else:
return mark_safe(force_unicode(textile.textile(smart_str(value), encoding='utf-8', output='utf-8')))
@register.filter(is_safe=True)
def markdown(value, arg=''):
"""
Runs Markdown over a given value, optionally using various
extensions python-markdown supports.
Syntax::
{{ value|markdown:"extension1_name,extension2_name..." }}
To enable safe mode, which strips raw HTML and only returns HTML
generated by actual Markdown syntax, pass "safe" as the first
extension in the list.
If the version of Markdown in use does not support extensions,
they will be silently ignored.
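
    For illustration only (the extension names below are examples, not requirements)::

        {{ object.body|markdown:"safe,tables" }}

    This would strip raw HTML via safe mode and enable a "tables" extension, if installed.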
"""
try:
import markdown
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'markdown' filter: The Python markdown library isn't installed.")
return force_unicode(value)
else:
# markdown.version was first added in 1.6b. The only version of markdown
# to fully support extensions before 1.6b was the shortlived 1.6a.
if hasattr(markdown, 'version'):
extensions = [e for e in arg.split(",") if e]
if len(extensions) > 0 and extensions[0] == "safe":
extensions = extensions[1:]
safe_mode = True
else:
safe_mode = False
python_markdown_deprecation = ("The use of Python-Markdown "
"< 2.1 in Django is deprecated; please update to the current version")
# Unicode support only in markdown v1.7 or above. Version_info
# exist only in markdown v1.6.2rc-2 or above.
markdown_vers = getattr(markdown, "version_info", None)
if markdown_vers < (1,7):
warnings.warn(python_markdown_deprecation, DeprecationWarning)
return mark_safe(force_unicode(markdown.markdown(smart_str(value), extensions, safe_mode=safe_mode)))
else:
if markdown_vers >= (2,1):
if safe_mode:
return mark_safe(markdown.markdown(force_unicode(value), extensions, safe_mode=safe_mode, enable_attributes=False))
else:
return mark_safe(markdown.markdown(force_unicode(value), extensions, safe_mode=safe_mode))
else:
warnings.warn(python_markdown_deprecation, DeprecationWarning)
return mark_safe(markdown.markdown(force_unicode(value), extensions, safe_mode=safe_mode))
else:
warnings.warn(python_markdown_deprecation, DeprecationWarning)
return mark_safe(force_unicode(markdown.markdown(smart_str(value))))
@register.filter(is_safe=True)
def restructuredtext(value):
try:
from docutils.core import publish_parts
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'restructuredtext' filter: The Python docutils library isn't installed.")
return force_unicode(value)
else:
docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {})
parts = publish_parts(source=smart_str(value), writer_name="html4css1", settings_overrides=docutils_settings)
return mark_safe(force_unicode(parts["fragment"]))
|
robotichead/NearBeach
|
refs/heads/development-0.28.8
|
NearBeach/migrations/0009_auto_20210506_1925.py
|
1
|
# Generated by Django 3.1.2 on 2021-05-06 09:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('NearBeach', '0008_auto_20210424_2333'),
]
operations = [
migrations.AlterField(
model_name='project',
name='project_status',
field=models.CharField(choices=[('New', 'New'), ('Backlog', 'Backlog'), ('Blocked', 'Blocked'), ('In Progress', 'In Progress'), ('Test/Review', 'Test/Review'), ('Closed', 'Closed')], default='New', max_length=15),
),
migrations.AlterField(
model_name='task',
name='task_status',
field=models.CharField(choices=[('New', 'New'), ('Backlog', 'Backlog'), ('Blocked', 'Blocked'), ('In Progress', 'In Progress'), ('Test/Review', 'Test/Review'), ('Closed', 'Closed')], default='New', max_length=15),
),
]
|
elijah513/django
|
refs/heads/master
|
tests/get_earliest_or_latest/tests.py
|
326
|
from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, IndexErrorArticle, Person
class EarliestOrLatestTests(TestCase):
"""Tests for the earliest() and latest() objects methods"""
def tearDown(self):
"""Makes sure Article has a get_latest_by"""
if not Article._meta.get_latest_by:
Article._meta.get_latest_by = 'pub_date'
def test_earliest(self):
# Because no Articles exist yet, earliest() raises ArticleDoesNotExist.
self.assertRaises(Article.DoesNotExist, Article.objects.earliest)
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 8, 27)
)
Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the earliest Article.
self.assertEqual(Article.objects.earliest(), a1)
# Get the earliest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(),
a2
)
# Pass a custom field name to earliest() to change the field that's used
# to determine the earliest object.
self.assertEqual(Article.objects.earliest('expire_date'), a2)
self.assertEqual(Article.objects.filter(
pub_date__gt=datetime(2005, 7, 26)).earliest('expire_date'), a2)
# Ensure that earliest() overrides any other ordering specified on the
# query. Refs #11283.
self.assertEqual(Article.objects.order_by('id').earliest(), a1)
# Ensure that error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model",
lambda: Article.objects.earliest(),
)
def test_latest(self):
# Because no Articles exist yet, latest() raises ArticleDoesNotExist.
self.assertRaises(Article.DoesNotExist, Article.objects.latest)
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 8, 27)
)
a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the latest Article.
self.assertEqual(Article.objects.latest(), a4)
# Get the latest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__lt=datetime(2005, 7, 27)).latest(),
a1
)
# Pass a custom field name to latest() to change the field that's used
# to determine the latest object.
self.assertEqual(Article.objects.latest('expire_date'), a1)
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).latest('expire_date'),
a3,
)
# Ensure that latest() overrides any other ordering specified on the query. Refs #11283.
self.assertEqual(Article.objects.order_by('id').latest(), a4)
# Ensure that error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model",
lambda: Article.objects.latest(),
)
def test_latest_manual(self):
# You can still use latest() with a model that doesn't have
# "get_latest_by" set -- just pass in the field name manually.
Person.objects.create(name="Ralph", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Stephanie", birthday=datetime(1960, 2, 3))
self.assertRaises(AssertionError, Person.objects.latest)
self.assertEqual(Person.objects.latest("birthday"), p2)
class TestFirstLast(TestCase):
def test_first(self):
p1 = Person.objects.create(name="Bob", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Alice", birthday=datetime(1961, 2, 3))
self.assertEqual(
Person.objects.first(), p1)
self.assertEqual(
Person.objects.order_by('name').first(), p2)
self.assertEqual(
Person.objects.filter(birthday__lte=datetime(1955, 1, 1)).first(),
p1)
self.assertIs(
Person.objects.filter(birthday__lte=datetime(1940, 1, 1)).first(),
None)
def test_last(self):
p1 = Person.objects.create(
name="Alice", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(
name="Bob", birthday=datetime(1960, 2, 3))
# Note: by default PK ordering.
self.assertEqual(
Person.objects.last(), p2)
self.assertEqual(
Person.objects.order_by('-name').last(), p1)
self.assertEqual(
Person.objects.filter(birthday__lte=datetime(1955, 1, 1)).last(),
p1)
self.assertIs(
Person.objects.filter(birthday__lte=datetime(1940, 1, 1)).last(),
None)
def test_index_error_not_suppressed(self):
"""
#23555 -- Unexpected IndexError exceptions in QuerySet iteration
shouldn't be suppressed.
"""
def check():
# We know that we've broken the __iter__ method, so the queryset
# should always raise an exception.
self.assertRaises(IndexError, lambda: IndexErrorArticle.objects.all()[0])
self.assertRaises(IndexError, IndexErrorArticle.objects.all().first)
self.assertRaises(IndexError, IndexErrorArticle.objects.all().last)
check()
# And it does not matter if there are any records in the DB.
IndexErrorArticle.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
check()
|
pigeonflight/strider-plone
|
refs/heads/master
|
docker/appengine/google/appengine/api/channel/__init__.py
|
27
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Channel API module."""
from channel import *
|
nin042/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py
|
122
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
class CloseBugForLandDiffTest(unittest.TestCase):
def test_empty_state(self):
capture = OutputCapture()
step = CloseBugForLandDiff(MockTool(), MockOptions())
expected_logs = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n"
capture.assert_outputs(self, step.run, [{"commit_text": "Mock commit text"}], expected_logs=expected_logs)
|
sangwook236/SWL
|
refs/heads/master
|
python/test/machine_learning/tensorflow/run_mnist_cnn.py
|
2
|
#!/usr/bin/env python
# Path to libcudnn.so.
#export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
#--------------------
import os, sys
if 'posix' == os.name:
swl_python_home_dir_path = '/home/sangwook/work/SWL_github/python'
lib_home_dir_path = '/home/sangwook/lib_repo/python'
else:
swl_python_home_dir_path = 'D:/work/SWL_github/python'
lib_home_dir_path = 'D:/lib_repo/python'
#lib_home_dir_path = 'D:/lib_repo/python/rnd'
#sys.path.append('../../../src')
sys.path.append(os.path.join(swl_python_home_dir_path, 'src'))
sys.path.append(os.path.join(lib_home_dir_path, 'tflearn_github'))
sys.path.append(os.path.join(lib_home_dir_path, 'tf_cnnvis_github'))
#os.chdir(os.path.join(swl_python_home_dir_path, 'test/machine_learning/tensorflow'))
#--------------------
import time, datetime
import numpy as np
import tensorflow as tf
#import imgaug as ia
from imgaug import augmenters as iaa
from swl.machine_learning.tensorflow.simple_neural_net_trainer import SimpleNeuralNetTrainer
from swl.machine_learning.tensorflow.neural_net_evaluator import NeuralNetEvaluator
from swl.machine_learning.tensorflow.neural_net_inferrer import NeuralNetInferrer
import swl.util.util as swl_util
import swl.machine_learning.util as swl_ml_util
import swl.machine_learning.tensorflow.util as swl_tf_util
from mnist_cnn_tf import MnistCnnUsingTF
#from mnist_cnn_tf_slim import MnistCnnUsingTfSlim
#from mnist_cnn_keras import MnistCnnUsingKeras
#from mnist_cnn_tflearn import MnistCnnUsingTfLearn
#from keras import backend as K
#--------------------------------------------------------------------
def create_mnist_cnn(input_shape, output_shape):
model_type = 0 # {0, 1}.
return MnistCnnUsingTF(input_shape, output_shape, model_type)
#return MnistCnnUsingTfSlim(input_shape, output_shape)
#return MnistCnnUsingTfLearn(input_shape, output_shape)
#return MnistCnnUsingKeras(input_shape, output_shape, model_type)
#--------------------------------------------------------------------
def create_imgaug_augmenter():
return iaa.Sequential([
iaa.SomeOf(1, [
#iaa.Sometimes(0.5, iaa.Crop(px=(0, 100))), # Crop images from each side by 0 to 16px (randomly chosen).
iaa.Sometimes(0.5, iaa.Crop(percent=(0, 0.1))), # Crop images by 0-10% of their height/width.
iaa.Fliplr(0.1), # Horizontally flip 10% of the images.
iaa.Flipud(0.1), # Vertically flip 10% of the images.
iaa.Sometimes(0.5, iaa.Affine(
scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)}, # Scale images to 80-120% of their size, individually per axis.
translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)}, # Translate by -20 to +20 percent (per axis).
rotate=(-45, 45), # Rotate by -45 to +45 degrees.
shear=(-16, 16), # Shear by -16 to +16 degrees.
#order=[0, 1], # Use nearest neighbour or bilinear interpolation (fast).
order=0, # Use nearest neighbour or bilinear interpolation (fast).
#cval=(0, 255), # If mode is constant, use a cval between 0 and 255.
#mode=ia.ALL # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
#mode='edge' # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
)),
iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 3.0))) # Blur images with a sigma of 0 to 3.0.
]),
#iaa.Scale(size={'height': image_height, 'width': image_width}) # Resize.
])
class ImgaugAugmenter(object):
def __init__(self):
self._augmenter = create_imgaug_augmenter()
def __call__(self, inputs, outputs, is_output_augmented=False):
# Augments here.
if is_output_augmented:
augmenter_det = self._augmenter.to_deterministic() # Call this for each batch again, NOT only once at the start.
return augmenter_det.augment_images(inputs), augmenter_det.augment_images(outputs)
else:
return self._augmenter.augment_images(inputs), outputs
def preprocess_data(data, labels, num_classes, axis=0):
if data is not None:
# Preprocessing (normalization, standardization, etc.).
#data = data.astype(np.float32)
#data /= 255.0
#data = (data - np.mean(data, axis=axis)) / np.std(data, axis=axis)
#data = np.reshape(data, data.shape + (1,))
pass
if labels is not None:
# One-hot encoding (num_examples, height, width) -> (num_examples, height, width, num_classes).
#labels = swl_ml_util.to_one_hot_encoding(labels, num_classes).astype(np.uint8)
pass
return data, labels
def load_data(image_shape):
# Pixel value: [0, 255].
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images = train_images / 255.0
train_images = np.reshape(train_images, (-1,) + image_shape)
train_labels = tf.keras.utils.to_categorical(train_labels).astype(np.uint8)
test_images = test_images / 255.0
test_images = np.reshape(test_images, (-1,) + image_shape)
test_labels = tf.keras.utils.to_categorical(test_labels).astype(np.uint8)
# Pre-process.
#train_images, train_labels = preprocess_data(train_images, train_labels, num_classes)
#test_images, test_labels = preprocess_data(test_images, test_labels, num_classes)
return train_images, train_labels, test_images, test_labels
#--------------------------------------------------------------------
def main():
#np.random.seed(7)
#--------------------
# Sets parameters.
does_need_training = True
does_resume_training = False
output_dir_prefix = 'mnist_cnn'
output_dir_suffix = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
#output_dir_suffix = '20180302T155710'
initial_epoch = 0
num_classes = 10
input_shape = (None, 28, 28, 1) # 784 = 28 * 28.
output_shape = (None, num_classes)
batch_size = 128 # Number of samples per gradient update.
num_epochs = 30 # Number of times to iterate over training data.
shuffle = True
augmenter = ImgaugAugmenter()
#augmenter = create_imgaug_augmenter() # If imgaug augmenter is used, data are augmented in background augmentation processes. (faster)
is_output_augmented = False
sess_config = tf.ConfigProto()
#sess_config = tf.ConfigProto(device_count={'GPU': 2, 'CPU': 1}) # os.environ['CUDA_VISIBLE_DEVICES'] = 0,1.
sess_config.allow_soft_placement = True
sess_config.log_device_placement = True
#sess_config.operation_timeout_in_ms = 50000
sess_config.gpu_options.allow_growth = True
#sess_config.gpu_options.per_process_gpu_memory_fraction = 0.4 # Only allocate 40% of the total memory of each GPU.
train_device_name = '/device:GPU:1'
eval_device_name = '/device:GPU:1'
# NOTE [info] >> Cannot assign a device for operation save/SaveV2: Could not satisfy explicit device specification '/device:GPU:1' because no supported kernel for GPU devices is available.
# Errors occur in tf_cnnvis library when a GPU is assigned.
#infer_device_name = '/device:GPU:1'
infer_device_name = '/device:CPU:0'
#--------------------
# Prepares directories.
output_dir_path = os.path.join('.', '{}_{}'.format(output_dir_prefix, output_dir_suffix))
checkpoint_dir_path = os.path.join(output_dir_path, 'tf_checkpoint')
inference_dir_path = os.path.join(output_dir_path, 'inference')
train_summary_dir_path = os.path.join(output_dir_path, 'train_log')
val_summary_dir_path = os.path.join(output_dir_path, 'val_log')
swl_util.make_dir(checkpoint_dir_path)
swl_util.make_dir(inference_dir_path)
swl_util.make_dir(train_summary_dir_path)
swl_util.make_dir(val_summary_dir_path)
#--------------------
# Prepares data.
train_images, train_labels, test_images, test_labels = load_data(input_shape[1:])
#--------------------
# Creates models, sessions, and graphs.
# Creates graphs.
if does_need_training:
train_graph = tf.Graph()
eval_graph = tf.Graph()
infer_graph = tf.Graph()
if does_need_training:
with train_graph.as_default():
with tf.device(train_device_name):
#K.set_learning_phase(1) # Sets the learning phase to 'train'. (Required)
# Creates a model.
modelForTraining = create_mnist_cnn(input_shape, output_shape)
modelForTraining.create_training_model()
# Creates a trainer.
nnTrainer = SimpleNeuralNetTrainer(modelForTraining, initial_epoch, augmenter, is_output_augmented)
# Creates a saver.
# Saves a model every 2 hours and maximum 5 latest models are saved.
train_saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
initializer = tf.global_variables_initializer()
with eval_graph.as_default():
with tf.device(eval_device_name):
#K.set_learning_phase(0) # Sets the learning phase to 'test'. (Required)
# Creates a model.
modelForEvaluation = create_mnist_cnn(input_shape, output_shape)
modelForEvaluation.create_evaluation_model()
# Creates an evaluator.
nnEvaluator = NeuralNetEvaluator(modelForEvaluation)
# Creates a saver.
eval_saver = tf.train.Saver()
with infer_graph.as_default():
with tf.device(infer_device_name):
#K.set_learning_phase(0) # Sets the learning phase to 'test'. (Required)
# Creates a model.
modelForInference = create_mnist_cnn(input_shape, output_shape)
modelForInference.create_inference_model()
# Creates an inferrer.
nnInferrer = NeuralNetInferrer(modelForInference)
# Creates a saver.
infer_saver = tf.train.Saver()
# Creates sessions.
if does_need_training:
train_session = tf.Session(graph=train_graph, config=sess_config)
eval_session = tf.Session(graph=eval_graph, config=sess_config)
infer_session = tf.Session(graph=infer_graph, config=sess_config)
# Initializes.
if does_need_training:
train_session.run(initializer)
#--------------------------------------------------------------------
# Trains and evaluates.
if does_need_training:
start_time = time.time()
with train_session.as_default() as sess:
with sess.graph.as_default():
#K.set_session(sess)
#K.set_learning_phase(1) # Sets the learning phase to 'train'.
swl_tf_util.train_neural_net(sess, nnTrainer, train_images, train_labels, test_images, test_labels, batch_size, num_epochs, shuffle, does_resume_training, train_saver, output_dir_path, checkpoint_dir_path, train_summary_dir_path, val_summary_dir_path)
print('\tTotal training time = {}'.format(time.time() - start_time))
start_time = time.time()
with eval_session.as_default() as sess:
with sess.graph.as_default():
#K.set_session(sess)
#K.set_learning_phase(0) # Sets the learning phase to 'test'.
swl_tf_util.evaluate_neural_net(sess, nnEvaluator, test_images, test_labels, batch_size, eval_saver, checkpoint_dir_path)
print('\tTotal evaluation time = {}'.format(time.time() - start_time))
#--------------------------------------------------------------------
# Infers.
start_time = time.time()
with infer_session.as_default() as sess:
with sess.graph.as_default():
#K.set_session(sess)
#K.set_learning_phase(0) # Sets the learning phase to 'test'.
inferences = swl_tf_util.infer_by_neural_net(sess, nnInferrer, test_images, batch_size, infer_saver, checkpoint_dir_path)
print('\tTotal inference time = {}'.format(time.time() - start_time))
if inferences is not None:
if num_classes >= 2:
inferences = np.argmax(inferences, -1)
groundtruths = np.argmax(test_labels, -1)
else:
inferences = np.around(inferences)
groundtruths = test_labels
correct_estimation_count = np.count_nonzero(np.equal(inferences, groundtruths))
		print('\tAccuracy = {} / {} = {}'.format(correct_estimation_count, groundtruths.size, correct_estimation_count / groundtruths.size))
else:
print('[SWL] Warning: Invalid inference results.')
#--------------------------------------------------------------------
# Visualizes.
if True:
with infer_session.as_default() as sess:
with sess.graph.as_default():
#K.set_session(sess)
#K.set_learning_phase(0) # Sets the learning phase to 'test'.
#--------------------
idx = 0
#vis_images = train_images[idx:(idx+1)] # Recommends using a single image.
vis_images = test_images[idx:(idx+1)] # Recommends using a single image.
feed_dict = modelForInference.get_feed_dict(vis_images, is_training=False)
input_tensor = None
#input_tensor = modelForInference.input_tensor
print('[SWL] Info: Start visualizing activation...')
start = time.time()
is_succeeded = swl_ml_util.visualize_activation(sess, input_tensor, feed_dict, output_dir_path)
print('\tVisualization time = {}, succeeded? = {}'.format(time.time() - start, 'yes' if is_succeeded else 'no'))
print('[SWL] Info: End visualizing activation...')
print('[SWL] Info: Start visualizing by deconvolution...')
start = time.time()
is_succeeded = swl_ml_util.visualize_by_deconvolution(sess, input_tensor, feed_dict, output_dir_path)
print('\tVisualization time = {}, succeeded? = {}'.format(time.time() - start, 'yes' if is_succeeded else 'no'))
print('[SWL] Info: End visualizing by deconvolution...')
#import matplotlib.pyplot as plt
#plt.imsave(output_dir_path + '/vis.png', np.around(vis_images[0].reshape(vis_images[0].shape[:2]) * 255), cmap='gray')
#--------------------
#vis_images = train_images[0:10]
#vis_labels = train_labels[0:10]
vis_images = test_images[0:100]
vis_labels = test_labels[0:100]
print('[SWL] Info: Start visualizing by partial occlusion...')
start_time = time.time()
grid_counts = (28, 28) # (grid count in height, grid count in width).
grid_size = (4, 4) # (grid height, grid width).
occlusion_color = 0 # Black.
occluded_probilities = swl_ml_util.visualize_by_partial_occlusion(sess, nnInferrer, vis_images, vis_labels, grid_counts, grid_size, occlusion_color, num_classes, batch_size, infer_saver, checkpoint_dir_path)
print('\tVisualization time = {}'.format(time.time() - start_time))
print('[SWL] Info: End visualizing by partial occlusion...')
if occluded_probilities is not None:
import matplotlib.pyplot as plt
for (idx, prob) in enumerate(occluded_probilities):
#plt.figure()
#plt.imshow(1 - prob.reshape(prob.shape[:2]), cmap='gray')
#plt.figure()
#plt.imshow(vis_images[idx].reshape(vis_images[idx].shape[:2]), cmap='gray')
plt.imsave((output_dir_path + '/occluded_prob_{}.png').format(idx), np.around((1 - prob.reshape(prob.shape[:2])) * 255), cmap='gray')
plt.imsave((output_dir_path + '/vis_{}.png').format(idx), np.around(vis_images[idx].reshape(vis_images[idx].shape[:2]) * 255), cmap='gray')
#--------------------
# Closes sessions.
if does_need_training:
train_session.close()
del train_session
eval_session.close()
del eval_session
infer_session.close()
del infer_session
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
|
wisechengyi/pants
|
refs/heads/master
|
src/python/pants/__main__.py
|
6
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.bin import pants_loader
if __name__ == "__main__":
pants_loader.main()
|
Mistobaan/tensorflow
|
refs/heads/master
|
tensorflow/tools/git/gen_git_source.py
|
9
|
#!/usr/bin/env python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in the open-source build.
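
Illustrative invocations (inferred from the argparse flags defined at the bottom of
this file; the paths shown are examples only):

  gen_git_source.py --configure /path/to/tensorflow --gen_root_path bazel-genfiles/git
  gen_git_source.py --generate spec.json head-file ref-file version_info.cc
  gen_git_source.py --raw_generate version_info.cc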
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import shutil
def parse_branch_ref(filename):
"""Given a filename of a .git/HEAD file return ref path.
In particular, if git is in detached head state, this will
return None. If git is in attached head, it will return
the branch reference. E.g. if on 'master', the HEAD will
contain 'ref: refs/heads/master' so 'refs/heads/master'
will be returned.
Example: parse_branch_ref(".git/HEAD")
Args:
filename: file to treat as a git HEAD file
Returns:
None if detached head, otherwise ref subpath
Raises:
RuntimeError: if the HEAD file is unparseable.
"""
data = open(filename).read().strip()
items = data.split(" ")
if len(items) == 1:
return None
elif len(items) == 2 and items[0] == "ref:":
return items[1].strip()
else:
raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
"""Configure `src_base_path` to embed git hashes if available."""
# TODO(aselle): No files generated or symlinked here are deleted by
# the build system. I don't know of a way to do it in bazel. It
# should only be a problem if somebody moves a sandbox directory
# without running ./configure again.
git_path = os.path.join(src_base_path, ".git")
# Remove and recreate the path
if os.path.exists(gen_path):
if os.path.isdir(gen_path):
try:
shutil.rmtree(gen_path)
except OSError:
raise RuntimeError("Cannot delete directory %s due to permission "
"error, inspect and remove manually" % gen_path)
else:
      raise RuntimeError("Cannot delete non-directory %s, inspect "
                         "and remove manually" % gen_path)
os.makedirs(gen_path)
if not os.path.isdir(gen_path):
raise RuntimeError("gen_git_source.py: Failed to create dir")
# file that specifies what the state of the git repo is
spec = {}
# value file names will be mapped to the keys
link_map = {"head": None, "branch_ref": None}
if not os.path.isdir(git_path):
# No git directory
spec["git"] = False
open(os.path.join(gen_path, "head"), "w").write("")
open(os.path.join(gen_path, "branch_ref"), "w").write("")
else:
# Git directory, possibly detached or attached
spec["git"] = True
spec["path"] = src_base_path
git_head_path = os.path.join(git_path, "HEAD")
spec["branch"] = parse_branch_ref(git_head_path)
link_map["head"] = git_head_path
if spec["branch"] is not None:
# attached method
link_map["branch_ref"] = os.path.join(git_path, *
os.path.split(spec["branch"]))
# Create symlinks or dummy files
for target, src in link_map.items():
if src is None:
open(os.path.join(gen_path, target), "w").write("")
else:
try:
# In python 3.5, symlink function exists even on Windows. But requires
# Windows Admin privileges, otherwise an OSError will be thrown.
if hasattr(os, 'symlink'):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
except OSError:
shutil.copy2(src, os.path.join(gen_path, target))
json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
if debug:
print("gen_git_source.py: list %s" % gen_path)
print("gen_git_source.py: %s" + repr(os.listdir(gen_path)))
print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path):
"""Get the git version from the repository.
This function runs `git describe ...` in the path given as `git_base_path`.
This will return a string of the form:
<base-tag>-<number of commits since tag>-<shortened sha hash>
For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when
compiled. 1585 commits are after that commit tag, and we can get back to this
version by running `git checkout gbb717a6`.
Args:
git_base_path: where the .git directory is located
Returns:
A bytestring representing the git version
"""
unknown_label = b"unknown"
try:
val = bytes(subprocess.check_output([
"git", str("--git-dir=%s/.git" % git_base_path),
str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
]).strip())
return val if val else unknown_label
except subprocess.CalledProcessError:
return unknown_label
def write_version_info(filename, git_version):
"""Write a c file that defines the version functions.
Args:
filename: filename to write to.
git_version: the result of a git describe.
"""
if b"\"" in git_version or b"\\" in git_version:
git_version = "git_version_is_invalid" # do not cause build to fail!
contents = """/* Generated by gen_git_source.py */
#include <string>
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {return __VERSION__;}
const int tf_cxx11_abi_flag() {
#ifdef _GLIBCXX_USE_CXX11_ABI
return _GLIBCXX_USE_CXX11_ABI;
#else
return 0;
#endif
}
const int tf_monolithic_build() {
#ifdef TENSORFLOW_MONOLITHIC_BUILD
return 1;
#else
return 0;
#endif
}
""" % git_version
open(filename, "w").write(contents)
def generate(arglist):
"""Generate version_info.cc as given `destination_file`.
Args:
arglist: should be a sequence that contains
spec, head_symlink, ref_symlink, destination_file.
`destination_file` is the filename where version_info.cc will be written
`spec` is a filename where the file contains a JSON dictionary
'git' bool that is true if the source is in a git repo
'path' base path of the source code
'branch' the name of the ref specification of the current branch/tag
`head_symlink` is a filename to HEAD that is cross-referenced against
what is contained in the json branch designation.
`ref_symlink` is unused in this script but passed, because the build
system uses that file to detect when commits happen.
Raises:
RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
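
  Example (illustrative values only):
    generate(["spec.json", ".git/HEAD", ".git/refs/heads/master", "version_info.cc"])
  where spec.json is the file written by configure() above.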
"""
# unused ref_symlink arg
spec, head_symlink, _, dest_file = arglist
data = json.load(open(spec))
git_version = None
if not data["git"]:
git_version = b"unknown"
else:
old_branch = data["branch"]
new_branch = parse_branch_ref(head_symlink)
if new_branch != old_branch:
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
git_version = get_git_version(data["path"])
write_version_info(dest_file, git_version)
def raw_generate(output_file):
"""Simple generator used for cmake/make build systems.
This does not create any symlinks. It requires the build system
to build unconditionally.
Args:
output_file: Output filename for the version info cc
"""
git_version = get_git_version(".")
write_version_info(output_file, git_version)
parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")
parser.add_argument(
"--debug",
type=bool,
help="print debugging information about paths",
default=False)
parser.add_argument(
"--configure", type=str,
help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
"--gen_root_path", type=str,
help="Root path to place generated git files (created by --configure).")
parser.add_argument(
"--generate",
type=str,
help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
nargs="+")
parser.add_argument(
"--raw_generate",
type=str,
help="Generate version_info.cc (simpler version used for cmake/make)")
args = parser.parse_args()
if args.configure is not None:
if args.gen_root_path is None:
raise RuntimeError("Must pass --gen_root_path arg when running --configure")
configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
generate(args.generate)
elif args.raw_generate is not None:
raw_generate(args.raw_generate)
else:
raise RuntimeError("--configure or --generate or --raw_generate "
"must be used")
|
mineo/python-musicbrainz2
|
refs/heads/master
|
examples/getartist.py
|
1
|
#! /usr/bin/env python
#
# Retrieve an artist by ID and display all official albums.
#
# Usage:
# python getartist.py artist-id
#
# $Id$
#
import sys
import logging
import musicbrainz2.webservice as ws
import musicbrainz2.model as m
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if len(sys.argv) < 2:
print "Usage: getartist.py artist-id"
sys.exit(1)
q = ws.Query()
try:
# The result should include all official albums.
#
inc = ws.ArtistIncludes(
releases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM),
tags=True, releaseGroups=True)
artist = q.getArtistById(sys.argv[1], inc)
except ws.WebServiceError, e:
print 'Error:', e
sys.exit(1)
print "Id :", artist.id
print "Name :", artist.name
print "SortName :", artist.sortName
print "UniqueName :", artist.getUniqueName()
print "Type :", artist.type
print "BeginDate :", artist.beginDate
print "EndDate :", artist.endDate
print "Tags :", ', '.join([t.value for t in artist.tags])
print
if len(artist.getReleases()) == 0:
print "No releases found."
else:
print "Releases:"
for release in artist.getReleases():
print
print "Id :", release.id
print "Title :", release.title
print "ASIN :", release.asin
print "Text :", release.textLanguage, '/', release.textScript
print "Types :", release.types
print
if len(artist.getReleaseGroups()) == 0:
print
print "No release groups found."
else:
print
print "Release groups:"
for rg in artist.getReleaseGroups():
print
print "Id :", rg.id
print "Title :", rg.title
print "Type :", rg.type
print
#
# Using the release IDs and Query.getReleaseById(), you could now request
# those releases, including the tracks, release events, the associated
# DiscIDs, and more. The 'getrelease.py' example shows how this works.
#
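# For instance (an illustrative sketch only -- the exact include flags may differ
# between library versions; getrelease.py shows the canonical version):
#
#   inc = ws.ReleaseIncludes(tracks=True)
#   release = q.getReleaseById(artist.getReleases()[0].id, inc)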
# EOF
|
bcornwellmott/erpnext
|
refs/heads/develop
|
erpnext/docs/assets/img/setup/print/__init__.py
|
12133432
| |
dbaxa/django
|
refs/heads/master
|
tests/contenttypes_tests/__init__.py
|
12133432
| |
ferdianjovan/simple_follow
|
refs/heads/master
|
src/simple_follow/__init__.py
|
12133432
| |
emakis/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/offer_term/__init__.py
|
12133432
| |
abhikumar22/MYBLOG
|
refs/heads/master
|
blg/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py
|
450
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
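    # Typical wrapping pattern (a sketch of how colorama's init() uses this class,
    # not an additional public API):
    #
    #     wrapper = AnsiToWin32(sys.stdout)
    #     if wrapper.should_wrap():
    #         sys.stdout = wrapper.stream   # writes are stripped/converted as configured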
ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
self.strip = strip
        # should we convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
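        # Worked examples of the parsing above (derived from this method, illustrative only):
        #   extract_params('H', '5;10') -> (5, 10)   # cursor to row 5, column 10
        #   extract_params('m', '')     -> (0,)      # SGR with no params defaults to 0 (reset)
        #   extract_params('A', '')     -> (1,)      # cursor movement defaults to 1 step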
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
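                # e.g. (illustrative) the OSC sequence '\033]2;My Title\x07' reaches this
                # point with params == ['2', 'My Title'] and sets the console title below.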
if params[0] in '02':
winterm.set_title(params[1])
return text
|
bpyoung92/apprtc
|
refs/heads/master
|
build/build_app_engine_package.py
|
1
|
#!/usr/bin/python
"""Build App Engine source package.
"""
import json
import optparse
import os
import shutil
import subprocess
import sys
import requests
import test_file_herder
USAGE = """%prog src_path dest_path
Build the GAE source code package.
src_path Path to the source code root directory.
dest_path Path to the root directory to push/deploy GAE from."""
def call_cmd_and_return_output_lines(cmd):
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = process.communicate()[0]
return output.split('\n')
except OSError as e:
print str(e)
return []
def build_version_info_file(dest_path):
"""Build the version info JSON file."""
version_info = {
'gitHash': None,
'time': None,
'branch': None
}
lines = call_cmd_and_return_output_lines(['git', 'log', '-1'])
for line in lines:
if line.startswith('commit'):
version_info['gitHash'] = line.partition(' ')[2].strip()
elif line.startswith('Date'):
version_info['time'] = line.partition(':')[2].strip()
if version_info['gitHash'] is not None and version_info['time'] is not None:
break
lines = call_cmd_and_return_output_lines(['git', 'branch'])
for line in lines:
if line.startswith('*'):
version_info['branch'] = line.partition(' ')[2].strip()
break
try:
with open(dest_path, 'w') as f:
f.write(json.dumps(version_info))
except IOError as e:
print str(e)
# Download callstats, copy dependencies from node_modules for serving as
# static content on GAE.
def downloadCallstats():
print 'Downloading and copying callstats dependencies...'
path = 'out/app_engine/third_party/callstats/'
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
urls = {
'callstats.min.js': 'https://api.callstats.io/static/callstats.min.js',
'sha.js': 'https://cdnjs.cloudflare.com/ajax/libs/jsSHA/1.5.0/sha.js'
}
for fileName in urls:
response = requests.get(urls[fileName])
if response.status_code == 200:
print 'Downloading %s to %s...' % (urls[fileName], path)
with open(path + fileName, 'w') as to_file:
to_file.write(response.text)
else:
      raise NameError('Could not download: ' + fileName + ' Error:' + \
str(response.status_code))
# Need to copy this from node_modules due to https://cdn.socket.io/ requires
# SNI extensions which is not supported in python 2.7.9 and we use 2.7.6.
deps = {'socket.io-client': 'socket.io.js'}
for dirpath, unused_dirnames, files in os.walk('node_modules'):
for subdir in deps:
for name in files:
if name.endswith(deps[subdir]):
print 'Copying %s' % deps[subdir]
shutil.copy(os.path.join(dirpath, name), path)
# Verify that files in |deps| has been copied else fail build.
for dirpath, unused_dirnames, files in os.walk(path):
for subdir in deps:
if os.path.isfile(os.path.join(path, deps[subdir])):
print 'File found %s' % deps[subdir]
else:
        raise NameError('Could not find: %s, please try "npm update/install."'
                        % os.path.join('node_modules', dirpath, deps[subdir]))
def CopyApprtcSource(src_path, dest_path):
if os.path.exists(dest_path):
shutil.rmtree(dest_path)
os.makedirs(dest_path)
simply_copy_subdirs = ['bigquery', 'css', 'images', 'third_party']
for dirpath, unused_dirnames, files in os.walk(src_path):
for subdir in simply_copy_subdirs:
if dirpath.endswith(subdir):
shutil.copytree(dirpath, os.path.join(dest_path, subdir))
if dirpath.endswith('html'):
dest_html_path = os.path.join(dest_path, 'html')
os.makedirs(dest_html_path)
for name in files:
# Template files must be in the root directory.
if name.endswith('_template.html'):
shutil.copy(os.path.join(dirpath, name), dest_path)
else:
shutil.copy(os.path.join(dirpath, name), dest_html_path)
elif dirpath.endswith('app_engine'):
for name in files:
if (name.endswith('.py') and 'test' not in name
or name.endswith('.yaml')):
shutil.copy(os.path.join(dirpath, name), dest_path)
elif dirpath.endswith('js'):
for name in files:
# loopback.js is not compiled by Closure and needs to be copied
# separately.
if name == 'loopback.js':
dest_js_path = os.path.join(dest_path, 'js')
os.makedirs(dest_js_path)
shutil.copy(os.path.join(dirpath, name), dest_js_path)
break
build_version_info_file(os.path.join(dest_path, 'version_info.json'))
def main():
parser = optparse.OptionParser(USAGE)
parser.add_option("-t", "--include-tests", action="store_true",
help='Also copy python tests to the out dir.')
options, args = parser.parse_args()
if len(args) != 2:
parser.error('Error: Exactly 2 arguments required.')
src_path, dest_path = args[0:2]
CopyApprtcSource(src_path, dest_path)
downloadCallstats()
if options.include_tests:
    app_engine_code = os.path.join(src_path, 'app_engine')
    test_file_herder.CopyTests(app_engine_code, dest_path)
if __name__ == '__main__':
sys.exit(main())
|
haylesr/angr
|
refs/heads/master
|
angr/analyses/ddg.py
|
1
|
import logging
from collections import defaultdict
import networkx
from simuvex import SimRegisterVariable, SimMemoryVariable
from simuvex import SimSolverModeError
from ..errors import AngrDDGError
from ..analysis import Analysis, register_analysis
from .code_location import CodeLocation
l = logging.getLogger("angr.analyses.ddg")
class NodeWrapper(object):
def __init__(self, cfg_node, call_depth):
self.cfg_node = cfg_node
self.call_depth = call_depth
class DDG(Analysis):
"""
    This is a fast data dependence graph generated directly from our CFG analysis result. The only reason for its
    existence is speed. There is zero guarantee of it being sound or accurate. You are supposed to use it only when
    you want to track the simplest data dependences, and you do not care about soundness or accuracy.
    For a better data dependence graph, please consider performing a better static analysis first (like Value-set
    Analysis), and then construct a dependence graph on top of the analysis result (for example, the VFG in angr).
Also note that since we are using states from CFG, any improvement in analysis performed on CFG (like a points-to
analysis) will directly benefit the DDG.
"""
def __init__(self, cfg, start=None, keep_data=False, call_depth=None):
"""
:param cfg: Control flow graph. Please make sure each node has an associated `state` with it. You may
want to generate your CFG with `keep_state=True`.
        :param start: An address. Specifies where we start the generation of this data dependence graph.
:param call_depth: None or integers. A non-negative integer specifies how deep we would like to track in the
call tree. None disables call_depth limit.
"""
# Sanity check
if not cfg._keep_state:
raise AngrDDGError('CFG must have "keep_state" set to True.')
self._cfg = cfg
self._start = self.project.entry if start is None else start
self._call_depth = call_depth
self._graph = networkx.DiGraph()
self._symbolic_mem_ops = set()
self.keep_data = keep_data
# Data dependency graph per function
self._function_data_dependencies = None
# Begin construction!
self._construct()
#
# Properties
#
@property
def graph(self):
"""
:returns: A networkx DiGraph instance representing the data dependence graph.
"""
return self._graph
#
# Public methods
#
def pp(self):
"""
Pretty printing.
"""
# TODO: make it prettier
        for src, dst in self.graph.edges():
            print "%s <- %s" % (dst, src)
def dbg_repr(self):
"""
Representation for debugging.
"""
# TODO:
return str(self._graph)
def __contains__(self, code_location):
"""
Returns whether `code_location` is in the graph.
:param code_location: A CodeLocation instance.
:returns: True/False
"""
return code_location in self.graph
def get_predecessors(self, code_location):
"""
Returns all predecessors of the code location.
:param code_location: A CodeLocation instance.
:returns: A list of all predecessors.
"""
return self._graph.predecessors(code_location)
def function_dependency_graph(self, func):
"""
Get a dependency graph for the function `func`.
:param func: The Function object in CFG.function_manager.
:returns: A networkx.DiGraph instance.
"""
if self._function_data_dependencies is None:
self._build_function_dependency_graphs()
if func in self._function_data_dependencies:
return self._function_data_dependencies[func]
# Not found
return None
#
# Private methods
#
def _construct(self):
"""
Construct the data dependence graph.
We track the following types of dependence:
- (Intra-IRSB) temporary variable dependencies
- Register dependencies
- Memory dependencies, although it's very limited. See below.
We track the following types of memory access:
- (Intra-functional) Stack read/write.
Trace changes of stack pointers inside a function, and the dereferences of stack pointers.
- (Inter-functional) Stack read/write.
- (Global) Static memory positions.
Keep a map of all accessible memory positions to their source statements per function. After that, we
traverse the CFG and link each pair of reads/writes together in the order of control-flow.
We do not track the following types of memory access
- Symbolic memory access
            Well, they cannot be tracked under fastpath mode (which is the mode we use to generate the CFG) anyway.
"""
# TODO: Here we are assuming that there is only one node whose address is the entry point. Otherwise it should
# TODO: be fixed.
initial_node = self._cfg.get_any_node(self._start)
# Initialize the worklist
nw = NodeWrapper(initial_node, 0)
worklist = [ ]
worklist_set = set()
self._worklist_append(nw, worklist, worklist_set)
# A dict storing defs set
# variable -> locations
live_defs_per_node = {}
while worklist:
# Pop out a node
node_wrapper = worklist[0]
node, call_depth = node_wrapper.cfg_node, node_wrapper.call_depth
worklist = worklist[ 1 : ]
worklist_set.remove(node)
            # Grab all final states. There is usually more than one (one state for each successor), and we have to
            # process all of them.
final_states = node.final_states
if node in live_defs_per_node:
live_defs = live_defs_per_node[node]
else:
live_defs = {}
live_defs_per_node[node] = live_defs
successing_nodes = self._cfg.graph.successors(node)
for state in final_states:
if state.scratch.jumpkind == 'Ijk_FakeRet' and len(final_states) > 1:
# Skip fakerets if there are other control flow transitions available
continue
new_call_depth = call_depth
if state.scratch.jumpkind == 'Ijk_Call':
new_call_depth += 1
elif state.scratch.jumpkind == 'Ijk_Ret':
new_call_depth -= 1
if self._call_depth is not None and call_depth > self._call_depth:
l.debug('Do not trace into %s due to the call depth limit', state.ip)
continue
new_defs = self._track(state, live_defs)
# TODO: Match the jumpkind
# TODO: Support cases where IP is undecidable
corresponding_successors = [n for n in successing_nodes if
not state.ip.symbolic and n.addr == state.se.any_int(state.ip)]
if not corresponding_successors:
continue
successing_node = corresponding_successors[0]
if successing_node in live_defs_per_node:
defs_for_next_node = live_defs_per_node[successing_node]
else:
defs_for_next_node = {}
live_defs_per_node[successing_node] = defs_for_next_node
changed = False
for var, code_loc_set in new_defs.iteritems():
if var not in defs_for_next_node:
l.debug('%s New var %s', state.ip, var)
defs_for_next_node[var] = code_loc_set
changed = True
else:
for code_loc in code_loc_set:
if code_loc not in defs_for_next_node[var]:
l.debug('%s New code location %s', state.ip, code_loc)
defs_for_next_node[var].add(code_loc)
changed = True
if changed:
if (self._call_depth is None) or \
(self._call_depth is not None and new_call_depth >= 0 and new_call_depth <= self._call_depth):
# Put all reachable successors back to our worklist again
nw = NodeWrapper(successing_node, new_call_depth)
self._worklist_append(nw, worklist, worklist_set)
def _track(self, state, live_defs):
"""
Given all live definitions prior to this program point, track the changes, and return a new list of live
definitions. We scan through the action list of the new state to track the changes.
:param state: The input state at that program point.
:param live_defs: A list of all live definitions prior to reaching this program point.
:returns: A list of new live definitions.
"""
# Make a copy of live_defs
live_defs = live_defs.copy()
action_list = list(state.log.actions)
# Since all temporary variables are local, we simply track them in a local dict
temps = {}
# All dependence edges are added to the graph either at the end of this method, or when they are going to be
# overwritten by a new edge. This is because we sometimes have to modify a previous edge (e.g. add new labels
# to the edge)
temps_to_edges = defaultdict(list)
regs_to_edges = defaultdict(list)
def _annotate_edges_in_dict(dict_, key, **new_labels):
"""
:param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`.
:param key: The key used in finding elements in the dict.
:param new_labels: New labels to be added to those edges.
"""
for edge_tuple in dict_[key]:
# unpack it
_, _, labels = edge_tuple
for k, v in new_labels.iteritems():
if k in labels:
labels[k] = labels[k] + (v,)
else:
# Construct a tuple
labels[k] = (v,)
def _dump_edge_from_dict(dict_, key, del_key=True):
"""
Pick an edge from the dict based on the key specified, add it to our graph, and remove the key from dict.
:param dict_: The dict, can be either `temps_to_edges` or `regs_to_edges`.
:param key: The key used in finding elements in the dict.
"""
for edge_tuple in dict_[key]:
# unpack it
prev_code_loc, current_code_loc, labels = edge_tuple
# Add the new edge
self._add_edge(prev_code_loc, current_code_loc, **labels)
# Clear it
if del_key:
del dict_[key]
for a in action_list:
if a.bbl_addr is None:
current_code_loc = CodeLocation(None, None, sim_procedure=a.sim_procedure)
else:
current_code_loc = CodeLocation(a.bbl_addr, a.stmt_idx)
if a.type == "mem":
if a.actual_addrs is None:
# For now, mem reads don't necessarily have actual_addrs set properly
try:
addr_list = { state.se.any_int(a.addr.ast) }
except SimSolverModeError:
# it's symbolic... just continue
continue
else:
addr_list = set(a.actual_addrs)
for addr in addr_list:
variable = SimMemoryVariable(addr, a.data.ast.size()) # TODO: Properly unpack the SAO
if a.action == "read":
# Create an edge between def site and use site
prevdefs = self._def_lookup(live_defs, variable)
for prev_code_loc, labels in prevdefs.iteritems():
self._read_edge = True
self._add_edge(prev_code_loc, current_code_loc, **labels)
if a.action == "write":
# Kill the existing live def
self._kill(live_defs, variable, current_code_loc)
# For each of its register dependency and data dependency, we revise the corresponding edge
for reg_off in a.addr.reg_deps:
_annotate_edges_in_dict(regs_to_edges, reg_off, subtype='mem_addr')
for tmp in a.addr.tmp_deps:
_annotate_edges_in_dict(temps_to_edges, tmp, subtype='mem_addr')
for reg_off in a.data.reg_deps:
_annotate_edges_in_dict(regs_to_edges, reg_off, subtype='mem_data')
for tmp in a.data.tmp_deps:
_annotate_edges_in_dict(temps_to_edges, tmp, subtype='mem_data')
elif a.type == 'reg':
# For now, we assume a.offset is not symbolic
# TODO: Support symbolic register offsets
variable = SimRegisterVariable(a.offset, a.data.ast.size())
if a.action == 'read':
# What do we want to do?
prevdefs = self._def_lookup(live_defs, variable)
if a.offset in regs_to_edges:
_dump_edge_from_dict(regs_to_edges, a.offset)
for prev_code_loc, labels in prevdefs.iteritems():
edge_tuple = (prev_code_loc, current_code_loc, labels)
regs_to_edges[a.offset].append(edge_tuple)
else:
# write
self._kill(live_defs, variable, current_code_loc)
elif a.type == 'tmp':
# tmp is definitely not symbolic
if a.action == 'read':
prev_code_loc = temps[a.tmp]
edge_tuple = (prev_code_loc, current_code_loc, {'type': 'tmp', 'data': a.tmp})
if a.tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, a.tmp)
temps_to_edges[a.tmp].append(edge_tuple)
else:
# write
temps[a.tmp] = current_code_loc
elif a.type == 'exit':
# exits should only depend on tmps
for tmp in a.tmp_deps:
prev_code_loc = temps[tmp]
edge_tuple = (prev_code_loc, current_code_loc, {'type': 'exit', 'data': tmp})
if tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, tmp)
temps_to_edges[tmp].append(edge_tuple)
# In the end, dump all other edges in those two dicts
for reg_offset in regs_to_edges:
_dump_edge_from_dict(regs_to_edges, reg_offset, del_key=False)
for tmp in temps_to_edges:
_dump_edge_from_dict(temps_to_edges, tmp, del_key=False)
return live_defs
def _def_lookup(self, live_defs, variable):
"""
This is a backward lookup in the previous defs. Note that, as we are using VSA, it is possible that `variable`
is affected by several definitions.
        :param live_defs: A dict of currently live definitions.
        :param variable:  The variable (register or memory) whose previous definitions we look up.
        :returns:         A dict {code_location: labels}, where the labels either count the definitions made at that
                          location or carry the variable itself, depending on the `keep_data` flag.
"""
prevdefs = {}
if variable in live_defs:
code_loc_set = live_defs[variable]
for code_loc in code_loc_set:
# Label edges with cardinality or actual sets of addresses
if isinstance(variable, SimMemoryVariable):
type_ = 'mem'
elif isinstance(variable, SimRegisterVariable):
type_ = 'reg'
else:
raise AngrDDGError('Unknown variable type %s' % type(variable))
if self.keep_data is True:
data = variable
prevdefs[code_loc] = {
'type': type_,
'data': data
}
else:
if code_loc in prevdefs:
count = prevdefs[code_loc]['count'] + 1
else:
count = 0
prevdefs[code_loc] = {
'type': type_,
'count': count
}
return prevdefs
def _kill(self, live_defs, variable, code_loc):
"""
        Kill previous definitions of `variable`, and record `code_loc` as its new (only) live definition.
"""
# Case 1: address perfectly match, we kill
# Case 2: a is a subset of the original address
# Case 3: a is a superset of the original address
live_defs[variable] = {code_loc}
#l.debug("XX CodeLoc %s kills variable %s", code_loc, variable)
def _add_edge(self, s_a, s_b, **edge_labels):
"""
        Add an edge in the graph from `s_a` to `s_b`, where `s_a` and `s_b` are CodeLocation instances identifying
        statements (basic-block address plus statement index).
"""
# Is that edge already in the graph ?
# If at least one is new, then we are not redoing the same path again
if (s_a, s_b) not in self.graph.edges():
self.graph.add_edge(s_a, s_b, **edge_labels)
self._new = True
l.info("New edge: %s --> %s", s_a, s_b)
def _worklist_append(self, node_wrapper, worklist, worklist_set):
"""
Append a CFGNode and its successors into the work-list, and respect the call-depth limit
:param node_wrapper: The NodeWrapper instance to insert.
:param worklist: The work-list, which is a list.
:param worklist_set: A set of all CFGNodes that are inside the work-list, just for the sake of fast look-up.
It will be updated as well.
:returns: A set of newly-inserted CFGNodes (not NodeWrapper instances).
"""
if node_wrapper.cfg_node in worklist_set:
# It's already in the work-list
return
worklist.append(node_wrapper)
worklist_set.add(node_wrapper.cfg_node)
stack = [ node_wrapper ]
traversed_nodes = { node_wrapper.cfg_node }
inserted = { node_wrapper.cfg_node }
while stack:
nw = stack.pop()
n, call_depth = nw.cfg_node, nw.call_depth
# Get successors
edges = self._cfg.graph.out_edges(n, data=True)
for _, dst, data in edges:
                if (dst not in traversed_nodes  # which means we haven't touched this node in this appending procedure
and dst not in worklist_set): # which means this node is not in the work-list
# We see a new node!
traversed_nodes.add(dst)
if data['jumpkind'] == 'Ijk_Call':
if self._call_depth is None or call_depth < self._call_depth:
inserted.add(dst)
new_nw = NodeWrapper(dst, call_depth + 1)
worklist.append(new_nw)
worklist_set.add(dst)
stack.append(new_nw)
elif data['jumpkind'] == 'Ijk_Ret':
if call_depth > 0:
inserted.add(dst)
new_nw = NodeWrapper(dst, call_depth - 1)
worklist.append(new_nw)
worklist_set.add(dst)
stack.append(new_nw)
else:
new_nw = NodeWrapper(dst, call_depth)
inserted.add(dst)
worklist_set.add(dst)
worklist.append(new_nw)
stack.append(new_nw)
return inserted
def _build_function_dependency_graphs(self):
"""
Build dependency graphs for each function, and save them in self._function_data_dependencies.
"""
# This is a map between functions and its corresponding dependencies
self._function_data_dependencies = defaultdict(networkx.DiGraph)
# Group all dependencies first
simrun_addr_to_func = { }
for func_addr, func in self._cfg.function_manager.functions.iteritems():
for block in func.blocks:
simrun_addr_to_func[block] = func
for src, dst, data in self.graph.edges_iter(data=True):
src_target_func = None
if src.simrun_addr in simrun_addr_to_func:
src_target_func = simrun_addr_to_func[src.simrun_addr]
self._function_data_dependencies[src_target_func].add_edge(src, dst, **data)
if dst.simrun_addr in simrun_addr_to_func:
dst_target_func = simrun_addr_to_func[dst.simrun_addr]
if not dst_target_func is src_target_func:
self._function_data_dependencies[dst_target_func].add_edge(src, dst, **data)
register_analysis(DDG, 'DDG')
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/appengine/v1/google-cloud-appengine-v1-py/google/cloud/appengine_admin_v1/types/location.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.appengine.v1',
manifest={
'LocationMetadata',
},
)
class LocationMetadata(proto.Message):
r"""Metadata for the given
[google.cloud.location.Location][google.cloud.location.Location].
Attributes:
standard_environment_available (bool):
App Engine standard environment is available
in the given location.
@OutputOnly
flexible_environment_available (bool):
App Engine flexible environment is available
in the given location.
@OutputOnly
search_api_available (bool):
Output only. `Search
API <https://cloud.google.com/appengine/docs/standard/python/search>`__
is available in the given location.
"""
standard_environment_available = proto.Field(
proto.BOOL,
number=2,
)
flexible_environment_available = proto.Field(
proto.BOOL,
number=4,
)
search_api_available = proto.Field(
proto.BOOL,
number=6,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/322_test_modulefinder.py
|
59
|
import __future__
import os
import unittest
import distutils.dir_util
import tempfile
from test import support
import modulefinder
TEST_DIR = tempfile.mkdtemp()
TEST_PATH = [TEST_DIR, os.path.dirname(__future__.__file__)]
# Each test description is a list of 5 items:
#
# 1. a module name that will be imported by modulefinder
# 2. a list of module names that modulefinder is required to find
# 3. a list of module names that modulefinder should complain
# about because they are not found
# 4. a list of module names that modulefinder should complain
# about because they MAY be not found
# 5. a string specifying packages to create; each non-indented line names a
#    file to create under TEST_DIR, and the indented lines that follow give
#    that file's contents
#
# Each package will be created in TEST_DIR, and TEST_DIR will be
# removed after the tests again.
# Modulefinder searches in a path that contains TEST_DIR, plus
# the standard Lib directory.
maybe_test = [
"a.module",
["a", "a.module", "sys",
"b"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from sys import *
"""]
maybe_test_new = [
"a.module",
["a", "a.module", "sys",
"b", "__future__"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from __future__ import absolute_import
from sys import *
"""]
package_test = [
"a.module",
["a", "a.b", "a.c", "a.module", "mymodule", "sys"],
["blahblah", "c"], [],
"""\
mymodule.py
a/__init__.py
import blahblah
from a import b
import c
a/module.py
import sys
from a import b as x
from a.c import sillyname
a/b.py
a/c.py
from a.module import x
import mymodule as sillyname
from sys import version_info
"""]
absolute_import_test = [
"a.module",
["a", "a.module",
"b", "b.x", "b.y", "b.z",
"__future__", "sys", "gc"],
["blahblah", "z"], [],
"""\
mymodule.py
a/__init__.py
a/module.py
from __future__ import absolute_import
import sys # sys
import blahblah # fails
import gc # gc
import b.x # b.x
from b import y # b.y
from b.z import * # b.z.*
a/gc.py
a/sys.py
import mymodule
a/b/__init__.py
a/b/x.py
a/b/y.py
a/b/z.py
b/__init__.py
import z
b/unused.py
b/x.py
b/y.py
b/z.py
"""]
relative_import_test = [
"a.module",
["__future__",
"a", "a.module",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.moduleC",
"a.b.c.d", "a.b.c.e",
"a.b.x",
"gc"],
[], [],
"""\
mymodule.py
a/__init__.py
from .b import y, z # a.b.y, a.b.z
a/module.py
from __future__ import absolute_import # __future__
import gc # gc
a/gc.py
a/sys.py
a/b/__init__.py
from ..b import x # a.b.x
#from a.b.c import moduleC
from .c import moduleC # a.b.moduleC
a/b/x.py
a/b/y.py
a/b/z.py
a/b/g.py
a/b/c/__init__.py
from ..c import e # a.b.c.e
a/b/c/moduleC.py
from ..c import d # a.b.c.d
a/b/c/d.py
a/b/c/e.py
a/b/c/x.py
"""]
relative_import_test_2 = [
"a.module",
["a", "a.module",
"a.sys",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.d",
"a.b.c.e",
"a.b.c.moduleC",
"a.b.c.f",
"a.b.x",
"a.another"],
[], [],
"""\
mymodule.py
a/__init__.py
from . import sys # a.sys
a/another.py
a/module.py
from .b import y, z # a.b.y, a.b.z
a/gc.py
a/sys.py
a/b/__init__.py
from .c import moduleC # a.b.c.moduleC
from .c import d # a.b.c.d
a/b/x.py
a/b/y.py
a/b/z.py
a/b/c/__init__.py
from . import e # a.b.c.e
a/b/c/moduleC.py
#
from . import f # a.b.c.f
from .. import x # a.b.x
from ... import another # a.another
a/b/c/d.py
a/b/c/e.py
a/b/c/f.py
"""]
relative_import_test_3 = [
"a.module",
["a", "a.module"],
["a.bar"],
[],
"""\
a/__init__.py
def foo(): pass
a/module.py
from . import foo
from . import bar
"""]
def open_file(path):
##print "#", os.path.abspath(path)
dirname = os.path.dirname(path)
distutils.dir_util.mkpath(dirname)
return open(path, "w")
def create_package(source):
ofi = None
try:
for line in source.splitlines():
if line.startswith(" ") or line.startswith("\t"):
ofi.write(line.strip() + "\n")
else:
if ofi:
ofi.close()
ofi = open_file(os.path.join(TEST_DIR, line.strip()))
finally:
if ofi:
ofi.close()
class ModuleFinderTest(unittest.TestCase):
def _do_test(self, info, report=False):
import_this, modules, missing, maybe_missing, source = info
create_package(source)
try:
mf = modulefinder.ModuleFinder(path=TEST_PATH)
mf.import_hook(import_this)
if report:
mf.report()
## # This wouldn't work in general when executed several times:
## opath = sys.path[:]
## sys.path = TEST_PATH
## try:
## __import__(import_this)
## except:
## import traceback; traceback.print_exc()
## sys.path = opath
## return
modules = set(modules)
found = set(mf.modules.keys())
more = list(found - modules)
less = list(modules - found)
# check if we found what we expected, not more, not less
self.assertEqual((more, less), ([], []))
# check for missing and maybe missing modules
bad, maybe = mf.any_missing_maybe()
self.assertEqual(bad, missing)
self.assertEqual(maybe, maybe_missing)
finally:
distutils.dir_util.remove_tree(TEST_DIR)
def test_package(self):
self._do_test(package_test)
def test_maybe(self):
self._do_test(maybe_test)
if getattr(__future__, "absolute_import", None):
def test_maybe_new(self):
self._do_test(maybe_test_new)
def test_absolute_imports(self):
self._do_test(absolute_import_test)
def test_relative_imports(self):
self._do_test(relative_import_test)
def test_relative_imports_2(self):
self._do_test(relative_import_test_2)
def test_relative_imports_3(self):
self._do_test(relative_import_test_3)
def test_main():
distutils.log.set_threshold(distutils.log.WARN)
support.run_unittest(ModuleFinderTest)
if __name__ == "__main__":
unittest.main()
|
TDAbboud/micropython
|
refs/heads/master
|
tests/basics/frozenset_binop.py
|
41
|
try:
frozenset
except NameError:
print("SKIP")
raise SystemExit
sets = [
frozenset(), frozenset({1}), frozenset({1, 2}), frozenset({1, 2, 3}), frozenset({2, 3}),
frozenset({2, 3, 5}), frozenset({5}), frozenset({7})
]
for s in sets:
for t in sets:
print(sorted(s), '|', sorted(t), '=', sorted(s | t))
print(sorted(s), '^', sorted(t), '=', sorted(s ^ t))
print(sorted(s), '&', sorted(t), '=', sorted(s & t))
print(sorted(s), '-', sorted(t), '=', sorted(s - t))
u = s.copy()
u |= t
print(sorted(s), "|=", sorted(t), '-->', sorted(u))
u = s.copy()
u ^= t
print(sorted(s), "^=", sorted(t), '-->', sorted(u))
u = s.copy()
u &= t
print(sorted(s), "&=", sorted(t), "-->", sorted(u))
u = s.copy()
u -= t
print(sorted(s), "-=", sorted(t), "-->", sorted(u))
print(sorted(s), '==', sorted(t), '=', s == t)
print(sorted(s), '!=', sorted(t), '=', s != t)
print(sorted(s), '>', sorted(t), '=', s > t)
print(sorted(s), '>=', sorted(t), '=', s >= t)
print(sorted(s), '<', sorted(t), '=', s < t)
print(sorted(s), '<=', sorted(t), '=', s <= t)
|
WeblateOrg/weblate
|
refs/heads/main
|
weblate/trans/defines.py
|
2
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Hardcoded length limitations."""
# Component name and slug length
COMPONENT_NAME_LENGTH = 100
# Project name and slug length
PROJECT_NAME_LENGTH = 60
# Repository length
REPO_LENGTH = 200
# Maximal length of filename or mask
FILENAME_LENGTH = 400
# User model length
# Note: This is currently limited to 192 to allow an index on MySQL
FULLNAME_LENGTH = 150
USERNAME_LENGTH = 150
EMAIL_LENGTH = 190
# Language
LANGUAGE_CODE_LENGTH = 50
LANGUAGE_NAME_LENGTH = 100
|
wldcordeiro/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/manifest/vcs.py
|
287
|
import os
import subprocess
def get_git_func(repo_path):
def git(cmd, *args):
full_cmd = ["git", cmd] + list(args)
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
return git
def is_git_repo(tests_root):
return os.path.exists(os.path.join(tests_root, ".git"))
_repo_root = None
def get_repo_root(initial_dir=None):
global _repo_root
if initial_dir is None:
initial_dir = os.path.dirname(__file__)
if _repo_root is None:
git = get_git_func(initial_dir)
_repo_root = git("rev-parse", "--show-toplevel").rstrip()
return _repo_root
|
ProfessorX/Config
|
refs/heads/master
|
.PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/RecentChooserError.py
|
1
|
# encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class RecentChooserError(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
}
__gtype__ = None # (!) real value is ''
|
Mistobaan/tensorflow
|
refs/heads/master
|
tensorflow/python/profiler/tfprof_logger_test.py
|
52
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TFProfLoggerTest(test.TestCase):
def _BuildSmallPlaceholderlModel(self):
a = array_ops.placeholder(dtypes.int32, [2, 2])
b = array_ops.placeholder(dtypes.int32, [2, 2])
y = math_ops.matmul(a, b)
return a, b, y
def _BuildSmallModel(self):
a = constant_op.constant([[1, 2], [3, 4]])
b = constant_op.constant([[1, 2], [3, 4]])
return math_ops.matmul(a, b)
# pylint: disable=pointless-string-statement
"""# TODO(xpan): This this out of core so it doesn't depend on contrib.
def testFillMissingShape(self):
a, b, y = self._BuildSmallPlaceholderlModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y,
options=run_options,
run_metadata=run_metadata,
feed_dict={a: [[1, 2], [2, 3]],
b: [[1, 2], [2, 3]]})
graph2 = ops.Graph()
# Use copy_op_to_graph to remove shape information.
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('(2, 2)', str(y2.get_shape()))
def testFailedFillMissingShape(self):
y = self._BuildSmallModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y, options=run_options, run_metadata=run_metadata)
graph2 = ops.Graph()
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
# run_metadata has special name for MatMul, hence failed to fill shape.
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('<unknown>', str(y2.get_shape()))
"""
if __name__ == '__main__':
test.main()
|
Intel-tensorflow/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/examples/v1/debug_errors.py
|
22
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of debugging TensorFlow runtime errors using tfdbg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import numpy as np
import tensorflow
from tensorflow.python import debug as tf_debug
tf = tensorflow.compat.v1
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
ph_float = tf.placeholder(tf.float32, name="ph_float")
x = tf.transpose(ph_float, name="x")
v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
m = tf.constant(
np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
dtype=tf.float32,
name="m")
y = tf.matmul(m, x, name="y")
z = tf.matmul(m, v, name="z")
if FLAGS.debug:
config_file_path = (
tempfile.mktemp(".tfdbg_config")
if FLAGS.use_random_config_path else None)
sess = tf_debug.LocalCLIDebugWrapperSession(
sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path)
if FLAGS.error == "shape_mismatch":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
elif FLAGS.error == "uninitialized_variable":
print(sess.run(z))
elif FLAGS.error == "no_error":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
else:
raise ValueError("Unrecognized error type: " + FLAGS.error)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--error",
type=str,
default="shape_mismatch",
help="""\
Type of the error to generate (shape_mismatch | uninitialized_variable |
no_error).\
""")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training")
parser.add_argument(
"--use_random_config_path",
type="bool",
nargs="?",
const=True,
default=False,
help="""If set, set config file path to a random file in the temporary
directory.""")
FLAGS, unparsed = parser.parse_known_args()
with tf.Graph().as_default():
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
yhoshino11/pytest_example
|
refs/heads/master
|
.tox/flake8/lib/python2.7/site-packages/setuptools/tests/test_upload_docs.py
|
151
|
import os
import zipfile
import contextlib
import pytest
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
from .textwrap import DALS
from . import contexts
SETUP_PY = DALS(
"""
from setuptools import setup
setup(name='foo')
""")
@pytest.fixture
def sample_project(tmpdir_cwd):
# setup.py
with open('setup.py', 'wt') as f:
f.write(SETUP_PY)
os.mkdir('build')
# A test document.
with open('build/index.html', 'w') as f:
f.write("Hello world.")
# An empty folder.
os.mkdir('build/empty')
@pytest.mark.usefixtures('sample_project')
@pytest.mark.usefixtures('user_override')
class TestUploadDocsTest:
def test_create_zipfile(self):
"""
Ensure zipfile creation handles common cases, including a folder
containing an empty folder.
"""
dist = Distribution()
cmd = upload_docs(dist)
cmd.target_dir = cmd.upload_dir = 'build'
with contexts.tempdir() as tmp_dir:
tmp_file = os.path.join(tmp_dir, 'foo.zip')
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
with contextlib.closing(zipfile.ZipFile(tmp_file)) as zip_file:
assert zip_file.namelist() == ['index.html']
|
ccomb/OpenUpgrade
|
refs/heads/master
|
addons/account/test/test_parent_structure.py
|
432
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO: move this in a YAML test with !python tag
#
import xmlrpclib
DB = 'training3'
USERID = 1
USERPASS = 'admin'
sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % ('localhost',8069))
ids = sock.execute(DB, USERID, USERPASS, 'account.account', 'search', [], {})
account_lists = sock.execute(DB, USERID, USERPASS, 'account.account', 'read', ids, ['parent_id','parent_left','parent_right'])
accounts = dict(map(lambda x: (x['id'],x), account_lists))
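# The assertions below check the nested-set ("parent_left"/"parent_right")
# invariants of the account hierarchy: each child's interval lies strictly
# inside its parent's interval, and no interval partially overlaps another.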
for a in account_lists:
if a['parent_id']:
assert a['parent_left'] > accounts[a['parent_id'][0]]['parent_left']
assert a['parent_right'] < accounts[a['parent_id'][0]]['parent_right']
assert a['parent_left'] < a['parent_right']
for a2 in account_lists:
assert not ((a2['parent_right']>a['parent_left']) and
(a2['parent_left']<a['parent_left']) and
(a2['parent_right']<a['parent_right']))
if a2['parent_id']==a['id']:
assert (a2['parent_left']>a['parent_left']) and (a2['parent_right']<a['parent_right'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hefen1/chromium
|
refs/heads/master
|
tools/telemetry/telemetry/user_story/user_story_filter_unittest.py
|
12
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.user_story import user_story_filter
from telemetry.page import page
from telemetry.page import page_set
class MockUrlFilterOptions(object):
def __init__(self, page_filter_include, page_filter_exclude):
self.page_filter = page_filter_include
self.page_filter_exclude = page_filter_exclude
self.page_label_filter = None
self.page_label_filter_exclude = None
class MockLabelFilterOptions(object):
def __init__(self, page_label_filter, page_label_filter_exclude):
self.page_filter = None
self.page_filter_exclude = None
self.page_label_filter = page_label_filter
self.page_label_filter_exclude = page_label_filter_exclude
class UserStoryFilterTest(unittest.TestCase):
def setUp(self):
ps = page_set.PageSet()
self.p1 = page.Page(
'file://conformance/textures/tex-sub-image-2d.html', page_set=ps,
name='WebglConformance.conformance_textures_tex_sub_image_2d',
labels=['label1', 'label2'])
self.p2 = page.Page(
'file://othersuite/textures/tex-sub-image-3d.html', page_set=ps,
name='OtherSuite.textures_tex_sub_image_3d',
labels=['label1'])
self.p3 = page.Page(
'file://othersuite/textures/tex-sub-image-3d.html', page_set=ps,
labels=['label2'])
def testURLPattern(self):
options = MockUrlFilterOptions('conformance_textures', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))
options = MockUrlFilterOptions('textures', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
options = MockUrlFilterOptions('somethingelse', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))
def testName(self):
options = MockUrlFilterOptions('somethingelse', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))
options = MockUrlFilterOptions('textures_tex_sub_image', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
options = MockUrlFilterOptions('WebglConformance', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p2))
options = MockUrlFilterOptions('OtherSuite', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
def testNameNone(self):
options = MockUrlFilterOptions('othersuite/textures', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p3))
options = MockUrlFilterOptions('conformance/textures', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p3))
def testLabelFilters(self):
# Include both labels
options = MockLabelFilterOptions('label1,label2', '')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p3))
# Exclude takes priority
options = MockLabelFilterOptions('label1', 'label2')
user_story_filter.UserStoryFilter.ProcessCommandLineArgs(None, options)
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p1))
self.assertTrue(user_story_filter.UserStoryFilter.IsSelected(self.p2))
self.assertFalse(user_story_filter.UserStoryFilter.IsSelected(self.p3))
|
nijx/hypertable
|
refs/heads/master
|
src/cc/HyperPython/tests/writer.py
|
3
|
import sys
import time
import libHyperPython
from hypertable.thriftclient import *
from hyperthrift.gen.ttypes import *
try:
client = ThriftClient("localhost", 38080)
print "SerializedCellsWriter example"
namespace = client.namespace_open("test")
client.hql_query(namespace, "drop table if exists thrift_test")
client.hql_query(namespace, "create table thrift_test (col)")
# write with SerializedCellsWriter
scw = libHyperPython.SerializedCellsWriter(100, 1)
scw.add("row0", "col", "", 0, "cell0", 6, 255)
scw.add("row1", "col", "", 0, "cell1", 6, 255)
scw.add("row2", "col", "", 0, "cell2", 6, 255)
scw.add("row3", "col", "", 0, "cell3", 6, 255)
scw.add("row4", "col", "", 0, "cell4", 6, 255)
scw.add("row5", "col", "", 0, "cell5", 6, 255)
scw.add("collapse_row", "col", "a", 0, "cell6", 6, 255)
scw.add("collapse_row", "col", "b", 0, "cell7", 6, 255)
scw.add("collapse_row", "col", "c", 0, "cell8", 6, 255)
scw.finalize(0)
client.set_cells_serialized(namespace, "thrift_test", scw.get())
res = client.hql_query(namespace, "select * from thrift_test")
for cell in res.cells:
print cell.key.row, cell.key.column_family, cell.value
client.namespace_close(namespace)
except:
print sys.exc_info()
raise
|
wdmchaft/taskcoach
|
refs/heads/master
|
taskcoachlib/config/options.py
|
1
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import optparse
from taskcoachlib import meta
class OptionParser(optparse.OptionParser, object):
def __init__(self, *args, **kwargs):
super(OptionParser, self).__init__(*args, **kwargs)
self.__addOptionGroups()
self.__addOptions()
def __addOptionGroups(self):
self.__getAndAddOptions('OptionGroup', self.add_option_group)
def __addOptions(self):
self.__getAndAddOptions('Option', self.add_option)
def __getAndAddOptions(self, suffix, addOption):
for getOption in self.__methodsEndingWith(suffix):
addOption(getOption(self))
def __methodsEndingWith(self, suffix):
return [method for name, method in vars(self.__class__).items() if
name.endswith(suffix)]
class OptionGroup(optparse.OptionGroup, object):
pass
class ApplicationOptionParser(OptionParser):
def __init__(self, *args, **kwargs):
kwargs['usage'] = 'usage: %prog [options] [.tsk file]'
kwargs['version'] = '%s %s'%(meta.data.name, meta.data.version)
super(ApplicationOptionParser, self).__init__(*args, **kwargs)
def profileOption(self):
return optparse.Option('--profile', default=False,
action='store_true', help=optparse.SUPPRESS_HELP)
def profile_skipstartOption(self):
return optparse.Option('-s', '--skipstart', default=False,
action='store_true', help=optparse.SUPPRESS_HELP)
def iniOption(self):
return optparse.Option('-i', '--ini', dest='inifile',
help='use the specified INIFILE for storing settings')
def languageOption(self):
return optparse.Option('-l', '--language', dest='language',
type='choice', choices=sorted([lang for lang in \
meta.data.languages.values() if lang is not None] + ['en']),
            help='use the specified LANGUAGE for the GUI (e.g. "nl" or "fr")')
def poOption(self):
return optparse.Option('-p', '--po', dest='pofile',
help='use the specified POFILE for translation of the GUI')
|
windyuuy/opera
|
refs/heads/master
|
chromium/src/tools/gyp/test/cxxflags/gyptest-cxxflags.py
|
142
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""
import os
import TestGyp
env_stack = []
def PushEnv():
env_copy = os.environ.copy()
env_stack.append(env_copy)
def PopEnv():
  os.environ.clear()
  os.environ.update(env_stack.pop())
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
try:
PushEnv()
os.environ['CXXFLAGS'] = '-O0'
test.run_gyp('cxxflags.gyp')
finally:
# We clear the environ after calling gyp. When the auto-regeneration happens,
# the same define should be reused anyway. Reset to empty string first in
# case the platform doesn't support unsetenv.
PopEnv()
test.build('cxxflags.gyp')
expect = """\
Using no optimization flag
"""
test.run_built_executable('cxxflags', stdout=expect)
test.sleep()
try:
PushEnv()
os.environ['CXXFLAGS'] = '-O2'
test.run_gyp('cxxflags.gyp')
finally:
# We clear the environ after calling gyp. When the auto-regeneration happens,
# the same define should be reused anyway. Reset to empty string first in
# case the platform doesn't support unsetenv.
PopEnv()
test.build('cxxflags.gyp')
expect = """\
Using an optimization flag
"""
test.run_built_executable('cxxflags', stdout=expect)
test.pass_test()
|
ee08b397/dpark
|
refs/heads/master
|
dpark/moosefs/master.py
|
2
|
import os
import socket
import threading
import Queue
import time
import struct
import logging
from consts import *
from utils import *
logger = logging.getLogger(__name__)
# mfsmaster needs to be patched with dcache
ENABLE_DCACHE = False
class StatInfo:
def __init__(self, totalspace, availspace, trashspace,
reservedspace, inodes):
self.totalspace = totalspace
self.availspace = availspace
self.trashspace = trashspace
self.reservedspace = reservedspace
self.inodes = inodes
class Chunk:
def __init__(self, id, length, version, csdata):
self.id = id
self.length = length
self.version = version
self.addrs = self._parse(csdata)
def _parse(self, csdata):
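        # csdata is a flat byte string of 6-byte records, one per chunk server
        # holding this chunk: a 4-byte IPv4 address followed by a 2-byte port.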
return [(socket.inet_ntoa(csdata[i:i+4]),
unpack("H", csdata[i+4:i+6])[0])
for i in range(len(csdata))[::6]]
def __repr__(self):
return "<Chunk(%d, %d, %d)>" % (self.id, self.version, self.length)
def try_again(f):
def _(self, *a, **kw):
for i in range(3):
try:
return f(self, *a, **kw)
except IOError, e:
self.close()
logger.warning("mfs master connection: %s", e)
time.sleep(2**i*0.1)
else:
raise
return _
def spawn(target, *args, **kw):
t = threading.Thread(target=target, name=target.__name__, args=args, kwargs=kw)
t.daemon = True
t.start()
return t
class MasterConn:
def __init__(self, host='mfsmaster', port=9421):
self.host = host
self.port = port
self.uid = os.getuid()
self.gid = os.getgid()
self.sessionid = 0
self.conn = None
self.packetid = 0
self.fail_count = 0
self.dcache = {}
self.dstat = {}
self.lock = threading.RLock()
self.reply = Queue.Queue()
self.is_ready = False
spawn(self.heartbeat)
spawn(self.recv_thread)
def heartbeat(self):
while True:
try:
self.nop()
except Exception, e:
self.close()
time.sleep(2)
def connect(self):
if self.conn is not None:
return
for _ in range(10):
try:
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.host, self.port))
break
except socket.error, e:
self.conn = None
#self.next_try = time.time() + 1.5 ** self.fail_count
self.fail_count += 1
time.sleep(1.5 ** self.fail_count)
if not self.conn:
raise IOError("mfsmaster not availbale")
regbuf = pack(CUTOMA_FUSE_REGISTER, FUSE_REGISTER_BLOB_NOACL,
self.sessionid, VERSION)
self.send(regbuf)
recv = self.recv(8)
cmd, i = unpack("II", recv)
if cmd != MATOCU_FUSE_REGISTER:
raise Exception("got incorrect answer from mfsmaster %s" % cmd)
if i not in (1, 4):
raise Exception("got incorrect size from mfsmaster")
data = self.recv(i)
if i == 1:
code, = unpack("B", data)
if code != 0:
raise Exception("mfsmaster register error: "
+ mfs_strerror(code))
if self.sessionid == 0:
self.sessionid, = unpack("I", data)
self.is_ready = True
def close(self):
with self.lock:
if self.conn:
self.conn.close()
self.conn = None
self.dcache.clear()
self.is_ready = False
def send(self, buf):
with self.lock:
conn = self.conn
if not conn:
raise IOError("not connected")
n = conn.send(buf)
while n < len(buf):
sent = conn.send(buf[n:])
if not sent:
self.close()
raise IOError("write to master failed")
n += sent
def nop(self):
with self.lock:
self.connect()
msg = pack(ANTOAN_NOP, 0)
self.send(msg)
def recv(self, n):
with self.lock:
conn = self.conn
if not conn:
raise IOError("not connected")
r = conn.recv(n)
while len(r) < n:
rr = conn.recv(n - len(r))
if not rr:
self.close()
raise IOError("unexpected error: need %d" % (n-len(r)))
r += rr
return r
def recv_cmd(self):
d = self.recv(12)
cmd, size = unpack("II", d)
data = self.recv(size-4) if size > 4 else ''
while cmd in (ANTOAN_NOP, MATOCU_FUSE_NOTIFY_ATTR, MATOCU_FUSE_NOTIFY_DIR):
if cmd == ANTOAN_NOP:
pass
elif cmd == MATOCU_FUSE_NOTIFY_ATTR:
while len(data) >= 43:
parent, inode = unpack("II", data)
attr = data[8:43]
if parent in self.dcache:
cache = self.dcache[parent]
for name in cache:
if cache[name].inode == inode:
cache[name] = attrToFileInfo(inode, attr)
break
data = data[43:]
elif cmd == MATOCU_FUSE_NOTIFY_DIR:
while len(data) >= 4:
inode, = unpack("I", data)
if inode in self.dcache:
del self.dcache[inode]
with self.lock:
self.send(pack(CUTOMA_FUSE_DIR_REMOVED, 0, inode))
data = data[4:]
d = self.recv(12)
cmd, size = unpack("II", d)
data = self.recv(size-4) if size > 4 else ''
return d, data
def recv_thread(self):
while True:
with self.lock:
if not self.is_ready:
time.sleep(0.01)
continue
try:
r = self.recv_cmd()
self.reply.put(r)
except IOError, e:
self.reply.put(e)
@try_again
def sendAndReceive(self, cmd, *args):
#print 'sendAndReceive', cmd, args
self.packetid += 1
msg = pack(cmd, self.packetid, *args)
with self.lock:
self.connect()
while not self.reply.empty():
self.reply.get_nowait()
self.send(msg)
r = self.reply.get()
if isinstance(r, Exception):
raise r
h, d = r
rcmd, size, pid = unpack("III", h)
if rcmd != cmd+1 or pid != self.packetid or size <= 4:
self.close()
raise Exception("incorrect answer (%s!=%s, %s!=%s, %d<=4",
rcmd, cmd+1, pid, self.packetid, size)
if len(d) == 1 and ord(d[0]) != 0:
raise Error(ord(d[0]))
return d
def statfs(self):
ans = self.sendAndReceive(CUTOMA_FUSE_STATFS)
return StatInfo(*unpack("QQQQI", ans))
# def access(self, inode, modemask):
# return self.sendAndReceive(CUTOMA_FUSE_ACCESS, inode,
# self.uid, self.gid, uint8(modemask))
#
def lookup(self, parent, name):
if ENABLE_DCACHE:
cache = self.dcache.get(parent)
if cache is None and self.dstat.get(parent, 0) > 1:
cache = self.getdirplus(parent)
if cache is not None:
return cache.get(name), None
self.dstat[parent] = self.dstat.get(parent, 0) + 1
ans = self.sendAndReceive(CUTOMA_FUSE_LOOKUP, parent,
uint8(len(name)), name, 0, 0)
if len(ans) == 1:
return None, ""
if len(ans) != 39:
raise Exception("bad length")
inode, = unpack("I", ans)
return attrToFileInfo(inode, ans[4:]), None
def getattr(self, inode):
ans = self.sendAndReceive(CUTOMA_FUSE_GETATTR, inode,
self.uid, self.gid)
return attrToFileInfo(inode, ans)
def readlink(self, inode):
ans = self.sendAndReceive(CUTOMA_FUSE_READLINK, inode)
length, = unpack("I", ans)
if length+4 != len(ans):
raise Exception("invalid length")
return ans[4:-1]
def getdir(self, inode):
"return: {name: (inode,type)}"
ans = self.sendAndReceive(CUTOMA_FUSE_GETDIR, inode,
self.uid, self.gid)
p = 0
names = {}
while p < len(ans):
length, = unpack("B", ans[p:p+1])
p += 1
if length + p + 5 > len(ans):
break
name = ans[p:p+length]
p += length
            inode, type = unpack("IB", ans[p:p+5])
names[name] = (inode, type)
p += 5
return names
def getdirplus(self, inode):
"return {name: FileInfo()}"
if ENABLE_DCACHE:
infos = self.dcache.get(inode)
if infos is not None:
return infos
flag = GETDIR_FLAG_WITHATTR
if ENABLE_DCACHE:
flag |= GETDIR_FLAG_DIRCACHE
ans = self.sendAndReceive(CUTOMA_FUSE_GETDIR, inode,
self.uid, self.gid, uint8(flag))
p = 0
infos = {}
while p < len(ans):
length, = unpack("B", ans[p:p+1])
p += 1
name = ans[p:p+length]
p += length
i, = unpack("I", ans[p:p+4])
attr = ans[p+4:p+39]
infos[name] = attrToFileInfo(i, attr, name)
p += 39
if ENABLE_DCACHE:
self.dcache[inode] = infos
return infos
def opencheck(self, inode, flag=1):
ans = self.sendAndReceive(CUTOMA_FUSE_OPEN, inode,
self.uid, self.gid, uint8(flag))
return ans
def readchunk(self, inode, index):
ans = self.sendAndReceive(CUTOMA_FUSE_READ_CHUNK, inode, index)
n = len(ans)
if n < 20 or (n-20)%6 != 0:
raise Exception("read chunk: invalid length: %s" % n)
length, id, version = unpack("QQI", ans)
return Chunk(id, length, version, ans[20:])
def test():
m = MasterConn("mfsmaster")
m.connect()
m.close()
#print m.get_attr(1)
while True:
print m.getdir(1)
print m.getdirplus(1)
time.sleep(60)
info, err = m.lookup(1, "test.csv")
print info, err
#print m.opencheck(info.inode)
chunks = m.readchunk(info.inode, 0)
print chunks, chunks.addrs
for i in range(1000):
info, err = m.lookup(1, "test.csv")
chunks = m.readchunk(info.inode, 0)
print i,err, chunks
time.sleep(10)
m.close()
if __name__ == '__main__':
test()
|
andmos/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/layer3/__init__.py
|
12133432
| |
zhouzhenghui/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/tests/gdata_tests/calendar/__init__.py
|
12133432
| |
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/generic_relations_regress/__init__.py
|
12133432
| |
streettraffic/streettraffic
|
refs/heads/develop
|
server_multiple_cities.py
|
1
|
## import system module
import json
import rethinkdb as r
import time
import datetime as dt
import asyncio
## import custom module
from main_program.map_resource.utility import Utility
from main_program.database import TrafficData
from main_program import tools
from main_program.server import TrafficServer
class TestTrafficServer(TrafficServer):
async def main_crawler(self):
"""
"""
self.crawler_running = True
while self.crawler_running:
print('start crawling')
self.traffic_data.store_matrix_json(self.traffic_matrix_list)
#self.traffic_data.insert_analytics_traffic_pattern('[33.880079, 33.648894, -84.485086, -84.311365]')
# time management, we want to execute script every 30 minutes
# in order to do that we need to calculate how many seconds we should sleep
current = dt.datetime.utcnow()
if current.minute < 30:
wait_seconds = 30*60 - current.minute*60 - current.second
else:
wait_seconds = 60*60 - current.minute*60 - current.second
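            # Worked example (hypothetical clock): at 12:10:20 UTC the next run is at
            # 12:30, so wait_seconds = 30*60 - 10*60 - 20 = 1180; at 12:40:00 the next
            # run is at 13:00, so wait_seconds = 60*60 - 40*60 - 0 = 1200.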
print('crawling finished')
await asyncio.sleep(wait_seconds)
# initialize settings
settings = {
'app_id': 'F8aPRXcW3MmyUvQ8Z3J9',
'app_code' : 'IVp1_zoGHdLdz0GvD_Eqsw',
'map_tile_base_url': 'https://1.traffic.maps.cit.api.here.com/maptile/2.1/traffictile/newest/normal.day/',
'json_tile_base_url': 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?'
}
util = Utility(settings)
## initialize traffic server
traffic_server = TestTrafficServer(database_name= "Traffic", database_ip = "localhost")
## new york route to boston
San_Francisco_polygon = [[37.837174338616975,-122.48725891113281],[37.83364941345965,-122.48485565185547],[37.83093781796035,-122.4814224243164],[37.82415839321614,-122.48004913330078],[37.8203616433087,-122.47970581054688],[37.81059767530207,-122.47798919677734],[37.806122091729485,-122.47627258300781],[37.79215110146845,-122.48039245605469],[37.78726741375342,-122.48519897460938],[37.78618210598413,-122.49927520751953],[37.78645343442073,-122.50614166259766],[37.779127216982424,-122.51232147216797],[37.772614414082014,-122.51163482666016],[37.76121562849642,-122.51197814941406],[37.75171529845649,-122.51060485839844],[37.74329970164702,-122.50957489013672],[37.735969208590504,-122.50717163085938],[37.73081027834234,-122.50717163085938],[37.72293542866175,-122.50682830810547],[37.715331331027045,-122.50442504882812],[37.714244967649265,-122.49893188476562],[37.71940505182832,-122.50030517578125],[37.724564776604836,-122.5030517578125],[37.729724141962045,-122.50167846679688],[37.7324394530424,-122.49549865722656],[37.72918106779786,-122.49378204345703],[37.729724141962045,-122.48828887939453],[37.72782336496339,-122.4807357788086],[37.73271097867418,-122.37945556640625],[37.74520008134973,-122.37533569335938],[37.74655746554895,-122.39112854003906],[37.75008654795525,-122.3873519897461],[37.754972691904946,-122.38391876220703],[37.76148704857093,-122.38597869873047],[37.769629187677005,-122.3876953125],[37.78265474565738,-122.38872528076172],[37.78781006166096,-122.3880386352539],[37.79594930209237,-122.37911224365234],[37.804358908571395,-122.36984252929688],[37.812767557570204,-122.3605728149414],[37.817649559511125,-122.35130310058594],[37.82009043941308,-122.332763671875],[37.823344820392535,-122.30632781982422],[37.8271414168374,-122.30701446533203],[37.824700770115996,-122.31765747070312],[37.82253123860035,-122.33139038085938],[37.8203616433087,-122.34615325927734],[37.81792077237497,-122.35576629638672],[37.81168262440736,-122.3653793334961],[37.803002585189645,-122.37396240234375],[37.790523241426946,-122.3880386352539],[37.79594930209237,-122.39490509033203],[37.80273131752431,-122.39936828613281],[37.80815648152641,-122.40726470947266],[37.80734273233311,-122.42305755615234],[37.807071480609274,-122.43267059326172],[37.80571520704469,-122.44194030761719],[37.80463017025873,-122.45189666748047],[37.80463017025873,-122.464599609375],[37.807071480609274,-122.47421264648438],[37.815208598896255,-122.47695922851562],[37.82768377181359,-122.47798919677734],[37.835276322922695,-122.48004913330078],[37.837174338616975,-122.48725891113281]]
Boston_polygon = [[42.32453946380133,-71.13029479980469],[42.32758538845383,-71.0489273071289],[42.330631165629846,-71.03588104248047],[42.33316920061984,-71.02180480957031],[42.339513840022754,-71.02455139160156],[42.3397676122846,-71.04412078857422],[42.34611158596906,-71.02558135986328],[42.356514317057886,-71.02317810058594],[42.348648996207956,-71.00669860839844],[42.35829022102701,-71.00360870361328],[42.353469793490646,-70.99090576171875],[42.36057345238455,-70.98918914794922],[42.363110278811256,-71.00223541259766],[42.37883631647602,-70.99983215332031],[42.37756823359386,-71.00738525390625],[42.37224200585402,-71.01631164550781],[42.37680737157286,-71.02008819580078],[42.381879610913195,-71.015625],[42.38999434161929,-71.00704193115234],[42.40444610741266,-71.00395202636719],[42.40444610741266,-71.13029479980469],[42.32453946380133,-71.13029479980469]]
Pittsburgh_polygon = [[40.603526799885884,-80.09445190429688],[40.564937785967224,-80.13427734375],[40.50126945841646,-80.14801025390625],[40.42290582797254,-80.12054443359375],[40.3549167507906,-80.1287841796875],[40.330842639095756,-80.14389038085938],[40.31199603742692,-80.16311645507812],[40.319325896602095,-79.9310302734375],[40.365381076021734,-79.80056762695312],[40.57119697629581,-79.7991943359375],[40.603526799885884,-80.09445190429688]]
Greenville_polygon = [[34.96531080784271,-82.44483947753906],[34.946176590087454,-82.44140625],[34.92422301690582,-82.45994567871094],[34.89888467073924,-82.46818542480469],[34.871285134570016,-82.47367858886719],[34.837477162415986,-82.452392578125],[34.83015027082022,-82.54096984863281],[34.820004267650454,-82.53822326660156],[34.829022998858306,-82.45101928710938],[34.8047829195724,-82.44071960449219],[34.79068657192738,-82.44552612304688],[34.770383597610255,-82.47573852539062],[34.76192255039478,-82.47024536132812],[34.784483415461345,-82.44415283203125],[34.7483830709853,-82.31849670410156],[34.71847552413778,-82.25944519042969],[34.72073307506407,-82.24845886230469],[34.75740963726007,-82.28141784667969],[34.77263973038464,-82.26699829101562],[34.83184114982865,-82.28965759277344],[34.86058077988933,-82.24845886230469],[34.86790496256872,-82.25601196289062],[34.839167890957015,-82.30133056640625],[34.86903170200862,-82.34458923339844],[34.96531080784271,-82.44483947753906]]
Norfolk_polygon = [[37.00693943418586,-76.32339477539062],[36.98884240936997,-76.31034851074219],[36.982260605282676,-76.30348205566406],[36.96799807635307,-76.30210876464844],[36.95976847846004,-76.27738952636719],[36.953732874654285,-76.2725830078125],[36.94769679250732,-76.28700256347656],[36.95098926024786,-76.29661560058594],[36.95757376878687,-76.30348205566406],[36.95976847846004,-76.31584167480469],[36.9669008480318,-76.32820129394531],[36.90323455156814,-76.32888793945312],[36.91366629380721,-76.31721496582031],[36.90927415514871,-76.28631591796875],[36.89499795802219,-76.28974914550781],[36.901587303978474,-76.30348205566406],[36.90323455156814,-76.31515502929688],[36.874127942666334,-76.32888793945312],[36.8631414329529,-76.30828857421875],[36.849955535919875,-76.32682800292969],[36.856548768788954,-76.34056091308594],[36.82852360193767,-76.33094787597656],[36.820278951308744,-76.34536743164062],[36.81203341240741,-76.34124755859375],[36.83017242546416,-76.28974914550781],[36.79993834872292,-76.2835693359375],[36.80323719192363,-76.19155883789062],[36.84116367417466,-76.19499206542969],[36.82137828938333,-76.1407470703125],[36.82577548376294,-76.04873657226562],[36.84281222525469,-76.04736328125],[36.85160389745116,-76.14280700683594],[36.92739009701458,-76.16065979003906],[36.93726970584893,-76.22039794921875],[36.96799807635307,-76.27670288085938],[36.96854668458301,-76.29592895507812],[36.98390610968992,-76.29936218261719],[37.00693943418586,-76.32339477539062]]
polygon_collection = [San_Francisco_polygon, Boston_polygon, Pittsburgh_polygon, Greenville_polygon, Norfolk_polygon]
traffic_matrix = []
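# build one matrix of traffic-JSON tile request URLs per monitored polygon (zoom level 14)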
for polygon in polygon_collection:
traffic_matrix += [util.get_area_tile_matrix_url("traffic_json", polygon, 14, True)]
traffic_server.traffic_matrix_list = traffic_matrix
# start
traffic_server.start()
|
chemfiles/chemfiles
|
refs/heads/master
|
tests/lints/check-capi-docs.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
A small script checking that all the C API functions are documented, listed in
the documentation outline, and have an example.
"""
from __future__ import print_function
import os
import sys
import codecs
ROOT = os.path.join(os.path.dirname(__file__), "..", "..")
ERRORS = 0
def error(message):
global ERRORS
ERRORS += 1
print(message)
def documented_functions():
functions = []
DOCS = os.path.join(ROOT, "doc", "src", "capi")
for (root, _, paths) in os.walk(DOCS):
for path in paths:
with codecs.open(os.path.join(root, path), encoding="utf8") as fd:
for line in fd:
if line.startswith(".. doxygenfunction::"):
name = line.split()[2]
functions.append(name)
return functions
def functions_in_outline():
MISC_FUNCTIONS = [
"chfl_last_error",
"chfl_clear_errors",
"chfl_version",
"chfl_formats_list",
]
DOCS = os.path.join(ROOT, "doc", "src", "capi")
functions = MISC_FUNCTIONS
for (root, _, paths) in os.walk(DOCS):
for path in paths:
with codecs.open(os.path.join(root, path), encoding="utf8") as fd:
for line in fd:
if ":cpp:func:" in line:
name = line.split("`")[1]
functions.append(name)
return functions
def function_name(line):
assert line.startswith("CHFL_EXPORT")
splitted = line.split()
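    # the function name follows the return type, which spans either one token
    # (e.g. "CHFL_EXPORT chfl_status chfl_foo(...)") or two tokens
    # (e.g. "CHFL_EXPORT const char* chfl_foo(...)")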
if splitted[2].startswith("chfl_"):
name = splitted[2].split("(")[0]
elif splitted[3].startswith("chfl_"):
name = splitted[3].split("(")[0]
else:
raise RuntimeError("Could not get function name in '" + line + "'")
return name
def all_functions():
functions = []
HEADERS = os.path.join(ROOT, "include", "chemfiles", "capi")
for (root, _, paths) in os.walk(HEADERS):
for path in paths:
with codecs.open(os.path.join(root, path), encoding="utf8") as fd:
for line in fd:
if line.startswith("CHFL_EXPORT"):
functions.append(function_name(line))
return functions
def check_examples():
HEADERS = os.path.join(ROOT, "include", "chemfiles", "capi")
for (root, _, paths) in os.walk(HEADERS):
for path in paths:
with codecs.open(os.path.join(root, path), encoding="utf8") as fd:
in_doc = False
example_found = False
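                # "///" doc comment lines open a documentation block; the "CHFL_EXPORT"
                # declaration closing that block must have referenced an "@example" file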
for line in fd:
if "@example" in line and in_doc:
example_found = True
path = line.split("{")[1].split("}")[0]
if not os.path.exists(os.path.join(ROOT, "tests", "doc", path)):
error("Missing example file at {}".format(path))
if line.startswith("///"):
in_doc = True
elif line.startswith("CHFL_EXPORT"):
in_doc = False
if not example_found:
name = function_name(line)
error("Missing example for {}".format(name))
example_found = False
if __name__ == "__main__":
docs = documented_functions()
outline = functions_in_outline()
for function in all_functions():
if function not in docs:
error("Missing documentation for {}".format(function))
if function not in outline:
error("Missing outline for {}".format(function))
check_examples()
    # The C and Fortran standards only allow external names of up to 31 characters
for function in all_functions():
if len(function) > 31:
error("Function name {} is too long".format(function))
if ERRORS != 0:
sys.exit(1)
|
Minjung/pysap
|
refs/heads/master
|
examples/ms_listener.py
|
3
|
#!/usr/bin/env python
# ===========
# pysap - Python library for crafting SAP's network protocols packets
#
# Copyright (C) 2015 by Martin Gallo, Core Security
#
# The library was designed and developed by Martin Gallo from the Security
# Consulting Services team of Core Security.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# ==============
# Standard imports
import logging
from socket import error as SocketError
from optparse import OptionParser, OptionGroup
# External imports
from scapy.config import conf
# Custom imports
import pysap
from pysap.SAPMS import SAPMS
from pysap.SAPRouter import SAPRoutedStreamSocket
# Set the verbosity to 0
conf.verb = 0
# Command line options parser
def parse_options():
description = \
"""This example script connects with the Message Server service and listen
for messages coming from the server.
"""
epilog = "pysap %(version)s - %(url)s - %(repo)s" % {"version": pysap.__version__,
"url": pysap.__url__,
"repo": pysap.__repo__}
usage = "Usage: %prog [options] -d <remote host>"
parser = OptionParser(usage=usage, description=description, epilog=epilog)
target = OptionGroup(parser, "Target")
target.add_option("-d", "--remote-host", dest="remote_host", help="Remote host")
target.add_option("-p", "--remote-port", dest="remote_port", type="int", help="Remote port [%default]", default=3900)
target.add_option("--route-string", dest="route_string", help="Route string for connecting through a SAP Router")
parser.add_option_group(target)
misc = OptionGroup(parser, "Misc options")
misc.add_option("-c", "--client", dest="client", default="pysap's-listener", help="Client name [%default]")
misc.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose output [%default]")
parser.add_option_group(misc)
(options, _) = parser.parse_args()
if not options.remote_host:
parser.error("Remote host is required")
return options
# Main function
def main():
options = parse_options()
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
# Initiate the connection
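    # (directly, or through a SAP Router when a route string is supplied)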
conn = SAPRoutedStreamSocket.get_nisocket(options.remote_host,
options.remote_port,
options.route_string,
base_cls=SAPMS)
print("[*] Connected to the message server %s:%d" % (options.remote_host, options.remote_port))
client_string = options.client
# Send MS_LOGIN_2 packet
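    # (registers this client under the chosen client name with the message server)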
p = SAPMS(flag=0x00, iflag=0x08, toname=client_string, fromname=client_string)
print("[*] Sending login packet")
response = conn.sr(p)[SAPMS]
print("[*] Login performed, server string: %s" % response.fromname)
print("[*] Listening to server messages")
try:
        while True:
            # receive and dissect the next message coming from the server
            response = conn.recv()[SAPMS]
            print("[*] Message received!")
response.show()
except SocketError:
print("[*] Connection error")
except KeyboardInterrupt:
print("[*] Cancelled by the user")
if __name__ == "__main__":
main()
|
hhatto/gruffy
|
refs/heads/master
|
test/test_stacked_bar.py
|
1
|
import os
from unittest import TestCase, main
from gruffy import StackedBar
TARGET_FILE = 'test.png'
class TestStackedBar(TestCase):
def tearDown(self):
os.remove(TARGET_FILE)
def test_writable(self):
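        # render a two-series stacked bar chart and make sure it can be written out as a PNG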
g = StackedBar()
g.data("test1", [1, 2, 3])
g.data("test2", [3, 2, 1])
g.write(TARGET_FILE)
if __name__ == '__main__':
main()
|