| repo_name | ref | path | copies | content |
|---|---|---|---|---|
djangorussia/django-1.3-branch
|
refs/heads/master
|
django/conf/locale/__init__.py
|
157
|
LANG_INFO = {
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': u'\u0627\u0644\u0639\u0631\u0628\u064a\u0651\u0629',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': u'az\u0259rbaycan dili',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': u'\u09ac\u09be\u0982\u09b2\u09be',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': u'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': u'catal\xe0',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': u'\u010desky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': u'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': u'Dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': u'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': u'\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': u'English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': u'British English',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': u'espa\xf1ol',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': u'espa\xf1ol de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': u'espa\xf1ol de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': u'espa\xf1ol de Nicaragua',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': u'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': u'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': u'\u0641\u0627\u0631\u0633\u06cc',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': u'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': u'Fran\xe7ais',
},
'fy-nl': {
'bidi': False,
'code': 'fy-nl',
'name': 'Frisian',
'name_local': u'Frisian',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': u'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': u'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': u'\u05e2\u05d1\u05e8\u05d9\u05ea',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': u'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': u'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': u'Magyar',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': u'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': u'\xcdslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': u'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': u'\u65e5\u672c\u8a9e',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': u'\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': u'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': u'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': u'\ud55c\uad6d\uc5b4',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': u'Lithuanian',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': u'latvie\u0161u',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': u'\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': u'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': u'Mongolian',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': u'Norsk (bokm\xe5l)',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': u'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': u'Norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': u'Norsk',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': u'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': u'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': u'Portugu\xeas',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': u'Portugu\xeas Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': u'Rom\xe2n\u0103',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': u'\u0420\u0443\u0441\u0441\u043a\u0438\u0439',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': u'slovensk\xfd',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': u'Sloven\u0161\u010dina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': u'Albanian',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': u'\u0441\u0440\u043f\u0441\u043a\u0438',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': u'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': u'Svenska',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': u'\u0ba4\u0bae\u0bbf\u0bb4\u0bcd',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': u'\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': u'Thai',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': u'T\xfcrk\xe7e',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430',
},
'ur': {
'bidi': False,
'code': 'ur',
'name': 'Urdu',
'name_local': u'\u0627\u0631\u062f\u0648',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': u'Vietnamese',
},
'zh-cn': {
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': u'\u7b80\u4f53\u4e2d\u6587',
},
'zh-tw': {
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': u'\u7e41\u9ad4\u4e2d\u6587',
}
}
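# Example lookups against the mapping above (illustrative):
#     LANG_INFO['pt-br']['name']   # 'Brazilian Portuguese'
#     LANG_INFO['he']['bidi']      # True (rendered right-to-left)
#     LANG_INFO.get('xx')          # None for unknown language codes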
|
Jackysonglanlan/devops
|
refs/heads/master
|
IDEs/sublime/shared-pkgs/Packages/SideBarEnhancements/send2trash/plat_other.py
|
4
|
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# This is a reimplementation of plat_other.py with reference to the
# freedesktop.org trash specification:
# [1] http://www.freedesktop.org/wiki/Specifications/trash-spec
# [2] http://www.ramendik.ru/docs/trashspec.html
# See also:
# [3] http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
#
# For external volumes this implementation will raise an exception if it can't
# find or create the user's trash directory.
#import sys
import os
import os.path as op
from datetime import datetime
import stat
import shutil
from urllib.parse import quote
FILES_DIR = 'files'
INFO_DIR = 'info'
INFO_SUFFIX = '.trashinfo'
# Default of ~/.local/share [3]
XDG_DATA_HOME = op.expanduser(os.environ.get('XDG_DATA_HOME', '~/.local/share'))
HOMETRASH = op.join(XDG_DATA_HOME, 'Trash')
uid = os.getuid()
TOPDIR_TRASH = '.Trash'
TOPDIR_FALLBACK = '.Trash-' + str(uid)
def is_parent(parent, path):
path = op.realpath(path) # In case it's a symlink
parent = op.realpath(parent)
return path.startswith(parent)
def format_date(date):
return date.strftime("%Y-%m-%dT%H:%M:%S")
def info_for(src, topdir):
    # ...it MUST not include a ".." directory, and for files not "under" that
# directory, absolute pathnames must be used. [2]
if topdir is None or not is_parent(topdir, src):
src = op.abspath(src)
else:
src = op.relpath(src, topdir)
info = "[Trash Info]\n"
info += "Path=" + quote(src) + "\n"
info += "DeletionDate=" + format_date(datetime.now()) + "\n"
return info
def check_create(dir):
# use 0700 for paths [3]
if not op.exists(dir):
os.makedirs(dir, 0o700)
def trash_move(src, dst, topdir=None):
filename = op.basename(src)
filespath = op.join(dst, FILES_DIR)
infopath = op.join(dst, INFO_DIR)
base_name, ext = op.splitext(filename)
counter = 0
destname = filename
while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
counter += 1
destname = '%s %s%s' % (base_name, counter, ext)
check_create(filespath)
check_create(infopath)
try:
os.rename(src, op.join(filespath, destname))
    except OSError:
        # rename() fails across filesystems; fall back to a copying move
        shutil.move(src, op.join(filespath, destname))
f = open(op.join(infopath, destname + INFO_SUFFIX), 'w')
f.write(info_for(src, topdir))
f.close()
def find_mount_point(path):
# Even if something's wrong, "/" is a mount point, so the loop will exit.
# Use realpath in case it's a symlink
path = op.realpath(path) # Required to avoid infinite loop
while not op.ismount(path):
path = op.split(path)[0]
return path
def find_ext_volume_global_trash(volume_root):
# from [2] Trash directories (1) check for a .Trash dir with the right
# permissions set.
trash_dir = op.join(volume_root, TOPDIR_TRASH)
if not op.exists(trash_dir):
return None
mode = os.lstat(trash_dir).st_mode
# vol/.Trash must be a directory, cannot be a symlink, and must have the
# sticky bit set.
if not op.isdir(trash_dir) or op.islink(trash_dir) or not (mode & stat.S_ISVTX):
return None
trash_dir = op.join(trash_dir, str(uid))
try:
check_create(trash_dir)
except OSError:
return None
return trash_dir
def find_ext_volume_fallback_trash(volume_root):
# from [2] Trash directories (1) create a .Trash-$uid dir.
trash_dir = op.join(volume_root, TOPDIR_FALLBACK)
    # Try to make the directory; if we can't, let the OSError exception
    # propagate out of send2trash.
check_create(trash_dir)
return trash_dir
def find_ext_volume_trash(volume_root):
trash_dir = find_ext_volume_global_trash(volume_root)
if trash_dir is None:
trash_dir = find_ext_volume_fallback_trash(volume_root)
return trash_dir
# Pull this out so it's easy to stub (to avoid stubbing lstat itself)
def get_dev(path):
return os.lstat(path).st_dev
def send2trash(path):
#if not isinstance(path, str):
# path = str(path, sys.getfilesystemencoding())
#if not op.exists(path):
# raise OSError("File not found: %s" % path)
# ...should check whether the user has the necessary permissions to delete
# it, before starting the trashing operation itself. [2]
#if not os.access(path, os.W_OK):
# raise OSError("Permission denied: %s" % path)
# if the file to be trashed is on the same device as HOMETRASH we
# want to move it there.
path_dev = get_dev(path)
# If XDG_DATA_HOME or HOMETRASH do not yet exist we need to stat the
# home directory, and these paths will be created further on if needed.
trash_dev = get_dev(op.expanduser('~'))
if path_dev == trash_dev or ( os.path.exists(XDG_DATA_HOME) and os.path.exists(HOMETRASH) ):
topdir = XDG_DATA_HOME
dest_trash = HOMETRASH
else:
topdir = find_mount_point(path)
trash_dev = get_dev(topdir)
if trash_dev != path_dev:
raise OSError("Couldn't find mount point for %s" % path)
dest_trash = find_ext_volume_trash(topdir)
trash_move(path, dest_trash, topdir)
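# Usage sketch (illustrative; the path is hypothetical):
#     send2trash('/home/user/old_report.txt')
# A file on the home filesystem ends up under ~/.local/share/Trash/files with
# a matching .trashinfo record under ~/.local/share/Trash/info; files on other
# volumes go to <volume>/.Trash/$uid or <volume>/.Trash-$uid instead.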
|
stieizc/scrive
|
refs/heads/master
|
cmd/init.py
|
2
|
#!/usr/bin/env python3
from libscrive import helpers, options
from libscrive.project import Project
import sys, os
optspec = """
scrive init [-i importpath] [path]
---
i,import=: import file or directory
,origlang=: the language of original files
"""
optionParser = options.Options(optspec)
optionParser.usage()
opts, remains = optionParser.parse(sys.argv[1:])
paths = remains if remains else [os.getcwd()]
importpath = opts['i']
origlang = opts['origlang']
if importpath:
if not origlang:
raise Exception("Must give the language of imported files")
if not os.path.isabs(importpath):
importpath = os.path.join(os.getcwd(), importpath)
for p in paths:
if not os.path.isabs(p):
p = os.path.join(os.getcwd(), p)
if p == importpath:
raise Exception("Project path cannot be the same as import path")
proj = Project(p)
proj.create()
    if importpath:
proj.import_orig(importpath, origlang)
proj.init()
files = proj.get_status()
proj.add_to_cache(files[1]+files[2])
proj.commit("Initialized project {}".format(os.path.basename(p)))
|
salamb/girder
|
refs/heads/master
|
girder/models/api_key.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
from .model_base import AccessControlledModel, ValidationException
from girder.constants import AccessType, SettingKey
from girder.utility import genToken
class ApiKey(AccessControlledModel):
"""
This model represents API keys corresponding to users.
"""
def initialize(self):
self.name = 'api_key'
self.ensureIndices(('userId', 'key'))
self.exposeFields(level=AccessType.READ, fields={
'_id', 'active', 'created', 'key', 'lastUse', 'name', 'scope',
'tokenDuration', 'userId'
})
def validate(self, doc):
if doc['tokenDuration']:
doc['tokenDuration'] = float(doc['tokenDuration'])
else:
doc['tokenDuration'] = None
doc['name'] = doc['name'].strip()
doc['active'] = bool(doc.get('active', True))
if doc['scope'] is not None:
if not isinstance(doc['scope'], (list, tuple)):
raise ValidationException('Scope must be a list, or None.')
if not doc['scope']:
raise ValidationException(
'Custom scope list must not be empty.')
# Deactivating an already existing token
if '_id' in doc and not doc['active']:
self.model('token').clearForApiKey(doc)
return doc
def remove(self, doc):
# Clear tokens corresponding to this API key.
self.model('token').clearForApiKey(doc)
super(ApiKey, self).remove(doc)
def list(self, user, limit=0, offset=0, sort=None):
"""
List API keys for a given user.
:param user: The user whose keys to list.
:type user: dict
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
:rtype: iterable of API keys for the user.
"""
return self.find({
'userId': user['_id']
}, limit=limit, offset=offset, sort=sort)
def createApiKey(self, user, name, scope=None, days=None, active=True):
"""
Create a new API key for a user.
:param user: The user who owns the API key.
:type user: dict
:param name: A human readable name for the API key
:param days: The lifespan of the session in days. If not passed, uses
the database setting for cookie lifetime. Note that this is a
maximum duration; clients may request tokens with a shorter lifetime
than this value.
:type days: float or int
:param scope: Scope or list of scopes this API key grants. By default,
will grant tokens provided full access on behalf of the user.
:type scope: str, list of str, or set of str
:param active: Whether this key is active.
:returns: The API key document that was created.
"""
apiKey = {
'created': datetime.datetime.utcnow(),
'lastUse': None,
'tokenDuration': days,
'name': name,
'scope': scope,
'userId': user['_id'],
'key': genToken(40),
'active': active
}
return self.setUserAccess(
apiKey, user, level=AccessType.ADMIN, save=True)
def createToken(self, key, days=None):
"""
Create a token using an API key.
:param key: The API key (the key itself, not the full document).
:type key: str
:param days: You may request a token duration up to the token duration
of the API key itself, or pass None to use the API key duration.
:type days: float or None
"""
apiKey = self.findOne({
'key': key
})
if apiKey is None or not apiKey['active']:
raise ValidationException('Invalid API key.')
cap = apiKey['tokenDuration'] or self.model('setting').get(
SettingKey.COOKIE_LIFETIME)
days = min(float(days or cap), cap)
user = self.model('user').load(apiKey['userId'], force=True)
# Mark last used stamp
apiKey['lastUse'] = datetime.datetime.utcnow()
apiKey = self.save(apiKey)
token = self.model('token').createToken(
user=user, days=days, scope=apiKey['scope'], apiKey=apiKey)
return (user, token)
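# Duration capping sketch (illustrative numbers): with tokenDuration=10 on the
# key, createToken(key, days=5) issues a 5-day token (min(5, 10)); with
# days=None it falls back to the 10-day cap, and a key whose tokenDuration is
# unset caps at the COOKIE_LIFETIME setting instead.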
|
sfinucane/deviceutils
|
refs/heads/master
|
deviceutils/ieee488/arbitraryblock.py
|
1
|
#!/usr/bin/env python
"""
"""
from deviceutils.error import ProtocolError
class InvalidBlockFormatError(ProtocolError):
pass
class NeitherBlockNorDataError(Exception):
def __init__(self, message=''):
m = ''.join([message, '(You need to provide either a binary block, or binary data!)'])
Exception.__init__(self, m)
class ArbitraryBlock(object):
"""IEEE 488.2 general ``arbitrary`` block of binary data.
**Immutable**
```
IEEE-488.2 defines two different binary standards for file transfer:
``Definite Length Arbitrary Block`` and ``Indefinite Length Arbitrary
    Block``. The first one starts with a # sign followed by a single digit that
    indicates the number of digits in the byte count, and then the actual
    number of bytes that precedes the real binary data. The format could be
    ``#x1234...`` and is the most commonly used binary format in GPIB
    instruments. The latter starts with ``#0`` and ends with a new line
    followed by EOI. It usually has the format of ``#0...LF+EOI``. Some
    instruments may simply return the raw binary data, and users are
    responsible for finding a proper way to define the buffer size for
    accepting the data presented by the GPIB output buffer.
```
:source: https://docs.google.com/document/preview?hgd=1&id=11AsY2WixTCI0_1at-wT3YP9JeLwjFl7uFuNGxlHI6ec
**NOTE:**
The above source needs to be fully determined, or changed to the IEEE 488.2
standard itself!
"""
__block = None
__data = None
__block_id = None
def __init__(self, block=None, block_id=None, data=None):
"""Initialize a block instance.
When creating an instance: (a) if the binary block is provided to
the constructor, then the data is determined from the block; (b) if
the data (bytes) is provided to the constructor, then the binary
block is generated (header information, etc.).
:param: block
:type str:
- The binary block, as read from the bus (raw, bytes).
:param: block_id
- An optional block identifier, of any type.
:param: data
- The binary data, without the block header.
"""
if block:
self.__block = block
self.__data_slice = self._get_data_slice(self.__block)
elif data:
self.__block = self._create_block(data)
self.__data_slice = self._get_data_slice(self.__block)
else:
raise NeitherBlockNorDataError()
self.__block_id = block_id
def __str__(self):
return "<IEEE488_BINBLOCK>{0}</IEEE488_BINBLOCK>".format(repr(self.__block))
def __getitem__(self, index):
return self.__block[index]
def _get_data_slice(self, block):
"""Slice the meaningful binary bytes (data) from the block.
**Abstract**
:returns: Data slice, to be used on the binary block.
:type slice:
"""
raise NotImplemented("Attempted to use abstract method: '_get_data_slice'!")
def _create_block(self, data):
"""Construct a binary block with the given data as the payload.
**Abstract**
:returns: A raw binary block which contains the given payload data.
:type bytes:
"""
raise NotImplemented("Attempted to use abstract method: '_create_block'!")
@property
def raw(self):
"""The raw binary block.
"""
return self.__block
@property
def identifier(self):
"""An arbitrary means of identifying this block.
"""
return self.__block_id
@property
def data(self):
"""The payload binary data contained within the binary block.
"""
return self.raw[self.__data_slice]
class DefiniteLengthBlock(ArbitraryBlock):
"""IEEE 488.2 Definite Length Arbitrary Binary Data Block
    This sort of block starts with a # sign followed by a single digit that
    indicates the number of digits in the byte count, and then the actual
number of bytes that precedes the real binary data. The format could be
``#x1234...`` and is the most commonly used binary format in GPIB
instruments.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# override
def _get_data_slice(self, block):
"""Slice the meaningful binary bytes (data) from the block.
:returns: Data slice, to be used on the binary block.
:type slice:
"""
# First character should be "#".
pound = block[0:1]
if pound != b'#':
raise InvalidBlockFormatError(self.identifier)
# Second character is number of following digits for length value.
length_digits = block[1:2]
data_length = block[2:int(length_digits)+2]
# from the given data length, and known header length, we get indices:
data_begin = int(length_digits) + 2 # 2 for the '#' and digit count
data_end = data_begin + int(data_length)
# Slice the data from the block:
sdata = slice(data_begin, data_end)
return sdata
# override
def _create_block(self, data):
"""Construct a binary block with the given data as the payload.
:returns: A raw binary block which contains the given payload data.
:type bytes:
"""
# format is: b'#<length_digits><length><payload>'
length = len(data)
length_digits = len(str(length))
header_string = '#' + str(length_digits) + str(length)
return bytes(header_string.encode('latin1')) + data
class IndefiniteLengthBlock(ArbitraryBlock):
"""IEEE 488.2 Indefinite Length Arbitrary Binary Data Block
This sort of block starts with ``#0`` and ends with a new line
followed by EOI. It usually has the format of ``#0...LF+EOI``.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# override
def _get_data_slice(self, block):
"""Slice the meaningful binary bytes (data) from the block.
:returns: Data slice, to be used on the binary block.
:type slice:
"""
        raise NotImplementedError()
# override
def _create_block(self, data):
"""Construct a binary block with the given data as the payload.
:returns: A raw binary block which contains the given payload data.
:type bytes:
"""
        raise NotImplementedError()
def read_definite_length_block(raw_recv, block_id=None,
recv_termination=None, recv_chunk=None):
"""Read an IEEE 488.2 definite length block, using given raw receive function.
The signature of ``raw_recv`` (the raw receive function) should be:
- ``bytes = raw_recv(nbytes)``
Where ``nbytes`` is the number of bytes to read for that call.
:param: raw_recv
:type function:
:param: block_id
:type any:
:description: An arbitrary block identifier.
:param: recv_termination
:type string/bytes:
:param: recv_chunk
:type int:
:returns: The definite length binary block.
:type DefiniteLengthBlock:
"""
receive_chunk = recv_chunk
receive_termination = recv_termination
# we are expecting an IEEE 488.2 Arbitrary Binary Block
pound = raw_recv(1)
if pound != b'#':
raise ProtocolError(
"Expected ``IEEE 488.2 Binary Block``! " +
"Read: ``{0}``. ".format(pound) +
"Remaining message data left in buffer.")
ndigits = raw_recv(1)
block_length = None
    if ndigits == b'0':
        block_length = b''
    elif ndigits not in [b'1', b'2', b'3', b'4',
                         b'5', b'6', b'7', b'8', b'9']:
        raise ProtocolError(
            "Expected ``IEEE 488.2 Binary Block``! " +
            "Read: ``{0}{1}``. ".format(pound, ndigits) +
            "Remaining message data left in buffer.")
    else:
        # read the block length (ndigit-wide ascii integer)
        block_length = raw_recv(int(ndigits))
data = b''
if block_length:
bytes_remaining = int(block_length)
if not receive_chunk:
receive_chunk = bytes_remaining
while bytes_remaining > 0:
reach = min(bytes_remaining, receive_chunk)
received_data = raw_recv(reach)
bytes_remaining -= len(received_data)
data += received_data
if receive_termination:
# clear trailing term chars
raw_recv(len(receive_termination))
block = pound + ndigits + block_length + data
return DefiniteLengthBlock(block=block, block_id=block_id)
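# Usage sketch for read_definite_length_block (illustrative; the in-memory
# reader stands in for a real bus read function):
#     import io
#     recv = io.BytesIO(b'#15HELLO').read
#     block = read_definite_length_block(recv)
#     block.data   # -> b'HELLO'
#     block.raw    # -> b'#15HELLO'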
|
Kitware/tangelo
|
refs/heads/develop
|
tangelo/tangelo/pkgdata/plugin/girder/control.py
|
2
|
from girder.utility.server import configureServer
def setup(config, store):
config = {"server": {"api_root": "/plugin/girder/girder/api/v1",
"static_root": "/plugin/girder/girder/static"}}
girder_app, config = configureServer(curConfig=config)
return {"apps": [(girder_app, config, "girder")]}
|
rtucker-mozilla/inventory
|
refs/heads/master
|
mozdns/validation.py
|
2
|
from django.core.exceptions import ValidationError
import string
import ipaddr
def do_zone_validation(domain):
"""Preform validation on domain. This function calls the following
functions::
check_for_soa_partition
check_for_master_delegation
validate_zone_soa
.. note::
The type of the domain that is passed is determined
dynamically
:param domain: The domain/reverse_domain being validated.
:type domain: :class:`Domain` or :class:`ReverseDomain`
The following code is an example of how to call this function during
*domain* introspection.
>>> do_zone_validation(self, self.master_domain) # noqa
The following code is an example of how to call this function during
*reverse_domain* introspection.
>>> do_zone_validation(self, self.master_reverse_domain) # noqa
"""
check_for_master_delegation(domain, domain.master_domain)
validate_zone_soa(domain, domain.master_domain)
check_for_soa_partition(domain, domain.domain_set.all())
def check_for_master_delegation(domain, master_domain):
"""No subdomains can be created under a domain that is delegated.
This function checks whether a domain is violating that condition.
:param domain: The domain/reverse_domain being validated.
:type domain: :class:`Domain` or :class:`ReverseDomain`
:param master_domain: The master domain/reverse_domain of the
domain/reverse_domain being validated.
:type master_domain: :class:`Domain` or :class:`ReverseDomain`
The following code is an example of how to call this function during
*domain* introspection.
>>> check_for_master_delegation(self, self.master_domain) # noqa
The following code is an example of how to call this function during
*reverse_domain* introspection.
>>> check_for_master_delegation(self, self.master_reverse_domain) # noqa
"""
if not master_domain:
return
if not master_domain.delegated:
return
if not domain.pk: # We don't exist yet.
raise ValidationError("No subdomains can be created in the {0} "
"domain. It is delegated."
.format(master_domain.name))
def validate_zone_soa(domain, master_domain):
"""Make sure the SOA assigned to this domain is the correct SOA for
this domain. Also make sure that the SOA is not used in a different
zone.
:param domain: The domain/reverse_domain being validated.
:type domain: :class:`Domain` or :class:`ReverseDomain`
:param master_domain: The master domain/reverse_domain of the
domain/reverse_domain being validated.
:type master_domain: :class:`Domain` or :class:`ReverseDomain`
The following code is an example of how to call this function during
*domain* introspection.
>>> validate_zone_soa('forward', self, self.master_domain) # noqa
The following code is an example of how to call this function during
*reverse_domain* introspection.
>>> validate_zone_soa('reverse', self, self.master_reverse_domain) # noqa
"""
if not domain:
raise Exception("You called this function wrong")
if not domain.soa:
return
zone_domains = domain.soa.domain_set.all()
root_domain = domain.soa.root_domain
if not root_domain: # No one is using this domain.
return
if not zone_domains.exists():
return # No zone uses this soa.
if master_domain and master_domain.soa != domain.soa:
# Someone uses this soa, make sure the domain is part of that
# zone (i.e. has a parent in the zone or is the root domain of
# the zone).
if root_domain == domain or root_domain.master_domain == domain:
return
raise ValidationError("This SOA is used for a different zone.")
if domain.master_domain is None and domain != root_domain:
if root_domain.master_domain == domain:
return
# If we are at the root of the tree and we aren't the root domain,
# something is wrong.
raise ValidationError("This SOA is used for a different zone.")
def check_for_soa_partition(domain, child_domains):
"""This function determines if changing your soa causes sub domains
to become their own zones and if those zones share a common SOA (not
allowed).
:param domain: The domain/reverse_domain being validated.
:type domain: :class:`Domain` or :class:`ReverseDomain`
:param child_domains: A Queryset containing child objects of the
:class:`Domain`/:class:`ReverseDomain` object.
:type child_domains: :class:`Domain` or :class:`ReverseDomain`
:raises: ValidationError
The following code is an example of how to call this function during
*domain* introspection.
>>> check_for_soa_partition(self, self.domain_set.all()) # noqa
The following code is an example of how to call this function during
*reverse_domain* introspection.
>>> check_for_soa_partition(self, self.reversedomain_set.all()) # noqa
"""
for i_domain in child_domains:
if i_domain.soa == domain.soa:
continue # Valid child.
for j_domain in child_domains:
# Make sure the child domain does not share an SOA with one
            # of its siblings.
if i_domain == j_domain:
continue
if i_domain.soa == j_domain.soa and i_domain.soa is not None:
raise ValidationError(
"Changing the SOA for the {0} "
"domain would cause the child domains {1} and {2} to "
"become two zones that share the same SOA. Change "
"{3} or {4}'s SOA before changing this SOA".
format(domain.name, i_domain.name, j_domain.name,
i_domain.name, j_domain.name))
def find_root_domain(soa):
"""
    It is necessary to know which domain is at the top of a zone. This
function returns that domain.
:param soa: A zone's :class:`SOA` object.
:type soa: :class:`SOA`
The following code is an example of how to call this function using
a Domain as ``domain``.
>>> find_root_domain('forward', domain.soa) # noqa
The following code is an example of how to call this function using
a ReverseDomain as ``domain``.
>>> find_root_domain('reverse', reverse_domain.soa) # noqa
"""
if soa is None:
return None
domains = soa.domain_set.all()
if domains:
key = lambda domain: len(domain.name.split('.'))
return sorted(domains, key=key)[0] # Sort by number of labels
else:
return None
###################################################################
# Functions that validate labels and names #
###################################################################
"""
MozAddressValueError
This exception is thrown when an attempt is made to create/update a
    record with an invalid IP.
InvalidRecordNameError
This exception is thrown when an attempt is made to create/update a
    record with an invalid name.
RecordExistsError
This exception is thrown when an attempt is made to create a record
that already exists. All records that can support the
unique_together constraint do so. These models will raise an
    IntegrityError. Some models, ones that have to span foreign keys to
check for uniqueness, need to still raise ValidationError.
RecordExistsError will be raised in these cases.
An AddressRecord is an example of a model that raises this Exception.
"""
def validate_first_label(label, valid_chars=None):
"""This function is just :fun:`validate_label` except it is called on just
the first label. The first label *can* start with a '*' while a normal
label cannot."""
if label != '' and label[0] == '*':
if len(label) == 1:
return
else:
validate_label(label[1:])
else:
validate_label(label)
def validate_label(label, valid_chars=None):
"""Validate a label.
:param label: The label to be tested.
:type label: str
"Allowable characters in a label for a host name are only ASCII
letters, digits, and the '-' character."
"Labels may not be all numbers, but may have a leading digit"
"Labels must end and begin only with a letter or digit"
-- `RFC <http://tools.ietf.org/html/rfc1912>`__
"[T]he following characters are recommended for use in a host
name: "A-Z", "a-z", "0-9", dash and underscore"
-- `RFC <http://tools.ietf.org/html/rfc1033>`__
"""
_name_type_check(label)
if not valid_chars:
# "Allowable characters in a label for a host name are only
# ASCII letters, digits, and the `-' character." "[T]he
# following characters are recommended for use in a host name:
# "A-Z", "a-z", "0-9", dash and underscore"
valid_chars = string.ascii_letters + "0123456789" + "-" + "_"
# Labels may not be all numbers, but may have a leading digit TODO
# Labels must end and begin only with a letter or digit TODO
for char in label:
if char == '.':
raise ValidationError("Invalid name {0}. Please do not span "
"multiple domains when creating records."
.format(label))
if valid_chars.find(char) < 0:
raise ValidationError("Invalid name {0}. Character '{1}' is "
"invalid.".format(label, char))
end_chars = string.ascii_letters + "0123456789"
if (
label and
not label.endswith(tuple(end_chars)) or
# SRV records can start with '_'
not label.startswith(tuple(end_chars + '_'))
):
raise ValidationError(
"Labels must end and begin only with a letter or digit"
)
return
def validate_domain_name(name):
"""Domain names are different. They are allowed to have '_' in them.
:param name: The domain name to be tested.
:type name: str
"""
_name_type_check(name)
for label in name.split('.'):
if not label:
raise ValidationError("Error: Invalid name {0}. Empty label."
.format(name))
valid_chars = string.ascii_letters + "0123456789" + "-_"
validate_label(label, valid_chars=valid_chars)
def validate_name(fqdn):
"""Run test on a name to make sure that the new name is constructed
with valid syntax.
:param fqdn: The fqdn to be tested.
:type fqdn: str
"DNS domain names consist of "labels" separated by single dots."
-- `RFC <http://tools.ietf.org/html/rfc1912>`__
.. note::
DNS name hostname grammar::
<domain> ::= <subdomain> | " "
<subdomain> ::= <label> | <subdomain> "." <label>
<label> ::= <letter> [ [ <ldh-str> ] <let-dig> ]
<ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
<let-dig-hyp> ::= <let-dig> | "-"
<let-dig> ::= <letter> | <digit>
<letter> ::= any one of the 52 alphabetic characters A
through Z in upper case and a through z in lower case
<digit> ::= any one of the ten digits 0 through 9
--`RFC 1034 <http://www.ietf.org/rfc/rfc1034.txt>`__
"""
# TODO, make sure the grammar is followed.
_name_type_check(fqdn)
# Star records are allowed. Remove them during validation.
if fqdn[0] == '*':
fqdn = fqdn[1:]
fqdn = fqdn.strip('.')
for label in fqdn.split('.'):
if not label:
raise ValidationError("Invalid name {0}. Empty label."
.format(fqdn))
validate_label(label)
def validate_reverse_name(reverse_name, ip_type):
"""Validate a reverse name to make sure that the name is constructed
with valid syntax.
:param reverse_name: The reverse name to be tested.
:type reverse_name: str
"""
_name_type_check(reverse_name)
valid_ipv6 = "0123456789AaBbCcDdEeFf"
if ip_type == '4' and len(reverse_name.split('.')) > 4:
raise ValidationError("IPv4 reverse domains should have a "
"maximum of 4 octets")
if ip_type == '6' and len(reverse_name.split('.')) > 32:
raise ValidationError("IPv6 reverse domains should have a "
"maximum of 32 nibbles")
for nibble in reverse_name.split('.'):
if ip_type == '6':
if valid_ipv6.find(nibble) < 0:
raise ValidationError(
"Error: Invalid Ipv6 name {0}. Character '{1}' is "
"invalid." .format(reverse_name, nibble)
)
else:
if not(int(nibble) <= 255 and int(nibble) >= 0):
raise ValidationError(
"Error: Invalid Ipv4 name {0}. Character '{1}' is "
"invalid." .format(reverse_name, nibble)
)
def validate_ttl(ttl):
"""
"It is hereby specified that a TTL value is an unsigned number,
with a minimum value of 0, and a maximum value of 2147483647."
-- `RFC <http://www.ietf.org/rfc/rfc2181.txt>`__
:param ttl: The TTL to be validated.
:type ttl: int
:raises: ValidationError
"""
if ttl is None:
return
if ttl < 0 or ttl > 2147483647: # See RFC 2181
raise ValidationError("TTLs must be within the 0 to "
"2147483647 range.")
# Works for labels too.
def _name_type_check(name):
if type(name) not in (str, unicode):
raise ValidationError("Error: A name must be of type str.")
###################################################################
# Functions that Validate SRV fields #
###################################################################
def validate_srv_port(port):
"""Port must be within the 0 to 65535 range."""
if port > 65535 or port < 0:
raise ValidationError("SRV port must be within 0 and 65535. "
"See RFC 1035")
# TODO, is this a duplicate of MX ttl?
def validate_srv_priority(priority):
"""Priority must be within the 0 to 65535 range."""
if priority > 65535 or priority < 0:
raise ValidationError("SRV priority must be within 0 and 65535. "
"See RFC 1035")
def validate_srv_weight(weight):
"""Weight must be within the 0 to 65535 range."""
if weight > 65535 or weight < 0:
raise ValidationError("SRV weight must be within 0 and 65535. "
"See RFC 1035")
def validate_srv_label(srv_label):
"""This function is the same as :func:`validate_label` expect
:class:`SRV` records can have a ``_`` preceding its label.
"""
if srv_label == "" or srv_label == "*":
return
if srv_label and srv_label[0] != '_':
raise ValidationError("Error: SRV label must start with '_'")
validate_label(srv_label[1:]) # Get rid of '_'
def validate_srv_name(srv_name):
"""This function is the same as :func:`validate_name` expect
:class:`SRV` records can have a ``_`` preceding is name.
"""
if srv_name and srv_name[0] != '_' and srv_name[0] != '*':
raise ValidationError("Error: SRV label must start with '_'")
if not srv_name:
raise ValidationError("Error: SRV label must not be None")
if srv_name[0] == '*':
mod_srv_name = srv_name # Get rid of '_'
else:
mod_srv_name = srv_name[1:] # Get rid of '_'
validate_name(mod_srv_name)
def validate_srv_target(srv_target):
if srv_target == "":
return
else:
validate_name(srv_target)
###################################################################
# Functions that Validate MX fields #
###################################################################
def validate_mx_priority(priority):
"""
Priority must be within the 0 to 65535 range.
"""
# This is pretty much the same as validate_srv_priority. It just has
    # a different error message.
if priority > 65535 or priority < 0:
raise ValidationError("MX priority must be within the 0 to 65535 "
"range. See RFC 1035")
###################################################################
# Functions Validate ip_type fields #
###################################################################
def validate_ip_type(ip_type):
"""
An ``ip_type`` field must be either '4' or '6'.
"""
if ip_type not in ('4', '6'):
raise ValidationError("Error: Plase provide a valid ip type.")
###################################################################
# Functions Validate RFC1918 requirements #
###################################################################
def is_rfc1918(ip_str):
"""Returns True if the IP is private. If the IP isn't a valid IPv4 address
this function will raise a :class:`ValidationError`.
"""
private_networks = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
try:
ip_str_network = ipaddr.IPv4Network(ip_str)
except ipaddr.AddressValueError:
raise ValidationError("{0} is not a valid IPv4 address".format(ip_str))
for network in private_networks:
if ipaddr.IPv4Network(network).overlaps(ip_str_network):
return True
return False
def is_rfc4193(ip_str):
"""Returns True if the IP is private. If the IP isn't a valid IPv6 address
this function will raise a :class:`ValidationError`.
"""
private_networks = ["fc00::/7"]
try:
ip_str_network = ipaddr.IPv6Network(ip_str)
except ipaddr.AddressValueError:
raise ValidationError("{0} is not a valid IPv6 address".format(ip_str))
for network in private_networks:
if ipaddr.IPv6Network(network).overlaps(ip_str_network):
return True
return False
def validate_views(views, ip_str, ip_type):
"""If the 'private' :class:`View` object is in ``views`` and ``ip_str`` is
in one of the RFC 1918 networks, raise a :class:`ValidationError`.
"""
if views.filter(name="public").exists():
if ip_type == '4' and is_rfc1918(ip_str):
            raise ValidationError(
                "{0} is a private IP address. You cannot put a record "
                "that contains private data into a public view."
                .format(ip_str))
if ip_type == '6' and is_rfc4193(ip_str):
            raise ValidationError(
                "{0} is a private IP address. You cannot put a record "
                "that contains private data into a public view."
                .format(ip_str))
def validate_view(view, ip_str, ip_type):
"""If view is the private view and ``ip_str`` is
in one of the RFC 1918 networks, raise a :class:`ValidationError`.
"""
if ip_type == '4' and is_rfc1918(ip_str):
        raise ValidationError(
            "{0} is a private IP address. You cannot put a record "
            "that contains private data into a public view."
            .format(ip_str))
if ip_type == '6' and is_rfc4193(ip_str):
        raise ValidationError(
            "{0} is a private IP address. You cannot put a record "
            "that contains private data into a public view."
            .format(ip_str))
def validate_txt_data(data):
if data.find('"') > -1:
raise ValidationError(
"Don't put quotes in TXT data."
)
def validate_soa_serial(serial):
if len(str(serial)) != 10:
raise ValidationError("Serial values must be 10 digits")
|
pdellaert/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/awall.py
|
50
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ted Trask <ttrask01@yahoo.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: awall
short_description: Manage awall policies
version_added: "2.4"
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
description:
- This module allows for enable/disable/activate of I(awall) policies.
- Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
and activates the configuration on the system.
options:
name:
description:
- One or more policy names.
type: list
state:
description:
- Whether the policies should be enabled or disabled.
type: str
choices: [ disabled, enabled ]
default: enabled
activate:
description:
- Activate the new firewall rules.
- Can be run with other steps or on its own.
type: bool
default: no
'''
EXAMPLES = r'''
- name: Enable "foo" and "bar" policy
awall:
    name: [ foo, bar ]
state: enabled
- name: Disable "foo" and "bar" policy and activate new rules
awall:
name:
- foo
- bar
state: disabled
activate: no
- name: Activate currently enabled firewall rules
awall:
activate: yes
'''
RETURN = ''' # '''
import re
from ansible.module_utils.basic import AnsibleModule
def activate(module):
cmd = "%s activate --force" % (AWALL_PATH)
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
return True
else:
module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
def is_policy_enabled(module, name):
cmd = "%s list" % (AWALL_PATH)
rc, stdout, stderr = module.run_command(cmd)
if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
return True
return False
def enable_policy(module, names, act):
policies = []
for name in names:
if not is_policy_enabled(module, name):
policies.append(name)
if not policies:
module.exit_json(changed=False, msg="policy(ies) already enabled")
names = " ".join(policies)
if module.check_mode:
cmd = "%s list" % (AWALL_PATH)
else:
cmd = "%s enable %s" % (AWALL_PATH, names)
rc, stdout, stderr = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
if act and not module.check_mode:
activate(module)
module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
def disable_policy(module, names, act):
policies = []
for name in names:
if is_policy_enabled(module, name):
policies.append(name)
if not policies:
module.exit_json(changed=False, msg="policy(ies) already disabled")
names = " ".join(policies)
if module.check_mode:
cmd = "%s list" % (AWALL_PATH)
else:
cmd = "%s disable %s" % (AWALL_PATH, names)
rc, stdout, stderr = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
if act and not module.check_mode:
activate(module)
module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
name=dict(type='list'),
activate=dict(type='bool', default=False),
),
required_one_of=[['name', 'activate']],
supports_check_mode=True,
)
global AWALL_PATH
AWALL_PATH = module.get_bin_path('awall', required=True)
p = module.params
if p['name']:
if p['state'] == 'enabled':
enable_policy(module, p['name'], p['activate'])
elif p['state'] == 'disabled':
disable_policy(module, p['name'], p['activate'])
if p['activate']:
if not module.check_mode:
activate(module)
module.exit_json(changed=True, msg="activated awall rules")
module.fail_json(msg="no action defined")
if __name__ == '__main__':
main()
|
jeffzhengye/pylearn
|
refs/heads/master
|
IPC.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 15:53:02 2012
@author: zheng
"""
import sys
import socket
a =''
print(a + "a+")
class IPC(object):
def __init__(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(("localhost", 32000))
self.fid = self.s.makefile() # file wrapper to read lines
self.listenLoop() # wait listening for updates from server
def listenLoop(self):
fid = self.fid
print "connected"
while True:
while True:
line = fid.readline()
if line[0]=='.':
break
fid.write('.\n')
fid.flush()
if __name__ == '__main__':
st = IPC()
|
Don42/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/drbonanza.py
|
109
|
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class DRBonanzaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/(?:[^/]+/)+(?:[^/])+?(?:assetId=(?P<id>\d+))?(?:[#&]|$)'
_TESTS = [{
'url': 'http://www.dr.dk/bonanza/serie/portraetter/Talkshowet.htm?assetId=65517',
'info_dict': {
'id': '65517',
'ext': 'mp4',
'title': 'Talkshowet - Leonard Cohen',
'description': 'md5:8f34194fb30cd8c8a30ad8b27b70c0ca',
'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
'timestamp': 1295537932,
'upload_date': '20110120',
'duration': 3664,
},
'params': {
'skip_download': True, # requires rtmp
},
}, {
'url': 'http://www.dr.dk/bonanza/radio/serie/sport/fodbold.htm?assetId=59410',
'md5': '6dfe039417e76795fb783c52da3de11d',
'info_dict': {
'id': '59410',
'ext': 'mp3',
'title': 'EM fodbold 1992 Danmark - Tyskland finale Transmission',
'description': 'md5:501e5a195749480552e214fbbed16c4e',
'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
'timestamp': 1223274900,
'upload_date': '20081006',
'duration': 7369,
},
}]
def _real_extract(self, url):
url_id = self._match_id(url)
webpage = self._download_webpage(url, url_id)
if url_id:
info = json.loads(self._html_search_regex(r'({.*?%s.*})' % url_id, webpage, 'json'))
else:
# Just fetch the first video on that page
info = json.loads(self._html_search_regex(r'bonanzaFunctions.newPlaylist\(({.*})\)', webpage, 'json'))
asset_id = str(info['AssetId'])
title = info['Title'].rstrip(' \'\"-,.:;!?')
duration = int_or_none(info.get('Duration'), scale=1000)
# First published online. "FirstPublished" contains the date for original airing.
timestamp = parse_iso8601(
re.sub(r'\.\d+$', '', info['Created']))
def parse_filename_info(url):
match = re.search(r'/\d+_(?P<width>\d+)x(?P<height>\d+)x(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
if match:
return {
'width': int(match.group('width')),
'height': int(match.group('height')),
'vbr': int(match.group('bitrate')),
'ext': match.group('ext')
}
match = re.search(r'/\d+_(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
if match:
return {
'vbr': int(match.group('bitrate')),
                'ext': match.group('ext')
}
return {}
video_types = ['VideoHigh', 'VideoMid', 'VideoLow']
preferencemap = {
'VideoHigh': -1,
'VideoMid': -2,
'VideoLow': -3,
'Audio': -4,
}
formats = []
for file in info['Files']:
if info['Type'] == "Video":
if file['Type'] in video_types:
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
'format_id': file['Type'].replace('Video', ''),
'preference': preferencemap.get(file['Type'], -10),
})
if format['url'].startswith('rtmp'):
rtmp_url = format['url']
format['rtmp_live'] = True # --resume does not work
if '/bonanza/' in rtmp_url:
format['play_path'] = rtmp_url.split('/bonanza/')[1]
formats.append(format)
elif file['Type'] == "Thumb":
thumbnail = file['Location']
elif info['Type'] == "Audio":
if file['Type'] == "Audio":
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
'format_id': file['Type'],
'vcodec': 'none',
})
formats.append(format)
elif file['Type'] == "Thumb":
thumbnail = file['Location']
description = '%s\n%s\n%s\n' % (
info['Description'], info['Actors'], info['Colophon'])
self._sort_formats(formats)
display_id = re.sub(r'[^\w\d-]', '', re.sub(r' ', '-', title.lower())) + '-' + asset_id
display_id = re.sub(r'-+', '-', display_id)
return {
'id': asset_id,
'display_id': display_id,
'title': title,
'formats': formats,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
}
|
jazkarta/edx-platform-for-isc
|
refs/heads/backport-auto-certification
|
common/djangoapps/student/tests/test_create_account.py
|
1
|
"Tests for account creation"
import ddt
import unittest
from django.contrib.auth import get_user
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.utils.importlib import import_module
from django.test import TestCase, TransactionTestCase
import mock
from openedx.core.djangoapps.user_api.models import UserPreference
from lang_pref import LANGUAGE_KEY
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
import student
TEST_CS_URL = 'https://comments.service.test:123/'
@ddt.ddt
class TestCreateAccount(TestCase):
"Tests for account creation"
def setUp(self):
self.username = "test_user"
self.url = reverse("create_account")
self.request_factory = RequestFactory()
self.params = {
"username": self.username,
"email": "test@example.org",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
@ddt.data("en", "eo")
def test_default_lang_pref_saved(self, lang):
with mock.patch("django.conf.settings.LANGUAGE_CODE", lang):
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
self.assertEqual(UserPreference.get_preference(user, LANGUAGE_KEY), lang)
@ddt.data("en", "eo")
def test_header_lang_pref_saved(self, lang):
response = self.client.post(self.url, self.params, HTTP_ACCEPT_LANGUAGE=lang)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
self.assertEqual(UserPreference.get_preference(user, LANGUAGE_KEY), lang)
def base_extauth_bypass_sending_activation_email(self, bypass_activation_email_for_extauth_setting):
"""
Tests user creation without sending activation email when
doing external auth
"""
request = self.request_factory.post(self.url, self.params)
# now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
external_email='withmap@stanford.edu',
internal_password=self.params['password'],
external_domain='shib:https://idp.stanford.edu/')
request.session['ExternalAuthMap'] = extauth
request.user = AnonymousUser()
mako_middleware_process_request(request)
with mock.patch('django.contrib.auth.models.User.email_user') as mock_send_mail:
student.views.create_account(request)
# check that send_mail is called
if bypass_activation_email_for_extauth_setting:
self.assertFalse(mock_send_mail.called)
else:
self.assertTrue(mock_send_mail.called)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'AUTOMATIC_AUTH_FOR_TESTING': False})
def test_extauth_bypass_sending_activation_email_with_bypass(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=True and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(True)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': False, 'AUTOMATIC_AUTH_FOR_TESTING': False})
def test_extauth_bypass_sending_activation_email_without_bypass(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=False and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(False)
def test_not_logged_in_after_create(self):
"""
Test user not automatically logged in after user creation
"""
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
user = get_user(self.client)
self.assertTrue(user.is_anonymous())
@mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@mock.patch("lms.lib.comment_client.User.base_url", TEST_CS_URL)
@mock.patch("lms.lib.comment_client.utils.requests.request", return_value=mock.Mock(status_code=200, text='{}'))
class TestCreateCommentsServiceUser(TransactionTestCase):
def setUp(self):
self.username = "test_user"
self.url = reverse("create_account")
self.params = {
"username": self.username,
"email": "test@example.org",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
def test_cs_user_created(self, request):
"If user account creation succeeds, we should create a comments service user"
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertTrue(request.called)
args, kwargs = request.call_args
self.assertEqual(args[0], 'put')
self.assertTrue(args[1].startswith(TEST_CS_URL))
self.assertEqual(kwargs['data']['username'], self.params['username'])
@mock.patch("student.models.Registration.register", side_effect=Exception)
def test_cs_user_not_created(self, register, request):
"If user account creation fails, we should not create a comments service user"
try:
response = self.client.post(self.url, self.params)
except:
pass
with self.assertRaises(User.DoesNotExist):
User.objects.get(username=self.username)
self.assertTrue(register.called)
self.assertFalse(request.called)
|
yarda/dslib
|
refs/heads/master
|
tests.py
|
1
|
# encoding: utf-8
#* dslib - Python library for Datove schranky
#* Copyright (C) 2009-2012 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
import sys
sys.path.insert(0, "../")
from dslib.client import Client
from dslib.certs.cert_manager import CertificateManager
from dslib import models
def active(f):
"""decorator to activate a test"""
f.active = True
return f
# ============================== Tests start here ==============================
@active
def GetListOfSentMessages():
template = "%(dmID)-8s %(dmSender)-20s %(dmRecipient)-20s %(dmAnnotation)-20s %(dmDeliveryTime)-20s"
heading = {"dmID":"ID",
"dmSender":"Sender",
"dmRecipient":"Recipient",
"dmDeliveryTime":"DeliveryTime",
"dmAnnotation":"Annotation",
}
print (template % heading).encode("utf-8")
print "------------------------------------------------------------------------------------"
reply = ds_client.GetListOfSentMessages()
print reply
for message in ds_client.GetListOfSentMessages().data:
print (template % (message.__dict__)).encode("utf-8")
@active
def GetListOfReceivedMessages():
template = "%(dmID)-8s %(dmSender)-20s %(dmRecipient)-20s %(dmAnnotation)-20s %(dmDeliveryTime)-20s"
heading = {"dmSender":"Sender",
"dmRecipient":"Recipient",
"dmDeliveryTime":"DeliveryTime",
"dmAnnotation":"Annotation",
"dmID":"ID",
}
print (template % heading).encode("utf-8")
print "------------------------------------------------------------------------------------"
for message in ds_client.GetListOfReceivedMessages().data:
print (template % (message.__dict__)).encode("utf-8")
@active
def MessageDownload():
for envelope in ds_client.GetListOfReceivedMessages().data:
message = ds_client.MessageDownload(envelope.dmID).data
print "dmID:", message.dmID
print "dmSender:", message.dmSender.encode('utf-8')
print "dmAnnotation:", message.dmAnnotation.encode('utf-8')
print "dmDeliveryTime:", type(message.dmDeliveryTime), message.dmDeliveryTime
print "Attachments:"
for f in message.dmFiles:
print " '%s' saved" % f.save_file("./")
break # just the first one
@active
def MessageEnvelopeDownload():
for envelope in ds_client.GetListOfReceivedMessages().data:
message = ds_client.MessageEnvelopeDownload(envelope.dmID).data
print "dmID:", message.dmID
print "dmSender:", message.dmSender.encode('utf-8')
print "dmAnnotation:", message.dmAnnotation.encode('utf-8')
@active
def GetDeliveryInfo():
for envelope in ds_client.GetListOfSentMessages().data:
message = ds_client.GetDeliveryInfo(envelope.dmID).data
print "dmID:", message.dmID
print "dmSender:", message.dmSender.encode('utf-8')
print "dmAnnotation:", message.dmAnnotation.encode('utf-8')
print "dmHash:", message.dmHash
for event in message.dmEvents:
print event
print "----------------------------------------"
@active
def DummyOperation():
print "Should be None None"
reply = ds_client.DummyOperation()
print "Actually is", reply.status, reply.data
@active
def FindDataBox():
# part 1
info = models.dbOwnerInfo()
info.dbType = "OVM"
info.firmName = u"Město Kladno"
info.adZipCode = "27201"
reply = ds_client.FindDataBox(info)
print " * Should find one hit for Kladno"
print (u"%-10s %-20s %-20s %-20s" % ("ID","Name","Street","City")).encode("utf-8")
for owner in reply.data:
print (u"%-10s %-20s %-20s %-20s" % (owner.dbID, owner.firmName, owner.adStreet, owner.adCity)).encode('utf-8')
# part 2
info = models.dbOwnerInfo()
info.dbType = "OVM"
info.firmName = u"Město"
reply = ds_client.FindDataBox(info)
print
print " * Now much more hits starting with 'Město'"
print (u"%-10s %-30s %-20s %-20s" % ("ID","Name","Street","City")).encode("utf-8")
for owner in reply.data:
print (u"%-10s %-30s %-20s %-20s" % (owner.dbID, owner.firmName, owner.adStreet, owner.adCity)).encode('utf-8')
# part 3
info = models.dbOwnerInfo()
info.dbType = "OVM"
info.firmName = u"Prase"
reply = ds_client.FindDataBox(info)
print
print " * Should not find anything for 'Prase'"
print "Result:", reply.data
# part 4
info = models.dbOwnerInfo()
info.dbType = "OVM"
info.dbID = u"hjyaavk"
reply = ds_client.FindDataBox(info)
print
print " * Searching using dbID - should find Milotice only"
print (u"%-10s %-30s %-20s %-20s" % ("ID","Name","Street","City")).encode("utf-8")
for owner in reply.data:
print (u"%-10s %-30s %-20s %-20s" % (owner.dbID, owner.firmName, owner.adStreet, owner.adCity)).encode('utf-8')
# part 5
info = models.dbOwnerInfo()
info.dbType = "OVM"
info.ic = u"00282651"
reply = ds_client.FindDataBox(info)
print
print " * Searching using IC - should find Slapanice"
print (u"%-10s %-30s %-20s %-20s" % ("ID","Name","Street","City")).encode("utf-8")
for owner in reply.data:
print (u"%-10s %-30s %-20s %-20s" % (owner.dbID, owner.firmName, owner.adStreet, owner.adCity)).encode('utf-8')
@active
def CreateMessage():
envelope = models.dmEnvelope()
envelope.dbIDRecipient = "hjyaavk"
envelope.dmAnnotation = "tohle je dalsi pokus posilany z pythonu"
dmfile = models.dmFile()
dmfile._dmMimeType = "text/plain"
dmfile._dmFileMetaType = "main"
dmfile._dmFileDescr = "prilozeny_soubor.txt"
import base64
dmfile.dmEncodedContent = base64.standard_b64encode("tohle je pokusny text v pokusne priloze")
dmfiles = [dmfile]
reply = ds_client.CreateMessage(envelope, dmfiles)
print reply.status
print "Message ID is:", reply.data
@active
def GetOwnerInfoFromLogin():
reply = ds_client.GetOwnerInfoFromLogin()
print reply.status
print reply.data
@active
def GetUserInfoFromLogin():
reply = ds_client.GetUserInfoFromLogin()
print reply.status
print reply.data
@active
def SignedMessageDownload():
for envelope in ds_client.GetListOfReceivedMessages().data:
print "ID:", envelope.dmID
reply = ds_client.SignedMessageDownload(envelope.dmID)
print reply.status
print "ID matches:", reply.data.dmID, reply.data.dmID == envelope.dmID
print "Verified message: %s" % reply.data.is_message_verified()
print "Verified certificate: %s" % reply.data.pkcs7_data.certificates[0].is_verified()
print reply.data.check_timestamp()
break
@active
def SignedSentMessageDownload():
for envelope in ds_client.GetListOfSentMessages().data:
print "ID:", envelope.dmID
reply = ds_client.SignedSentMessageDownload(envelope.dmID)
print reply.status
print "ID matches:", reply.data.dmID, reply.data.dmID == envelope.dmID
print "Verified message: %s" % reply.data.is_message_verified()
print "Verified certificate: %s" % reply.data.pkcs7_data.certificates[0].is_verified()
print "Attachments:"
for f in reply.data.dmFiles:
print " Attachment '%s'" % f._dmFileDescr
break # just the first one
@active
def GetSignedDeliveryInfo():
for envelope in ds_client.GetListOfSentMessages().data:
print type(envelope)
print "ID:", envelope.dmID
reply = ds_client.GetSignedDeliveryInfo(envelope.dmID)
print reply.status
print reply.data
print "ID matches:", reply.data.dmID, reply.data.dmID == envelope.dmID
print "Verified message: %s" % reply.data.is_verified
print "Verified certificate: %s" % reply.data.pkcs7_data.certificates[0].is_verified()
print "Timestamp verified: %s" % reply.data.check_timestamp()
for event in reply.data.dmEvents:
print " Event", event
f = file("dodejka.zfo","w")
import base64
f.write(base64.b64decode(reply.additional_data['raw_data']))
f.close()
break
@active
def GetPasswordInfo():
reply = ds_client.GetPasswordInfo()
print "Password expires: %s" %reply.data
@active
def ChangeISDSPassword():
import getpass
old_pass = getpass.getpass("Current password:")
new_pass = getpass.getpass("New password:")
reply = ds_client.ChangeISDSPassword(old_pass, new_pass)
print "%s : %s"% (reply.status.dbStatusCode, reply.status.dbStatusMessage)
@active
def AuthenticateMessage():
import base64
print "This should return None"
reply = ds_client.AuthenticateMessage(base64.b64encode("Hello DS"))
print "Actually is", reply.data
print "-----------------------------------------------"
print "This should complete successfully"
import local
test_dir = local.find_data_directory("test_msgs")
f = file(os.path.join(test_dir,"AuthenticateMessage-test.txt"),"r")
text = f.read()
f.close()
reply = ds_client.AuthenticateMessage(text)
print "Actually is", reply.status
print "Message verified successfully:", reply.data
@active
def MarkMessageAsDownloaded():
i = 0
for envelope in ds_client.GetListOfReceivedMessages().data:
print "ID:", envelope.dmID
reply = ds_client.MarkMessageAsDownloaded(envelope.dmID)
print reply.status
print reply.data
i += 1
if i > 2:
break
@active
def ConfirmDelivery():
for envelope in ds_client.GetListOfReceivedMessages().data:
print "*ID:", envelope.dmID, envelope._dmType, envelope.dmMessageStatus
if envelope._dmType == "K" and envelope.dmMessageStatus == 4:
reply = ds_client.ConfirmDelivery(envelope.dmID)
print reply.status
print reply.data
break
if __name__ == "__main__":
import logging
#logging.basicConfig(level=logging.DEBUG)
#logging.getLogger('suds').setLevel(logging.DEBUG)
def otp_callback(last_problem=None):
if last_problem:
print last_problem
x = raw_input("Generated code: ")
return x
def list_tests(tests):
print "Available tests:"
for i,test in enumerate(tests):
print " %2d. %s" % (i, test.__name__)
print
# get list of tests
import sys, inspect
tests = []
for name, f in inspect.getmembers(sys.modules[__name__], inspect.isfunction):
if hasattr(f, "active") and f.active:
tests.append(f)
# parse options
from optparse import OptionParser
import os
op = OptionParser(usage="""python %prog [options] [username] test+\n
username - the login name to DS - not given when certificate login is used
test - either a number or name of a test or 'ALL'""")
op.add_option( "-t", action="store_true",
dest="test_account", default=False,
help="the account is a test account, not a standard one.")
op.add_option( "-k", action="store",
dest="keyfile", default=None,
help="Client private key file to use for 'certificate' or \
'user_certificate' login methods.")
op.add_option( "-c", action="store",
dest="certfile", default=None,
help="Client certificate file to use for 'certificate' or \
'user_certificate' login methods.")
op.add_option( "-P", action="store",
dest="p12file", default=None,
help="Client certificate and key in a PKCS12 file\
to use for 'certificate' or 'user_certificate' login methods.")
op.add_option( "-p", action="store",
dest="proxy", default="",
help="address of HTTP proxy to be used\
(use 'SYSTEM' for default system setting).")
op.add_option( "-m", action="store",
dest="login_method", default="",
help="login method to use - defaults to 'username' or to \
'user_certificate' (when -P or -k and -c is given). \
Possible values are 'username', 'certificate' and 'user_certificate'.")
(options, args) = op.parse_args()
# select the login_method
if options.p12file or (options.keyfile and options.certfile):
login_method = options.login_method or 'user_certificate'
else:
login_method = options.login_method or 'username'
if login_method == "certificate":
username = None
args = args[:]
else:
if len(args) < 1:
list_tests(tests)
op.error("Too few arguments - when certificates are not given,\
username must be present.")
else:
username = args[0]
args = args[1:]
# setup proxy
proxy = options.proxy
if proxy == "SYSTEM":
proxy = -1
from dslib.network import ProxyManager
ProxyManager.HTTPS_PROXY.set_uri(proxy)
# read the tests
to_run = []
if 'ALL' in args:
to_run = tests
else:
for test_name in args:
if test_name.isdigit():
test_id = int(test_name)
if test_id < len(tests):
to_run.append(tests[test_id])
else:
sys.stderr.write("Test %d does not exist!\n" % test_id)
else:
for test in tests:
if test.__name__ == test_name:
to_run.append(test)
break
else:
sys.stderr.write("Test '%s' does not exist!\n" % test_name)
# run the tests
if to_run:
# setup the client argument and attributes
import local
cert_dir = local.find_data_directory("trusted_certificates")
args = dict(test_environment=options.test_account,
server_certs=os.path.join(cert_dir, "all_trusted.pem"),
login_method=login_method
)
# process specific things
if login_method in ("certificate", "user_certificate"):
if options.p12file:
# PKCS12 file certificate and key storage
import OpenSSL
f = file(options.p12file, 'rb')
p12text = f.read()
f.close()
import getpass
password = getpass.getpass("Enter PKCS12 file password:")
try:
p12obj = OpenSSL.crypto.load_pkcs12(p12text, password)
except OpenSSL.crypto.Error, e:
a = e.args
if type(a) in (list,tuple) and type(a[0]) in (list,tuple) and \
type(a[0][0]) in (list,tuple) and e.args[0][0][2] == 'mac verify failure':
print "Wrong password! Exiting."
sys.exit()
except Exception, e:
print "Error:", e
sys.exit()
login_method = options.login_method or 'user_certificate'
args.update(client_certobj=p12obj.get_certificate(),
client_keyobj=p12obj.get_privatekey())
elif options.keyfile and options.certfile:
# PEM file certificate and key storage
args.update(client_certfile=options.certfile,
client_keyfile=options.keyfile)
else:
# no certificates were given
sys.stderr.write("For login method '%s' certificate (either -P or -k \
and -c) is needed!\n" % login_method)
sys.exit(101)
if login_method in ("username", "user_certificate", "hotp", "totp"):
# username and password login
# try to find a stored password
passfile = "./.isds_password"
if os.path.exists(passfile):
print "Using password from '%s'" % passfile
password = file(passfile,'r').read().strip()
else:
import getpass
password = getpass.getpass("Enter login password:")
args.update(login=username, password=password)
if login_method in ("hotp", "totp"):
args.update(otp_callback=otp_callback)
CertificateManager.read_trusted_certificates_from_dir("trusted_certificates")
# create the client
ds_client = Client(**args)
# run the tests
for test in to_run:
print "==================== %s ====================" % test.__name__
# if testing password change, pass current password
test()
print "==================== end of %s ====================" % test.__name__
print
print
ds_client.logout_from_server()
else:
list_tests(tests)
print op.get_usage()
|
vlachoudis/sl4a
|
refs/heads/master
|
python/src/Demo/curses/ncurses.py
|
32
|
#!/usr/bin/env python
#
# $Id: ncurses.py 66424 2008-09-13 01:22:08Z andrew.kuchling $
#
# (n)curses exerciser in Python, an interactive test for the curses
# module. Currently, only the panel demos are ported.
import curses
from curses import panel
def wGetchar(win = None):
if win is None: win = stdscr
return win.getch()
def Getchar():
wGetchar()
#
# Panels tester
#
def wait_a_while():
if nap_msec == 1:
Getchar()
else:
curses.napms(nap_msec)
def saywhat(text):
stdscr.move(curses.LINES - 1, 0)
stdscr.clrtoeol()
stdscr.addstr(text)
def mkpanel(color, rows, cols, tly, tlx):
win = curses.newwin(rows, cols, tly, tlx)
pan = panel.new_panel(win)
if curses.has_colors():
if color == curses.COLOR_BLUE:
fg = curses.COLOR_WHITE
else:
fg = curses.COLOR_BLACK
bg = color
curses.init_pair(color, fg, bg)
win.bkgdset(ord(' '), curses.color_pair(color))
else:
win.bkgdset(ord(' '), curses.A_BOLD)
return pan
def pflush():
panel.update_panels()
curses.doupdate()
def fill_panel(pan):
win = pan.window()
num = pan.userptr()[1]
win.move(1, 1)
win.addstr("-pan%c-" % num)
win.clrtoeol()
win.box()
maxy, maxx = win.getmaxyx()
for y in range(2, maxy - 1):
for x in range(1, maxx - 1):
win.move(y, x)
win.addch(num)
def demo_panels(win):
global stdscr, nap_msec, mod
stdscr = win
nap_msec = 1
mod = ["test", "TEST", "(**)", "*()*", "<-->", "LAST"]
stdscr.refresh()
for y in range(0, curses.LINES - 1):
for x in range(0, curses.COLS):
stdscr.addstr("%d" % ((y + x) % 10))
for y in range(0, 1):
p1 = mkpanel(curses.COLOR_RED,
curses.LINES // 2 - 2,
curses.COLS // 8 + 1,
0,
0)
p1.set_userptr("p1")
p2 = mkpanel(curses.COLOR_GREEN,
curses.LINES // 2 + 1,
curses.COLS // 7,
curses.LINES // 4,
curses.COLS // 10)
p2.set_userptr("p2")
p3 = mkpanel(curses.COLOR_YELLOW,
curses.LINES // 4,
curses.COLS // 10,
curses.LINES // 2,
curses.COLS // 9)
p3.set_userptr("p3")
p4 = mkpanel(curses.COLOR_BLUE,
curses.LINES // 2 - 2,
curses.COLS // 8,
curses.LINES // 2 - 2,
curses.COLS // 3)
p4.set_userptr("p4")
p5 = mkpanel(curses.COLOR_MAGENTA,
curses.LINES // 2 - 2,
curses.COLS // 8,
curses.LINES // 2,
curses.COLS // 2 - 2)
p5.set_userptr("p5")
fill_panel(p1)
fill_panel(p2)
fill_panel(p3)
fill_panel(p4)
fill_panel(p5)
p4.hide()
p5.hide()
pflush()
saywhat("press any key to continue")
wait_a_while()
saywhat("h3 s1 s2 s4 s5;press any key to continue")
p1.move(0, 0)
p3.hide()
p1.show()
p2.show()
p4.show()
p5.show()
pflush()
wait_a_while()
saywhat("s1; press any key to continue")
p1.show()
pflush()
wait_a_while()
saywhat("s2; press any key to continue")
p2.show()
pflush()
wait_a_while()
saywhat("m2; press any key to continue")
p2.move(curses.LINES // 3 + 1, curses.COLS // 8)
pflush()
wait_a_while()
saywhat("s3; press any key to continue")
p3.show()
pflush()
wait_a_while()
saywhat("m3; press any key to continue")
p3.move(curses.LINES // 4 + 1, curses.COLS // 15)
pflush()
wait_a_while()
saywhat("b3; press any key to continue")
p3.bottom()
pflush()
wait_a_while()
saywhat("s4; press any key to continue")
p4.show()
pflush()
wait_a_while()
saywhat("s5; press any key to continue")
p5.show()
pflush()
wait_a_while()
saywhat("t3; press any key to continue")
p3.top()
pflush()
wait_a_while()
saywhat("t1; press any key to continue")
p1.show()
pflush()
wait_a_while()
saywhat("t2; press any key to continue")
p2.show()
pflush()
wait_a_while()
saywhat("t3; press any key to continue")
p3.show()
pflush()
wait_a_while()
saywhat("t4; press any key to continue")
p4.show()
pflush()
wait_a_while()
for itmp in range(0, 6):
w4 = p4.window()
w5 = p5.window()
saywhat("m4; press any key to continue")
w4.move(curses.LINES // 8, 1)
w4.addstr(mod[itmp])
p4.move(curses.LINES // 6, itmp * curses.COLS // 8)
w5.move(curses.LINES // 6, 1)
w5.addstr(mod[itmp])
pflush()
wait_a_while()
saywhat("m5; press any key to continue")
w4.move(curses.LINES // 6, 1)
w4.addstr(mod[itmp])
p5.move(curses.LINES // 3 - 1, itmp * 10 + 6)
w5.move(curses.LINES // 8, 1)
w5.addstr(mod[itmp])
pflush()
wait_a_while()
saywhat("m4; press any key to continue")
p4.move(curses.LINES // 6, (itmp + 1) * curses.COLS // 8)
pflush()
wait_a_while()
saywhat("t5; press any key to continue")
p5.top()
pflush()
wait_a_while()
saywhat("t2; press any key to continue")
p2.top()
pflush()
wait_a_while()
saywhat("t1; press any key to continue")
p1.top()
pflush()
wait_a_while()
saywhat("d2; press any key to continue")
del p2
pflush()
wait_a_while()
saywhat("h3; press any key to continue")
p3.hide()
pflush()
wait_a_while()
saywhat("d1; press any key to continue")
del p1
pflush()
wait_a_while()
saywhat("d4; press any key to continue")
del p4
pflush()
wait_a_while()
saywhat("d5; press any key to continue")
del p5
pflush()
wait_a_while()
if nap_msec == 1:
break
nap_msec = 100
#
# one fine day there'll be the menu at this place
#
curses.wrapper(demo_panels)
|
mcdaniel67/sympy
|
refs/heads/master
|
examples/advanced/dense_coding_example.py
|
113
|
#!/usr/bin/env python
"""Demonstration of quantum dense coding."""
from sympy import sqrt, pprint
from sympy.physics.quantum import qapply
from sympy.physics.quantum.gate import H, X, Z, CNOT
from sympy.physics.quantum.qubit import Qubit
from sympy.physics.quantum.circuitplot import circuit_plot
from sympy.physics.quantum.grover import superposition_basis
def main():
psi = superposition_basis(2)
psi
# Dense coding demo:
# Assume Alice has the left QBit in psi
print("An even superposition of 2 qubits. Assume Alice has the left QBit.")
pprint(psi)
# The corresponding gates applied to Alice's QBit are:
# Identity Gate (1), Not Gate (X), Z Gate (Z), Z Gate and Not Gate (ZX)
# Then there's the controlled not gate (with Alice's as control):CNOT(1, 0)
# And the Hadamard gate applied to Alice's Qbit: H(1)
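# Summary of the encoding used below: |00> adds no extra gate, |01> applies
# X(1), |10> applies Z(1), and |11> applies Z(1)*X(1); each circuit then
# applies CNOT(1, 0) followed by H(1) before the result is printed.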
# To Send Bob the message |0>|0>
print("To Send Bob the message |00>.")
circuit = H(1)*CNOT(1, 0)
result = qapply(circuit*psi)
result
pprint(result)
# To send Bob the message |0>|1>
print("To Send Bob the message |01>.")
circuit = H(1)*CNOT(1, 0)*X(1)
result = qapply(circuit*psi)
result
pprint(result)
# To send Bob the message |1>|0>
print("To Send Bob the message |10>.")
circuit = H(1)*CNOT(1, 0)*Z(1)
result = qapply(circuit*psi)
result
pprint(result)
# To send Bob the message |1>|1>
print("To Send Bob the message |11>.")
circuit = H(1)*CNOT(1, 0)*Z(1)*X(1)
result = qapply(circuit*psi)
result
pprint(result)
if __name__ == "__main__":
main()
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/helpers/pydev/pydevd_attach_to_process/linux/gdb_threads_settrace.py
|
82
|
# This file is meant to be run inside GDB as a command after
# the attach_linux.so dll has already been loaded to settrace for all threads.
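# The `gdb` module referenced below is only available inside GDB's embedded
# Python interpreter; this script is expected to be sourced from a GDB session
# already attached to the target process (with attach_linux.so loaded), not
# run with a standalone python binary.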
if __name__ == '__main__':
#print('Startup GDB in Python!')
try:
show_debug_info = 0
is_debug = 0
for t in list(gdb.selected_inferior().threads()):
t.switch()
if t.is_stopped():
#print('Will settrace in: %s' % (t,))
gdb.execute("call SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug))
except:
import traceback;traceback.print_exc()
|
dignan/control
|
refs/heads/master
|
plugins/jamendo/jamendo/JamendoSource.py
|
1
|
# -*- coding: utf-8 -*-
# JamendoSource.py
#
# Copyright (C) 2007 - Guillaume Desmottes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Parts from "Magnatune Rhythmbox plugin" (stolen from rhythmbox's MagnatuneSource.py)
# Copyright (C), 2006 Adam Zimmerman <adam_zimmerman@sfu.ca>
import rb, rhythmdb
from JamendoSaxHandler import JamendoSaxHandler
import JamendoConfigureDialog
import os
import gobject
import gtk
import gnome, gconf
import xml
import gzip
import datetime
# URIs
jamendo_song_info_uri = "http://img.jamendo.com/data/dbdump_artistalbumtrack.xml.gz"
mp32_uri = "http://api.jamendo.com/get2/bittorrent/file/plain/?type=archive&class=mp32&album_id="
ogg3_uri = "http://api.jamendo.com/get2/bittorrent/file/plain/?type=archive&class=ogg3&album_id="
# MP3s for streaming : http://api.jamendo.com/get2/stream/track/redirect/?id={TRACKID}&streamencoding=mp31
# OGGs for streaming : http://api.jamendo.com/get2/stream/track/redirect/?id={TRACKID}&streamencoding=ogg2
# .torrent file for download (MP3 archive) : http://api.jamendo.com/get2/bittorrent/file/plain/?album_id={ALBUMID}&type=archive&class=mp32
# .torrent file for download (OGG archive) : http://api.jamendo.com/get2/bittorrent/file/plain/?album_id={ALBUMID}&type=archive&class=ogg3
# Album Covers are available here: http://api.jamendo.com/get2/image/album/redirect/?id={ALBUMID}&imagesize={100-600}
artwork_url = "http://api.jamendo.com/get2/image/album/redirect/?id=%s&imagesize=200"
artist_url = "http://www.jamendo.com/get/artist/id/album/page/plain/"
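# Rough illustration of how the templates above expand (album id 42 is just an
# example, not a real id from this plugin):
#   artwork_url % 42  -> ".../get2/image/album/redirect/?id=42&imagesize=200"
#   mp32_uri + "42"   -> .torrent link for the MP3 archive of album 42
#   ogg3_uri + "42"   -> .torrent link for the OGG archive of album 42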
class JamendoSource(rb.BrowserSource):
__gproperties__ = {
'plugin': (rb.Plugin, 'plugin', 'plugin', gobject.PARAM_WRITABLE|gobject.PARAM_CONSTRUCT_ONLY),
}
def __init__(self):
rb.BrowserSource.__init__(self, name=_("Jamendo"))
# catalogue stuff
self.__db = None
self.__saxHandler = None
self.__activated = False
self.__notify_id = 0
self.__update_id = 0
self.__info_screen = None
self.__updating = True
self.__load_current_size = 0
self.__load_total_size = 0
self.__db_load_finished = False
self.__catalogue_loader = None
self.__catalogue_check = None
self.__jamendo_dir = rb.find_user_cache_file("jamendo")
if os.path.exists(self.__jamendo_dir) is False:
os.makedirs(self.__jamendo_dir, 0700)
self.__local_catalogue_path = os.path.join(self.__jamendo_dir, "dbdump.xml")
self.__local_catalogue_temp = os.path.join(self.__jamendo_dir, "dbdump.xml.tmp")
def do_set_property(self, property, value):
if property.name == 'plugin':
self.__plugin = value
else:
raise AttributeError, 'unknown property %s' % property.name
def do_impl_get_browser_key (self):
return "/apps/rhythmbox/plugins/jamendo/show_browser"
def do_impl_get_paned_key (self):
return "/apps/rhythmbox/plugins/jamendo/paned_position"
def do_impl_can_delete (self):
return False
def do_impl_pack_paned (self, paned):
self.__paned_box = gtk.VBox(False, 5)
self.pack_start(self.__paned_box)
self.__paned_box.pack_start(paned)
#
# RBSource methods
#
def do_impl_show_entry_popup(self):
self.show_source_popup ("/JamendoSourceViewPopup")
def do_impl_get_ui_actions(self):
return ["JamendoDownloadAlbum","JamendoDonateArtist"]
def do_impl_get_status(self):
if self.__updating:
if self.__load_total_size > 0:
progress = min (float(self.__load_current_size) / self.__load_total_size, 1.0)
else:
progress = -1.0
return (_("Loading Jamendo catalog"), None, progress)
else:
qm = self.get_property("query-model")
return (qm.compute_status_normal("%d song", "%d songs"), None, 2.0)
def do_impl_activate(self):
if not self.__activated:
shell = self.get_property('shell')
self.__db = shell.get_property('db')
self.__entry_type = self.get_property('entry-type')
self.__activated = True
self.__show_loading_screen (True)
# start our catalogue updates
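# (6 * 60 * 60 seconds: re-check the remote catalogue every six hours)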
self.__update_id = gobject.timeout_add_seconds(6 * 60 * 60, self.__update_catalogue)
self.__update_catalogue()
sort_key = gconf.client_get_default().get_string(JamendoConfigureDialog.gconf_keys['sorting'])
if not sort_key:
sort_key = "Artist,ascending"
self.get_entry_view().set_sorting_type(sort_key)
rb.BrowserSource.do_impl_activate (self)
def do_impl_delete_thyself(self):
if self.__update_id != 0:
gobject.source_remove (self.__update_id)
self.__update_id = 0
if self.__notify_id != 0:
gobject.source_remove (self.__notify_id)
self.__notify_id = 0
if self.__catalogue_loader:
self.__catalogue_loader.cancel()
self.__catalogue_loader = None
if self.__catalogue_check:
self.__catalogue_check.cancel()
self.__catalogue_check = None
gconf.client_get_default().set_string(JamendoConfigureDialog.gconf_keys['sorting'], self.get_entry_view().get_sorting_type())
rb.BrowserSource.do_impl_delete_thyself (self)
#
# internal catalogue downloading and loading
#
def __catalogue_chunk_cb(self, result, total):
if not result or isinstance (result, Exception):
if result:
# report error somehow?
print "error loading catalogue: %s" % result
self.__parser.close()
self.__db_load_finished = True
self.__updating = False
self.__saxHandler = None
self.__show_loading_screen (False)
self.__catalogue_loader = None
return
self.__parser.feed(result)
self.__load_current_size += len(result)
self.__load_total_size = total
self.__notify_status_changed()
def __load_catalogue(self):
print "loading catalogue %s" % self.__local_catalogue_path
self.__notify_status_changed()
self.__db_load_finished = False
self.__saxHandler = JamendoSaxHandler(self.__db, self.__entry_type)
self.__parser = xml.sax.make_parser()
self.__parser.setContentHandler(self.__saxHandler)
self.__catalogue_loader = rb.ChunkLoader()
self.__catalogue_loader.get_url_chunks(self.__local_catalogue_path, 64*1024, True, self.__catalogue_chunk_cb)
def __download_catalogue_chunk_cb (self, result, total, out):
if not result:
# done downloading, unzip to real location
out.close()
catalog = gzip.open(self.__local_catalogue_temp)
out = open(self.__local_catalogue_path, 'w')
while True:
s = catalog.read(4096)
if s == "":
break
out.write(s)
out.close()
catalog.close()
os.unlink(self.__local_catalogue_temp)
self.__db_load_finished = True
self.__show_loading_screen (False)
self.__catalogue_loader = None
self.__load_catalogue ()
elif isinstance(result, Exception):
# complain
pass
else:
out.write(result)
self.__load_current_size += len(result)
self.__load_total_size = total
self.__notify_status_changed()
def __download_catalogue(self):
print "downloading catalogue"
self.__updating = True
out = open(self.__local_catalogue_temp, 'w')
self.__catalogue_loader = rb.ChunkLoader()
self.__catalogue_loader.get_url_chunks(jamendo_song_info_uri, 4*1024, True, self.__download_catalogue_chunk_cb, out)
def __update_catalogue(self):
def update_cb (result):
self.__catalogue_check = None
if result is True:
self.__download_catalogue()
elif self.__db_load_finished is False:
self.__load_catalogue()
self.__catalogue_check = rb.UpdateCheck()
self.__catalogue_check.check_for_update(self.__local_catalogue_path, jamendo_song_info_uri, update_cb)
def __show_loading_screen(self, show):
if self.__info_screen is None:
# load the builder stuff
builder = gtk.Builder()
builder.add_from_file(self.__plugin.find_file("jamendo-loading.ui"))
self.__info_screen = builder.get_object("jamendo_loading_scrolledwindow")
self.pack_start(self.__info_screen)
self.get_entry_view().set_no_show_all (True)
self.__info_screen.set_no_show_all (True)
self.__info_screen.set_property("visible", show)
self.__paned_box.set_property("visible", not show)
def __notify_status_changed(self):
def change_idle_cb():
self.notify_status_changed()
self.__notify_id = 0
return False
if self.__notify_id == 0:
self.__notify_id = gobject.idle_add(change_idle_cb)
# Download album
def download_album (self):
tracks = self.get_entry_view().get_selected_entries()
format = gconf.client_get_default().get_string(JamendoConfigureDialog.gconf_keys['format'])
if not format or format not in JamendoConfigureDialog.format_list:
format = 'ogg3'
#TODO: this should work if the album was selected in the browser
#without any track selected
if len(tracks) == 1:
track = tracks[0]
albumid = self.__db.entry_get(track, rhythmdb.PROP_MUSICBRAINZ_ALBUMID)
formats = {}
formats["mp32"] = mp32_uri + albumid
formats["ogg3"] = ogg3_uri + albumid
p2plink = formats[format]
l = rb.Loader()
l.get_url(p2plink, self.__download_p2plink, albumid)
def __download_p2plink (self, result, albumid):
if result is None:
emsg = _("Error looking up p2plink for album %s on jamendo.com") % (albumid)
gtk.MessageDialog(None, 0, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, emsg).run()
return
gtk.show_uri(self.props.shell.props.window.get_screen(), result, gtk.gdk.CURRENT_TIME)
# Donate to Artist
def launch_donate (self):
tracks = self.get_entry_view().get_selected_entries()
#TODO: this should work if the artist was selected in the browser
#without any track selected
if len(tracks) == 1:
track = tracks[0]
# The Album ID can be used to look up the artist, and issue a clean redirect.
albumid = self.__db.entry_get(track, rhythmdb.PROP_MUSICBRAINZ_ALBUMID)
artist = self.__db.entry_get(track, rhythmdb.PROP_ARTIST)
url = artist_url + albumid.__str__() + "/"
l = rb.Loader()
l.get_url(url, self.__open_donate, artist)
def __open_donate (self, result, artist):
if result is None:
emsg = _("Error looking up artist %s on jamendo.com") % (artist)
gtk.MessageDialog(None, 0, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, emsg).run()
return
gtk.show_uri(self.props.shell.props.window.get_screen(), result + "donate/", gtk.gdk.CURRENT_TIME)
def playing_entry_changed (self, entry):
if not self.__db or not entry:
return
if entry.get_entry_type() != self.__db.entry_type_get_by_name("JamendoEntryType"):
return
gobject.idle_add(self.emit_cover_art_uri, entry)
def emit_cover_art_uri (self, entry):
stream = self.__db.entry_get (entry, rhythmdb.PROP_LOCATION)
albumid = self.__db.entry_get (entry, rhythmdb.PROP_MUSICBRAINZ_ALBUMID)
url = artwork_url % albumid
self.__db.emit_entry_extra_metadata_notify (entry, "rb:coverArt-uri", str(url))
return False
gobject.type_register(JamendoSource)
|
kumar303/jstestnet
|
refs/heads/master
|
jstestnet/settings/__init__.py
|
49
|
from .base import *
try:
from .local import *
except ImportError, exc:
exc.args = tuple(['%s (did you rename settings/local.py-dist?)' % exc.args[0]])
raise exc
|
Nepherhotep/django
|
refs/heads/master
|
tests/model_inheritance_regress/tests.py
|
150
|
"""
Regression tests for Model inheritance behavior.
"""
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
# Regression tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
self.assertRaises(
Place.DoesNotExist,
Place.objects.get,
pk=ident)
self.assertRaises(
ItalianRestaurant.DoesNotExist,
ItalianRestaurant.objects.get,
pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
self.assertRaises(
ArticleWithAuthor.DoesNotExist,
c3.get_next_by_pub_date)
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
self.assertRaises(
ArticleWithAuthor.DoesNotExist,
c1.get_previous_by_pub_date)
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
Regression tests for #10406
If there's a one-to-one link between a child model and the parent and
no explicit pk declared, we can use the one-to-one link as the pk on
the child.
"""
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
# However, the connector from child to parent need not be the pk on
# the child at all.
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# the child->parent link
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
ParkingLot4B.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
# Check that many-to-many relations defined on an abstract base class
# are correctly inherited (and created) on the child class.
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees = [p1, p3]
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees = [p2, p4]
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# Check that a subclass of a subclass of an abstract model doesn't get
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees = [p4]
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
"""
Test that a model which has different primary key for the parent model
passes unique field checking correctly. Refs #17615.
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and top contains direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, 'John Doe')
self.assertEqual(senator.title, 'X')
self.assertEqual(senator.state, 'Y')
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
|
kwanty/sdrdab
|
refs/heads/master
|
test/gtest/test/gtest_xml_outfiles_test.py
|
2526
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
tehtechguy/mHTM
|
refs/heads/master
|
setup.py
|
1
|
# setup.py
#
# Author : James Mnatzaganian
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 12/02/15
#
# Description : Installs the mHTM project
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 James Mnatzaganian
# Native imports
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import shutil, os
# Remove any old versions
print 'Removing old versions...'
py_libs = get_python_lib()
for path in os.listdir(py_libs):
if path[:4] == 'mHTM':
full_path = os.path.join(py_libs, path)
if os.path.isfile(full_path):
try:
os.remove(full_path)
except OSError:
pass
else: shutil.rmtree(full_path, True)
# Install the program
print 'Installing...'
setup(
name='mHTM',
version='0.11.1',
description="HTM CLA Implementation",
author='James Mnatzaganian',
author_email='jamesmnatzaganian@outlook.com',
url='http://techtorials.me',
package_dir={'mHTM':'src', 'mHTM.datasets':'src/datasets',
'mHTM.examples':'src/examples'},
packages=['mHTM', 'mHTM.datasets', 'mHTM.examples'],
package_data={'mHTM.datasets':['mnist.pkl']}
)
# Remove the unnecessary build folder
print 'Cleaning up...'
shutil.rmtree('build', True)
|
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vmware_host_lockdown.py
|
56
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_lockdown
short_description: Manage administrator permission for the local administrative account for the ESXi host
description:
- This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.
- All parameters and VMware objects values are case sensitive.
- This module is destructive as administrator permissions are managed using APIs; please read options carefully and proceed.
- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of cluster.
- All host systems from the given cluster are used to manage lockdown.
- Required parameter, if C(esxi_hostname) is not set.
esxi_hostname:
description:
- List of ESXi hostnames to manage lockdown.
- Required parameter, if C(cluster_name) is not set.
- See examples for specifications.
state:
description:
- State of the host system.
- If set to C(present), all host systems will be set in lockdown mode.
- If host system is already in lockdown mode and set to C(present), no action will be taken.
- If set to C(absent), all host systems will be removed from lockdown mode.
- If host system is already out of lockdown mode and set to C(absent), no action will be taken.
default: present
choices: [ present, absent ]
version_added: 2.5
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enter host system into lockdown mode
vmware_host_lockdown:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: present
delegate_to: localhost
- name: Exit host systems from lockdown mode
vmware_host_lockdown:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: absent
delegate_to: localhost
- name: Enter host systems into lockdown mode
vmware_host_lockdown:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname:
- '{{ esxi_hostname_1 }}'
- '{{ esxi_hostname_2 }}'
state: present
delegate_to: localhost
- name: Exit host systems from lockdown mode
vmware_host_lockdown:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname:
- '{{ esxi_hostname_1 }}'
- '{{ esxi_hostname_2 }}'
state: absent
delegate_to: localhost
- name: Enter all host system from cluster into lockdown mode
vmware_host_lockdown:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
state: present
delegate_to: localhost
'''
RETURN = r'''
results:
description: metadata about the lockdown state of the host systems
returned: always
type: dict
sample: {
"host_lockdown_state": {
"DC0_C0": {
"current_state": "present",
"previous_state": "absent",
"desired_state": "present",
},
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
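# The import above is wrapped in try/except so that a missing pyVmomi library
# does not break module loading; the PyVmomi helper class imported below is
# assumed to report the missing dependency through a proper module failure.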
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareLockdownManager(PyVmomi):
def __init__(self, module):
super(VmwareLockdownManager, self).__init__(module)
if not self.is_vcenter():
self.module.fail_json(msg="Lockdown operations are performed from vCenter only. "
"hostname %s is an ESXi server. Please specify hostname "
"as vCenter server." % self.module.params['hostname'])
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
def ensure(self):
"""
Manage lockdown mode of the given host systems.
"""
results = dict(changed=False, host_lockdown_state=dict())
change_list = []
desired_state = self.params.get('state')
for host in self.hosts:
results['host_lockdown_state'][host.name] = dict(current_state='',
desired_state=desired_state,
previous_state=''
)
changed = False
try:
if host.config.adminDisabled:
results['host_lockdown_state'][host.name]['previous_state'] = 'present'
if desired_state == 'absent':
host.ExitLockdownMode()
results['host_lockdown_state'][host.name]['current_state'] = 'absent'
changed = True
else:
results['host_lockdown_state'][host.name]['current_state'] = 'present'
elif not host.config.adminDisabled:
results['host_lockdown_state'][host.name]['previous_state'] = 'absent'
if desired_state == 'present':
host.EnterLockdownMode()
results['host_lockdown_state'][host.name]['current_state'] = 'present'
changed = True
else:
results['host_lockdown_state'][host.name]['current_state'] = 'absent'
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to manage lockdown mode for esxi"
" hostname %s : %s" % (host.name, to_native(host_config_fault.msg)))
except vim.fault.AdminDisabled as admin_disabled:
self.module.fail_json(msg="Failed to manage lockdown mode as administrator "
"permission has been disabled for "
"esxi hostname %s : %s" % (host.name, to_native(admin_disabled.msg)))
except Exception as generic_exception:
self.module.fail_json(msg="Failed to manage lockdown mode due to generic exception for esxi "
"hostname %s : %s" % (host.name, to_native(generic_exception)))
change_list.append(changed)
if any(change_list):
results['changed'] = True
self.module.exit_json(**results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='list', required=False),
state=dict(type='str', default='present', choices=['present', 'absent'], required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
vmware_lockdown_mgr = VmwareLockdownManager(module)
vmware_lockdown_mgr.ensure()
if __name__ == "__main__":
main()
|
suneeshtr/persona
|
refs/heads/master
|
node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py
|
107
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': 'NOT_USED_ON_ANDROID',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
android_standard_include_paths = set([
# JNI_H_INCLUDE in build/core/binary.mk
'dalvik/libnativehelper/include/nativehelper',
# from SRC_HEADERS in build/core/config.mk
'system/core/include',
'hardware/libhardware/include',
'hardware/libhardware_legacy/include',
'hardware/ril/include',
'dalvik/libnativehelper/include',
'frameworks/native/include',
'frameworks/native/opengl/include',
'frameworks/base/include',
'frameworks/base/opengl/include',
'frameworks/base/native/include',
'external/skia/include',
# TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
'bionic/libc/arch-arm/include',
'bionic/libc/include',
'bionic/libstdc++/include',
'bionic/libc/kernel/common',
'bionic/libc/kernel/arch-arm',
'bionic/libm/include',
'bionic/libm/include/arm',
'bionic/libthread_db/include',
])
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
make.ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
self.WriteLn('LOCAL_MODULE_TAGS := optional')
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
# Grab output directories; needed for Actions and Rules.
self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared)')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)' %
main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)' %
main_output)
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
rule_trigger = '%s_rule_trigger' % self.android_module
did_write_rule = False
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
did_write_rule = True
name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)'
% main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)'
% main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
self.WriteLn('%s: %s' % (output, main_output))
self.WriteLn('.PHONY: %s' % (rule_trigger))
self.WriteLn('%s: %s' % (rule_trigger, main_output))
self.WriteLn('')
if did_write_rule:
extra_sources.append(rule_trigger) # Force all rules to run.
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -r $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
config = configs[spec['default_configuration']]
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags'))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS')
cflags_c, includes_from_cflags_c = self.ExtractIncludesFromCFlags(
config.get('cflags_c'))
extracted_includes.extend(includes_from_cflags_c)
self.WriteList(cflags_c, 'MY_CFLAGS_C')
self.WriteList(config.get('defines'), 'MY_DEFS', prefix='-D',
quoter=make.EscapeCppDefine)
self.WriteLn('LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host or
# target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES)')
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
# If a source file gets copied, we still need to add the original source
# directory as header search path, for GCC searches headers in the
# directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_host_gyp'
else:
suffix = '_gyp'
if self.path:
name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
else:
name = '%s%s%s' % (prefix, self.target, suffix)
return make.StringToMakefileVariable(name)
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable' and self.toolset == 'host':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class,
self.android_module)
else:
path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class,
self.android_module)
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeLdFlags(self, ld_flags):
""" Clean up ldflags from gyp file.
Remove any ldflags that contain android_top_dir.
Args:
ld_flags: ldflags from gyp files.
Returns:
clean ldflags
"""
clean_ldflags = []
for flag in ld_flags:
if self.android_top_dir in flag:
continue
clean_ldflags.append(flag)
return clean_ldflags
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory;
filter out include paths that are already brought in by the Android build
system.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
# Filter out the Android standard search path.
if path not in android_standard_include_paths:
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
if cflags:
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
"""Compute the Android module names from libraries, ie spec.get('libraries')
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules)
"""
static_lib_modules = []
dynamic_lib_modules = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
# "-lstlport" -> libstlport
if lib.startswith('-l'):
if lib.endswith('_static'):
static_lib_modules.append('lib' + lib[2:])
else:
dynamic_lib_modules.append('lib' + lib[2:])
return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
config = configs[spec['default_configuration']]
# LDFLAGS
ldflags = list(config.get('ldflags', []))
static_flags, dynamic_flags = self.ComputeAndroidLibraryModuleNames(
ldflags)
self.WriteLn('')
self.WriteList(self.NormalizeLdFlags(ldflags), 'LOCAL_LDFLAGS')
# Libraries (i.e. -lfoo)
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
libraries)
# Link dependencies (i.e. libfoo.a, libfoo.so)
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
self.WriteLn('')
self.WriteList(static_flags + static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
self.WriteLn('')
self.WriteList(dynamic_flags + dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
if self.toolset == 'host':
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
else:
# Don't install target executables for now, as it results in them being
# included in ROM. This can be revisited if there's a reason to install
# them later.
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
# Sort to avoid non-functional changes to makefile.
build_files = sorted([os.path.join('$(LOCAL_PATH)', f) for f in build_files])
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
build_files_args = [os.path.join('$(PRIVATE_LOCAL_PATH)', f)
for f in build_files_args]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
makefile_path = os.path.join('$(LOCAL_PATH)', makefile_name)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write('GYP_FILES := \\\n %s\n\n' %
'\\\n '.join(map(Sourceify, build_files)))
root_makefile.write('%s: PRIVATE_LOCAL_PATH := $(LOCAL_PATH)\n' %
makefile_path)
root_makefile.write('%s: $(GYP_FILES)\n' % makefile_path)
root_makefile.write('\techo ACTION Regenerating $@\n\t%s\n\n' %
gyp.common.EncodePOSIXShellList([gyp_binary, '-fandroid'] +
gyp.RegenerateFlags(options) +
build_files_args))
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid.mk' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
make.ensure_directory_exists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = (qualified_target in needed_targets and
not int(spec.get('suppress_wildcard', False)))
if limit_to_target_all and not part_of_all:
continue
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, base_path, output_file,
spec, configs, part_of_all=part_of_all)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Some tools need to know the absolute path of the top directory.
root_makefile.write('GYP_ABS_ANDROID_TOP_DIR := $(shell pwd)\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if generator_flags.get('auto_regeneration', True):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
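# --- Illustrative sketch (not part of gyp) -----------------------------------
# A small, hypothetical demonstration of two of the writer helpers above; the
# example cflags and library names are made up. Running this module directly
# (inside a gyp checkout, so the imports at the top resolve) prints:
#   (['-O2'], ['foo/bar', 'baz'])
#   (['libfoo'], ['liblog', 'libbaz'])
if __name__ == '__main__':
  _writer = AndroidMkWriter(android_top_dir='/tmp/android')
  # Split "-I..." include paths out of a cflags list.
  print(_writer.ExtractIncludesFromCFlags(['-Ifoo/bar', '-O2', '-Ibaz']))
  # Map library references to Android static/shared module names.
  print(_writer.ComputeAndroidLibraryModuleNames(
      ['-lm -llog libfoo.a some/dir/libbaz.so']))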
|
wilkerwma/codeschool
|
refs/heads/master
|
src/cs_questions/migrations/old/0005_auto_20160511_1211.py
|
4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-11 15:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cs_questions', '0004_auto_20160508_2233'),
]
operations = [
migrations.AlterField(
model_name='questionactivity',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='cs_questions.Question'),
),
]
|
staranjeet/fjord
|
refs/heads/master
|
vendor/packages/click/tests/conftest.py
|
50
|
from click.testing import CliRunner
import pytest
@pytest.fixture(scope='function')
def runner(request):
return CliRunner()
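# --- Illustrative usage sketch (not part of the original conftest) -----------
# How a test module in this package would typically consume the ``runner``
# fixture above; the ``hello`` command here is hypothetical, purely to show
# the CliRunner.invoke() pattern.
import click


def _example_runner_usage(runner):
    """Hypothetical example: invoke a tiny click command through CliRunner."""
    @click.command()
    @click.argument('name')
    def hello(name):
        click.echo('Hello %s!' % name)

    result = runner.invoke(hello, ['World'])
    assert result.exit_code == 0
    assert result.output == 'Hello World!\n'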
|
johnjohndoe/spendb
|
refs/heads/master
|
spendb/__init__.py
|
5
|
# this is a namespace package
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
import warnings
warnings.filterwarnings('ignore', 'Options will be ignored.')
# Silence SQLAlchemy warning:
import warnings
warnings.filterwarnings(
'ignore',
'Unicode type received non-unicode bind param value.')
warnings.filterwarnings(
'ignore',
'Unicode type received non-unicodebind param value.')
__version__ = '0.19'
|
tayfun/django
|
refs/heads/master
|
django/contrib/sessions/backends/base.py
|
298
|
from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=None):
self.modified = self.modified or key in self._session
return self._session.pop(key, default)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values will be set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
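# --- Illustrative sketch (not part of Django) --------------------------------
# The five methods above (exists/create/save/delete/load) are the whole
# contract a session backend has to fill in. A toy, process-local backend
# might look roughly like this; it is an illustration of the API only, not a
# supported or production-safe backend.
class _ExampleInMemorySession(SessionBase):
    _store = {}  # shared on purpose: one in-process "database" for the demo

    def exists(self, session_key):
        return session_key in self._store

    def create(self):
        self._session_key = self._get_new_session_key()
        self.save(must_create=True)
        self.modified = True

    def save(self, must_create=False):
        key = self._get_or_create_session_key()
        if must_create and key in self._store:
            raise CreateError
        self._store[key] = dict(self._get_session(no_load=must_create))

    def delete(self, session_key=None):
        self._store.pop(session_key or self.session_key, None)

    def load(self):
        return dict(self._store.get(self.session_key, {}))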
|
younesnait/Embouteillage_repo
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/_exts/configurationblock.py
|
2577
|
#Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
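# --- Illustrative usage sketch (not part of the original extension) ----------
# A hypothetical reST fragment showing how documentation would consume the
# directive registered above: each nested code-block becomes a list item
# labelled with the language name looked up in ConfigurationBlock.formats.
_EXAMPLE_USAGE = """\
.. configuration-block::

    .. code-block:: yaml

        doctrine:
            dbal: { driver: pdo_mysql }

    .. code-block:: xml

        <doctrine:dbal driver="pdo_mysql" />
"""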
|
maestrano/odoo
|
refs/heads/master
|
addons/portal_stock/__openerp__.py
|
437
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Stock',
'version': '0.1',
'category': 'Tools',
'complexity': 'easy',
'description': """
This module adds access rules to your portal if stock and portal are installed.
==========================================================================================
""",
'author': 'OpenERP SA',
'depends': ['sale_stock','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
leonardowolf/bookfree
|
refs/heads/master
|
flask/lib/python3.5/site-packages/werkzeug/contrib/securecookie.py
|
254
|
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
an alternative mac method. The important thing is that the mac method
is a function with a similar interface to the hashlib. Required
methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
:param secret_key: the secret key. If set to `None` or not specified,
it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
#: The hash method to use. This has to be a module with a new function
#: or a function that creates a hashlib object. Such as `hashlib.md5`
#: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
#: the module used for serialization. Unless overridden by subclasses
#: the standard pickle module is used.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = bytes(secret_key)
self.secret_key = secret_key
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b''.join(base64.b64encode(value).splitlines()).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
# unfortunately pickle and other serialization modules can
# cause pretty much every error here. if we get one we catch it
# and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
after expiration when you unserialize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError('no secret key defined')
if expires:
self['_expires'] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(('%s=%s' % (
url_quote_plus(key),
self.quote(value).decode('ascii')
)).encode('ascii'))
mac.update(b'|' + result[-1])
return b'?'.join([
base64.b64encode(mac.digest()).strip(),
b'&'.join(result)
])
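    # Editor's note (illustrative, not original Werkzeug text): the value built
    # above has the shape
    #
    #     b64(hmac-sha1 digest) ? quoted_key1=quote(value1) & quoted_key2=...
    #
    # i.e. the MAC over every '|'-prefixed key/value pair, a '?' separator and
    # the '&'-joined pairs themselves; unserialize() below splits on the same
    # '?' and '&' characters and recomputes the MAC before unquoting anything.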
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if b'=' not in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
# securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
cookie is not set, a new :class:`SecureCookie` instance is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(self, response, key='session', expires=None,
session_expires=None, max_age=None, path='/', domain=None,
secure=None, httponly=False, force=False):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(key, data, expires=expires, max_age=max_age,
path=path, domain=domain, secure=secure,
httponly=httponly)
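
# Editor's note: a minimal hardening sketch, not part of Werkzeug itself.
# Because pickle can be abused if the secret key ever leaks, a subclass can
# swap in a serializer that only handles plain data; any object exposing
# dumps()/loads() that operate on bytes is accepted by quote()/unquote() above.
# The names _JSONSerializer and JSONSecureCookie are made up for the example,
# and only JSON-serializable values (tuples become lists) can be stored.
#
#     import json
#
#     class _JSONSerializer(object):
#         @staticmethod
#         def dumps(value):
#             return json.dumps(value).encode('utf-8')
#
#         @staticmethod
#         def loads(value):
#             return json.loads(value.decode('utf-8'))
#
#     class JSONSecureCookie(SecureCookie):
#         serialization_method = _JSONSerializer
#
#     cookie = JSONSecureCookie({'user_id': 42}, b'secret-key')
#     data = cookie.serialize()
#     JSONSecureCookie.unserialize(data, b'secret-key')['user_id']  # -> 42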
|
amir343/ansible
|
refs/heads/devel
|
v1/ansible/module_utils/gce.py
|
305
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pprint
USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1"
def gce_connect(module, provider=None):
"""Return a Google Compute Engine connection."""
service_account_email = module.params.get('service_account_email', None)
pem_file = module.params.get('pem_file', None)
project_id = module.params.get('project_id', None)
# If any of the values are not given as parameters, check the appropriate
# environment variables.
if not service_account_email:
service_account_email = os.environ.get('GCE_EMAIL', None)
if not project_id:
project_id = os.environ.get('GCE_PROJECT', None)
if not pem_file:
pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
# If we still don't have one or more of our credentials, attempt to
# get the remaining values from the libcloud secrets file.
if service_account_email is None or pem_file is None:
try:
import secrets
except ImportError:
secrets = None
if hasattr(secrets, 'GCE_PARAMS'):
if not service_account_email:
service_account_email = secrets.GCE_PARAMS[0]
if not pem_file:
pem_file = secrets.GCE_PARAMS[1]
keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not project_id:
project_id = keyword_params.get('project', None)
# If we *still* don't have the credentials we need, then it's time to
# just fail out.
if service_account_email is None or pem_file is None or project_id is None:
module.fail_json(msg='Missing GCE connection parameters in libcloud '
'secrets file.')
return None
# Allow for passing in libcloud Google DNS (e.g., Provider.GOOGLE)
if provider is None:
provider = Provider.GCE
try:
gce = get_driver(provider)(service_account_email, pem_file,
datacenter=module.params.get('zone', None),
project=project_id)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except (RuntimeError, ValueError), e:
module.fail_json(msg=str(e), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
return gce
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return 'Unexpected response: ' + pprint.pformat(vars(error))
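
# Editor's note: an illustrative sketch, not part of the original snippet.
# This module_utils fragment assumes the embedding Ansible module has already
# imported ``os`` plus the libcloud driver machinery, roughly:
#
#     import os
#     from libcloud.compute.types import Provider
#     from libcloud.compute.providers import get_driver
#
# and a module would then typically do something like:
#
#     gce = gce_connect(module)      # module: an AnsibleModule with GCE params
#     if gce is not None:
#         nodes = gce.list_nodes()   # standard libcloud compute driver call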
|
alecbenson/coup
|
refs/heads/master
|
src/server.py
|
2
|
#Authors: Joe DiSabito, Ryan Hartman, Alec Benson
import SocketServer
from collections import deque
import sys, threading, urllib
from deck import Deck
from vote import Vote
from player import Player, PlayerQueue
from error import *
class CoupServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class CoupRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, callback, *args, **keys):
self.cg = callback
SocketServer.BaseRequestHandler.__init__(self, *args, **keys)
'''
When a client connects, a thread is spawned for the client and handle() is called.
handle() will, as the name suggests, handle the data that the client sends and act accordingly.
'''
def handle(self):
q = self.cg.players
conn = self.request
while True:
try:
self.data = conn.recv(1024).strip()
player = q.getPlayer(conn)
self.parseRequest(player, self.data)
#If the player issuing the request is in the game...
if not q.isClientRegistered(conn):
raise UnregisteredPlayerError(conn)
except IOError:
conn.close()
q.removePlayer(conn)
return
except UnregisteredPlayerError:
pass
'''
Sends a chat message from player to all connected clients. If the user is unregistered, the message is Anonymous
'''
def chatMessage(self, player, parts):
if len(parts) >= 2:
if player is None:
self.broadcast_message("Anonymous: {0}\n".format(parts[1]))
else:
self.broadcast_message("{0}: {1}\n".format(player.name, parts[1]))
'''Broadcasts message to all connected players'''
def broadcast_message(self, message):
for player in self.cg.players.listPlayers():
player.conn.sendall(message)
'''
Boots a player from the server
'''
def kick(self, player, parts):
return player.conn.close()
'''
Prints the target player's current hand, or displays the current player's hand if no name is provided
'''
def showHand(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if len(parts) >= 2:
name = parts[1]
#If the player enters their own name
if name == player.name:
return player.conn.sendall(player.getHand(True))
#If the player enters another player's name
target = self.cg.players.getPlayerByName(name)
if target == None:
raise NoSuchPlayerError(self.request, name)
return player.conn.sendall(target.getHand(False))
else:
#The player enters no name (default)
return player.conn.sendall(player.getHand(True))
except (UnregisteredPlayerError, NoSuchPlayerError) as e:
pass
'''
Prints the number of coins the player has
'''
def showCoins(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
message = "Coins: {}\n".format(player.coins)
player.conn.sendall(message)
except UnregisteredPlayerError as e:
pass
'''
Lists all of the players and the number of coins that they have
'''
def listplayers(self, parts):
formatted_list = ""
for player in self.cg.players.listPlayers():
formatted_list += "{0} ({1} Coins)\n".format(player.name, player.coins)
if not formatted_list:
return self.request.sendall("No registered players.\n")
self.request.sendall(formatted_list)
'''
Performs either a Duke tax, Foreign Aid, or Income.
'''
def getCoins(self, player, parts, coins):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if not self.cg.players.isPlayersTurn(player):
raise NotYourTurnError(self.request)
if self.cg.treasury < coins:
raise NotEnoughTreasuryCoinsError(self.request)
if len(player.cards) > 2:
raise AlreadyExchangingError(self.request)
if player.coins >= 10:
raise MustCoupError(self.request)
return True
except (AlreadyExchangingError, UnregisteredPlayerError, NotYourTurnError, NotEnoughTreasuryCoinsError, MustCoupError) as e:
return False
'''
Functions (duke, foreignAid, income) using getCoins as helper function
'''
def tax(self, player, parts):
if self.getCoins(player, parts, 3):
self.broadcast_message("{} called TAX, the Duke ability, and will get 3 coins. Other players type \"/challenge\" or \"/pass\" to continue.\n".format(player.name))
def failFunc(handler, passers, player):
player.coins += 3
handler.cg.treasury -= 3
handler.broadcast_message("No challengers, {} has gained 3 coins.\n".format(player.name))
handler.broadcast_message(handler.cg.players.advanceTurn())
def successFunc(handler, challengers, player):
card = player.checkForCard('Duke')
if card != -1:
#player exchanges Duke with deck
handler.cg.deck.swapCard(player, card)
#player gets 3 coins
player.coins += 3
handler.cg.treasury -= 3
#challenger loses a card
target = challengers[0]
handler.destroy(player, target, 0)
handler.broadcast_message("Challenge failed! {0} reveals a Duke from his hand, exchanges it with the deck, and still gains 3 coins. {1} loses a card.\n".format(player.name, target.name))
else:
#player loses a card
handler.broadcast_message("Challenge succeeded! {0} loses a card.\n".format(player.name))
handler.broadcast_message(handler.cg.players.advanceTurn())
#TODO: Challenge vote given Tax
voteQueue = PlayerQueue()
for voter in self.cg.players.listPlayers():
if not (voter.name == player.name):
voteQueue.addPlayer(voter)
passThreshold = (1 - self.cg.players.numPlayers()) * 100
successArgs = [player]
failArgs = [player]
challenge = Vote(self, voteQueue, "challenge", 20, passThreshold, successFunc, successArgs, failFunc, failArgs)
def foreignAid(self, player, parts):
if self.getCoins(player, parts, 2):
self.broadcast_message("{} received FOREIGN AID.\n".format(player.name))
def income(self, player, parts):
if self.getCoins(player, parts, 1):
self.broadcast_message("{} called INCOME.\n".format(player.name))
'''
Stealing, CAPTAIN ability
'''
def steal(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if not self.cg.players.isPlayersTurn(player):
raise NotYourTurnError(self.request)
if player.coins >= 10:
raise MustCoupError(self.request)
if len(player.cards) > 2:
raise AlreadyExchangingError(self.request)
name = parts[1]
if name == player.name:
raise InvalidCommandError(self.request, "You cannot target yourself.\n")
target = self.cg.players.getPlayerByName(name)
if target == None:
raise NoSuchPlayerError(self.request, name)
if target.coins < 2:
raise NotEnoughCoinsError(self.request, target.name)
message = player.name + " is claiming CAPTAIN, stealing from " + target.name + ".\n"
self.broadcast_message(message)
#TODO:Challenge and block
player.coins += 2
target.coins -= 2
self.broadcast_message(self.cg.players.advanceTurn())
except (UnregisteredPlayerError, NotYourTurnError, InvalidCommandError, NoSuchPlayerError, NotEnoughCoinsError, MustCoupError, AlreadyExchangingError) as e:
pass
'''
Exchanging cards with deck, AMBASSADOR ability
'''
def exchange(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if not self.cg.players.isPlayersTurn(player):
raise NotYourTurnError(self.request)
if len(player.cards) > 2:
raise AlreadyExchangingError(self.request)
if player.coins >= 10:
raise MustCoupError(self.request)
message = player.name + " is claiming AMBASSADOR, exchanging cards with the deck.\n"
self.broadcast_message(message)
player.cards.append(self.cg.deck.deal())
player.cards.append(self.cg.deck.deal())
self.showHand(player, ["",player.name])
message = player.name + " has been dealt two cards to exchange.\n"
self.broadcast_message(message)
player.conn.sendall("Select cards to remove (1 to {}, where 1 is the top card) " \
"from least to greatest without a space. Ex. /remove 23\n".format(str(len(player.cards))))
except (AlreadyExchangingError, UnregisteredPlayerError, NotYourTurnError,
InvalidCommandError, NoSuchPlayerError, NotEnoughCoinsError, MustCoupError) as e:
pass
'''
Remove function to carry out the second half of Ambassador ability.
'''
def remove(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if not self.cg.players.isPlayersTurn(player):
raise NotYourTurnError(self.request)
if len(player.cards) <= 2:
raise CannotRemoveError(self.request)
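#parts[1] is a two-digit selection such as "23", entered least to greatest, so
#the ones digit names the larger position; it is deleted first below so that
#the smaller index is still valid afterwards.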
card1 = int(parts[1]) % 10 - 1
card2 = int(parts[1]) / 10 - 1
self.cg.deck.addCard(player.cards[card1])
self.cg.deck.addCard(player.cards[card2])
del player.cards[card1]
del player.cards[card2]
self.showHand(player, ["",player.name])
message = player.name + " has returned 2 cards to the deck.\n"
self.broadcast_message(message)
self.broadcast_message(self.cg.players.advanceTurn())
self.cg.deck.shuffle()
except (CannotRemoveError, UnregisteredPlayerError, NotYourTurnError,
InvalidCommandError, NoSuchPlayerError, NotEnoughCoinsError, MustCoupError, NotEnoughArguments) as e:
pass
'''
Performs card destruction (coup, assassination, challenge)
'''
def destroy(self, player, target, coins):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if not self.cg.players.isPlayersTurn(player) and coins != 0:
raise NotYourTurnError(self.request)
if player.coins >= 10 and coins == 3:
raise MustCoupError(self.request)
#The caller resolves the target Player from the requested name, so validate it here
if target is None:
raise InvalidCommandError(self.request, "You need to specify a valid player (by name) that you want to target\n")
if target.name == player.name:
raise InvalidCommandError(self.request, "You cannot target yourself. Nice try.\n")
if player.coins < coins:
raise NotEnoughCoinsError(self.request, "")
player.coins -= coins
self.cg.treasury += coins
#TODO: ADD CHALLENGE/PROTECTION CHANCE HERE
self.broadcast_message("{0} is forcing {1} to lose a card.\n".format(player.name, target.name))
self.broadcast_message(target.killCardInHand())
self.broadcast_message(self.cg.players.advanceTurn())
return target
except (UnregisteredPlayerError, NotYourTurnError, InvalidCommandError, NoSuchPlayerError, NotEnoughCoinsError, MustCoupError) as e:
return None
'''
Assassination (using destroy as helper function), card destruction with loss of 3 coins
'''
def assassinate(self, player, parts):
target = self.cg.players.getPlayerByName(parts[1])
target = self.destroy(player, target, 3)
if target is not None:
self.broadcast_message("{0} will ASSASSINATE {1}.\n".format(player.name, target.name))
'''
Coup (using destroy as helper function), card destruction with loss of 7 coins
'''
def coup(self, player, parts):
target = self.cg.players.getPlayerByName(parts[1])
target = self.destroy(player, target, 7)
if target is not None:
self.broadcast_message("{0} called a COUP on {1}.\n".format(player.name, target.name))
'''
Ends the player's turn, or raises a NotYourTurnError if it is not the player's turn to move
'''
def endturn(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
if not self.cg.players.isPlayersTurn(player):
raise NotYourTurnError(self.request)
self.broadcast_message("{} ended his turn.\n".format(player.name))
self.broadcast_message(self.cg.players.advanceTurn())
except (UnregisteredPlayerError, NotYourTurnError) as e:
pass
'''
A helper to verify that a requested name is valid before it is registered
'''
def isValidName(self, name):
try:
strname = str(name)
length = len(strname)
if length <= 0 or length >= 20:
raise InvalidCommandError(self.request, "Name must be between 1 and 20 characters in length.\n")
if self.cg.players.getPlayerByName(name):
raise InvalidCommandError(self.request, "A user with this name is already registered.\n")
return True
except (InvalidCommandError) as e:
return False
'''
Registers the client with the name provided
'''
def register(self, parts):
try:
if len(parts) < 2:
raise InvalidCommandError(self.request, "Could not register: please provide a name.")
name = parts[1]
if self.cg.players.isClientRegistered(self.request):
raise AlreadyRegisteredPlayerError(self.request)
if self.isValidName(name):
newPlayer = Player(self.request, name, self.cg.deck.deal(), self.cg.deck.deal())
msg = self.cg.players.addPlayer(newPlayer)
self.broadcast_message(msg)
except (InvalidCommandError, AlreadyRegisteredPlayerError) as e:
pass
'''Sets a player as ready or unready and announces to all clients'''
def ready(self, player, parts):
try:
if player is None:
raise UnregisteredPlayerError(self.request)
self.broadcast_message(player.toggleReady())
except UnregisteredPlayerError:
pass
'''
Prints a help message for clients
'''
def help(self, player, parts):
message = "\nCOMMANDS:\n/say\n/exit\n/help\n/hand\n/tax\n/register\n/exchange\n/income\n/aid\n/steal\n/assassinate\n/ready\n/endturn\n"
player.conn.sendall(message)
'''
Parses the client's request and dispatches to the correct function
'''
def parseRequest(self, player, message):
parts = message.split(' ',1)
command = parts[0]
if command == "/say":
self.chatMessage(player, parts)
elif command == "/exit":
self.kick(player, parts)
elif command == "/help":
self.help(player,parts)
elif command == "/hand":
self.showHand(player, parts)
elif command == "/coins":
self.showCoins(player, parts)
elif command == "/tax":
self.tax(player, parts)
elif command == "/income":
self.income(player, parts)
elif command == "/aid":
self.foreignAid(player, parts)
elif command == "/coup":
self.coup(player, parts)
elif command == "/assassinate":
self.assassinate(player, parts)
elif command == "/exchange":
self.exchange(player, parts)
elif command == "/remove":
self.remove(player, parts)
elif command == "/steal":
self.steal(player, parts)
elif command == "/register":
self.register(parts)
elif command == "/ready":
self.ready(player, parts)
elif command == "/endturn":
self.endturn(player, parts)
elif command == "/players":
self.listplayers(parts)
elif command == "/challenge":
currentVote = self.cg.players.getVote("challenge")
currentVote.vote(player, True)
elif command == "/pass":
currentVote = self.cg.players.getVote("challenge")
currentVote.vote(player, False)
elif command != "":
self.request.sendall("Unrecognized command.\n")
class CoupGame(object):
def __init__(self):
self.deck = Deck()
self.destroyedCards = []
self.players = PlayerQueue()
#coins dispersed
self.treasury = 50 - 2 * self.players.numPlayers() #50 is starting amt
#deck shuffled
self.deck.shuffle()
'''
handler_factory() creates a function called create_handler.
The function is handed to the CoupServer.
The function gets invoked when a new handler is created (when a new client connects).
'''
def handler_factory(callback):
def createHandler(*args, **keys):
return CoupRequestHandler(callback, *args, **keys)
return createHandler
if __name__ == "__main__":
print "Welcome to COUP!\n"
HOST, PORT = sys.argv[1], int(sys.argv[2])
if sys.argv[1] == "external":
HOST = urllib.urlopen('http://canihazip.com/s').read()
print "Network-facing IP:", HOST
cg = CoupGame()
try:
server = CoupServer((HOST, PORT), handler_factory(cg) )
except Exception as e:
server = CoupServer(('localhost', PORT), handler_factory(cg) )
print "External binding FAILED. Running LOCALLY on port", PORT
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
server_thread.join()
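# Editor's note: an illustrative sketch, not part of the original file. The
# server speaks a plain newline-delimited text protocol, so a quick manual
# smoke test is possible with netcat, e.g.:
#
#     $ python server.py localhost 9999
#     $ nc localhost 9999
#     /register alice
#     /ready
#     /hand
#     /say hello everyone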
|
bashburn/openshift-ansible
|
refs/heads/master
|
filter_plugins/openshift_master.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-master
'''
import copy
import sys
import yaml
from ansible import errors
from ansible.runner.filter_plugins.core import bool as ansible_bool
class IdentityProviderBase(object):
""" IdentityProviderBase
Attributes:
name (str): Identity provider Name
login (bool): Is this identity provider a login provider?
challenge (bool): Is this identity provider a challenge provider?
provider (dict): Provider specific config
_idp (dict): internal copy of the IDP dict passed in
_required (list): List of lists of strings for required attributes
_optional (list): List of lists of strings for optional attributes
_allow_additional (bool): Does this provider support attributes
not in _required and _optional
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
# disabling this check since the number of instance attributes are
# necessary for this class
# pylint: disable=too-many-instance-attributes
def __init__(self, api_version, idp):
if api_version not in ['v1']:
raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
self._idp = copy.deepcopy(idp)
if 'name' not in self._idp:
raise errors.AnsibleFilterError("|failed identity provider missing a name")
if 'kind' not in self._idp:
raise errors.AnsibleFilterError("|failed identity provider missing a kind")
self.name = self._idp.pop('name')
self.login = ansible_bool(self._idp.pop('login', False))
self.challenge = ansible_bool(self._idp.pop('challenge', False))
self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
self._required = [['mappingMethod', 'mapping_method']]
self._optional = []
self._allow_additional = True
@staticmethod
def validate_idp_list(idp_list):
''' validates a list of idps '''
login_providers = [x.name for x in idp_list if x.login]
if len(login_providers) > 1:
raise errors.AnsibleFilterError("|failed multiple providers are "
"not allowed for login. login "
"providers: {0}".format(', '.join(login_providers)))
names = [x.name for x in idp_list]
if len(set(names)) != len(names):
raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
for idp in idp_list:
idp.validate()
def validate(self):
''' validate an instance of this idp class '''
valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
if self.provider['mappingMethod'] not in valid_mapping_methods:
raise errors.AnsibleFilterError("|failed unknown mapping method "
"for provider {0}".format(self.__class__.__name__))
@staticmethod
def get_default(key):
''' get a default value for a given key '''
if key == 'mappingMethod':
return 'claim'
else:
return None
def set_provider_item(self, items, required=False):
''' set a provider item based on the list of item names provided. '''
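# items is a list of aliases for one logical provider key; items[0] is the
# canonical (camelCase) key written into self.provider, and the for/else
# falls back to get_default() when none of the aliases appears in the idp.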
for item in items:
provider_key = items[0]
if item in self._idp:
self.provider[provider_key] = self._idp.pop(item)
break
else:
default = self.get_default(provider_key)
if default is not None:
self.provider[provider_key] = default
elif required:
raise errors.AnsibleFilterError("|failed provider {0} missing "
"required key {1}".format(self.__class__.__name__, provider_key))
def set_provider_items(self):
''' set the provider items for this idp '''
for items in self._required:
self.set_provider_item(items, True)
for items in self._optional:
self.set_provider_item(items)
if self._allow_additional:
for key in self._idp.keys():
self.set_provider_item([key])
else:
if len(self._idp) > 0:
raise errors.AnsibleFilterError("|failed provider {0} "
"contains unknown keys "
"{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
def to_dict(self):
''' translate this idp to a dictionary '''
return dict(name=self.name, challenge=self.challenge,
login=self.login, provider=self.provider)
class LDAPPasswordIdentityProvider(IdentityProviderBase):
""" LDAPPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
self._required += [['attributes'], ['url'], ['insecure']]
self._optional += [['ca'],
['bindDN', 'bind_dn'],
['bindPassword', 'bind_password']]
self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:
pref_user = self._idp['attributes'].pop('preferred_username')
self._idp['attributes']['preferredUsername'] = pref_user
def validate(self):
''' validate this idp instance '''
IdentityProviderBase.validate(self)
if not isinstance(self.provider['attributes'], dict):
raise errors.AnsibleFilterError("|failed attributes for provider "
"{0} must be a dictionary".format(self.__class__.__name__))
attrs = ['id', 'email', 'name', 'preferredUsername']
for attr in attrs:
if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):
raise errors.AnsibleFilterError("|failed {0} attribute for "
"provider {1} must be a list".format(attr, self.__class__.__name__))
unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)
if len(unknown_attrs) > 0:
raise errors.AnsibleFilterError("|failed provider {0} has unknown "
"attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs)))
class KeystonePasswordIdentityProvider(IdentityProviderBase):
""" KeystoneIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
self._required += [['url'], ['domainName', 'domain_name']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
class RequestHeaderIdentityProvider(IdentityProviderBase):
""" RequestHeaderIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
self._required += [['headers']]
self._optional += [['challengeURL', 'challenge_url'],
['loginURL', 'login_url'],
['clientCA', 'client_ca']]
def validate(self):
''' validate this idp instance '''
IdentityProviderBase.validate(self)
if not isinstance(self.provider['headers'], list):
raise errors.AnsibleFilterError("|failed headers for provider {0} "
"must be a list".format(self.__class__.__name__))
class AllowAllPasswordIdentityProvider(IdentityProviderBase):
""" AllowAllPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
class DenyAllPasswordIdentityProvider(IdentityProviderBase):
""" DenyAllPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
""" HTPasswdPasswordIdentity
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
self._required += [['file', 'filename', 'fileName', 'file_name']]
@staticmethod
def get_default(key):
if key == 'file':
return '/etc/origin/htpasswd'
else:
return IdentityProviderBase.get_default(key)
class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
""" BasicAuthPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
self._required += [['url']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
class IdentityProviderOauthBase(IdentityProviderBase):
""" IdentityProviderOauthBase
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderBase.__init__(self, api_version, idp)
self._allow_additional = False
self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
def validate(self):
''' validate this idp instance '''
IdentityProviderBase.validate(self)
if self.challenge:
raise errors.AnsibleFilterError("|failed provider {0} does not "
"allow challenge authentication".format(self.__class__.__name__))
class OpenIDIdentityProvider(IdentityProviderOauthBase):
""" OpenIDIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
self._required += [['claims'], ['urls']]
self._optional += [['ca'],
['extraScopes'],
['extraAuthorizeParameters']]
if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
pref_user = self._idp['claims'].pop('preferred_username')
self._idp['claims']['preferredUsername'] = pref_user
if 'urls' in self._idp and 'user_info' in self._idp['urls']:
user_info = self._idp['urls'].pop('user_info')
self._idp['urls']['userInfo'] = user_info
if 'extra_scopes' in self._idp:
self._idp['extraScopes'] = self._idp.pop('extra_scopes')
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
if 'extraAuthorizeParameters' in self._idp:
if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
if not isinstance(self.provider['claims'], dict):
raise errors.AnsibleFilterError("|failed claims for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
if 'extraScopes' in self.provider and not isinstance(self.provider['extraScopes'], list):
raise errors.AnsibleFilterError("|failed extraScopes for provider "
"{0} must be a list".format(self.__class__.__name__))
if ('extraAuthorizeParameters' in self.provider
        and not isinstance(self.provider['extraAuthorizeParameters'], dict)):
raise errors.AnsibleFilterError("|failed extraAuthorizeParameters "
"for provider {0} must be a dictionary".format(self.__class__.__name__))
required_claims = ['id']
optional_claims = ['email', 'name', 'preferredUsername']
all_claims = required_claims + optional_claims
for claim in required_claims:
if claim in required_claims and claim not in self.provider['claims']:
raise errors.AnsibleFilterError("|failed {0} claim missing "
"for provider {1}".format(claim, self.__class__.__name__))
for claim in all_claims:
if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
raise errors.AnsibleFilterError("|failed {0} claims for "
"provider {1} must be a list".format(claim, self.__class__.__name__))
unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
if len(unknown_claims) > 0:
raise errors.AnsibleFilterError("|failed provider {0} has unknown "
"claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
if not isinstance(self.provider['urls'], dict):
raise errors.AnsibleFilterError("|failed urls for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
required_urls = ['authorize', 'token']
optional_urls = ['userInfo']
all_urls = required_urls + optional_urls
for url in required_urls:
if url not in self.provider['urls']:
raise errors.AnsibleFilterError("|failed {0} url missing for "
"provider {1}".format(url, self.__class__.__name__))
unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
if len(unknown_urls) > 0:
raise errors.AnsibleFilterError("|failed provider {0} has unknown "
"urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
class GoogleIdentityProvider(IdentityProviderOauthBase):
""" GoogleIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
self._optional += [['hostedDomain', 'hosted_domain']]
class GitHubIdentityProvider(IdentityProviderOauthBase):
""" GitHubIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
pass
class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@staticmethod
def translate_idps(idps, api_version):
''' Translates a list of dictionaries into a valid identityProviders config '''
idp_list = []
if not isinstance(idps, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
for idp in idps:
if not isinstance(idp, dict):
raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
cur_module = sys.modules[__name__]
idp_class = getattr(cur_module, idp['kind'], None)
idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
idp_inst.set_provider_items()
idp_list.append(idp_inst)
IdentityProviderBase.validate_idp_list(idp_list)
return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
def filters(self):
''' returns a mapping of filters to methods '''
return {"translate_idps": self.translate_idps}
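# Editor's note: an illustrative sketch, not part of the original filter. The
# filter is normally applied from Jinja2 templates/vars, but called directly
# it looks roughly like this (the idp values are made-up examples):
#
#     idps = [{'name': 'htpasswd_auth',
#              'kind': 'HTPasswdPasswordIdentityProvider',
#              'login': True,
#              'challenge': True,
#              'filename': '/etc/origin/htpasswd'}]
#     print FilterModule.translate_idps(idps, 'v1')
#
# which yields a YAML identityProviders stanza with mappingMethod defaulted
# to 'claim' and the htpasswd path mapped onto the 'file' key.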
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/perl-clone/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlClone(PerlPackage):
"""Clone - recursively copy Perl datatypes"""
homepage = "https://metacpan.org/pod/Clone"
url = "https://cpan.metacpan.org/authors/id/G/GA/GARU/Clone-0.41.tar.gz"
version('0.41', sha256='e8c056dcf4bc8889079a09412af70194a54a269689ba72edcd91291a46a51518')
|
mkaluza/external_chromium_org
|
refs/heads/kk44
|
tools/telemetry/telemetry/core/heap/chrome_js_heap_snapshot_parser.py
|
38
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from telemetry.core.heap import live_heap_object
from telemetry.core.heap import retaining_edge
class ChromeJsHeapSnapshotParser(object):
""" Parser for the heap snapshot.
The heap snapshot JSON format is defined by HeapSnapshotJSONSerializer in V8.
The snapshot contains a list of integers describing nodes (types, names, etc.)
and a list of integers describing edges (types, the node the edge points to,
etc.) and a string table. All strings are expressed as indices to the string
table.
In addition, the snapshot contains meta information describing the data fields
for nodes and the data fields for edges.
Attributes:
_node_dict: {int -> LiveHeapObject}, maps integer ids to LiveHeapObject
objects.
_node_list: [int], the raw node data of the heap snapshot.
_edge_list: [int], the raw edge data of the heap snapshot.
_node_types: [str], the possible node types in the heap snapshot.
_edge_types: [str], the possible edge types in the heap snapshot.
_node_fields: [str], the fields present in the heap snapshot for each node.
_edge_fields: [str], the fields present in the heap snapshot for each edge.
_node_type_ix: int, index of the node type field.
_node_name_ix: int, index of the node name field.
_node_id_ix: int, index of the node id field.
_node_edge_count_ix: int, index of the node edge count field.
_node_field_count: int, number of node fields.
_edge_type_ix: int, index of the edge type field.
_edge_name_or_ix_ix: int, index of the "edge name or index" field.
_edge_to_node_ix: int, index of the "to node for an edge" field.
_edge_field_count: int, number of edge fields.
"""
def __init__(self, raw_data):
heap = json.loads(raw_data)
self._node_dict = {}
# Read the snapshot components (nodes, edges, strings, metadata).
self._node_list = heap['nodes']
self._edge_list = heap['edges']
self._strings = heap['strings']
self._node_types = heap['snapshot']['meta']['node_types'][0]
self._edge_types = heap['snapshot']['meta']['edge_types'][0]
node_fields = heap['snapshot']['meta']['node_fields']
edge_fields = heap['snapshot']['meta']['edge_fields']
# Find the indices of the required node and edge fields based on the
# metadata.
self._node_type_ix = node_fields.index('type')
self._node_name_ix = node_fields.index('name')
self._node_id_ix = node_fields.index('id')
self._node_edge_count_ix = node_fields.index('edge_count')
self._node_field_count = len(node_fields)
self._edge_type_ix = edge_fields.index('type')
self._edge_name_or_ix_ix = edge_fields.index('name_or_index')
self._edge_to_node_ix = edge_fields.index('to_node')
self._edge_field_count = len(edge_fields)
self._ParseSnapshot()
@staticmethod
def CanImport(raw_data):
heap = json.loads(raw_data)
if ('nodes' not in heap or 'edges' not in heap or 'strings' not in heap or
'snapshot' not in heap or 'meta' not in heap['snapshot']):
return False
meta = heap['snapshot']['meta']
if ('node_types' not in meta or 'edge_types' not in meta or
'node_fields' not in meta or 'edge_fields' not in meta):
return False
node_fields = meta['node_fields']
edge_fields = meta['edge_fields']
if ('type' not in node_fields or 'name' not in node_fields or
'id' not in node_fields or 'edge_count' not in node_fields):
return False
if ('type' not in edge_fields or 'name_or_index' not in edge_fields or
'to_node' not in edge_fields):
return False
return True
def GetAllLiveHeapObjects(self):
return self._node_dict.values()
@staticmethod
def LiveHeapObjectToJavaScript(heap_object):
return heap_object.name or str(heap_object)
@staticmethod
def RetainingEdgeToJavaScript(edge):
if edge.type_string == 'property':
return '.' + edge.name_string
if edge.type_string == 'element':
return '[' + edge.name_string + ']'
return str(edge)
def _ParseSnapshot(self):
"""Parses the stored JSON snapshot data.
Fills in self._node_dict with LiveHeapObject objects constructed based on
the heap snapshot. The LiveHeapObject objects contain the associated
RetainingEdge objects.
"""
edge_start_ix = 0
for ix in xrange(0, len(self._node_list), self._node_field_count):
edge_start_ix = self._ReadNodeFromIndex(ix, edge_start_ix)
# Add pointers to the endpoints to the edges, and associate the edges with
# the "to" nodes.
for node_id in self._node_dict:
n = self._node_dict[node_id]
for e in n.edges_from:
self._node_dict[e.to_object_id].AddEdgeTo(e)
e.SetFromObject(n)
e.SetToObject(self._node_dict[e.to_object_id])
def _ReadNodeFromIndex(self, ix, edges_start):
"""Reads the data for a node from the heap snapshot.
If the index contains an interesting node, constructs a LiveHeapObject and adds
it to self._node_dict.
Args:
ix: int, index into the self._node_list array.
edges_start: int, the index of the edge array where the edges for the node
start.
Returns:
int, the edge start index for the next node.
Raises:
Exception: The node list of the snapshot is malformed.
"""
if ix + self._node_field_count > len(self._node_list):
raise Exception('Snapshot node list too short')
type_ix = self._node_list[ix + self._node_type_ix]
type_string = self._node_types[int(type_ix)]
# edges_end is noninclusive (the index of the first edge that is not part of
# this node).
edge_count = self._node_list[ix + self._node_edge_count_ix]
edges_end = edges_start + edge_count * self._edge_field_count
if ChromeJsHeapSnapshotParser._IsNodeTypeUninteresting(type_string):
return edges_end
name_ix = self._node_list[ix + self._node_name_ix]
node_id = self._node_list[ix + self._node_id_ix]
def ConstructorName(type_string, node_name_ix):
if type_string == 'object':
return self._strings[int(node_name_ix)]
return '(%s)' % type_string
ctor_name = ConstructorName(type_string, name_ix)
n = live_heap_object.LiveHeapObject(node_id, type_string, ctor_name)
if type_string == 'string':
n.string = self._strings[int(name_ix)]
for edge_ix in xrange(edges_start, edges_end, self._edge_field_count):
edge = self._ReadEdgeFromIndex(node_id, edge_ix)
if edge:
# The edge will be associated with the other endpoint when all the data
# has been read.
n.AddEdgeFrom(edge)
self._node_dict[node_id] = n
return edges_end
@staticmethod
def _IsNodeTypeUninteresting(type_string):
"""Helper function for filtering out nodes from the heap snapshot.
Args:
type_string: str, type of the node.
Returns:
bool, True if the node is of an uninteresting type and shouldn't be
included in the heap snapshot analysis.
"""
uninteresting_types = ('hidden', 'code', 'number', 'native', 'synthetic')
return type_string in uninteresting_types
@staticmethod
def _IsEdgeTypeUninteresting(edge_type_string):
"""Helper function for filtering out edges from the heap snapshot.
Args:
edge_type_string: str, type of the edge.
Returns:
bool, True if the edge is of an uninteresting type and shouldn't be
included in the heap snapshot analysis.
"""
uninteresting_types = ('weak', 'hidden', 'internal')
return edge_type_string in uninteresting_types
def _ReadEdgeFromIndex(self, node_id, edge_ix):
"""Reads the data for an edge from the heap snapshot.
Args:
node_id: int, id of the node which is the starting point of the edge.
edge_ix: int, index into the self._edge_list array.
Returns:
Edge, if the index contains an interesting edge, otherwise None.
Raises:
Exception: The node list of the snapshot is malformed.
"""
if edge_ix + self._edge_field_count > len(self._edge_list):
raise Exception('Snapshot edge list too short')
edge_type_ix = self._edge_list[edge_ix + self._edge_type_ix]
edge_type_string = self._edge_types[int(edge_type_ix)]
if ChromeJsHeapSnapshotParser._IsEdgeTypeUninteresting(edge_type_string):
return None
child_name_or_ix = self._edge_list[edge_ix + self._edge_name_or_ix_ix]
child_node_ix = self._edge_list[edge_ix + self._edge_to_node_ix]
# The child_node_ix is an index into the node list. Read the actual
# node information.
child_node_type_ix = self._node_list[child_node_ix + self._node_type_ix]
child_node_type_string = self._node_types[int(child_node_type_ix)]
child_node_id = self._node_list[child_node_ix + self._node_id_ix]
if ChromeJsHeapSnapshotParser._IsNodeTypeUninteresting(
child_node_type_string):
return None
child_name_string = ''
# For element nodes, the child has no name (only an index).
if (edge_type_string == 'element' or
int(child_name_or_ix) >= len(self._strings)):
child_name_string = str(child_name_or_ix)
else:
child_name_string = self._strings[int(child_name_or_ix)]
return retaining_edge.RetainingEdge(node_id, child_node_id,
edge_type_string, child_name_string)
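
# Editor's note: an illustrative sketch, not part of the original parser.
# Given the raw JSON text of a V8 heap snapshot, typical use looks like:
#
#     if ChromeJsHeapSnapshotParser.CanImport(raw_data):
#         parser = ChromeJsHeapSnapshotParser(raw_data)
#         for obj in parser.GetAllLiveHeapObjects():
#             print ChromeJsHeapSnapshotParser.LiveHeapObjectToJavaScript(obj)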
|
clumsy/intellij-community
|
refs/heads/master
|
python/helpers/profiler/thrift/transport/TSSLSocket.py
|
9
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os
import socket
import ssl
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
@param ca_certs: Filename to the Certificate Authority pem file, possibly a
file downloaded from: http://curl.haxx.se/ca/cacert.pem This is passed to
the ssl_wrap function as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error, e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error, e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
e.g. 'localhost' to allow only local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
e.g. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION)
except ssl.SSLError, ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
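# Illustrative usage sketch: keyword names follow the parameters documented
# above, while the host, port, and file paths are placeholders.
#
#   transport = TSSLSocket(host='example.org', port=9090,
#                          validate=True,
#                          ca_certs='/etc/ssl/certs/cacert.pem')
#   transport.open()   # performs the TLS handshake and the commonName check
#
#   server_socket = TSSLServerSocket(port=9090,
#                                    certfile='/etc/certs/server.pem')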
|
AntonPashkowskiy/Tars
|
refs/heads/master
|
Program/common/settings/setting_keys.py
|
2
|
#!/usr/bin/python3
LOG_FILE_NAME = "log_file_name"
|
daira/tahoe-lafs-debian
|
refs/heads/master
|
src/allmydata/frontends/sftpd.py
|
7
|
import heapq, traceback, array, stat, struct
from types import NoneType
from stat import S_IFREG, S_IFDIR
from time import time, strftime, localtime
from zope.interface import implements
from twisted.python import components
from twisted.application import service, strports
from twisted.conch.ssh import factory, keys, session
from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
FX_BAD_MESSAGE, FX_FAILURE, FX_OK
from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
FXF_CREAT, FXF_TRUNC, FXF_EXCL
from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
from twisted.conch.avatar import ConchUser
from twisted.conch.openssh_compat import primes
from twisted.cred import portal
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.python.failure import Failure
from twisted.internet.interfaces import ITransport
from twisted.internet import defer
from twisted.internet.interfaces import IConsumer
from foolscap.api import eventually
from allmydata.util import deferredutil
from allmydata.util.assertutil import _assert, precondition
from allmydata.util.consumer import download_to_data
from allmydata.util.encodingutil import get_filesystem_encoding
from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
NoSuchChildError, ChildOfWrongTypeError
from allmydata.mutable.common import NotWriteableError
from allmydata.mutable.publish import MutableFileHandle
from allmydata.immutable.upload import FileHandle
from allmydata.dirnode import update_metadata
from allmydata.util.fileutil import EncryptedTemporaryFile
noisy = True
use_foolscap_logging = True
from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
msg as _msg, err as _err, PrefixingLogMixin as _PrefixingLogMixin
if use_foolscap_logging:
(logmsg, logerr, PrefixingLogMixin) = (_msg, _err, _PrefixingLogMixin)
else: # pragma: no cover
def logmsg(s, level=None):
print s
def logerr(s, level=None):
print s
class PrefixingLogMixin:
def __init__(self, facility=None, prefix=''):
self.prefix = prefix
def log(self, s, level=None):
print "%r %s" % (self.prefix, s)
def eventually_callback(d):
return lambda res: eventually(d.callback, res)
def eventually_errback(d):
return lambda err: eventually(d.errback, err)
def _utf8(x):
if isinstance(x, unicode):
return x.encode('utf-8')
if isinstance(x, str):
return x
return repr(x)
def _to_sftp_time(t):
"""SFTP times are unsigned 32-bit integers representing UTC seconds
(ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
A Tahoe time is the corresponding float."""
return long(t) & 0xFFFFFFFFL
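# For example, _to_sftp_time(1234567890.25) returns 1234567890 (as a long);
# times outside the 32-bit range simply wrap modulo 2**32.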
def _convert_error(res, request):
"""If res is not a Failure, return it, otherwise reraise the appropriate
SFTPError."""
if not isinstance(res, Failure):
logged_res = res
if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
return res
err = res
logmsg("RAISE %r %r" % (request, err.value), level=OPERATIONAL)
try:
if noisy: logmsg(traceback.format_exc(err.value), level=NOISY)
except Exception: # pragma: no cover
pass
# The message argument to SFTPError must not reveal information that
# might compromise anonymity, if we are running over an anonymous network.
if err.check(SFTPError):
# original raiser of SFTPError has responsibility to ensure anonymity
raise err
if err.check(NoSuchChildError):
childname = _utf8(err.value.args[0])
raise SFTPError(FX_NO_SUCH_FILE, childname)
if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
msg = _utf8(err.value.args[0])
raise SFTPError(FX_PERMISSION_DENIED, msg)
if err.check(ExistingChildError):
# Versions of SFTP after v3 (which is what twisted.conch implements)
# define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
# However v3 doesn't; instead, other servers such as sshd return
# FX_FAILURE. The gvfs SFTP backend, for example, depends on this
# to translate the error to the equivalent of POSIX EEXIST, which is
# necessary for some picky programs (such as gedit).
msg = _utf8(err.value.args[0])
raise SFTPError(FX_FAILURE, msg)
if err.check(NotImplementedError):
raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
if err.check(EOFError):
raise SFTPError(FX_EOF, "end of file reached")
if err.check(defer.FirstError):
_convert_error(err.value.subFailure, request)
# We assume that the error message is not anonymity-sensitive.
raise SFTPError(FX_FAILURE, _utf8(err.value))
def _repr_flags(flags):
return "|".join([f for f in
[(flags & FXF_READ) and "FXF_READ" or None,
(flags & FXF_WRITE) and "FXF_WRITE" or None,
(flags & FXF_APPEND) and "FXF_APPEND" or None,
(flags & FXF_CREAT) and "FXF_CREAT" or None,
(flags & FXF_TRUNC) and "FXF_TRUNC" or None,
(flags & FXF_EXCL) and "FXF_EXCL" or None,
]
if f])
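# For example, _repr_flags(FXF_READ | FXF_CREAT) == "FXF_READ|FXF_CREAT",
# and _repr_flags(0) == "".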
def _lsLine(name, attrs):
st_uid = "tahoe"
st_gid = "tahoe"
st_mtime = attrs.get("mtime", 0)
st_mode = attrs["permissions"]
# Some clients won't tolerate '?' in the size field (#1337).
st_size = attrs.get("size", 0)
# We don't know how many links there really are to this object.
st_nlink = 1
# Based on <https://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
# We previously could not call the version in Twisted because we needed the change
# <https://twistedmatrix.com/trac/changeset/25412> (released in Twisted v8.2).
# Since we now depend on Twisted v10.1, consider calling Twisted's version.
mode = st_mode
perms = array.array('c', '-'*10)
ft = stat.S_IFMT(mode)
if stat.S_ISDIR(ft): perms[0] = 'd'
elif stat.S_ISREG(ft): perms[0] = '-'
else: perms[0] = '?'
# user
if mode&stat.S_IRUSR: perms[1] = 'r'
if mode&stat.S_IWUSR: perms[2] = 'w'
if mode&stat.S_IXUSR: perms[3] = 'x'
# group
if mode&stat.S_IRGRP: perms[4] = 'r'
if mode&stat.S_IWGRP: perms[5] = 'w'
if mode&stat.S_IXGRP: perms[6] = 'x'
# other
if mode&stat.S_IROTH: perms[7] = 'r'
if mode&stat.S_IWOTH: perms[8] = 'w'
if mode&stat.S_IXOTH: perms[9] = 'x'
# suid/sgid never set
l = perms.tostring()
l += str(st_nlink).rjust(5) + ' '
un = str(st_uid)
l += un.ljust(9)
gr = str(st_gid)
l += gr.ljust(9)
sz = str(st_size)
l += sz.rjust(8)
l += ' '
day = 60 * 60 * 24
sixmo = day * 7 * 26
now = time()
if st_mtime + sixmo < now or st_mtime > now + day:
# mtime is more than 6 months ago, or more than one day in the future
l += strftime("%b %d %Y ", localtime(st_mtime))
else:
l += strftime("%b %d %H:%M ", localtime(st_mtime))
l += name
return l
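# The result is a single 'ls -l'-style line, roughly of the form
# "-rw-rw-rw-    1 tahoe    tahoe         42 Jan 01 12:34 filename"
# (permissions, link count, owner, group, size, mtime, name).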
def _no_write(parent_readonly, child, metadata=None):
"""Whether child should be listed as having read-only permissions in parent."""
if child.is_unknown():
return True
elif child.is_mutable():
return child.is_readonly()
elif parent_readonly or IDirectoryNode.providedBy(child):
return True
else:
return metadata is not None and metadata.get('no-write', False)
def _populate_attrs(childnode, metadata, size=None):
attrs = {}
# The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
# bits, otherwise the client may refuse to open a directory.
# Also, sshfs run as a non-root user requires files and directories
# to be world-readable/writeable.
# It is important that we never set the executable bits on files.
#
# Directories and unknown nodes have no size, and SFTP doesn't
# require us to make one up.
#
# childnode might be None, meaning that the file doesn't exist yet,
# but we're going to write it later.
if childnode and childnode.is_unknown():
perms = 0
elif childnode and IDirectoryNode.providedBy(childnode):
perms = S_IFDIR | 0777
else:
# For files, omit the size if we don't immediately know it.
if childnode and size is None:
size = childnode.get_size()
if size is not None:
_assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size)
attrs['size'] = size
perms = S_IFREG | 0666
if metadata:
if metadata.get('no-write', False):
perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
# See webapi.txt for what these times mean.
# We would prefer to omit atime, but SFTP version 3 can only
# accept mtime if atime is also set.
if 'linkmotime' in metadata.get('tahoe', {}):
attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['tahoe']['linkmotime'])
elif 'mtime' in metadata:
attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['mtime'])
if 'linkcrtime' in metadata.get('tahoe', {}):
attrs['createtime'] = _to_sftp_time(metadata['tahoe']['linkcrtime'])
attrs['permissions'] = perms
# twisted.conch.ssh.filetransfer only implements SFTP version 3,
# which doesn't include SSH_FILEXFER_ATTR_FLAGS.
return attrs
def _attrs_to_metadata(attrs):
metadata = {}
for key in attrs:
if key == "mtime" or key == "ctime" or key == "createtime":
metadata[key] = long(attrs[key])
elif key.startswith("ext_"):
metadata[key] = str(attrs[key])
perms = attrs.get('permissions', stat.S_IWUSR)
if not (perms & stat.S_IWUSR):
metadata['no-write'] = True
return metadata
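# For example, _attrs_to_metadata({'mtime': 1234567890, 'permissions': 0444})
# returns {'mtime': 1234567890L, 'no-write': True}: the times are copied and
# the cleared owner-write bit is recorded as 'no-write'.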
def _direntry_for(filenode_or_parent, childname, filenode=None):
precondition(isinstance(childname, (unicode, NoneType)), childname=childname)
if childname is None:
filenode_or_parent = filenode
if filenode_or_parent:
rw_uri = filenode_or_parent.get_write_uri()
if rw_uri and childname:
return rw_uri + "/" + childname.encode('utf-8')
else:
return rw_uri
return None
class OverwriteableFileConsumer(PrefixingLogMixin):
implements(IConsumer)
"""I act both as a consumer for the download of the original file contents, and as a
wrapper for a temporary file that records the downloaded data and any overwrites.
I use a priority queue to keep track of which regions of the file have been overwritten
but not yet downloaded, so that the download does not clobber overwritten data.
I use another priority queue to record milestones at which to make callbacks
indicating that a given number of bytes have been downloaded.
The temporary file reflects the contents of the file that I represent, except that:
- regions that have neither been downloaded nor overwritten, if present,
contain garbage.
- the temporary file may be shorter than the represented file (it is never longer).
The represented file's current size is stored in self.current_size.
This abstraction is mostly independent of SFTP. Consider moving it, if it is found
useful for other frontends."""
def __init__(self, download_size, tempfile_maker):
PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
if noisy: self.log(".__init__(%r, %r)" % (download_size, tempfile_maker), level=NOISY)
self.download_size = download_size
self.current_size = download_size
self.f = tempfile_maker()
self.downloaded = 0
self.milestones = [] # empty heap of (offset, d)
self.overwrites = [] # empty heap of (start, end)
self.is_closed = False
self.done = defer.Deferred()
self.done_status = None # None -> not complete, Failure -> download failed, str -> download succeeded
self.producer = None
def get_file(self):
return self.f
def get_current_size(self):
return self.current_size
def set_current_size(self, size):
if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
(size, self.current_size, self.downloaded), level=NOISY)
if size < self.current_size or size < self.downloaded:
self.f.truncate(size)
if size > self.current_size:
self.overwrite(self.current_size, "\x00" * (size - self.current_size))
self.current_size = size
# make the invariant self.download_size <= self.current_size be true again
if size < self.download_size:
self.download_size = size
if self.downloaded >= self.download_size:
self.download_done("size changed")
def registerProducer(self, p, streaming):
if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
if self.producer is not None:
raise RuntimeError("producer is already registered")
self.producer = p
if streaming:
# call resumeProducing once to start things off
p.resumeProducing()
else:
def _iterate():
if self.done_status is None:
p.resumeProducing()
eventually(_iterate)
_iterate()
def write(self, data):
if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)
if self.is_closed:
return
if self.downloaded >= self.download_size:
return
next_downloaded = self.downloaded + len(data)
if next_downloaded > self.download_size:
data = data[:(self.download_size - self.downloaded)]
while len(self.overwrites) > 0:
(start, end) = self.overwrites[0]
if start >= next_downloaded:
# This and all remaining overwrites are after the data we just downloaded.
break
if start > self.downloaded:
# The data we just downloaded has been partially overwritten.
# Write the prefix of it that precedes the overwritten region.
self.f.seek(self.downloaded)
self.f.write(data[:(start - self.downloaded)])
# This merges consecutive overwrites if possible, which allows us to detect the
# case where the download can be stopped early because the remaining region
# to download has already been fully overwritten.
heapq.heappop(self.overwrites)
while len(self.overwrites) > 0:
(start1, end1) = self.overwrites[0]
if start1 > end:
break
end = end1
heapq.heappop(self.overwrites)
if end >= next_downloaded:
# This overwrite extends past the downloaded data, so there is no
# more data to consider on this call.
heapq.heappush(self.overwrites, (next_downloaded, end))
self._update_downloaded(next_downloaded)
return
elif end >= self.downloaded:
data = data[(end - self.downloaded):]
self._update_downloaded(end)
self.f.seek(self.downloaded)
self.f.write(data)
self._update_downloaded(next_downloaded)
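# Worked example: suppose self.downloaded == 5 and self.overwrites holds (10, 20).
# A write() delivering downloaded bytes covering [5, 25) then writes only
# [5, 10) and [20, 25) to the temporary file, leaving the overwritten region
# [10, 20) untouched, and advances self.downloaded to 25.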
def _update_downloaded(self, new_downloaded):
self.downloaded = new_downloaded
milestone = new_downloaded
if len(self.overwrites) > 0:
(start, end) = self.overwrites[0]
if start <= new_downloaded and end > milestone:
milestone = end
while len(self.milestones) > 0:
(next, d) = self.milestones[0]
if next > milestone:
return
if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
heapq.heappop(self.milestones)
eventually_callback(d)("reached")
if milestone >= self.download_size:
self.download_done("reached download size")
def overwrite(self, offset, data):
if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
if self.is_closed:
self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD)
raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
if offset > self.current_size:
# Normally writing at an offset beyond the current end-of-file
# would leave a hole that appears filled with zeroes. However, an
# EncryptedTemporaryFile doesn't behave like that (if there is a
# hole in the file on disk, the zeroes that are read back will be
# XORed with the keystream). So we must explicitly write zeroes in
# the gap between the current EOF and the offset.
self.f.seek(self.current_size)
self.f.write("\x00" * (offset - self.current_size))
start = self.current_size
else:
self.f.seek(offset)
start = offset
self.f.write(data)
end = offset + len(data)
self.current_size = max(self.current_size, end)
if end > self.downloaded:
heapq.heappush(self.overwrites, (start, end))
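# For example, overwrite(100, <50 bytes>) while self.downloaded == 80 (and
# self.current_size >= 100) records the pending region (100, 150) so that the
# ongoing download cannot later clobber those bytes.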
def read(self, offset, length):
"""When the data has been read, callback the Deferred that we return with this data.
Otherwise errback the Deferred that we return.
The caller must perform no more overwrites until the Deferred has fired."""
if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
if self.is_closed:
self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD)
raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
# Note that the overwrite method is synchronous. When a write request is processed
# (e.g. a writeChunk request on the async queue of GeneralSFTPFile), overwrite will
# be called and will update self.current_size if necessary before returning. Therefore,
# self.current_size will be up-to-date for a subsequent call to this read method, and
# so it is correct to do the check for a read past the end-of-file here.
if offset >= self.current_size:
def _eof(): raise EOFError("read past end of file")
return defer.execute(_eof)
if offset + length > self.current_size:
length = self.current_size - offset
if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)
needed = min(offset + length, self.download_size)
# If we fail to reach the needed number of bytes, the read request will fail.
d = self.when_reached_or_failed(needed)
def _reached_in_read(res):
# It is not necessarily the case that self.downloaded >= needed, because
# the file might have been truncated (thus truncating the download) and
# then extended.
_assert(self.current_size >= offset + length,
current_size=self.current_size, offset=offset, length=length)
if noisy: self.log("_reached_in_read(%r), self.f = %r" % (res, self.f,), level=NOISY)
self.f.seek(offset)
return self.f.read(length)
d.addCallback(_reached_in_read)
return d
def when_reached_or_failed(self, index):
if noisy: self.log(".when_reached_or_failed(%r)" % (index,), level=NOISY)
def _reached(res):
if noisy: self.log("reached %r with result %r" % (index, res), level=NOISY)
return res
if self.done_status is not None:
return defer.execute(_reached, self.done_status)
if index <= self.downloaded: # already reached successfully
if noisy: self.log("already reached %r successfully" % (index,), level=NOISY)
return defer.succeed("already reached successfully")
d = defer.Deferred()
d.addCallback(_reached)
heapq.heappush(self.milestones, (index, d))
return d
def when_done(self):
d = defer.Deferred()
self.done.addCallback(lambda ign: eventually_callback(d)(self.done_status))
return d
def download_done(self, res):
_assert(isinstance(res, (str, Failure)), res=res)
# Only the first call to download_done counts, but we log subsequent calls
# (multiple calls are normal).
if self.done_status is not None:
self.log("IGNORING extra call to download_done with result %r; previous result was %r"
% (res, self.done_status), level=OPERATIONAL)
return
self.log("DONE with result %r" % (res,), level=OPERATIONAL)
# We avoid errbacking self.done so that we are not left with an 'Unhandled error in Deferred'
# in case when_done() is never called. Instead we stash the failure in self.done_status,
# from where the callback added in when_done() can retrieve it.
self.done_status = res
eventually_callback(self.done)(None)
while len(self.milestones) > 0:
(next, d) = self.milestones[0]
if noisy: self.log("MILESTONE FINISH %r %r %r" % (next, d, res), level=NOISY)
heapq.heappop(self.milestones)
# The callback means that the milestone has been reached if
# it is ever going to be. Note that the file may have been
# truncated to before the milestone.
eventually_callback(d)(res)
def close(self):
if not self.is_closed:
self.is_closed = True
try:
self.f.close()
except Exception, e:
self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
self.download_done("closed")
return self.done_status
def unregisterProducer(self):
# This will happen just before our client calls download_done, which will tell
# us the outcome of the download; we don't know the outcome at this point.
self.producer = None
self.log("producer unregistered", level=NOISY)
SIZE_THRESHOLD = 1000
class ShortReadOnlySFTPFile(PrefixingLogMixin):
implements(ISFTPFile)
"""I represent a file handle to a particular file on an SFTP connection.
I am used only for short immutable files opened in read-only mode.
When I am created, the file contents start to be downloaded to memory.
self.async is used to delay read requests until the download has finished."""
def __init__(self, userpath, filenode, metadata):
PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
precondition(isinstance(userpath, str) and IFileNode.providedBy(filenode),
userpath=userpath, filenode=filenode)
self.filenode = filenode
self.metadata = metadata
self.async = download_to_data(filenode)
self.closed = False
def readChunk(self, offset, length):
request = ".readChunk(%r, %r)" % (offset, length)
self.log(request, level=OPERATIONAL)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
return defer.execute(_closed)
d = defer.Deferred()
def _read(data):
if noisy: self.log("_read(<data of length %r>) in readChunk(%r, %r)" % (len(data), offset, length), level=NOISY)
# "In response to this request, the server will read as many bytes as it
# can from the file (up to 'len'), and return them in a SSH_FXP_DATA
# message. If an error occurs or EOF is encountered before reading any
# data, the server will respond with SSH_FXP_STATUS. For normal disk
# files, it is guaranteed that this will read the specified number of
# bytes, or up to end of file."
#
# i.e. we respond with an EOF error iff offset is already at EOF.
if offset >= len(data):
eventually_errback(d)(Failure(SFTPError(FX_EOF, "read at or past end of file")))
else:
eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data)
return data
self.async.addCallbacks(_read, eventually_errback(d))
d.addBoth(_convert_error, request)
return d
def writeChunk(self, offset, data):
self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
def close(self):
self.log(".close()", level=OPERATIONAL)
self.closed = True
return defer.succeed(None)
def getAttrs(self):
request = ".getAttrs()"
self.log(request, level=OPERATIONAL)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
return defer.execute(_closed)
d = defer.execute(_populate_attrs, self.filenode, self.metadata)
d.addBoth(_convert_error, request)
return d
def setAttrs(self, attrs):
self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
class GeneralSFTPFile(PrefixingLogMixin):
implements(ISFTPFile)
"""I represent a file handle to a particular file on an SFTP connection.
I wrap an instance of OverwriteableFileConsumer, which is responsible for
storing the file contents. In order to allow write requests to be satisfied
immediately, there is effectively a FIFO queue between requests made to this
file handle, and requests to my OverwriteableFileConsumer. This queue is
implemented by the callback chain of self.async.
When first constructed, I am in an 'unopened' state that causes most
operations to be delayed until 'open' is called."""
def __init__(self, userpath, flags, close_notify, convergence):
PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
(userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
precondition(isinstance(userpath, str), userpath=userpath)
self.userpath = userpath
self.flags = flags
self.close_notify = close_notify
self.convergence = convergence
self.async = defer.Deferred()
# Creating or truncating the file is a change, but if FXF_EXCL is set, a zero-length file has already been created.
self.has_changed = (flags & (FXF_CREAT | FXF_TRUNC)) and not (flags & FXF_EXCL)
self.closed = False
self.abandoned = False
self.parent = None
self.childname = None
self.filenode = None
self.metadata = None
# self.consumer should only be relied on in callbacks for self.async, since it might
# not be set before then.
self.consumer = None
def open(self, parent=None, childname=None, filenode=None, metadata=None):
self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
(parent, childname, filenode, metadata), level=OPERATIONAL)
precondition(isinstance(childname, (unicode, NoneType)), childname=childname)
precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode)
precondition(not self.closed, sftpfile=self)
# If the file has been renamed, the new (parent, childname) takes precedence.
if self.parent is None:
self.parent = parent
if self.childname is None:
self.childname = childname
self.filenode = filenode
self.metadata = metadata
tempfile_maker = EncryptedTemporaryFile
if (self.flags & FXF_TRUNC) or not filenode:
# We're either truncating or creating the file, so we don't need the old contents.
self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
self.consumer.download_done("download not needed")
else:
self.async.addCallback(lambda ignored: filenode.get_best_readable_version())
def _read(version):
if noisy: self.log("_read", level=NOISY)
download_size = version.get_size()
_assert(download_size is not None)
self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker)
d = version.read(self.consumer, 0, None)
def _finished(res):
if not isinstance(res, Failure):
res = "download finished"
self.consumer.download_done(res)
d.addBoth(_finished)
# It is correct to drop d here.
self.async.addCallback(_read)
eventually_callback(self.async)(None)
if noisy: self.log("open done", level=NOISY)
return self
def get_userpath(self):
return self.userpath
def get_direntry(self):
return _direntry_for(self.parent, self.childname)
def rename(self, new_userpath, new_parent, new_childname):
self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)
precondition(isinstance(new_userpath, str) and isinstance(new_childname, unicode),
new_userpath=new_userpath, new_childname=new_childname)
self.userpath = new_userpath
self.parent = new_parent
self.childname = new_childname
def abandon(self):
self.log(".abandon()", level=OPERATIONAL)
self.abandoned = True
def sync(self, ign=None):
# The ign argument allows some_file.sync to be used as a callback.
self.log(".sync()", level=OPERATIONAL)
d = defer.Deferred()
self.async.addBoth(eventually_callback(d))
def _done(res):
if noisy: self.log("_done(%r) in .sync()" % (res,), level=NOISY)
return res
d.addBoth(_done)
return d
def readChunk(self, offset, length):
request = ".readChunk(%r, %r)" % (offset, length)
self.log(request, level=OPERATIONAL)
if not (self.flags & FXF_READ):
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
return defer.execute(_denied)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
return defer.execute(_closed)
d = defer.Deferred()
def _read(ign):
if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
d2 = self.consumer.read(offset, length)
d2.addBoth(eventually_callback(d))
# It is correct to drop d2 here.
return None
self.async.addCallbacks(_read, eventually_errback(d))
d.addBoth(_convert_error, request)
return d
def writeChunk(self, offset, data):
self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
if not (self.flags & FXF_WRITE):
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
return defer.execute(_closed)
self.has_changed = True
# Note that we return without waiting for the write to occur. Reads and
# close wait for prior writes, and will fail if any prior operation failed.
# This is ok because SFTP makes no guarantee that the write completes
# before the request does. In fact it explicitly allows write errors to be
# delayed until close:
# "One should note that on some server platforms even a close can fail.
# This can happen e.g. if the server operating system caches writes,
# and an error occurs while flushing cached writes during the close."
def _write(ign):
if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
(offset, len(data), self.consumer.get_current_size()), level=NOISY)
# FXF_APPEND means that we should always write at the current end of file.
write_offset = offset
if self.flags & FXF_APPEND:
write_offset = self.consumer.get_current_size()
self.consumer.overwrite(write_offset, data)
if noisy: self.log("overwrite done", level=NOISY)
return None
self.async.addCallback(_write)
# don't addErrback to self.async, just allow subsequent async ops to fail.
return defer.succeed(None)
def _do_close(self, res, d=None):
if noisy: self.log("_do_close(%r)" % (res,), level=NOISY)
status = None
if self.consumer:
status = self.consumer.close()
# We must close_notify before re-firing self.async.
if self.close_notify:
self.close_notify(self.userpath, self.parent, self.childname, self)
if not isinstance(res, Failure) and isinstance(status, Failure):
res = status
if d:
eventually_callback(d)(res)
elif isinstance(res, Failure):
self.log("suppressing %r" % (res,), level=OPERATIONAL)
def close(self):
request = ".close()"
self.log(request, level=OPERATIONAL)
if self.closed:
return defer.succeed(None)
# This means that close has been called, not that the close has succeeded.
self.closed = True
if not (self.flags & (FXF_WRITE | FXF_CREAT)):
# We never fail a close of a handle opened only for reading, even if the file
# failed to download. (We could not do so deterministically, because it would
# depend on whether we reached the point of failure before abandoning the
# download.) Any reads that depended on file content that could not be downloaded
# will have failed. It is important that we don't close the consumer until
# previous read operations have completed.
self.async.addBoth(self._do_close)
return defer.succeed(None)
# We must capture the abandoned, parent, and childname variables synchronously
# at the close call. This is needed by the correctness arguments in the comments
# for _abandon_any_heisenfiles and _rename_heisenfiles.
# Note that the file must have been opened before it can be closed.
abandoned = self.abandoned
parent = self.parent
childname = self.childname
# has_changed is set when writeChunk is called, not when the write occurs, so
# it is correct to optimize out the commit if it is False at the close call.
has_changed = self.has_changed
def _commit(ign):
d2 = self.consumer.when_done()
if self.filenode and self.filenode.is_mutable():
self.log("update mutable file %r childname=%r metadata=%r"
% (self.filenode, childname, self.metadata), level=OPERATIONAL)
if self.metadata.get('no-write', False) and not self.filenode.is_readonly():
_assert(parent and childname, parent=parent, childname=childname, metadata=self.metadata)
d2.addCallback(lambda ign: parent.set_metadata_for(childname, self.metadata))
d2.addCallback(lambda ign: self.filenode.overwrite(MutableFileHandle(self.consumer.get_file())))
else:
def _add_file(ign):
self.log("_add_file childname=%r" % (childname,), level=OPERATIONAL)
u = FileHandle(self.consumer.get_file(), self.convergence)
return parent.add_file(childname, u, metadata=self.metadata)
d2.addCallback(_add_file)
return d2
# If the file has been abandoned, we don't want the close operation to get "stuck",
# even if self.async fails to re-fire. Completing the close independently of self.async
# in that case should ensure that dropping an ssh connection is sufficient to abandon
# any heisenfiles that were not explicitly closed in that connection.
if abandoned or not has_changed:
d = defer.succeed(None)
self.async.addBoth(self._do_close)
else:
d = defer.Deferred()
self.async.addCallback(_commit)
self.async.addBoth(self._do_close, d)
d.addBoth(_convert_error, request)
return d
def getAttrs(self):
request = ".getAttrs()"
self.log(request, level=OPERATIONAL)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
return defer.execute(_closed)
# Optimization for read-only handles, when we already know the metadata.
if not (self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
return defer.succeed(_populate_attrs(self.filenode, self.metadata))
d = defer.Deferred()
def _get(ign):
if noisy: self.log("_get(%r) in %r, filenode = %r, metadata = %r" % (ign, request, self.filenode, self.metadata), level=NOISY)
# self.filenode might be None, but that's ok.
attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
eventually_callback(d)(attrs)
return None
self.async.addCallbacks(_get, eventually_errback(d))
d.addBoth(_convert_error, request)
return d
def setAttrs(self, attrs, only_if_at=None):
request = ".setAttrs(%r, only_if_at=%r)" % (attrs, only_if_at)
self.log(request, level=OPERATIONAL)
if not (self.flags & FXF_WRITE):
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
return defer.execute(_closed)
size = attrs.get("size", None)
if size is not None and (not isinstance(size, (int, long)) or size < 0):
def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
return defer.execute(_bad)
d = defer.Deferred()
def _set(ign):
if noisy: self.log("_set(%r) in %r" % (ign, request), level=NOISY)
current_direntry = _direntry_for(self.parent, self.childname, self.filenode)
if only_if_at and only_if_at != current_direntry:
if noisy: self.log("not setting attributes: current_direntry=%r in %r" %
(current_direntry, request), level=NOISY)
return None
now = time()
self.metadata = update_metadata(self.metadata, _attrs_to_metadata(attrs), now)
if size is not None:
# TODO: should we refuse to truncate a file opened with FXF_APPEND?
# <http://allmydata.org/trac/tahoe-lafs/ticket/1037#comment:20>
self.consumer.set_current_size(size)
eventually_callback(d)(None)
return None
self.async.addCallbacks(_set, eventually_errback(d))
d.addBoth(_convert_error, request)
return d
class StoppableList:
def __init__(self, items):
self.items = items
def __iter__(self):
for i in self.items:
yield i
def close(self):
pass
class Reason:
def __init__(self, value):
self.value = value
# A "heisenfile" is a file that has been opened with write flags
# (FXF_WRITE and/or FXF_CREAT) and not yet close-notified.
# 'all_heisenfiles' maps from a direntry string to a list of
# GeneralSFTPFile.
#
# A direntry string is parent_write_uri + "/" + childname_utf8 for
# an immutable file, or file_write_uri for a mutable file.
# Updates to this dict are single-threaded.
all_heisenfiles = {}
def _reload():
global all_heisenfiles
all_heisenfiles = {}
class SFTPUserHandler(ConchUser, PrefixingLogMixin):
implements(ISFTPServer)
def __init__(self, client, rootnode, username):
ConchUser.__init__(self)
PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=username)
if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)
self.channelLookup["session"] = session.SSHSession
self.subsystemLookup["sftp"] = FileTransferServer
self._client = client
self._root = rootnode
self._username = username
self._convergence = client.convergence
# maps from UTF-8 paths for this user, to files written and still open
self._heisenfiles = {}
def gotVersion(self, otherVersion, extData):
self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)
# advertise the same extensions as the OpenSSH SFTP server
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
return {'posix-rename@openssh.com': '1',
'statvfs@openssh.com': '2',
'fstatvfs@openssh.com': '2',
}
def logout(self):
self.log(".logout()", level=OPERATIONAL)
for files in self._heisenfiles.itervalues():
for f in files:
f.abandon()
def _add_heisenfile_by_path(self, file):
self.log("._add_heisenfile_by_path(%r)" % (file,), level=OPERATIONAL)
userpath = file.get_userpath()
if userpath in self._heisenfiles:
self._heisenfiles[userpath] += [file]
else:
self._heisenfiles[userpath] = [file]
def _add_heisenfile_by_direntry(self, file):
self.log("._add_heisenfile_by_direntry(%r)" % (file,), level=OPERATIONAL)
direntry = file.get_direntry()
if direntry:
if direntry in all_heisenfiles:
all_heisenfiles[direntry] += [file]
else:
all_heisenfiles[direntry] = [file]
def _abandon_any_heisenfiles(self, userpath, direntry):
request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry)
self.log(request, level=OPERATIONAL)
precondition(isinstance(userpath, str), userpath=userpath)
# First we synchronously mark all heisenfiles matching the userpath or direntry
# as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
# each file that we abandoned.
#
# For each file, the call to .abandon() occurs:
# * before the file is closed, in which case it will never be committed
# (uploaded+linked or published); or
# * after it is closed but before it has been close_notified, in which case the
# .sync() ensures that it has been committed (successfully or not) before we
# return.
#
# This avoids a race that might otherwise cause the file to be committed after
# the remove operation has completed.
#
# We return a Deferred that fires with True if any files were abandoned (this
# does not mean that they were not committed; it is used to determine whether
# a NoSuchChildError from the attempt to delete the file should be suppressed).
files = []
if direntry in all_heisenfiles:
files = all_heisenfiles[direntry]
del all_heisenfiles[direntry]
if userpath in self._heisenfiles:
files += self._heisenfiles[userpath]
del self._heisenfiles[userpath]
if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
for f in files:
f.abandon()
d = defer.succeed(None)
for f in files:
d.addBoth(f.sync)
def _done(ign):
self.log("done %r" % (request,), level=OPERATIONAL)
return len(files) > 0
d.addBoth(_done)
return d
def _rename_heisenfiles(self, from_userpath, from_parent, from_childname,
to_userpath, to_parent, to_childname, overwrite=True):
request = ("._rename_heisenfiles(%r, %r, %r, %r, %r, %r, overwrite=%r)" %
(from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite))
self.log(request, level=OPERATIONAL)
precondition((isinstance(from_userpath, str) and isinstance(from_childname, unicode) and
isinstance(to_userpath, str) and isinstance(to_childname, unicode)),
from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname)
if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
# First we synchronously rename all heisenfiles matching the userpath or direntry.
# Then we .sync() each file that we renamed.
#
# For each file, the call to .rename occurs:
# * before the file is closed, in which case it will be committed at the
# new direntry; or
# * after it is closed but before it has been close_notified, in which case the
# .sync() ensures that it has been committed (successfully or not) before we
# return.
#
# This avoids a race that might otherwise cause the file to be committed at the
# old name after the rename operation has completed.
#
# Note that if overwrite is False, the caller should already have checked
# whether a real direntry exists at the destination. It is possible that another
# direntry (heisen or real) comes to exist at the destination after that check,
# but in that case it is correct for the rename to succeed (and for the commit
# of the heisenfile at the destination to possibly clobber the other entry, since
# that can happen anyway when we have concurrent write handles to the same direntry).
#
# We return a Deferred that fires with True if any files were renamed (this
# does not mean that they were not committed; it is used to determine whether
# a NoSuchChildError from the rename attempt should be suppressed). If overwrite
# is False and there were already heisenfiles at the destination userpath or
# direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).
from_direntry = _direntry_for(from_parent, from_childname)
to_direntry = _direntry_for(to_parent, to_childname)
if noisy: self.log("from_direntry = %r, to_direntry = %r, len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" %
(from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
if noisy: self.log("existing", level=NOISY)
return defer.execute(_existing)
from_files = []
if from_direntry in all_heisenfiles:
from_files = all_heisenfiles[from_direntry]
del all_heisenfiles[from_direntry]
if from_userpath in self._heisenfiles:
from_files += self._heisenfiles[from_userpath]
del self._heisenfiles[from_userpath]
if noisy: self.log("from_files = %r in %r" % (from_files, request), level=NOISY)
for f in from_files:
f.rename(to_userpath, to_parent, to_childname)
self._add_heisenfile_by_path(f)
self._add_heisenfile_by_direntry(f)
d = defer.succeed(None)
for f in from_files:
d.addBoth(f.sync)
def _done(ign):
if noisy: self.log("done: len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" %
(len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
return len(from_files) > 0
d.addBoth(_done)
return d
def _update_attrs_for_heisenfiles(self, userpath, direntry, attrs):
request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs)
self.log(request, level=OPERATIONAL)
_assert(isinstance(userpath, str) and isinstance(direntry, str),
userpath=userpath, direntry=direntry)
files = []
if direntry in all_heisenfiles:
files = all_heisenfiles[direntry]
if userpath in self._heisenfiles:
files += self._heisenfiles[userpath]
if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
# We set the metadata for all heisenfiles at this path or direntry.
# Since a direntry includes a write URI, we must have authority to
# change the metadata of heisenfiles found in the all_heisenfiles dict.
# However that's not necessarily the case for heisenfiles found by
# path. Therefore we tell the setAttrs method of each file to only
# perform the update if the file is at the correct direntry.
d = defer.succeed(None)
for f in files:
d.addBoth(f.setAttrs, attrs, only_if_at=direntry)
def _done(ign):
self.log("done %r" % (request,), level=OPERATIONAL)
# TODO: this should not return True if only_if_at caused all files to be skipped.
return len(files) > 0
d.addBoth(_done)
return d
def _sync_heisenfiles(self, userpath, direntry, ignore=None):
request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore)
self.log(request, level=OPERATIONAL)
_assert(isinstance(userpath, str) and isinstance(direntry, (str, NoneType)),
userpath=userpath, direntry=direntry)
files = []
if direntry in all_heisenfiles:
files = all_heisenfiles[direntry]
if userpath in self._heisenfiles:
files += self._heisenfiles[userpath]
if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
d = defer.succeed(None)
for f in files:
if f is not ignore:
d.addBoth(f.sync)
def _done(ign):
self.log("done %r" % (request,), level=OPERATIONAL)
return None
d.addBoth(_done)
return d
def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)
_assert(isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)),
userpath=userpath, childname=childname)
direntry = _direntry_for(parent, childname)
if direntry in all_heisenfiles:
all_old_files = all_heisenfiles[direntry]
all_new_files = [f for f in all_old_files if f is not file_to_remove]
if len(all_new_files) > 0:
all_heisenfiles[direntry] = all_new_files
else:
del all_heisenfiles[direntry]
if userpath in self._heisenfiles:
old_files = self._heisenfiles[userpath]
new_files = [f for f in old_files if f is not file_to_remove]
if len(new_files) > 0:
self._heisenfiles[userpath] = new_files
else:
del self._heisenfiles[userpath]
if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
def _make_file(self, existing_file, userpath, flags, parent=None, childname=None, filenode=None, metadata=None):
if noisy: self.log("._make_file(%r, %r, %r = %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
(existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
level=NOISY)
_assert((isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and
(metadata is None or 'no-write' in metadata)),
userpath=userpath, childname=childname, metadata=metadata)
writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
direntry = _direntry_for(parent, childname, filenode)
d = self._sync_heisenfiles(userpath, direntry, ignore=existing_file)
if not writing and (flags & FXF_READ) and filenode and not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
d.addCallback(lambda ign: ShortReadOnlySFTPFile(userpath, filenode, metadata))
else:
close_notify = None
if writing:
close_notify = self._remove_heisenfile
d.addCallback(lambda ign: existing_file or GeneralSFTPFile(userpath, flags, close_notify, self._convergence))
def _got_file(file):
file.open(parent=parent, childname=childname, filenode=filenode, metadata=metadata)
if writing:
self._add_heisenfile_by_direntry(file)
return file
d.addCallback(_got_file)
return d
def openFile(self, pathstring, flags, attrs, delay=None):
request = ".openFile(%r, %r = %r, %r, delay=%r)" % (pathstring, flags, _repr_flags(flags), attrs, delay)
self.log(request, level=OPERATIONAL)
# This is used for both reading and writing.
# First exclude invalid combinations of flags, and empty paths.
if not (flags & (FXF_READ | FXF_WRITE)):
def _bad_readwrite():
raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
return defer.execute(_bad_readwrite)
if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
def _bad_exclcreat():
raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
return defer.execute(_bad_exclcreat)
path = self._path_from_string(pathstring)
if not path:
def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
return defer.execute(_emptypath)
# The combination of flags is potentially valid.
# To work around clients that have race condition bugs, a getAttr, rename, or
# remove request following an 'open' request with FXF_WRITE or FXF_CREAT flags
# should succeed even if the 'open' request has not yet completed. So we now
# synchronously add a file object into the self._heisenfiles dict, indexed
# by its UTF-8 userpath. (We can't yet add it to the all_heisenfiles dict,
# because we don't yet have a user-independent path for the file.) The file
# object does not know its filenode, parent, or childname at this point.
userpath = self._path_to_utf8(path)
if flags & (FXF_WRITE | FXF_CREAT):
file = GeneralSFTPFile(userpath, flags, self._remove_heisenfile, self._convergence)
self._add_heisenfile_by_path(file)
else:
# We haven't decided which file implementation to use yet.
file = None
desired_metadata = _attrs_to_metadata(attrs)
# Now there are two major cases:
#
# 1. The path is specified as /uri/FILECAP, with no parent directory.
# If the FILECAP is mutable and writeable, then we can open it in write-only
# or read/write mode (non-exclusively), otherwise we can only open it in
# read-only mode. The open should succeed immediately as long as FILECAP is
# a valid known filecap that grants the required permission.
#
# 2. The path is specified relative to a parent. We find the parent dirnode and
# get the child's URI and metadata if it exists. There are four subcases:
# a. the child does not exist: FXF_CREAT must be set, and we must be able
# to write to the parent directory.
# b. the child exists but is not a valid known filecap: fail
# c. the child is mutable: if we are trying to open it write-only or
# read/write, then we must be able to write to the file.
# d. the child is immutable: if we are trying to open it write-only or
# read/write, then we must be able to write to the parent directory.
#
# To reduce latency, open normally succeeds as soon as these conditions are
# met, even though there might be a failure in downloading the existing file
# or uploading a new one. However, there is an exception: if a file has been
# written, then closed, and is now being reopened, then we have to delay the
# open until the previous upload/publish has completed. This is necessary
# because sshfs does not wait for the result of an FXF_CLOSE message before
# reporting to the client that a file has been closed. It applies both to
# mutable files, and to directory entries linked to an immutable file.
#
# Note that the permission checks below are for more precise error reporting on
# the open call; later operations would fail even if we did not make these checks.
d = delay or defer.succeed(None)
d.addCallback(lambda ign: self._get_root(path))
def _got_root( (root, path) ):
if root.is_unknown():
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open an unknown cap (or child of an unknown object). "
"Upgrading the gateway to a later Tahoe-LAFS version may help")
if not path:
# case 1
if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
if not IFileNode.providedBy(root):
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open a directory cap")
if (flags & FXF_WRITE) and root.is_readonly():
raise SFTPError(FX_PERMISSION_DENIED,
"cannot write to a non-writeable filecap without a parent directory")
if flags & FXF_EXCL:
raise SFTPError(FX_FAILURE,
"cannot create a file exclusively when it already exists")
# The file does not need to be added to all_heisenfiles, because it is not
# associated with a directory entry that needs to be updated.
metadata = update_metadata(None, desired_metadata, time())
# We have to decide what to pass for the 'parent_readonly' argument to _no_write,
# given that we don't actually have a parent. This only affects the permissions
# reported by a getAttrs on this file handle in the case of an immutable file.
# We choose 'parent_readonly=True' since that will cause the permissions to be
# reported as r--r--r--, which is appropriate because an immutable file can't be
# written via this path.
metadata['no-write'] = _no_write(True, root)
return self._make_file(file, userpath, flags, filenode=root, metadata=metadata)
else:
# case 2
childname = path[-1]
if noisy: self.log("case 2: root = %r, childname = %r, desired_metadata = %r, path[:-1] = %r" %
(root, childname, desired_metadata, path[:-1]), level=NOISY)
d2 = root.get_child_at_path(path[:-1])
def _got_parent(parent):
if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
if parent.is_unknown():
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open a child of an unknown object. "
"Upgrading the gateway to a later Tahoe-LAFS version may help")
parent_readonly = parent.is_readonly()
d3 = defer.succeed(None)
if flags & FXF_EXCL:
# FXF_EXCL means that the link to the file (not the file itself) must
# be created atomically wrt updates by this storage client.
# That is, we need to create the link before returning success to the
# SFTP open request (and not just on close, as would normally be the
# case). We make the link initially point to a zero-length LIT file,
# which is consistent with what might happen on a POSIX filesystem.
if parent_readonly:
raise SFTPError(FX_FAILURE,
"cannot create a file exclusively when the parent directory is read-only")
# 'overwrite=False' ensures failure if the link already exists.
# FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
zero_length_lit = "URI:LIT:"
if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
(parent, zero_length_lit, childname), level=NOISY)
d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit,
metadata=desired_metadata, overwrite=False))
def _seturi_done(child):
if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
d4 = parent.get_metadata_for(childname)
d4.addCallback(lambda metadata: (child, metadata))
return d4
d3.addCallback(_seturi_done)
else:
if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
def _got_child( (filenode, current_metadata) ):
if noisy: self.log("_got_child( (%r, %r) )" % (filenode, current_metadata), level=NOISY)
metadata = update_metadata(current_metadata, desired_metadata, time())
# Ignore the permissions of the desired_metadata in an open call. The permissions
# can only be set by setAttrs.
metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata)
if filenode.is_unknown():
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open an unknown cap. Upgrading the gateway "
"to a later Tahoe-LAFS version may help")
if not IFileNode.providedBy(filenode):
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open a directory as if it were a file")
if (flags & FXF_WRITE) and metadata['no-write']:
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open a non-writeable file for writing")
return self._make_file(file, userpath, flags, parent=parent, childname=childname,
filenode=filenode, metadata=metadata)
def _no_child(f):
if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
f.trap(NoSuchChildError)
if not (flags & FXF_CREAT):
raise SFTPError(FX_NO_SUCH_FILE,
"the file does not exist, and was not opened with the creation (CREAT) flag")
if parent_readonly:
raise SFTPError(FX_PERMISSION_DENIED,
"cannot create a file when the parent directory is read-only")
return self._make_file(file, userpath, flags, parent=parent, childname=childname)
d3.addCallbacks(_got_child, _no_child)
return d3
d2.addCallback(_got_parent)
return d2
d.addCallback(_got_root)
def _remove_on_error(err):
if file:
self._remove_heisenfile(userpath, None, None, file)
return err
d.addErrback(_remove_on_error)
d.addBoth(_convert_error, request)
return d
def renameFile(self, from_pathstring, to_pathstring, overwrite=False):
request = ".renameFile(%r, %r)" % (from_pathstring, to_pathstring)
self.log(request, level=OPERATIONAL)
from_path = self._path_from_string(from_pathstring)
to_path = self._path_from_string(to_pathstring)
from_userpath = self._path_to_utf8(from_path)
to_userpath = self._path_to_utf8(to_path)
# the target directory must already exist
d = deferredutil.gatherResults([self._get_parent_or_node(from_path),
self._get_parent_or_node(to_path)])
def _got( (from_pair, to_pair) ):
if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" %
(from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY)
(from_parent, from_childname) = from_pair
(to_parent, to_childname) = to_pair
if from_childname is None:
raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
if to_childname is None:
raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
# <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
# "It is an error if there already exists a file with the name specified
# by newpath."
# OpenSSH's SFTP server returns FX_PERMISSION_DENIED for this error.
#
# For the standard SSH_FXP_RENAME operation, overwrite=False.
# We also support the posix-rename@openssh.com extension, which uses overwrite=True.
d2 = defer.succeed(None)
if not overwrite:
d2.addCallback(lambda ign: to_parent.get(to_childname))
def _expect_fail(res):
if not isinstance(res, Failure):
raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
# It is OK if we fail for errors other than NoSuchChildError, since that probably
# indicates some problem accessing the destination directory.
res.trap(NoSuchChildError)
d2.addBoth(_expect_fail)
# If there are heisenfiles to be written at the 'from' direntry, then ensure
# they will now be written at the 'to' direntry instead.
d2.addCallback(lambda ign:
self._rename_heisenfiles(from_userpath, from_parent, from_childname,
to_userpath, to_parent, to_childname, overwrite=overwrite))
def _move(renamed):
# FIXME: use move_child_to_path to avoid possible data loss due to #943
#d3 = from_parent.move_child_to_path(from_childname, to_root, to_path, overwrite=overwrite)
d3 = from_parent.move_child_to(from_childname, to_parent, to_childname, overwrite=overwrite)
def _check(err):
if noisy: self.log("_check(%r) in .renameFile(%r, %r, overwrite=%r)" %
(err, from_pathstring, to_pathstring, overwrite), level=NOISY)
if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
return None
if not overwrite and err.check(ExistingChildError):
raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
return err
d3.addBoth(_check)
return d3
d2.addCallback(_move)
return d2
d.addCallback(_got)
d.addBoth(_convert_error, request)
return d
def makeDirectory(self, pathstring, attrs):
request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
self.log(request, level=OPERATIONAL)
path = self._path_from_string(pathstring)
metadata = _attrs_to_metadata(attrs)
if 'no-write' in metadata:
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
return defer.execute(_denied)
d = self._get_root(path)
d.addCallback(lambda (root, path):
self._get_or_create_directories(root, path, metadata))
d.addBoth(_convert_error, request)
return d
def _get_or_create_directories(self, node, path, metadata):
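        # Descriptive note: this walks 'path' one component at a time under 'node',
        # creating any missing intermediate directory via create_subdirectory() and
        # recursing on the remainder. The metadata argument is threaded through the
        # recursion but is not passed to create_subdirectory() here.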
if not IDirectoryNode.providedBy(node):
# TODO: provide the name of the blocking file in the error message.
def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
"is a file in the way") # close enough
return defer.execute(_blocked)
if not path:
return defer.succeed(node)
d = node.get(path[0])
def _maybe_create(f):
f.trap(NoSuchChildError)
return node.create_subdirectory(path[0])
d.addErrback(_maybe_create)
d.addCallback(self._get_or_create_directories, path[1:], metadata)
return d
def removeFile(self, pathstring):
request = ".removeFile(%r)" % (pathstring,)
self.log(request, level=OPERATIONAL)
path = self._path_from_string(pathstring)
d = self._remove_object(path, must_be_file=True)
d.addBoth(_convert_error, request)
return d
def removeDirectory(self, pathstring):
request = ".removeDirectory(%r)" % (pathstring,)
self.log(request, level=OPERATIONAL)
path = self._path_from_string(pathstring)
d = self._remove_object(path, must_be_directory=True)
d.addBoth(_convert_error, request)
return d
def _remove_object(self, path, must_be_directory=False, must_be_file=False):
userpath = self._path_to_utf8(path)
d = self._get_parent_or_node(path)
def _got_parent( (parent, childname) ):
if childname is None:
raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
direntry = _direntry_for(parent, childname)
d2 = defer.succeed(False)
if not must_be_directory:
d2.addCallback(lambda ign: self._abandon_any_heisenfiles(userpath, direntry))
d2.addCallback(lambda abandoned:
parent.delete(childname, must_exist=not abandoned,
must_be_directory=must_be_directory, must_be_file=must_be_file))
return d2
d.addCallback(_got_parent)
return d
def openDirectory(self, pathstring):
request = ".openDirectory(%r)" % (pathstring,)
self.log(request, level=OPERATIONAL)
path = self._path_from_string(pathstring)
d = self._get_parent_or_node(path)
def _got_parent_or_node( (parent_or_node, childname) ):
if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" %
(parent_or_node, childname, pathstring), level=NOISY)
if childname is None:
return parent_or_node
else:
return parent_or_node.get(childname)
d.addCallback(_got_parent_or_node)
def _list(dirnode):
if dirnode.is_unknown():
raise SFTPError(FX_PERMISSION_DENIED,
"cannot list an unknown cap as a directory. Upgrading the gateway "
"to a later Tahoe-LAFS version may help")
if not IDirectoryNode.providedBy(dirnode):
raise SFTPError(FX_PERMISSION_DENIED,
"cannot list a file as if it were a directory")
d2 = dirnode.list()
def _render(children):
parent_readonly = dirnode.is_readonly()
results = []
for filename, (child, metadata) in children.iteritems():
# The file size may be cached or absent.
metadata['no-write'] = _no_write(parent_readonly, child, metadata)
attrs = _populate_attrs(child, metadata)
filename_utf8 = filename.encode('utf-8')
longname = _lsLine(filename_utf8, attrs)
results.append( (filename_utf8, longname, attrs) )
return StoppableList(results)
d2.addCallback(_render)
return d2
d.addCallback(_list)
d.addBoth(_convert_error, request)
return d
def getAttrs(self, pathstring, followLinks):
request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
self.log(request, level=OPERATIONAL)
# When asked about a specific file, report its current size.
# TODO: the modification time for a mutable file should be
# reported as the update time of the best version. But that
# information isn't currently stored in mutable shares, I think.
path = self._path_from_string(pathstring)
userpath = self._path_to_utf8(path)
d = self._get_parent_or_node(path)
def _got_parent_or_node( (parent_or_node, childname) ):
if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
# Some clients will incorrectly try to get the attributes
# of a file immediately after opening it, before it has been put
# into the all_heisenfiles table. This is a race condition bug in
# the client, but we handle it anyway by calling .sync() on all
# files matching either the path or the direntry.
direntry = _direntry_for(parent_or_node, childname)
d2 = self._sync_heisenfiles(userpath, direntry)
if childname is None:
node = parent_or_node
d2.addCallback(lambda ign: node.get_current_size())
d2.addCallback(lambda size:
_populate_attrs(node, {'no-write': node.is_unknown() or node.is_readonly()}, size=size))
else:
parent = parent_or_node
d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname]))
def _got( (child, metadata) ):
if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY)
_assert(IDirectoryNode.providedBy(parent), parent=parent)
metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata)
d3 = child.get_current_size()
d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size))
return d3
def _nosuch(err):
if noisy: self.log("_nosuch(%r)" % (err,), level=NOISY)
err.trap(NoSuchChildError)
if noisy: self.log("checking open files:\nself._heisenfiles = %r\nall_heisenfiles = %r\ndirentry=%r" %
(self._heisenfiles, all_heisenfiles, direntry), level=NOISY)
if direntry in all_heisenfiles:
files = all_heisenfiles[direntry]
if len(files) == 0: # pragma: no cover
return err
# use the heisenfile that was most recently opened
return files[-1].getAttrs()
return err
d2.addCallbacks(_got, _nosuch)
return d2
d.addCallback(_got_parent_or_node)
d.addBoth(_convert_error, request)
return d
def setAttrs(self, pathstring, attrs):
request = ".setAttrs(%r, %r)" % (pathstring, attrs)
self.log(request, level=OPERATIONAL)
if "size" in attrs:
# this would require us to download and re-upload the truncated/extended
# file contents
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
return defer.execute(_unsupported)
path = self._path_from_string(pathstring)
userpath = self._path_to_utf8(path)
d = self._get_parent_or_node(path)
def _got_parent_or_node( (parent_or_node, childname) ):
if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
direntry = _direntry_for(parent_or_node, childname)
d2 = self._update_attrs_for_heisenfiles(userpath, direntry, attrs)
def _update(updated_heisenfiles):
if childname is None:
if updated_heisenfiles:
return None
raise SFTPError(FX_NO_SUCH_FILE, userpath)
else:
desired_metadata = _attrs_to_metadata(attrs)
if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY)
d3 = parent_or_node.set_metadata_for(childname, desired_metadata)
def _nosuch(err):
if updated_heisenfiles:
err.trap(NoSuchChildError)
else:
return err
d3.addErrback(_nosuch)
return d3
d2.addCallback(_update)
d2.addCallback(lambda ign: None)
return d2
d.addCallback(_got_parent_or_node)
d.addBoth(_convert_error, request)
return d
def readLink(self, pathstring):
self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
return defer.execute(_unsupported)
def makeLink(self, linkPathstring, targetPathstring):
self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)
# If this is implemented, note the reversal of arguments described in point 7 of
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
return defer.execute(_unsupported)
def extendedRequest(self, extensionName, extensionData):
self.log(".extendedRequest(%r, <data of length %r>)" % (extensionName, len(extensionData)), level=OPERATIONAL)
# We implement the three main OpenSSH SFTP extensions; see
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
if extensionName == 'posix-rename@openssh.com':
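            # The request payload is two SSH-style strings (a big-endian uint32 length
            # followed by that many bytes): fromPath, then toPath. The struct.unpack
            # calls below decode exactly that layout.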
def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
if 4 > len(extensionData): return defer.execute(_bad)
(fromPathLen,) = struct.unpack('>L', extensionData[0:4])
if 8 + fromPathLen > len(extensionData): return defer.execute(_bad)
(toPathLen,) = struct.unpack('>L', extensionData[(4 + fromPathLen):(8 + fromPathLen)])
if 8 + fromPathLen + toPathLen != len(extensionData): return defer.execute(_bad)
fromPathstring = extensionData[4:(4 + fromPathLen)]
toPathstring = extensionData[(8 + fromPathLen):]
d = self.renameFile(fromPathstring, toPathstring, overwrite=True)
# Twisted conch assumes that the response from an extended request is either
# an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
# (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
def _succeeded(ign):
raise SFTPError(FX_OK, "request succeeded")
d.addCallback(_succeeded)
return d
if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
# f_bsize and f_frsize should be the same to avoid a bug in 'df'
return defer.succeed(struct.pack('>11Q',
1024, # uint64 f_bsize /* file system block size */
1024, # uint64 f_frsize /* fundamental fs block size */
628318530, # uint64 f_blocks /* number of blocks (unit f_frsize) */
314159265, # uint64 f_bfree /* free blocks in file system */
314159265, # uint64 f_bavail /* free blocks for non-root */
200000000, # uint64 f_files /* total file inodes */
100000000, # uint64 f_ffree /* free file inodes */
100000000, # uint64 f_favail /* free file inodes for non-root */
0x1AF5, # uint64 f_fsid /* file system id */
2, # uint64 f_flag /* bit mask = ST_NOSUID; not ST_RDONLY */
65535, # uint64 f_namemax /* maximum filename length */
))
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
(extensionName, len(extensionData)))
return defer.execute(_unsupported)
def realPath(self, pathstring):
self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)
return self._path_to_utf8(self._path_from_string(pathstring))
def _path_to_utf8(self, path):
return (u"/" + u"/".join(path)).encode('utf-8')
def _path_from_string(self, pathstring):
if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)
_assert(isinstance(pathstring, str), pathstring=pathstring)
# The home directory is the root directory.
pathstring = pathstring.strip("/")
if pathstring == "" or pathstring == ".":
path_utf8 = []
else:
path_utf8 = pathstring.split("/")
# <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
# "Servers SHOULD interpret a path name component ".." as referring to
# the parent directory, and "." as referring to the current directory."
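        # Illustrative example: a pathstring of "/foo/./bar/../baz" is parsed by the
        # loop below into the path [u"foo", u"baz"].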
path = []
for p_utf8 in path_utf8:
if p_utf8 == "..":
# ignore excess .. components at the root
if len(path) > 0:
path = path[:-1]
elif p_utf8 != ".":
try:
p = p_utf8.decode('utf-8', 'strict')
except UnicodeError:
raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
path.append(p)
if noisy: self.log(" PATH %r" % (path,), level=NOISY)
return path
def _get_root(self, path):
# return Deferred (root, remaining_path)
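        # e.g. a path of [u"uri", cap, u"foo"] resolves the cap into a node and returns
        # (node, [u"foo"]); any other path returns (self._root, path) unchanged.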
d = defer.succeed(None)
if path and path[0] == u"uri":
d.addCallback(lambda ign: self._client.create_node_from_uri(path[1].encode('utf-8')))
d.addCallback(lambda root: (root, path[2:]))
else:
d.addCallback(lambda ign: (self._root, path))
return d
def _get_parent_or_node(self, path):
# return Deferred (parent, childname) or (node, None)
d = self._get_root(path)
def _got_root( (root, remaining_path) ):
if not remaining_path:
return (root, None)
else:
d2 = root.get_child_at_path(remaining_path[:-1])
d2.addCallback(lambda parent: (parent, remaining_path[-1]))
return d2
d.addCallback(_got_root)
return d
class FakeTransport:
implements(ITransport)
def write(self, data):
logmsg("FakeTransport.write(<data of length %r>)" % (len(data),), level=NOISY)
def writeSequence(self, data):
logmsg("FakeTransport.writeSequence(...)", level=NOISY)
def loseConnection(self):
logmsg("FakeTransport.loseConnection()", level=NOISY)
# getPeer and getHost can just raise errors, since we don't know what to return
class ShellSession(PrefixingLogMixin):
implements(ISession)
def __init__(self, userHandler):
PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
if noisy: self.log(".__init__(%r)" % (userHandler), level=NOISY)
def getPty(self, terminal, windowSize, attrs):
self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)
def openShell(self, protocol):
self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
if hasattr(protocol, 'transport') and protocol.transport is None:
protocol.transport = FakeTransport() # work around Twisted bug
return self._unsupported(protocol)
def execCommand(self, protocol, cmd):
self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
if hasattr(protocol, 'transport') and protocol.transport is None:
protocol.transport = FakeTransport() # work around Twisted bug
d = defer.succeed(None)
if cmd == "df -P -k /":
d.addCallback(lambda ign: protocol.write(
"Filesystem 1024-blocks Used Available Capacity Mounted on\r\n"
"tahoe 628318530 314159265 314159265 50% /\r\n"))
d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessDone(None))))
else:
d.addCallback(lambda ign: self._unsupported(protocol))
return d
def _unsupported(self, protocol):
d = defer.succeed(None)
d.addCallback(lambda ign: protocol.errReceived(
"This server supports only the SFTP protocol. It does not support SCP,\r\n"
"interactive shell sessions, or commands other than one needed by sshfs.\r\n"))
d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
return d
def windowChanged(self, newWindowSize):
self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)
def eofReceived(self):
self.log(".eofReceived()", level=OPERATIONAL)
def closed(self):
self.log(".closed()", level=OPERATIONAL)
# If you have an SFTPUserHandler and want something that provides ISession, you get
# ShellSession(userHandler).
# We use adaptation because this must be a different object to the SFTPUserHandler.
components.registerAdapter(ShellSession, SFTPUserHandler, ISession)
from allmydata.frontends.auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
class Dispatcher:
implements(portal.IRealm)
def __init__(self, client):
self._client = client
def requestAvatar(self, avatarID, mind, interface):
_assert(interface == IConchUser, interface=interface)
rootnode = self._client.create_node_from_uri(avatarID.rootcap)
handler = SFTPUserHandler(self._client, rootnode, avatarID.username)
return (interface, handler, handler.logout)
class SFTPServer(service.MultiService):
def __init__(self, client, accountfile, accounturl,
sftp_portstr, pubkey_file, privkey_file):
precondition(isinstance(accountfile, (unicode, NoneType)), accountfile)
precondition(isinstance(pubkey_file, unicode), pubkey_file)
precondition(isinstance(privkey_file, unicode), privkey_file)
service.MultiService.__init__(self)
r = Dispatcher(client)
p = portal.Portal(r)
if accountfile:
c = AccountFileChecker(self, accountfile)
p.registerChecker(c)
if accounturl:
c = AccountURLChecker(self, accounturl)
p.registerChecker(c)
if not accountfile and not accounturl:
# we could leave this anonymous, with just the /uri/CAP form
raise NeedRootcapLookupScheme("must provide an account file or URL")
pubkey = keys.Key.fromFile(pubkey_file.encode(get_filesystem_encoding()))
privkey = keys.Key.fromFile(privkey_file.encode(get_filesystem_encoding()))
class SSHFactory(factory.SSHFactory):
publicKeys = {pubkey.sshType(): pubkey}
privateKeys = {privkey.sshType(): privkey}
def getPrimes(self):
try:
# if present, this enables diffie-hellman-group-exchange
return primes.parseModuliFile("/etc/ssh/moduli")
except IOError:
return None
f = SSHFactory()
f.portal = p
s = strports.service(sftp_portstr, f)
s.setServiceParent(self)
|
tecwebjoao/TecWeb-TF-2T-B-SI
|
refs/heads/master
|
venv/Lib/encodings/cp932.py
|
817
|
#
# cp932.py: Python Unicode Codec for CP932
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('cp932')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='cp932',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
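# Illustrative usage: once this codec is registered through the encodings package,
# u"\u3042".encode("cp932") yields the two bytes 0x82 0xA0 (the Shift_JIS-style
# encoding of HIRAGANA LETTER A).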
|
AltraMayor/XIA-for-Linux
|
refs/heads/xia
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the functions' names and the call
time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
        has the name given by func. If no such parent is found,
        create func as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
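# Illustrative example: after stripping, a trace_pipe line such as
#   "bash-1234  [000]  6424.151280: mutex_unlock <-rb_simple_write"
# is returned by parseLine() as ('6424.151280', 'mutex_unlock', 'rb_simple_write').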
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
LTD-Beget/sprutio
|
refs/heads/master
|
app/modules/sftp/actions/files/rename.py
|
2
|
from core import FM
class RenameFile(FM.BaseAction):
def __init__(self, request, source_path, target_path, session, **kwargs):
super(RenameFile, self).__init__(request=request, **kwargs)
self.source_path = source_path
self.target_path = target_path
self.session = session
def run(self):
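        # Delegates the rename to the backend over RPC: calls 'sftp/rename_file' with
        # the current user's credentials, the source/target paths and the session,
        # then passes the raw result through process_result() before returning it.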
request = self.get_rpc_request()
result = request.request('sftp/rename_file', login=self.request.get_current_user(),
password=self.request.get_current_password(), source_path=self.source_path,
target_path=self.target_path, session=self.session)
answer = self.process_result(result)
return answer
|
caot/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnboundLocalVariableInspection/DecoratorAndParameter.py
|
83
|
def f(x):
def d(f):
return f
@d #pass
def g(d):
return d
return g(x)
|
Gateswong/GatesMusicPet
|
refs/heads/master
|
music_pet/services/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from . import vgmdb
|
Unow/edx-platform
|
refs/heads/master
|
common/lib/chem/chem/tests.py
|
68
|
import codecs
from fractions import Fraction
import unittest
from .chemcalc import (compare_chemical_expression, divide_chemical_expression,
render_to_html, chemical_equations_equal)
import miller
local_debug = None
def log(s, output_type=None):
if local_debug:
print s
if output_type == 'html':
f.write(s + '\n<br>\n')
class Test_Compare_Equations(unittest.TestCase):
def test_simple_equation(self):
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + H2 -> H2O2'))
# left sides don't match
self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + 2H2 -> H2O2'))
# right sides don't match
self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + H2 -> H2O'))
# factors don't match
self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + H2 -> 2H2O2'))
def test_different_factor(self):
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2'))
self.assertFalse(chemical_equations_equal('2H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2'))
def test_different_arrows(self):
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2'))
self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + H2 <-> 2H2O2'))
def test_exact_match(self):
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2'))
self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2', exact=True))
# order still doesn't matter
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + H2 -> H2O2', exact=True))
def test_syntax_errors(self):
self.assertFalse(chemical_equations_equal('H2 + O2 a-> H2O2',
'2O2 + 2H2 -> 2H2O2'))
self.assertFalse(chemical_equations_equal('H2O( -> H2O2',
'H2O -> H2O2'))
self.assertFalse(chemical_equations_equal('H2 + O2 ==> H2O2', # strange arrow
'2O2 + 2H2 -> 2H2O2'))
class Test_Compare_Expressions(unittest.TestCase):
def test_compare_incorrect_order_of_atoms_in_molecule(self):
self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2"))
def test_compare_same_order_no_phases_no_factors_no_ions(self):
self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O"))
def test_compare_different_order_no_phases_no_factors_no_ions(self):
self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O"))
def test_compare_different_order_three_multimolecule(self):
self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3"))
def test_compare_same_factors(self):
self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O "))
def test_compare_different_factors(self):
self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O "))
def test_compare_correct_ions(self):
self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ "))
def test_compare_wrong_ions(self):
self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- "))
def test_compare_parent_groups_ions(self):
self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- "))
def test_compare_correct_factors_ions_and_one(self):
self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ "))
def test_compare_wrong_factors_ions(self):
self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ "))
def test_compare_float_factors(self):
self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ "))
# Phases tests
def test_compare_phases_ignored(self):
self.assertTrue(compare_chemical_expression(
"H2O(s) + CO2", "H2O+CO2", ignore_state=True))
def test_compare_phases_not_ignored_explicitly(self):
self.assertFalse(compare_chemical_expression(
"H2O(s) + CO2", "H2O+CO2", ignore_state=False))
def test_compare_phases_not_ignored(self): # same as previous
self.assertFalse(compare_chemical_expression(
"H2O(s) + CO2", "H2O+CO2"))
    def test_compare_matching_phases_not_ignored_explicitly(self):
self.assertTrue(compare_chemical_expression(
"H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False))
# all in one cases
def test_complex_additivity(self):
self.assertTrue(compare_chemical_expression(
"5(H1H212)^70010- + 2H20 + 7/2HCl + H2O",
"7/2HCl + 2H20 + H2O + 5(H1H212)^70010-"))
def test_complex_additivity_wrong(self):
self.assertFalse(compare_chemical_expression(
"5(H1H212)^70010- + 2H20 + 7/2HCl + H2O",
"2H20 + 7/2HCl + H2O + 5(H1H212)^70011-"))
def test_complex_all_grammar(self):
self.assertTrue(compare_chemical_expression(
"5[Ni(NH3)4]^2+ + 5/2SO4^2-",
"5/2SO4^2- + 5[Ni(NH3)4]^2+"))
# special cases
def test_compare_one_superscript_explicitly_set(self):
self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ "))
def test_compare_equal_factors_differently_set(self):
self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ "))
def test_compare_one_subscript_explicitly_set(self):
self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102"))
class Test_Divide_Expressions(unittest.TestCase):
''' as compare_ use divide_,
tests here must consider different
division (not equality) cases '''
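    # As the cases below show, divide_chemical_expression() is expected to return the
    # ratio of the first expression to the second (e.g. 2, or Fraction(1, 2)) when the
    # two are proportional, and a falsy value otherwise.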
def test_divide_by_zero(self):
self.assertFalse(divide_chemical_expression(
"0H2O", "H2O"))
def test_divide_wrong_factors(self):
self.assertFalse(divide_chemical_expression(
"5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-"))
def test_divide_right(self):
self.assertEqual(divide_chemical_expression(
"5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1)
def test_divide_wrong_reagents(self):
self.assertFalse(divide_chemical_expression(
"H2O + CO2", "CO2"))
def test_divide_right_simple(self):
self.assertEqual(divide_chemical_expression(
"H2O + CO2", "H2O+CO2"), 1)
def test_divide_right_phases(self):
self.assertEqual(divide_chemical_expression(
"H2O(s) + CO2", "2H2O(s)+2CO2"), Fraction(1, 2))
def test_divide_right_phases_other_order(self):
self.assertEqual(divide_chemical_expression(
"2H2O(s) + 2CO2", "H2O(s)+CO2"), 2)
def test_divide_wrong_phases(self):
self.assertFalse(divide_chemical_expression(
"H2O(s) + CO2", "2H2O+2CO2(s)"))
def test_divide_wrong_phases_but_phases_ignored(self):
self.assertEqual(divide_chemical_expression(
"H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), Fraction(1, 2))
def test_divide_order(self):
self.assertEqual(divide_chemical_expression(
"2CO2 + H2O", "2H2O+4CO2"), Fraction(1, 2))
def test_divide_fract_to_int(self):
self.assertEqual(divide_chemical_expression(
"3/2CO2 + H2O", "2H2O+3CO2"), Fraction(1, 2))
def test_divide_fract_to_frac(self):
self.assertEqual(divide_chemical_expression(
"3/4CO2 + H2O", "2H2O+9/6CO2"), Fraction(1, 2))
    def test_divide_fract_to_frac_wrong(self):
        self.assertFalse(divide_chemical_expression(
            "6/2CO2 + H2O", "2H2O+9/6CO2"))
class Test_Render_Equations(unittest.TestCase):
def test_render1(self):
s = "H2O + CO2"
out = render_to_html(s)
correct = u'<span class="math">H<sub>2</sub>O+CO<sub>2</sub></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
    def test_render_incorrect_reaction(self):
s = "O2C + OH2"
out = render_to_html(s)
correct = u'<span class="math">O<sub>2</sub>C+OH<sub>2</sub></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render2(self):
s = "CO2 + H2O + Fe(OH)3"
out = render_to_html(s)
correct = u'<span class="math">CO<sub>2</sub>+H<sub>2</sub>O+Fe(OH)<sub>3</sub></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render3(self):
s = "3H2O + 2CO2"
out = render_to_html(s)
correct = u'<span class="math">3H<sub>2</sub>O+2CO<sub>2</sub></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render4(self):
s = "H^+ + OH^-"
out = render_to_html(s)
correct = u'<span class="math">H<sup>+</sup>+OH<sup>-</sup></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render5(self):
s = "Fe(OH)^2- + (OH)^-"
out = render_to_html(s)
correct = u'<span class="math">Fe(OH)<sup>2-</sup>+(OH)<sup>-</sup></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render6(self):
s = "7/2H^+ + 3/5OH^-"
out = render_to_html(s)
correct = u'<span class="math"><sup>7</sup>⁄<sub>2</sub>H<sup>+</sup>+<sup>3</sup>⁄<sub>5</sub>OH<sup>-</sup></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render7(self):
s = "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O"
out = render_to_html(s)
correct = u'<span class="math">5(H<sub>1</sub>H<sub>212</sub>)<sup>70010-</sup>+2H<sub>2</sub>O+<sup>7</sup>⁄<sub>2</sub>HCl+H<sub>2</sub>O</span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render8(self):
s = "H2O(s) + CO2"
out = render_to_html(s)
correct = u'<span class="math">H<sub>2</sub>O(s)+CO<sub>2</sub></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render9(self):
s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-"
out = render_to_html(s)
correct = u'<span class="math">5[Ni(NH<sub>3</sub>)<sub>4</sub>]<sup>2+</sup>+<sup>5</sup>⁄<sub>2</sub>SO<sub>4</sub><sup>2-</sup></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render_error(self):
s = "5.2H20"
out = render_to_html(s)
correct = u'<span class="math"><span class="inline-error inline">5.2H20</span></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render_simple_brackets(self):
s = "(Ar)"
out = render_to_html(s)
correct = u'<span class="math">(Ar)</span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render_eq1(self):
s = "H^+ + OH^- -> H2O"
out = render_to_html(s)
correct = u'<span class="math">H<sup>+</sup>+OH<sup>-</sup>\u2192H<sub>2</sub>O</span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render_eq2(self):
s = "H^+ + OH^- <-> H2O"
out = render_to_html(s)
correct = u'<span class="math">H<sup>+</sup>+OH<sup>-</sup>\u2194H<sub>2</sub>O</span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
def test_render_eq3(self):
s = "H^+ + OH^- <= H2O" # unsupported arrow
out = render_to_html(s)
correct = u'<span class="math"><span class="inline-error inline">H^+ + OH^- <= H2O</span></span>'
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
class Test_Crystallography_Miller(unittest.TestCase):
''' Tests for crystallography grade function.'''
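    # In these tests, user_input is a JSON string carrying a "lattice" name and three
    # "points" (each an [x, y, z] triple of coordinate strings) that define the plane,
    # and the answer dict gives the expected 'miller' indices and 'lattice'.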
def test_empty_points(self):
user_input = '{"lattice": "bcc", "points": []}'
self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_only_one_point(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_only_two_points(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_1(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"], ["0.00", "0.00", "0.50"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_2(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,1)', 'lattice': 'bcc'}))
def test_3(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.50", "1.00"], ["1.00", "1.00", "0.50"], ["0.50", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_4(self):
user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.664", "0.00"], ["0.00", "1.00", "0.33"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-3, 3, -3)', 'lattice': 'bcc'}))
def test_5(self):
""" return true only in case points coordinates are exact.
But if they transform to closest 0.05 value it is not true"""
user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.33", "0.00"], ["0.00", "1.00", "0.33"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(-6,3,-6)', 'lattice': 'bcc'}))
def test_6(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.25", "0.00"], ["0.25", "0.00", "0.00"], ["0.00", "0.00", "0.25"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(4,4,4)', 'lattice': 'bcc'}))
    def test_7(self): # goes through origin
user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "0.00", "0.00"], ["0.50", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,0,-1)', 'lattice': 'bcc'}))
def test_8(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.50"], ["1.00", "0.00", "0.50"], ["0.50", "1.00", "0.50"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,0,2)', 'lattice': 'bcc'}))
def test_9(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "1.00", "1.00"], ["1.00", "0.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,0)', 'lattice': 'bcc'}))
def test_10(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "0.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'}))
def test_11(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,1,2)', 'lattice': 'bcc'}))
def test_12(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["0.00", "0.00", "0.50"], ["1.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,1,-2)', 'lattice': 'bcc'}))
def test_13(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.50", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,0,1)', 'lattice': 'bcc'}))
def test_14(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "0.00", "1.00"], ["0.50", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,-1,0)', 'lattice': 'bcc'}))
def test_15(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'}))
def test_16(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'}))
def test_17(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "1.00"], ["1.00", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,1)', 'lattice': 'bcc'}))
def test_18(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'}))
def test_19(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,0)', 'lattice': 'bcc'}))
def test_20(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,0,1)', 'lattice': 'bcc'}))
def test_21(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-1,0,1)', 'lattice': 'bcc'}))
def test_22(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,1,1)', 'lattice': 'bcc'}))
def test_23(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,1)', 'lattice': 'bcc'}))
def test_24(self):
user_input = '{"lattice": "bcc", "points": [["0.66", "0.00", "0.00"], ["0.00", "0.66", "0.00"], ["0.00", "0.00", "0.66"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'bcc'}))
def test_25(self):
user_input = u'{"lattice":"","points":[["0.00","0.00","0.01"],["1.00","1.00","0.01"],["0.00","1.00","1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': ''}))
def test_26(self):
user_input = u'{"lattice":"","points":[["0.00","0.01","0.00"],["1.00","0.00","0.00"],["0.00","0.00","1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,0)', 'lattice': ''}))
def test_27(self):
""" rounding to 0.35"""
user_input = u'{"lattice":"","points":[["0.33","0.00","0.00"],["0.00","0.33","0.00"],["0.00","0.00","0.33"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': ''}))
def test_28(self):
""" rounding to 0.30"""
user_input = u'{"lattice":"","points":[["0.30","0.00","0.00"],["0.00","0.30","0.00"],["0.00","0.00","0.30"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(10,10,10)', 'lattice': ''}))
def test_wrong_lattice(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'fcc'}))
def suite():
testcases = [Test_Compare_Expressions,
Test_Divide_Expressions,
Test_Render_Equations,
Test_Crystallography_Miller]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
return unittest.TestSuite(suites)
if __name__ == "__main__":
local_debug = True
with codecs.open('render.html', 'w', encoding='utf-8') as f:
unittest.TextTestRunner(verbosity=2).run(suite())
# open render.html to look at rendered equations
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/plotly/validators/splom/marker/colorbar/_tickfont.py
|
2
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickfont", parent_name="splom.marker.colorbar", **kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
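# Illustrative note (assuming the usual behaviour of generated plotly compound
# validators): validate_coerce() turns a dict such as {"family": "Arial", "size": 12}
# into a plotly.graph_objs.splom.marker.colorbar.Tickfont instance.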
|
inoshiro/symposion-demo
|
refs/heads/master
|
symposion_project/apps/symposion/review/management/commands/calculate_results.py
|
5
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from symposion.review.models import ProposalResult
class Command(BaseCommand):
def handle(self, *args, **options):
ProposalResult.full_calculate()
|
factorlibre/OCB
|
refs/heads/8.0
|
addons/base_action_rule/base_action_rule.py
|
55
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
import logging
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
DATE_RANGE_FUNCTION = {
'minutes': lambda interval: relativedelta(minutes=interval),
'hour': lambda interval: relativedelta(hours=interval),
'day': lambda interval: relativedelta(days=interval),
'month': lambda interval: relativedelta(months=interval),
False: lambda interval: relativedelta(0),
}
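# e.g. DATE_RANGE_FUNCTION['day'](3) evaluates to relativedelta(days=3); the False key
# handles rules whose delay type is unset.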
def get_datetime(date_str):
'''Return a datetime from a date string or a datetime string'''
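    # e.g. get_datetime('2015-06-01') -> datetime(2015, 6, 1, 0, 0), while a full
    # '%Y-%m-%d %H:%M:%S' string is parsed as-is.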
# complete date time if date_str contains only a date
if ' ' not in date_str:
date_str = date_str + " 00:00:00"
return datetime.strptime(date_str, DEFAULT_SERVER_DATETIME_FORMAT)
class base_action_rule(osv.osv):
""" Base Action Rules """
_name = 'base.action.rule'
_description = 'Action Rules'
_order = 'sequence'
_columns = {
'name': fields.char('Rule Name', required=True),
'model_id': fields.many2one('ir.model', 'Related Document Model',
required=True, domain=[('osv_memory', '=', False)]),
'model': fields.related('model_id', 'model', type="char", string='Model'),
'create_date': fields.datetime('Create Date', readonly=1),
'active': fields.boolean('Active',
help="When unchecked, the rule is hidden and will not be executed."),
'sequence': fields.integer('Sequence',
help="Gives the sequence order when displaying a list of rules."),
'kind': fields.selection(
[('on_create', 'On Creation'),
('on_write', 'On Update'),
('on_create_or_write', 'On Creation & Update'),
('on_time', 'Based on Timed Condition')],
string='When to Run'),
'trg_date_id': fields.many2one('ir.model.fields', string='Trigger Date',
help="When should the condition be triggered. If present, will be checked by the scheduler. If empty, will be checked at creation and update.",
domain="[('model_id', '=', model_id), ('ttype', 'in', ('date', 'datetime'))]"),
'trg_date_range': fields.integer('Delay after trigger date',
help="Delay after the trigger date." \
"You can put a negative number if you need a delay before the" \
"trigger date, like sending a reminder 15 minutes before a meeting."),
'trg_date_range_type': fields.selection([('minutes', 'Minutes'), ('hour', 'Hours'),
('day', 'Days'), ('month', 'Months')], 'Delay type'),
'trg_date_calendar_id': fields.many2one(
'resource.calendar', 'Use Calendar',
help='When calculating a day-based timed condition, it is possible to use a calendar to compute the date based on working days.',
ondelete='set null',
),
'act_user_id': fields.many2one('res.users', 'Set Responsible'),
'act_followers': fields.many2many("res.partner", string="Add Followers"),
'server_action_ids': fields.many2many('ir.actions.server', string='Server Actions',
domain="[('model_id', '=', model_id)]",
help="Examples: email reminders, call object service, etc."),
'filter_pre_id': fields.many2one('ir.filters', string='Before Update Filter',
ondelete='restrict',
domain="[('model_id', '=', model_id.model)]",
help="If present, this condition must be satisfied before the update of the record."),
'filter_id': fields.many2one('ir.filters', string='Filter',
ondelete='restrict',
domain="[('model_id', '=', model_id.model)]",
help="If present, this condition must be satisfied before executing the action rule."),
'last_run': fields.datetime('Last Run', readonly=1, copy=False),
}
_defaults = {
'active': True,
'trg_date_range_type': 'day',
}
def onchange_kind(self, cr, uid, ids, kind, context=None):
clear_fields = []
if kind in ['on_create', 'on_create_or_write']:
clear_fields = ['filter_pre_id', 'trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind in ['on_write', 'on_create_or_write']:
clear_fields = ['trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind == 'on_time':
clear_fields = ['filter_pre_id']
return {'value': dict.fromkeys(clear_fields, False)}
def _get_eval_context(self, cr, uid, context=None):
""" Prepare the context used when evaluating python code
:returns: dict -- evaluation context given to (safe_)eval """
return {
'time': time,
'user': self.pool['res.users'].browse(cr, uid, uid, context=context),
}
def _filter(self, cr, uid, action, action_filter, record_ids, context=None):
""" filter the list record_ids that satisfy the action filter """
eval_context = self._get_eval_context(cr, uid, context=context)
if record_ids and action_filter:
assert action.model == action_filter.model_id, "Filter model different from action rule model"
model = self.pool[action_filter.model_id]
domain = [('id', 'in', record_ids)] + eval(action_filter.domain, eval_context)
ctx = dict(context or {})
ctx.update(eval(action_filter.context))
record_ids = model.search(cr, uid, domain, context=ctx)
return record_ids
def _process(self, cr, uid, action, record_ids, context=None):
""" process the given action on the records """
model = self.pool[action.model_id.model]
# modify records
values = {}
if 'date_action_last' in model._fields:
values['date_action_last'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if action.act_user_id and 'user_id' in model._fields:
values['user_id'] = action.act_user_id.id
if values:
model.write(cr, uid, record_ids, values, context=context)
if action.act_followers and hasattr(model, 'message_subscribe'):
follower_ids = map(int, action.act_followers)
model.message_subscribe(cr, uid, record_ids, follower_ids, context=context)
# execute server actions
if action.server_action_ids:
server_action_ids = map(int, action.server_action_ids)
for record in model.browse(cr, uid, record_ids, context):
action_server_obj = self.pool.get('ir.actions.server')
ctx = dict(context, active_model=model._name, active_ids=[record.id], active_id=record.id)
action_server_obj.run(cr, uid, server_action_ids, context=ctx)
return True
def _register_hook(self, cr, ids=None):
""" Wrap the methods `create` and `write` of the models specified by
the rules given by `ids` (or all existing rules if `ids` is `None`.)
"""
#
# Note: the patched methods create and write must be defined inside
# another function, otherwise their closure may be wrong. For instance,
# the function create refers to the outer variable 'create', which you
# expect to be bound to create itself. But that expectation is wrong if
# create is defined inside a loop; in that case, the variable 'create'
# is bound to the last function defined by the loop.
#
def make_create():
""" instanciate a create method that processes action rules """
def create(self, cr, uid, vals, context=None, **kwargs):
# avoid loops or cascading actions
if context and context.get('action'):
return create.origin(self, cr, uid, vals, context=context, **kwargs)
# call original method with a modified context
context = dict(context or {}, action=True)
new_id = create.origin(self, cr, uid, vals, context=context, **kwargs)
# as it is a new record, we do not consider the actions that have a prefilter
action_model = self.pool.get('base.action.rule')
action_dom = [('model', '=', self._name),
('kind', 'in', ['on_create', 'on_create_or_write'])]
action_ids = action_model.search(cr, uid, action_dom, context=dict(context, active_test=True))
# check postconditions, and execute actions on the records that satisfy them
for action in action_model.browse(cr, uid, action_ids, context=context):
if action_model._filter(cr, uid, action, action.filter_id, [new_id], context=context):
action_model._process(cr, uid, action, [new_id], context=context)
return new_id
return create
def make_write():
""" instanciate a write method that processes action rules """
def write(self, cr, uid, ids, vals, context=None, **kwargs):
# avoid loops or cascading actions
if context and context.get('action'):
return write.origin(self, cr, uid, ids, vals, context=context, **kwargs)
# modify context
context = dict(context or {}, action=True)
ids = [ids] if isinstance(ids, (int, long, str)) else ids
# retrieve the action rules to possibly execute
action_model = self.pool.get('base.action.rule')
action_dom = [('model', '=', self._name),
('kind', 'in', ['on_write', 'on_create_or_write'])]
action_ids = action_model.search(cr, uid, action_dom, context=context)
actions = action_model.browse(cr, uid, action_ids, context=context)
# check preconditions
pre_ids = {}
for action in actions:
pre_ids[action] = action_model._filter(cr, uid, action, action.filter_pre_id, ids, context=context)
# call original method
write.origin(self, cr, uid, ids, vals, context=context, **kwargs)
# check postconditions, and execute actions on the records that satisfy them
for action in actions:
post_ids = action_model._filter(cr, uid, action, action.filter_id, pre_ids[action], context=context)
if post_ids:
action_model._process(cr, uid, action, post_ids, context=context)
return True
return write
updated = False
if ids is None:
ids = self.search(cr, SUPERUSER_ID, [])
for action_rule in self.browse(cr, SUPERUSER_ID, ids):
model = action_rule.model_id.model
model_obj = self.pool.get(model)
if model_obj and not hasattr(model_obj, 'base_action_ruled'):
# monkey-patch methods create and write
model_obj._patch_method('create', make_create())
model_obj._patch_method('write', make_write())
model_obj.base_action_ruled = True
updated = True
return updated
def _update_cron(self, cr, uid, context=None):
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'base_action_rule', 'ir_cron_crm_action', context=context)
except ValueError:
return False
return cron.toggle(model=self._name, domain=[('kind', '=', 'on_time')])
def create(self, cr, uid, vals, context=None):
res_id = super(base_action_rule, self).create(cr, uid, vals, context=context)
if self._register_hook(cr, [res_id]):
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
self._update_cron(cr, uid, context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
super(base_action_rule, self).write(cr, uid, ids, vals, context=context)
if self._register_hook(cr, ids):
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
self._update_cron(cr, uid, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
res = super(base_action_rule, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
data = {'model': False, 'filter_pre_id': False, 'filter_id': False}
if model_id:
model = self.pool.get('ir.model').browse(cr, uid, model_id, context=context)
data.update({'model': model.model})
return {'value': data}
def _check_delay(self, cr, uid, action, record, record_dt, context=None):
if action.trg_date_calendar_id and action.trg_date_range_type == 'day':
start_dt = get_datetime(record_dt)
action_dt = self.pool['resource.calendar'].schedule_days_get_date(
cr, uid, action.trg_date_calendar_id.id, action.trg_date_range,
day_date=start_dt, compute_leaves=True, context=context
)
else:
delay = DATE_RANGE_FUNCTION[action.trg_date_range_type](action.trg_date_range)
action_dt = get_datetime(record_dt) + delay
return action_dt
def _check(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
""" This Function is called by scheduler. """
context = context or {}
# retrieve all the action rules to run based on a timed condition
action_dom = [('kind', '=', 'on_time')]
action_ids = self.search(cr, uid, action_dom, context=dict(context, active_test=True))
eval_context = self._get_eval_context(cr, uid, context=context)
for action in self.browse(cr, uid, action_ids, context=context):
now = datetime.now()
if action.last_run:
last_run = get_datetime(action.last_run)
else:
last_run = datetime.utcfromtimestamp(0)
# retrieve all the records that satisfy the action's condition
model = self.pool[action.model_id.model]
domain = []
ctx = dict(context)
if action.filter_id:
domain = eval(action.filter_id.domain, eval_context)
ctx.update(eval(action.filter_id.context))
if 'lang' not in ctx:
# Filters might be language-sensitive, attempt to reuse creator lang
# as we are usually running this as super-user in background
[filter_meta] = action.filter_id.get_metadata()
user_id = filter_meta['write_uid'] and filter_meta['write_uid'][0] or \
filter_meta['create_uid'][0]
ctx['lang'] = self.pool['res.users'].browse(cr, uid, user_id).lang
record_ids = model.search(cr, uid, domain, context=ctx)
# determine when action should occur for the records
date_field = action.trg_date_id.name
if date_field == 'date_action_last' and 'create_date' in model._fields:
get_record_dt = lambda record: record[date_field] or record.create_date
else:
get_record_dt = lambda record: record[date_field]
# process action on the records that should be executed
for record in model.browse(cr, uid, record_ids, context=context):
record_dt = get_record_dt(record)
if not record_dt:
continue
action_dt = self._check_delay(cr, uid, action, record, record_dt, context=context)
if last_run <= action_dt < now:
try:
context = dict(context or {}, action=True)
self._process(cr, uid, action, [record.id], context=context)
except Exception:
import traceback
_logger.error(traceback.format_exc())
action.write({'last_run': now.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
if automatic:
# auto-commit for batch processing
cr.commit()
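# --- Editor's note: hedged illustration, not part of the original module. ---
# _check_delay() above adds a configurable delay to a record datetime, either
# through a resource calendar or through the module-level DATE_RANGE_FUNCTION
# defined earlier in this file. A minimal stand-alone sketch of that second
# path, assuming the usual "range type -> timedelta factory" mapping:
def _example_timed_condition():
    """Illustrative only: compute when a timed action rule would fire."""
    from datetime import datetime, timedelta
    example_range_functions = {
        'minutes': lambda interval: timedelta(minutes=interval),
        'hour': lambda interval: timedelta(hours=interval),
        'day': lambda interval: timedelta(days=interval),
    }
    record_dt = datetime(2014, 1, 1, 12, 0, 0)
    # e.g. a rule configured as "2 days after the trigger date"
    return record_dt + example_range_functions['day'](2)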
|
Chilledheart/chromium
|
refs/heads/master
|
chrome/test/ispy/server/update_mask_handler.py
|
100
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Request Handler to allow test mask updates."""
import json
import webapp2
import re
import sys
import os
from common import constants
from common import image_tools
from common import ispy_utils
import gs_bucket
class UpdateMaskHandler(webapp2.RequestHandler):
"""Request handler to allow test mask updates."""
def post(self):
"""Accepts post requests.
This method accepts a post request containing test_run and
expectation parameters. It takes the diff of the failure identified
by those parameters, adds it to the expectation's mask, removes the
failure the mask was computed from, and redirects back to the view
for that test run.
"""
test_run = self.request.get('test_run')
expectation = self.request.get('expectation')
# Short-circuit if a parameter is missing.
if not (test_run and expectation):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(
{'error': '\'test_run\' and \'expectation\' must be '
'supplied to update a mask.'}))
return
# Otherwise, set up the utilities.
self.bucket = gs_bucket.GoogleCloudStorageBucket(constants.BUCKET)
self.ispy = ispy_utils.ISpyUtils(self.bucket)
# Short-circuit if the failure does not exist.
if not self.ispy.FailureExists(test_run, expectation):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(
{'error': 'Could not update mask because failure does not exist.'}))
return
# Get the failure namedtuple (which also computes the diff).
failure = self.ispy.GetFailure(test_run, expectation)
# Upload the new mask in place of the original.
self.ispy.UpdateImage(
ispy_utils.GetExpectationPath(expectation, 'mask.png'),
image_tools.ConvertDiffToMask(failure.diff))
# Now that there is no diff for the two images, remove the failure.
self.ispy.RemoveFailure(test_run, expectation)
# Redirect back to the sites list for the test run.
self.redirect('/?test_run=%s' % test_run)
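# --- Editor's note: hedged sketch, not part of the original handler module. ---
# Wiring the handler into a webapp2 WSGI application would look roughly like
# this; the '/update_mask' route is an assumption, not taken from the original
# ispy server configuration.
_example_app = webapp2.WSGIApplication([
    ('/update_mask', UpdateMaskHandler),
], debug=True)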
|
ChinaMassClouds/copenstack-server
|
refs/heads/master
|
openstack/src/horizon-2014.2/openstack_dashboard/dashboards/admin/hostmonitor/views.py
|
1
|
from horizon import tables
from openstack_dashboard.dashboards.admin.hostmonitor import tables as hostmonitor_tables
from openstack_dashboard.openstack.common.requestapi import RequestApi
from openstack_dashboard.openstack.common.dictutils import DictList2ObjectList
from horizon import exceptions
from django.utils.translation import ugettext_lazy as _
class IndexView(tables.MultiTableView):
template_name = 'admin/hostmonitor/index.html'
table_classes = (hostmonitor_tables.HostAlarmTable,
hostmonitor_tables.HostPolicyTable)
def get_host_alarm_data(self):
return DictList2ObjectList([{'host_name':'host_name',
'alarm_content':'alarm_content',
'status':'status'
}])
def get_host_policy_data(self):
return DictList2ObjectList([{'name':'name',
'data_center':'data_center',
'status':'new'
}])
|
Studijacob/MIALab
|
refs/heads/master
|
wd/mainLeer.py
|
1
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import SimpleITK as sitk
import sys
import os
import time
def command_iteration(method):
print("{0:3} = {1:10.5f} : {2}".format(method.GetOptimizerIteration(),
method.GetMetricValue(),
method.GetOptimizerPosition()))
def RSGD():
outputFile = 'RSGD.txt'
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMeanSquares()
R.SetOptimizerAsRegularStepGradientDescent(4.0, .001, 200)
R.SetInitialTransform(sitk.TranslationTransform(fixed.GetDimension()))
R.SetInterpolator(sitk.sitkLinear)
return outputFile, R
def GDLS(fixed, moving):
outputFile = 'GDLS.txt'
fixed = sitk.Normalize(fixed)
fixed = sitk.DiscreteGaussian(fixed, 2.0)
moving = sitk.Normalize(moving)
moving = sitk.DiscreteGaussian(moving, 2.0)
R = sitk.ImageRegistrationMethod()
R.SetMetricAsJointHistogramMutualInformation()
R.SetOptimizerAsGradientDescentLineSearch(learningRate=1.0,
numberOfIterations=200,
convergenceMinimumValue=1e-5,
convergenceWindowSize=5)
R.SetInitialTransform(sitk.TranslationTransform(fixed.GetDimension()))
R.SetInterpolator(sitk.sitkLinear)
return outputFile, fixed, moving, R
def corr_RSGD(fixed, moving):
outputFile = 'corr_RSGD.txt'
R = sitk.ImageRegistrationMethod()
R.SetMetricAsCorrelation()
R.SetOptimizerAsRegularStepGradientDescent(learningRate=2.0,
minStep=1e-4,
numberOfIterations=500,
gradientMagnitudeTolerance=1e-8)
R.SetOptimizerScalesFromIndexShift()
tx = sitk.CenteredTransformInitializer(fixed, moving, sitk.Similarity3DTransform())
R.SetInitialTransform(tx)
R.SetInterpolator(sitk.sitkLinear)
return outputFile, R
def MR_MMI_GD(fixed_image, moving_image):
outputFile = 'MR_MMI_GD.txt'
# initial alignment of the two volumes
transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.Similarity3DTransform(),sitk.CenteredTransformInitializerFilter.GEOMETRY)
# multi-resolution rigid registration using Mutual Information
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
R.SetMetricSamplingStrategy(R.RANDOM)
R.SetMetricSamplingPercentage(0.01)
R.SetInterpolator(sitk.sitkLinear)
R.SetOptimizerAsRegularStepGradientDescent(learningRate=1.0,
minStep=0.0001,
numberOfIterations=200,
relaxationFactor=0.7,
gradientMagnitudeTolerance=1e-4,
estimateLearningRate=R.EachIteration,
maximumStepSizeInPhysicalUnits=0.0)
R.SetOptimizerScalesFromPhysicalShift()
R.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
R.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
R.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
R.SetInitialTransform(transform)
return outputFile, R
def MMI_RSGD():
outputFile = 'MMI_RSGD.txt'
numberOfBins = 24
samplingPercentage = 0.10
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation(numberOfBins)
R.SetMetricSamplingPercentage(samplingPercentage, sitk.sitkWallClock)
R.SetMetricSamplingStrategy(R.RANDOM)
R.SetOptimizerAsRegularStepGradientDescent(1.0, .001, 200)
R.SetInitialTransform(sitk.TranslationTransform(fixed.GetDimension()))
R.SetInterpolator(sitk.sitkLinear)
return outputFile, R
fixed = sitk.ReadImage('./test/mni_icbm152_t1_tal_nlin_sym_09a.nii.gz', sitk.sitkFloat32)
moving = sitk.ReadImage('./test/100307/T1native.nii.gz', sitk.sitkFloat32)
# different registration and metric systems
# outputFile, R = RSGD() # Total execution time: 32.13047194480896s
# outputFile, fixed, moving, R = GDLS(fixed, moving) # Total execution time: 219.74626207351685s
# outputFile, R = corr_RSGD(fixed, moving) # Total execution time: 199.60729265213013s
outputFile, R = MR_MMI_GD(fixed, moving)
# outputFile, R = MMI_RSGD() # Total execution time: 7.378397226333618s
R.AddCommand( sitk.sitkIterationEvent, lambda: command_iteration(R) )
# We'll measure the execution time
start = time.time()
outTx = R.Execute(fixed, moving)
exec_time = time.time()-start
print('Total execution time: {}s'.format(exec_time))
print("-------")
print(outTx)
print("Optimizer stop condition: {0}".format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))
sitk.WriteTransform(outTx, outputFile)
# no scaling -> maybe no transformation applied before saving??
registered_image = sitk.Resample(moving, fixed, outTx, sitk.sitkLinear, 0.0, moving.GetPixelIDValue())
sitk.WriteImage(registered_image, 'myRegistred2.nii.gz')
T2_native = sitk.ReadImage('./test/100307/T2native.nii.gz', sitk.sitkFloat32)
registered_image_T2 = sitk.Resample(T2_native, fixed, outTx, sitk.sitkLinear, 0.0, moving.GetPixelIDValue())
sitk.WriteImage(registered_image_T2, 'myRegistred2_T2.nii.gz')
if ( not "SITK_NOSHOW" in os.environ ):
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(100)
resampler.SetTransform(outTx)
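    # Editor's note (hedged completion, not in the original file): the script
    # configures this resampler but never runs it. The SimpleITK examples this
    # block mirrors usually finish by executing the resampler and displaying
    # the result; a minimal version of that presumably missing step could be:
    out = resampler.Execute(moving)
    sitk.Show(sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8),
              'resampled preview')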
|
arjoly/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_cluster_comparison.py
|
246
|
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
ingenioustechie/zamboni
|
refs/heads/master
|
mkt/operators/tests/test_management.py
|
19
|
from cStringIO import StringIO
from nose.tools import eq_, ok_
from django.core.management import call_command
from django.core.management.base import CommandError
import mkt.site.tests
from mkt.constants.carriers import TELEFONICA, AMERICA_MOVIL
from mkt.constants.regions import BRA, FRA
from mkt.operators.models import OperatorPermission
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
class TestCommand(mkt.site.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.user = UserProfile.objects.get(pk=999)
self.email = self.user.email
self.buff = StringIO()
def tearDown(self):
if not self.buff.closed:
self.buff.close()
def call(self, *args, **opts):
call_command('operator_user', stdout=self.buff, *args, **opts)
self.buff.seek(0)
return self.buff
def test_invalid_command(self):
with self.assertRaises(CommandError):
self.call('foo')
def add(self, email, carrier, region):
self.call('add', email, carrier, region)
def test_add(self):
eq_(OperatorPermission.objects.all().count(), 0)
self.add(self.email, TELEFONICA.slug, BRA.slug)
qs = OperatorPermission.objects.all()
eq_(qs.count(), 1)
eq_(qs[0].user, self.user)
eq_(qs[0].carrier, TELEFONICA.id)
eq_(qs[0].region, BRA.id)
def test_add_dupe(self):
self.add(self.email, TELEFONICA.slug, BRA.slug)
with self.assertRaises(CommandError):
self.add(self.email, TELEFONICA.slug, BRA.slug)
def test_add_invalid_user(self):
with self.assertRaises(CommandError):
self.add('foo@bar.com', TELEFONICA.slug, BRA.slug)
def test_add_invalid_carrier(self):
with self.assertRaises(CommandError):
self.add(self.email, 'foocarrier', BRA.slug)
def test_add_invalid_region(self):
with self.assertRaises(CommandError):
self.add(self.email, TELEFONICA.slug, 'fooregion')
def test_add_bad_args(self):
with self.assertRaises(CommandError):
self.call('add', self.email)
with self.assertRaises(CommandError):
self.call('add', self.email, TELEFONICA.slug, BRA.slug, 'foo')
def remove(self, email, carrier=None, region=None, all=False):
self.call('remove', email, carrier, region, remove_all=all)
def test_remove(self):
self.add(self.email, TELEFONICA.slug, BRA.slug)
self.remove(self.email, TELEFONICA.slug, BRA.slug)
eq_(OperatorPermission.objects.all().count(), 0)
def test_remove_nonexistant(self):
with self.assertRaises(CommandError):
self.remove(self.email, TELEFONICA.slug, BRA.slug)
def test_remove_invalid_user(self):
with self.assertRaises(CommandError):
self.remove('foo@bar.com', TELEFONICA.slug, BRA.slug)
def test_remove_invalid_carrier(self):
with self.assertRaises(CommandError):
self.remove(self.email, 'foocarrier', BRA.slug)
def test_remove_invalid_region(self):
with self.assertRaises(CommandError):
self.remove(self.email, TELEFONICA.slug, 'fooregion')
def test_remove_bad_args(self):
with self.assertRaises(CommandError):
self.call('remove', self.email)
with self.assertRaises(CommandError):
self.call('remove', self.email, TELEFONICA.slug, BRA.slug, 'foo')
def test_remove_all(self):
self.add(self.email, TELEFONICA.slug, BRA.slug)
self.add(self.email, AMERICA_MOVIL.slug, FRA.slug)
self.remove(self.email, all=True)
eq_(OperatorPermission.objects.all().count(), 0)
def test_remove_all_nonexistant(self):
with self.assertRaises(CommandError):
self.remove(self.email, all=True)
def test_remove_all_invalid_email(self):
with self.assertRaises(CommandError):
self.remove('foo@bar.com', all=True)
def list(self, email):
return self.call('list', email)
def test_list(self):
pairs = [
[TELEFONICA.slug, BRA.slug],
[AMERICA_MOVIL.slug, FRA.slug],
]
for carrier, region in pairs:
self.add(self.email, carrier, region)
output = self.list(self.email).read()
for carrier, region in pairs:
ok_(output.find('%s/%s' % (region, carrier)) >= 0)
def test_list_invalid_email(self):
with self.assertRaises(CommandError):
self.list('foo@bar.com')
|
dilipbobby/DataScience
|
refs/heads/master
|
Python3/Level-1/posorneg.py
|
1
|
_author__ = "Dilipbobby"
#taking the input from the user
num = float(input("Enter a number: "))
#using if else if condtion for checking out a number is postive or equal to zero or negative
if num > 0:
print("{0} is a positive number".format(num))
elif num == 0:
print("{0} is zero".format(num))
else:
print("{0} is negative number".format(num))
|
sachinkum/Bal-Aveksha
|
refs/heads/master
|
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
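# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The prober is normally driven indirectly through the package's
# UniversalDetector, but it can be exercised on its own. The sample bytes
# below are Shift_JIS-encoded Japanese text chosen purely for illustration.
def _example_probe():
    """Illustrative only: feed Shift_JIS bytes and read back the verdict."""
    prober = SJISProber()
    prober.feed(bytearray(b'\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd'))
    return prober.get_charset_name(), prober.get_confidence()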
|
ooici/marine-integrations
|
refs/heads/master
|
mi/dataset/driver/WFP_ENG/STC_IMODEM/test/test_driver.py
|
1
|
"""
@package mi.dataset.driver.WFP_ENG.STC_IMODEM.test.test_driver
@file marine-integrations/mi/dataset/driver/WFP_ENG/STC_IMODEM/test/test_driver.py
@author Emily Hahn
@brief Test cases for WFP_ENG__STC_IMODEM driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import os
from nose.plugins.attrib import attr
from mi.idk.config import Config
from mi.core.log import get_logger
log = get_logger()
from mi.idk.exceptions import SampleTimeout
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.driver.WFP_ENG.STC_IMODEM.driver import WFP_ENG__STC_IMODEM_DataSetDriver, DataTypeKey
from mi.dataset.parser.wfp_eng__stc_imodem_particles import DataParticleType
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStatusRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStartRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemEngineeringRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStatusTelemeteredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStartTelemeteredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemEngineeringTelemeteredDataParticle
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi', 'dataset', 'driver', 'WFP_ENG', 'STC_IMODEM', 'resource')
RECOV_DIR = '/tmp/dsatest_rec'
TELEM_DIR = '/tmp/dsatest_tel'
RECOV_FILE_ONE = 'E00000001.DAT'
RECOV_FILE_TWO = 'E00000002.DAT'
TELEM_FILE_ONE = 'E00000001.DAT'
TELEM_FILE_TWO = 'E00000002.DAT'
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.WFP_ENG.STC_IMODEM.driver',
driver_class='WFP_ENG__STC_IMODEM_DataSetDriver',
agent_resource_id='123xyz',
agent_name='Agent007',
agent_packet_config=WFP_ENG__STC_IMODEM_DataSetDriver.stream_config(),
startup_config={
DataSourceConfigKey.RESOURCE_ID: 'wfp_eng__stc_imodem',
DataSourceConfigKey.HARVESTER:
{
DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED:
{
DataSetDriverConfigKeys.DIRECTORY: RECOV_DIR,
DataSetDriverConfigKeys.PATTERN: 'E*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED:
{
DataSetDriverConfigKeys.DIRECTORY: TELEM_DIR,
DataSetDriverConfigKeys.PATTERN: 'E*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
},
DataSourceConfigKey.PARSER: {
DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED: {},
DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED: {},
}
}
)
RECOV_PARTICLES = (WfpEngStcImodemStatusRecoveredDataParticle,
WfpEngStcImodemStartRecoveredDataParticle,
WfpEngStcImodemEngineeringRecoveredDataParticle)
TELEM_PARTICLES = (WfpEngStcImodemStatusTelemeteredDataParticle,
WfpEngStcImodemStartTelemeteredDataParticle,
WfpEngStcImodemEngineeringTelemeteredDataParticle)
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def test_get(self):
"""
Test that we can get data from files. Verify that the driver
sampling can be started and stopped
"""
# Clear any existing sampling
self.clear_sample_data()
# Clear the asynchronous callback results
self.clear_async_data()
# Notify the driver to start sampling
self.driver.start_sampling()
# Test simple telemetered data handling
self.create_sample_data_set_dir('telemetered_one.dat', TELEM_DIR, TELEM_FILE_ONE)
self.assert_data(TELEM_PARTICLES, 'telemetered.one.yml', count=2, timeout=10)
# Test simple recovered data handling
self.create_sample_data_set_dir('recovered_one.dat', RECOV_DIR, RECOV_FILE_ONE)
self.assert_data(RECOV_PARTICLES, 'recovered.one.yml', count=2, timeout=10)
def test_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
# Clear any existing sampling
self.clear_sample_data()
path_1 = self.create_sample_data_set_dir('first.DAT', RECOV_DIR, RECOV_FILE_ONE)
path_2 = self.create_sample_data_set_dir('second.DAT', RECOV_DIR, RECOV_FILE_TWO)
path_3 = self.create_sample_data_set_dir('first.DAT', TELEM_DIR, TELEM_FILE_ONE)
path_4 = self.create_sample_data_set_dir('second.DAT', TELEM_DIR, TELEM_FILE_TWO)
key_rec = DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED
key_tel = DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED
# Set the state of the driver to the prior state altered to have ingested the first recovered
# data file fully, not ingested the second recovered data file, and to have not returned the fifth
# telemetered data particle in the original version of the telemetered data file
state = {
key_rec: {
# The following recovered file state will be fully read
RECOV_FILE_ONE: self.get_file_state(path_1, True, position=50),
# The following recovered file state will start at byte 76
RECOV_FILE_TWO: self.get_file_state(path_2, False, position=76)
},
key_tel: {
TELEM_FILE_TWO: self.get_file_state(path_4, True, position=76),
TELEM_FILE_ONE: self.get_file_state(path_3, False, position=0)
}
}
self.driver = self._get_driver_object(memento=state)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data(RECOV_PARTICLES, 'recovered_partial.result.yml', count=2, timeout=10)
self.assert_data(TELEM_PARTICLES, 'telemetered_partial.result.yml', count=2, timeout=10)
def test_stop_start_ingest(self):
"""
Test the ability to stop and restart sampling, and ingesting files in the correct order
"""
# Clear any existing sampling
self.clear_sample_data()
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data_set_dir('first.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('second.DAT', RECOV_DIR, RECOV_FILE_TWO)
self.assert_data(RECOV_PARTICLES, 'recovered_first.result.yml', count=2, timeout=10)
self.assert_file_ingested(RECOV_FILE_ONE, DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED)
self.assert_file_not_ingested(RECOV_FILE_TWO, DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED)
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(RECOV_PARTICLES, 'recovered_second.result.yml', count=5, timeout=10)
self.assert_file_ingested(RECOV_FILE_ONE, DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED)
self.assert_file_ingested(RECOV_FILE_TWO, DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED)
self.driver.stop_sampling()
self.driver.start_sampling()
self.create_sample_data_set_dir('first.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.create_sample_data_set_dir('second.DAT', TELEM_DIR, TELEM_FILE_TWO)
self.assert_data(TELEM_PARTICLES, 'telemetered_first.result.yml', count=2, timeout=10)
self.assert_file_ingested(TELEM_FILE_ONE, DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED)
self.assert_file_not_ingested(TELEM_FILE_TWO, DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED)
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(TELEM_PARTICLES, 'telemetered_second.result.yml', count=5, timeout=10)
self.assert_file_ingested(TELEM_FILE_ONE, DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED)
self.assert_file_ingested(TELEM_FILE_TWO, DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED)
def test_sample_exception(self):
"""
test that a file is marked as parsed if it has a sample exception (which will happen with an empty file)
"""
self.clear_async_data()
filename = 'Efoo.dat'
self.create_sample_data_set_dir(filename, RECOV_DIR, RECOV_FILE_ONE)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(RECOV_FILE_ONE, DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED)
self.clear_async_data()
self.create_sample_data_set_dir(filename, TELEM_DIR, TELEM_FILE_ONE)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(TELEM_FILE_ONE, DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def setUp(self):
super(QualificationTest, self).setUp()
def test_publish_path(self):
"""
Set up an agent/driver/harvester/parser and verify that data is
published out of the agent
"""
self.create_sample_data_set_dir('second.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('second.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# NOTE: If the processing is not slowed down here, the engineering samples are
# returned in the wrong order
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
result_eng = self.data_subscribers.get_samples(DataParticleType.ENGINEERING_RECOVERED, 4)
log.debug("Recovered First RESULT: %s", result_eng)
result = self.data_subscribers.get_samples(DataParticleType.START_TIME_RECOVERED, 1)
log.debug("Recovered Second RESULT: %s", result)
result.extend(result_eng)
log.debug("Recovered Extended RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'recovered_second.result.yml')
result_eng = self.data_subscribers.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 4)
log.debug("Telemetered First RESULT: %s", result_eng)
result = self.data_subscribers.get_samples(DataParticleType.START_TIME_TELEMETERED, 1)
log.debug("Telemetered Second RESULT: %s", result)
result.extend(result_eng)
log.debug("Telemetered Extended RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'telemetered_second.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
def test_cp02pmuo(self):
"""
Test with an example file from cp02pmuo platform
"""
self.create_sample_data_set_dir('CP02PMUO.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('CP02PMUO.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize()
self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 316, 60)
self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 316, 60)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)
def test_cp02pmui(self):
"""
Test with an example file from cp02pmui platform
"""
self.create_sample_data_set_dir('CP02PMUI.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('CP02PMUI.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize()
self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 267, 60)
self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 267, 60)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)
def test_cp02pmci(self):
"""
Test with an example file from cp02pmci platform
"""
self.create_sample_data_set_dir('CP02PMCI.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('CP02PMCI.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize()
self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 53, 40)
self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 53, 40)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)
def test_ce09ospm(self):
"""
Test with an example file from ce09ospm platform
"""
self.create_sample_data_set_dir('CE09OSPM.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('CE09OSPM.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize()
self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 14, 10)
self.get_samples(DataParticleType.STATUS_RECOVERED, 1, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 14, 10)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 1, 10)
def test_cp04ospm(self):
"""
Test with an example file from cp04ospm platform
"""
self.create_sample_data_set_dir('CP04OSPM.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('CP04OSPM.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize()
self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 14, 10)
self.get_samples(DataParticleType.STATUS_RECOVERED, 1, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 14, 10)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 1, 10)
def test_large_import(self):
"""
Test importing a large number of samples from the file at once
"""
self.create_sample_data_set_dir('E0000303.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('E0000427.DAT', RECOV_DIR, RECOV_FILE_TWO)
self.create_sample_data_set_dir('E0000303.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.create_sample_data_set_dir('E0000427.DAT', TELEM_DIR, TELEM_FILE_TWO)
self.assert_initialize()
# get results for each of the data particle streams
self.get_samples(DataParticleType.START_TIME_RECOVERED, 2, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 64, 40)
self.get_samples(DataParticleType.STATUS_RECOVERED, 2, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 2, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 64, 40)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 2, 10)
def test_status_in_middle(self):
"""
This file has status particles in the middle and at the end
"""
self.create_sample_data_set_dir('E0000039.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('E0000039.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize()
# get results for each of the data particle streams
self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 53, 40)
self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)
self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)
self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 53, 40)
self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)
def test_stop_start(self):
"""
Test the agent's ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('first.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.create_sample_data_set_dir('first.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
recov_result = self.get_samples(DataParticleType.START_TIME_RECOVERED)
recov_result2 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED)
recov_result.extend(recov_result2)
log.debug("RECOVERED RESULT: %s", recov_result)
telem_result = self.get_samples(DataParticleType.START_TIME_TELEMETERED)
telem_result2 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED)
telem_result.extend(telem_result2)
log.debug("TELEMETERED RESULT: %s", telem_result)
# Verify values
self.assert_data_values(recov_result, 'recovered_first.result.yml')
self.assert_data_values(telem_result, 'telemetered_first.result.yml')
self.assert_all_queue_empty()
self.create_sample_data_set_dir('second.DAT', RECOV_DIR, RECOV_FILE_TWO)
self.create_sample_data_set_dir('second.DAT', TELEM_DIR, TELEM_FILE_TWO)
# Now read the first three records of the second file then stop
recov_result = self.get_samples(DataParticleType.START_TIME_RECOVERED)
recov_result2 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 2)
recov_result.extend(recov_result2)
log.debug("got recovered result 1 %s", recov_result)
telem_result = self.get_samples(DataParticleType.START_TIME_TELEMETERED)
telem_result2 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 2)
telem_result.extend(telem_result2)
log.debug("got telemetered result 1 %s", telem_result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# Restart sampling and ensure we get the last 2 records of the file
self.assert_start_sampling()
recov_result3 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 2)
log.debug("got recovered result 2 %s", recov_result3)
recov_result.extend(recov_result3)
telem_result3 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 2)
log.debug("got telemetered result 2 %s", telem_result3)
telem_result.extend(telem_result3)
self.assert_data_values(recov_result, 'recovered_second.result.yml')
self.assert_data_values(telem_result, 'telemetered_second.result.yml')
self.assert_all_queue_empty()
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_shutdown_restart(self):
"""
Test the agent's ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('first.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('first.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
recov_result = self.get_samples(DataParticleType.START_TIME_RECOVERED, 1)
recov_result2 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 1)
recov_result.extend(recov_result2)
log.debug("Recovered RESULT: %s", recov_result)
telem_result = self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1)
telem_result2 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 1)
telem_result.extend(telem_result2)
log.debug("Telemetered RESULT: %s", telem_result)
# Verify values
self.assert_data_values(recov_result, 'recovered_first.result.yml')
self.assert_data_values(telem_result, 'telemetered_first.result.yml')
self.assert_all_queue_empty()
self.create_sample_data_set_dir('second.DAT', RECOV_DIR, RECOV_FILE_TWO)
self.create_sample_data_set_dir('second.DAT', TELEM_DIR, TELEM_FILE_TWO)
# Now read the first three records of the second file then stop
recov_result = self.get_samples(DataParticleType.START_TIME_RECOVERED, 1)
recov_result2 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 2)
recov_result.extend(recov_result2)
log.debug("got recovered result 1 %s", recov_result)
telem_result = self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1)
telem_result2 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 2)
telem_result.extend(telem_result2)
log.debug("got telemetered result 1 %s", telem_result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling and ensure we get the last 2 records of the file
self.assert_start_sampling()
recov_result3 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 2, 200)
log.debug("got recovered result 2 %s", recov_result3)
recov_result.extend(recov_result3)
self.assert_data_values(recov_result, 'recovered_second.result.yml')
telem_result3 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 2, 200)
log.debug("got telemetered result 2 %s", telem_result3)
telem_result.extend(telem_result3)
self.assert_data_values(telem_result, 'telemetered_second.result.yml')
self.assert_all_queue_empty()
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def assert_all_queue_empty(self):
"""
Assert the sample queues for all six data streams are empty
"""
self.assert_sample_queue_size(DataParticleType.START_TIME_RECOVERED, 0)
self.assert_sample_queue_size(DataParticleType.ENGINEERING_RECOVERED, 0)
self.assert_sample_queue_size(DataParticleType.STATUS_RECOVERED, 0)
self.assert_sample_queue_size(DataParticleType.START_TIME_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.ENGINEERING_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.STATUS_TELEMETERED, 0)
def test_parser_exception(self):
"""
Test an exception is raised after the driver is started during
record parsing.
"""
self.clear_sample_data()
self.create_sample_data_set_dir('bad.DAT', RECOV_DIR, RECOV_FILE_ONE)
self.create_sample_data_set_dir('first.DAT', RECOV_DIR, RECOV_FILE_TWO)
self.create_sample_data_set_dir('bad.DAT', TELEM_DIR, TELEM_FILE_ONE)
self.create_sample_data_set_dir('first.DAT', TELEM_DIR, TELEM_FILE_TWO)
self.assert_initialize()
self.event_subscribers.clear_events()
recov_result = self.get_samples(DataParticleType.START_TIME_RECOVERED)
recov_result2 = self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 1)
recov_result.extend(recov_result2)
self.assert_data_values(recov_result, 'recovered_first.result.yml')
telem_result = self.get_samples(DataParticleType.START_TIME_TELEMETERED)
telem_result2 = self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 1)
telem_result.extend(telem_result2)
self.assert_data_values(telem_result, 'telemetered_first.result.yml')
self.assert_all_queue_empty()
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
|
toontownfunserver/Panda3D-1.9.0
|
refs/heads/master
|
python/Lib/distutils/extension.py
|
250
|
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
the full name of the extension, including any packages -- ie.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
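# --- Editor's note: hedged usage sketch, not part of the original module. ---
# A minimal instance exercising the attributes documented in the class
# docstring above; the module name, source file, macros and library are made
# up purely for illustration.
_example_extension = Extension(
    name='demo._speedups',
    sources=['demo/_speedups.c'],
    include_dirs=['include'],
    define_macros=[('DEMO_FAST', '1'), ('DEMO_DEBUG', None)],
    undef_macros=['NDEBUG'],
    libraries=['m'],
)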
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
ext.define_macros.append((value[0:equals],
value[equals+1:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
|
muravjov/ansible
|
refs/heads/stable-1.9
|
v2/ansible/plugins/strategies/__init__.py
|
10
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import Queue
import time
from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
from ansible.playbook.role import ROLE_CACHE, hash_params
from ansible.plugins import module_loader
from ansible.utils.debug import debug
__all__ = ['StrategyBase']
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm.get_notified_handlers()
self._callback = tqm.get_callback()
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
def run(self, iterator, connection_info, result=True):
# save the counts on failed/unreachable hosts, as the cleanup/handler
# methods will clear that information during their runs
num_failed = len(self._tqm._failed_hosts)
num_unreachable = len(self._tqm._unreachable_hosts)
#debug("running the cleanup portion of the play")
#result &= self.cleanup(iterator, connection_info)
debug("running handlers")
result &= self.run_handlers(iterator, connection_info)
if not result:
if num_unreachable > 0:
return 3
elif num_failed > 0:
return 2
else:
return 1
else:
return 0
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.get_name() not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def _queue_task(self, host, task, task_vars, connection_info):
''' handles queueing the task up to be sent to a worker '''
debug("entering _queue_task() for %s/%s" % (host, task))
# and then queue the new task
debug("%s - putting task (%s) in queue" % (host, task))
try:
debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
(worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
self._cur_worker += 1
if self._cur_worker >= len(self._workers):
self._cur_worker = 0
self._pending_results += 1
main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, module_loader), block=False)
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
debug("got an error while queuing: %s" % e)
return
debug("exiting _queue_task() for %s/%s" % (host, task))
def _process_pending_results(self, iterator):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
while not self._final_q.empty() and not self._tqm._terminated:
try:
result = self._final_q.get(block=False)
debug("got result from result worker: %s" % (result,))
# all host status messages contain 2 entries: (msg, task_result)
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
task_result = result[1]
host = task_result._host
task = task_result._task
if result[0] == 'host_task_failed':
if not task.ignore_errors:
debug("marking %s as failed" % host.get_name())
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.get_name()] = True
self._callback.runner_on_failed(task, task_result)
elif result[0] == 'host_unreachable':
self._tqm._unreachable_hosts[host.get_name()] = True
self._callback.runner_on_unreachable(task, task_result)
elif result[0] == 'host_task_skipped':
self._callback.runner_on_skipped(task, task_result)
elif result[0] == 'host_task_ok':
self._callback.runner_on_ok(task, task_result)
self._pending_results -= 1
if host.name in self._blocked_hosts:
del self._blocked_hosts[host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in ROLE_CACHE[task_result._task._role._role_name].iteritems():
hashed_entry = hash_params(task_result._task._role._role_params)
if entry == hashed_entry:
role_obj._had_task_run = True
ret_results.append(task_result)
#elif result[0] == 'include':
# host = result[1]
# task = result[2]
# include_file = result[3]
# include_vars = result[4]
#
# if isinstance(task, Handler):
# # FIXME: figure out how to make includes work for handlers
# pass
# else:
# original_task = iterator.get_original_task(host, task)
# if original_task and original_task._role:
# include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file)
# new_tasks = self._load_included_file(original_task, include_file, include_vars)
# iterator.add_tasks(host, new_tasks)
elif result[0] == 'add_host':
task_result = result[1]
new_host_info = task_result.get('add_host', dict())
self._add_host(new_host_info)
elif result[0] == 'add_group':
host = result[1]
task_result = result[2]
group_name = task_result.get('add_group')
self._add_group(host, group_name)
elif result[0] == 'notify_handler':
host = result[1]
handler_name = result[2]
if handler_name not in self._notified_handlers:
self._notified_handlers[handler_name] = []
if host not in self._notified_handlers[handler_name]:
self._notified_handlers[handler_name].append(host)
elif result[0] == 'set_host_var':
host = result[1]
var_name = result[2]
var_value = result[3]
self._variable_manager.set_host_variable(host, var_name, var_value)
elif result[0] == 'set_host_facts':
host = result[1]
facts = result[2]
self._variable_manager.set_host_facts(host, facts)
else:
raise AnsibleError("unknown result message received: %s" % result[0])
except Queue.Empty:
pass
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
while self._pending_results > 0 and not self._tqm._terminated:
debug("waiting for pending results (%d left)" % self._pending_results)
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._tqm._terminated:
break
time.sleep(0.01)
return ret_results
def _add_host(self, host_info):
'''
Helper function to add a new host to inventory based on a task result.
'''
host_name = host_info.get('host_name')
# Check if host in cache, add if not
if host_name in self._inventory._hosts_cache:
new_host = self._inventory._hosts_cache[host_name]
else:
new_host = Host(host_name)
self._inventory._hosts_cache[host_name] = new_host
allgroup = self._inventory.get_group('all')
allgroup.add_host(new_host)
# Set/update the vars for this host
# FIXME: probably should have a set vars method for the host?
new_vars = host_info.get('host_vars', dict())
new_host.vars.update(new_vars)
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if not self._inventory.get_group(group_name):
new_group = Group(group_name)
self._inventory.add_group(new_group)
new_group.vars = self._inventory.get_group_variables(group_name)
else:
new_group = self._inventory.get_group(group_name)
new_group.add_host(new_host)
# add this host to the group cache
if self._inventory._groups_list is not None:
if group_name in self._inventory._groups_list:
if new_host.name not in self._inventory._groups_list[group_name]:
self._inventory._groups_list[group_name].append(new_host.name)
# clear pattern caching completely since it's unpredictable what
# patterns may have referenced the group
# FIXME: is this still required?
self._inventory.clear_pattern_cache()
def _add_group(self, host, group_name):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
new_group = self._inventory.get_group(group_name)
if not new_group:
# create the new group and add it to inventory
new_group = Group(group_name)
self._inventory.add_group(new_group)
# and add the group to the proper hierarchy
allgroup = self._inventory.get_group('all')
allgroup.add_child_group(new_group)
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
actual_host = self._inventory.get_host(host.name)
# and add the host to the group
new_group.add_host(actual_host)
def _load_included_file(self, included_file):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
data = self._loader.load_from_file(included_file._filename)
if not isinstance(data, list):
raise AnsibleParsingError("included task files must contain a list of tasks", obj=included_file._task._ds)
is_handler = isinstance(included_file._task, Handler)
block_list = load_list_of_blocks(
data,
parent_block=included_file._task._block,
task_include=included_file._task,
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader
)
task_list = compile_block_list(block_list)
# set the vars for this task from those specified as params to the include
for t in task_list:
t.vars = included_file._args.copy()
return task_list
def cleanup(self, iterator, connection_info):
'''
Iterates through failed hosts and runs any outstanding rescue/always blocks
and handlers which may still need to be run after a failure.
'''
debug("in cleanup")
result = True
debug("getting failed hosts")
failed_hosts = self.get_failed_hosts(iterator._play)
if len(failed_hosts) == 0:
debug("there are no failed hosts")
return result
debug("marking hosts failed in the iterator")
# mark the host as failed in the iterator so it will take
# any required rescue paths which may be outstanding
for host in failed_hosts:
iterator.mark_host_failed(host)
debug("clearing the failed hosts list")
        # clear the failed hosts dictionary now, so that any new failures during cleanup can be detected below
for entry in self._tqm._failed_hosts.keys():
del self._tqm._failed_hosts[entry]
work_to_do = True
while work_to_do:
work_to_do = False
for host in failed_hosts:
host_name = host.get_name()
if host_name in self._tqm._failed_hosts:
iterator.mark_host_failed(host)
del self._tqm._failed_hosts[host_name]
if host_name in self._blocked_hosts:
work_to_do = True
continue
elif iterator.get_next_task_for_host(host, peek=True) and host_name not in self._tqm._unreachable_hosts:
work_to_do = True
# pop the task, mark the host blocked, and queue it
self._blocked_hosts[host_name] = True
task = iterator.get_next_task_for_host(host)
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
self._callback.playbook_on_cleanup_task_start(task.get_name())
self._queue_task(host, task, task_vars, connection_info)
self._process_pending_results(iterator)
time.sleep(0.01)
# no more work, wait until the queue is drained
self._wait_on_pending_results(iterator)
return result
def run_handlers(self, iterator, connection_info):
'''
Runs handlers on those hosts which have been notified.
'''
result = True
        # FIXME: getting the handlers from the iterator's play should be
# a method on the iterator, which may also filter the list
# of handlers based on the notified list
handlers = compile_block_list(iterator._play.handlers)
debug("handlers are: %s" % handlers)
for handler in handlers:
handler_name = handler.get_name()
if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
if not len(self.get_hosts_remaining(iterator._play)):
self._callback.playbook_on_no_hosts_remaining()
result = False
break
self._callback.playbook_on_handler_task_start(handler_name)
for host in self._notified_handlers[handler_name]:
if not handler.has_triggered(host):
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
self._queue_task(host, handler, task_vars, connection_info)
handler.flag_for_host(host)
self._process_pending_results(iterator)
self._wait_on_pending_results(iterator)
# wipe the notification list
self._notified_handlers[handler_name] = []
debug("done running handlers, result is: %s" % result)
return result
|
sandeepgupta2k4/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
|
26
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.estimator.inputs.queues.feeding_functions import _ArrayFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.estimator.inputs.queues.feeding_functions import _GeneratorFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _OrderedDictNumpyFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _PandasFeedFn
# pylint: enable=unused-import
|
evanson/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_messages/layer.py
|
69
|
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import TextMessageProtocolEntity
class YowMessagesProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"message": (self.recvMessageStanza, self.sendMessageEntity)
}
super(YowMessagesProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Messages Layer"
def sendMessageEntity(self, entity):
if entity.getType() == "text":
self.entityToLower(entity)
    ### received node handlers
def recvMessageStanza(self, node):
if node.getAttributeValue("type") == "text":
entity = TextMessageProtocolEntity.fromProtocolTreeNode(node)
self.toUpper(entity)
|
elssar/calibre
|
refs/heads/master
|
src/calibre/srv/pool.py
|
12
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from Queue import Queue, Full
from threading import Thread
from calibre.utils.monotonic import monotonic
class Worker(Thread):
daemon = True
def __init__(self, log, notify_server, num, request_queue, result_queue):
self.request_queue, self.result_queue = request_queue, result_queue
self.notify_server = notify_server
self.log = log
self.working = False
Thread.__init__(self, name='ServerWorker%d' % num)
def run(self):
while True:
x = self.request_queue.get()
if x is None:
break
job_id, func = x
self.working = True
try:
result = func()
except Exception:
self.handle_error(job_id) # must be a separate function to avoid reference cycles with sys.exc_info()
else:
self.result_queue.put((job_id, True, result))
finally:
self.working = False
try:
self.notify_server()
except Exception:
self.log.exception('ServerWorker failed to notify server on job completion')
def handle_error(self, job_id):
self.result_queue.put((job_id, False, sys.exc_info()))
class ThreadPool(object):
def __init__(self, log, notify_server, count=10, queue_size=1000):
self.request_queue, self.result_queue = Queue(queue_size), Queue(queue_size)
self.workers = [Worker(log, notify_server, i, self.request_queue, self.result_queue) for i in xrange(count)]
def start(self):
for w in self.workers:
w.start()
def put_nowait(self, job_id, func):
self.request_queue.put_nowait((job_id, func))
def get_nowait(self):
return self.result_queue.get_nowait()
def stop(self, wait_till):
for w in self.workers:
try:
self.request_queue.put_nowait(None)
except Full:
break
for w in self.workers:
now = monotonic()
if now >= wait_till:
break
w.join(wait_till - now)
self.workers = [w for w in self.workers if w.is_alive()]
@property
def busy(self):
return sum(int(w.working) for w in self.workers)
@property
def idle(self):
return sum(int(not w.working) for w in self.workers)
class PluginPool(object):
def __init__(self, loop, plugins):
self.workers = []
self.loop = loop
for plugin in plugins:
w = Thread(target=self.run_plugin, args=(plugin,), name=self.plugin_name(plugin))
w.daemon = True
w.plugin = plugin
self.workers.append(w)
def plugin_name(self, plugin):
return plugin.__class__.__name__
def run_plugin(self, plugin):
try:
plugin.start(self.loop)
except Exception:
self.loop.log.exception('Failed to start plugin: %s', self.plugin_name(plugin))
def start(self):
for w in self.workers:
w.start()
def stop(self, wait_till):
for w in self.workers:
if w.is_alive():
try:
w.plugin.stop()
except Exception:
self.loop.log.exception('Failed to stop plugin: %s', self.plugin_name(w.plugin))
for w in self.workers:
left = wait_till - monotonic()
if left > 0:
w.join(left)
else:
break
self.workers = [w for w in self.workers if w.is_alive()]
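# --- Usage sketch (added for illustration; not part of the upstream module) ---
# Shows the intended flow for the ThreadPool defined above: jobs go in as
# (job_id, callable) pairs and come back on the result queue as
# (job_id, ok, result_or_exc_info). The logger and notify callback are
# hypothetical stand-ins supplied by the caller.
def _example_pool_usage(log, notify_server):
    pool = ThreadPool(log, notify_server, count=2)
    pool.start()
    pool.put_nowait(1, lambda: 2 + 2)
    # Block until the worker has pushed the result (real callers are woken up
    # via notify_server and then drain the queue with get_nowait()).
    job_id, ok, result = pool.result_queue.get(timeout=5)
    pool.stop(monotonic() + 5)  # give workers up to five seconds to wind down
    return job_id, ok, result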
|
LuckJC/cubie-linux
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
bobeirasa/virtualenvs
|
refs/heads/master
|
pygeckozabbix/lib/python2.7/site-packages/setuptools/command/install_scripts.py
|
285
|
from distutils.command.install_scripts import install_scripts \
as _install_scripts
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(_install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
_install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
from setuptools.command.easy_install import get_script_args
from setuptools.command.easy_install import sys_executable
self.run_command("egg_info")
if self.distribution.scripts:
_install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0x1FF-mask) # 0777
|
randynobx/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/zfs/zfs_facts.py
|
69
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs_facts
short_description: Gather facts about ZFS datasets.
description:
- Gather facts from ZFS dataset properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS dataset name.
aliases: [ "ds", "dataset" ]
required: yes
recurse:
description:
- Specifies if properties for any children should be recursively
displayed.
type: bool
default: False
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zfs(1M) man page.
aliases: [ "props" ]
default: all
required: false
type:
description:
        - Specifies which dataset types to display. Multiple values have to be
provided in comma-separated form.
aliases: [ "props" ]
default: all
choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
required: false
depth:
description:
        - Specifies recursion depth.
default: None
required: false
'''
EXAMPLES = '''
- name: Gather facts about ZFS dataset rpool/export/home
zfs_facts:
dataset: rpool/export/home
- name: Report space usage on ZFS filesystems under data/home
zfs_facts:
name: data/home
recurse: yes
type: filesystem
- debug:
msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
with_items: '{{ ansible_zfs_datasets }}'
'''
RETURN = '''
name:
description: ZFS dataset name
returned: always
type: string
sample: rpool/var/spool
parsable:
    description: whether parsable output should be provided in machine-friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
recurse:
    description: whether we should recurse over the ZFS dataset
returned: if 'recurse' is set to True
type: boolean
sample: True
zfs_datasets:
description: ZFS dataset facts
returned: always
type: string
sample:
{
"aclinherit": "restricted",
"aclmode": "discard",
"atime": "on",
"available": "43.8G",
"canmount": "on",
"casesensitivity": "sensitive",
"checksum": "on",
"compression": "off",
"compressratio": "1.00x",
"copies": "1",
"creation": "Thu Jun 16 11:37 2016",
"dedup": "off",
"devices": "on",
"exec": "on",
"filesystem_count": "none",
"filesystem_limit": "none",
"logbias": "latency",
"logicalreferenced": "18.5K",
"logicalused": "3.45G",
"mlslabel": "none",
"mounted": "yes",
"mountpoint": "/rpool",
"name": "rpool",
"nbmand": "off",
"normalization": "none",
"org.openindiana.caiman:install": "ready",
"primarycache": "all",
"quota": "none",
"readonly": "off",
"recordsize": "128K",
"redundant_metadata": "all",
"refcompressratio": "1.00x",
"referenced": "29.5K",
"refquota": "none",
"refreservation": "none",
"reservation": "none",
"secondarycache": "all",
"setuid": "on",
"sharenfs": "off",
"sharesmb": "off",
"snapdir": "hidden",
"snapshot_count": "none",
"snapshot_limit": "none",
"sync": "standard",
"type": "filesystem",
"used": "4.41G",
"usedbychildren": "4.41G",
"usedbydataset": "29.5K",
"usedbyrefreservation": "0",
"usedbysnapshots": "0",
"utf8only": "off",
"version": "5",
"vscan": "off",
"written": "29.5K",
"xattr": "on",
"zoned": "off"
}
'''
import os
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
class ZFSFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.recurse = module.params['recurse']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self.type = module.params['type']
self.depth = module.params['depth']
self._datasets = defaultdict(dict)
self.facts = []
def dataset_exists(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zfs')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
if self.recurse:
cmd.append('-r')
if int(self.depth) != 0:
cmd.append('-d')
cmd.append('%s' % self.depth)
if self.type:
cmd.append('-t')
cmd.append(self.type)
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
dataset, property, value = line.split('\t')
self._datasets[dataset].update({property: value})
for k, v in iteritems(self._datasets):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_datasets': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
stderr=err,
rc=rc)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
recurse=dict(required=False, default=False, type='bool'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
depth=dict(required=False, default=0, type='int')
),
supports_check_mode=True
)
zfs_facts = ZFSFacts(module)
result = {}
result['changed'] = False
result['name'] = zfs_facts.name
if zfs_facts.parsable:
result['parsable'] = zfs_facts.parsable
if zfs_facts.recurse:
result['recurse'] = zfs_facts.recurse
if zfs_facts.dataset_exists():
result['ansible_facts'] = zfs_facts.get_facts()
else:
module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
akhilari7/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py
|
199
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
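# --- Usage sketch (added for illustration; not part of the upstream module) ---
# A minimal example of constructing the pool defined above. The host name,
# credentials and protected URL are hypothetical placeholders; the user string
# follows the DOMAIN\username convention split on the first backslash.
def _example_ntlm_request():
    pool = NTLMConnectionPool(
        'EXAMPLEDOMAIN\\someuser',      # user
        'secret',                       # pw
        '/protected/index.html',        # authurl (any NTLM-protected URL on the host)
        host='intranet.example.com',
        port=443,
    )
    # The NTLM handshake happens in _new_conn(); the connection is then reused.
    return pool.urlopen('GET', '/protected/index.html')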
|
anthgur/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/set_window_rect.py
|
11
|
# META: timeout=long
import pytest
from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
alert_doc = inline("<script>window.alert()</script>")
def set_window_rect(session, rect):
return session.transport.send("POST", "session/%s/window/rect" % session.session_id, rect)
# 10.7.2 Set Window Rect
def test_current_top_level_browsing_context_no_longer_open(session, create_window):
"""
1. If the current top-level browsing context is no longer open,
return error with error code no such window.
"""
session.window_handle = create_window()
session.close()
response = set_window_rect(session, {})
assert_error(response, "no such window")
def test_handle_prompt_dismiss():
"""TODO"""
def test_handle_prompt_accept(new_session, add_browser_capabilites):
"""
2. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- accept state
Accept the current user prompt.
"""
_, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "accept"})}})
original = session.window.rect
# step 2
create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")
result = set_window_rect(session, {"x": original["x"],
"y": original["y"]})
assert result.status == 200
assert_dialog_handled(session, "dismiss #1")
create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")
result = set_window_rect(session, {"x": original["x"],
"y": original["y"]})
assert result.status == 200
assert_dialog_handled(session, "dismiss #2")
create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")
result = set_window_rect(session, {"x": original["x"],
"y": original["y"]})
assert_success(result)
assert_dialog_handled(session, "dismiss #3")
def test_handle_prompt_dismiss_and_notify():
"""TODO"""
def test_handle_prompt_accept_and_notify():
"""TODO"""
def test_handle_prompt_ignore():
"""TODO"""
def test_handle_prompt_missing_value(session, create_dialog):
"""
2. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- missing value default state
1. Dismiss the current user prompt.
2. Return error with error code unexpected alert open.
"""
original = session.window.rect
# step 2
create_dialog("alert", text="dismiss #1", result_var="dismiss1")
result = set_window_rect(session, {"x": original["x"],
"y": original["y"]})
assert_error(result, "unexpected alert open")
assert_dialog_handled(session, "dismiss #1")
create_dialog("confirm", text="dismiss #2", result_var="dismiss2")
result = set_window_rect(session, {"x": original["x"],
"y": original["y"]})
assert_error(result, "unexpected alert open")
assert_dialog_handled(session, "dismiss #2")
create_dialog("prompt", text="dismiss #3", result_var="dismiss3")
result = set_window_rect(session, {"x": original["x"],
"y": original["y"]})
assert_error(result, "unexpected alert open")
assert_dialog_handled(session, "dismiss #3")
@pytest.mark.parametrize("rect", [
{"width": "a"},
{"height": "b"},
{"width": "a", "height": "b"},
{"x": "a"},
{"y": "b"},
{"x": "a", "y": "b"},
{"width": "a", "height": "b", "x": "a", "y": "b"},
{"width": True},
{"height": False},
{"width": True, "height": False},
{"x": True},
{"y": False},
{"x": True, "y": False},
{"width": True, "height": False, "x": True, "y": False},
{"width": []},
{"height": []},
{"width": [], "height": []},
{"x": []},
{"y": []},
{"x": [], "y": []},
{"width": [], "height": [], "x": [], "y": []},
{"height": {}},
{"width": {}},
{"height": {}, "width": {}},
{"x": {}},
{"y": {}},
{"x": {}, "y": {}},
{"width": {}, "height": {}, "x": {}, "y": {}},
])
def test_invalid_types(session, rect):
"""
8. If width or height is neither null nor a Number from 0 to 2^31 -
1, return error with error code invalid argument.
9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
return error with error code invalid argument.
"""
response = set_window_rect(session, rect)
assert_error(response, "invalid argument")
@pytest.mark.parametrize("rect", [
{"width": -1},
{"height": -2},
{"width": -1, "height": -2},
])
def test_out_of_bounds(session, rect):
"""
8. If width or height is neither null nor a Number from 0 to 2^31 -
1, return error with error code invalid argument.
9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
return error with error code invalid argument.
"""
response = set_window_rect(session, rect)
assert_error(response, "invalid argument")
def test_width_height_floats(session):
"""
8. If width or height is neither null nor a Number from 0 to 2^31 -
1, return error with error code invalid argument.
"""
response = set_window_rect(session, {"width": 500.5, "height": 420})
value = assert_success(response)
assert value["width"] == 500
assert value["height"] == 420
response = set_window_rect(session, {"width": 500, "height": 450.5})
value = assert_success(response)
assert value["width"] == 500
assert value["height"] == 450
def test_x_y_floats(session):
"""
9. If x or y is neither null nor a Number from -(2^31) to 2^31 - 1,
return error with error code invalid argument.
"""
response = set_window_rect(session, {"x": 0.5, "y": 420})
value = assert_success(response)
assert value["x"] == 0
assert value["y"] == 420
response = set_window_rect(session, {"x": 100, "y": 450.5})
value = assert_success(response)
assert value["x"] == 100
assert value["y"] == 450
@pytest.mark.parametrize("rect", [
{},
{"width": None},
{"height": None},
{"width": None, "height": None},
{"x": None},
{"y": None},
{"x": None, "y": None},
{"width": None, "x": None},
{"width": None, "y": None},
{"height": None, "x": None},
{"height": None, "Y": None},
{"width": None, "height": None, "x": None, "y": None},
{"width": 200},
{"height": 200},
{"x": 200},
{"y": 200},
{"width": 200, "x": 200},
{"height": 200, "x": 200},
{"width": 200, "y": 200},
{"height": 200, "y": 200},
])
def test_no_change(session, rect):
"""
13. If width and height are not null:
[...]
14. If x and y are not null:
[...]
15. Return success with the JSON serialization of the current
top-level browsing context's window rect.
"""
original = session.window.rect
response = set_window_rect(session, rect)
assert_success(response, original)
def test_fully_exit_fullscreen(session):
"""
10. Fully exit fullscreen.
[...]
To fully exit fullscreen a document document, run these steps:
1. If document's fullscreen element is null, terminate these steps.
2. Unfullscreen elements whose fullscreen flag is set, within
document's top layer, except for document's fullscreen element.
3. Exit fullscreen document.
"""
session.window.fullscreen()
assert session.execute_script("return window.fullScreen") is True
response = set_window_rect(session, {"width": 400, "height": 400})
value = assert_success(response)
assert value["width"] == 400
assert value["height"] == 400
assert session.execute_script("return window.fullScreen") is False
def test_restore_from_minimized(session):
"""
12. If the visibility state of the top-level browsing context's
active document is hidden, restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
session.window.minimize()
assert session.execute_script("return document.hidden") is True
response = set_window_rect(session, {"width": 450, "height": 450})
value = assert_success(response)
assert value["width"] == 450
assert value["height"] == 450
assert session.execute_script("return document.hidden") is False
def test_restore_from_maximized(session):
"""
12. If the visibility state of the top-level browsing context's
active document is hidden, restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
original_size = session.window.size
session.window.maximize()
assert session.window.size != original_size
response = set_window_rect(session, {"width": 400, "height": 400})
value = assert_success(response)
assert value["width"] == 400
assert value["height"] == 400
def test_height_width(session):
original = session.window.rect
max = session.execute_script("""
return {
width: window.screen.availWidth,
height: window.screen.availHeight,
}""")
# step 12
response = set_window_rect(session, {"width": max["width"] - 100,
"height": max["height"] - 100})
# step 14
assert_success(response, {"x": original["x"],
"y": original["y"],
"width": max["width"] - 100,
"height": max["height"] - 100})
def test_height_width_larger_than_max(session):
max = session.execute_script("""
return {
width: window.screen.availWidth,
height: window.screen.availHeight,
}""")
# step 12
response = set_window_rect(session, {"width": max["width"] + 100,
"height": max["height"] + 100})
# step 14
rect = assert_success(response)
assert rect["width"] >= max["width"]
assert rect["height"] >= max["height"]
def test_height_width_as_current(session):
original = session.window.rect
# step 12
response = set_window_rect(session, {"width": original["width"],
"height": original["height"]})
# step 14
assert_success(response, {"x": original["x"],
"y": original["y"],
"width": original["width"],
"height": original["height"]})
def test_x_y(session):
original = session.window.rect
# step 13
response = set_window_rect(session, {"x": original["x"] + 10,
"y": original["y"] + 10})
# step 14
assert_success(response, {"x": original["x"] + 10,
"y": original["y"] + 10,
"width": original["width"],
"height": original["height"]})
def test_negative_x_y(session):
original = session.window.rect
# step 13
response = set_window_rect(session, {"x": - 8, "y": - 8})
# step 14
os = session.capabilities["platformName"]
# certain WMs prohibit windows from being moved off-screen
if os == "linux":
rect = assert_success(response)
assert rect["x"] <= 0
assert rect["y"] <= 0
assert rect["width"] == original["width"]
assert rect["height"] == original["height"]
# On macOS, windows can only be moved off the screen on the
# horizontal axis. The system menu bar also blocks windows from
# being moved to (0,0).
elif os == "darwin":
assert_success(response, {"x": -8,
"y": 23,
"width": original["width"],
"height": original["height"]})
# It turns out that Windows is the only platform on which the
# window can be reliably positioned off-screen.
elif os == "windows_nt":
assert_success(response, {"x": -8,
"y": -8,
"width": original["width"],
"height": original["height"]})
def test_move_to_same_position(session):
original_position = session.window.position
position = session.window.position = original_position
assert position == original_position
def test_move_to_same_x(session):
original_x = session.window.position[0]
position = session.window.position = (original_x, 345)
assert position == (original_x, 345)
def test_move_to_same_y(session):
original_y = session.window.position[1]
position = session.window.position = (456, original_y)
assert position == (456, original_y)
def test_resize_to_same_size(session):
original_size = session.window.size
size = session.window.size = original_size
assert size == original_size
def test_resize_to_same_width(session):
original_width = session.window.size[0]
size = session.window.size = (original_width, 345)
assert size == (original_width, 345)
def test_resize_to_same_height(session):
original_height = session.window.size[1]
size = session.window.size = (456, original_height)
assert size == (456, original_height)
def test_payload(session):
# step 14
response = set_window_rect(session, {"x": 400, "y": 400})
assert response.status == 200
assert isinstance(response.body["value"], dict)
value = response.body["value"]
assert "width" in value
assert "height" in value
assert "x" in value
assert "y" in value
assert isinstance(value["width"], int)
assert isinstance(value["height"], int)
assert isinstance(value["x"], int)
assert isinstance(value["y"], int)
|
tensorflow/transform
|
refs/heads/master
|
examples/census_example_v2_test.py
|
1
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for census_example_v2."""
import os
import shutil
import tensorflow.compat.v2 as tf
import census_example_common
import census_example_v2
from tensorflow_transform import test_case as tft_test_case
import local_model_server
from google.protobuf import text_format
from tensorflow.python import tf2 # pylint: disable=g-direct-tensorflow-import
# Use first row of test data set, which has high probability on label 1 (which
# corresponds to '<=50K').
_PREDICT_TF_EXAMPLE_TEXT_PB = """
features {
feature {
key: "age"
value { float_list: { value: 25 } }
}
feature {
key: "workclass"
value { bytes_list: { value: "Private" } }
}
feature {
key: "education"
value { bytes_list: { value: "11th" } }
}
feature {
key: "education-num"
value { float_list: { value: 7 } }
}
feature {
key: "marital-status"
value { bytes_list: { value: "Never-married" } }
}
feature {
key: "occupation"
value { bytes_list: { value: "Machine-op-inspct" } }
}
feature {
key: "relationship"
value { bytes_list: { value: "Own-child" } }
}
feature {
key: "race"
value { bytes_list: { value: "Black" } }
}
feature {
key: "sex"
value { bytes_list: { value: "Male" } }
}
feature {
key: "capital-gain"
value { float_list: { value: 0 } }
}
feature {
key: "capital-loss"
value { float_list: { value: 0 } }
}
feature {
key: "hours-per-week"
value { float_list: { value: 40 } }
}
feature {
key: "native-country"
value { bytes_list: { value: "United-States" } }
}
}
"""
_MODEL_NAME = 'my_model'
_CLASSIFICATION_REQUEST_TEXT_PB = """model_spec { name: "%s" }
input {
example_list {
examples {
%s
}
}
}""" % (_MODEL_NAME, _PREDICT_TF_EXAMPLE_TEXT_PB)
class CensusExampleV2Test(tft_test_case.TransformTestCase):
def setUp(self):
super().setUp()
if (not tf2.enabled() or
tft_test_case.is_external_environment() and tf.version.VERSION < '2.3'):
raise tft_test_case.SkipTest('This test requires TF version >= 2.3')
def _get_data_dir(self):
return os.path.join(os.path.dirname(__file__), 'testdata/census')
def _get_working_dir(self):
return os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
def _should_saved_model_load_work(self):
return tf.__version__ >= '2.2'
@tft_test_case.named_parameters([
dict(
testcase_name='_read_raw_data_for_training',
read_raw_data_for_training=True),
dict(
testcase_name='_read_transformed_data_for_training',
read_raw_data_for_training=False),
])
def testCensusExampleAccuracy(self, read_raw_data_for_training):
if not self._should_saved_model_load_work():
self.skipTest('The generated SavedModel cannot be read with TF<2.2')
raw_data_dir = self._get_data_dir()
working_dir = self._get_working_dir()
train_data_file = os.path.join(raw_data_dir, 'adult.data')
test_data_file = os.path.join(raw_data_dir, 'adult.test')
census_example_common.transform_data(train_data_file, test_data_file,
working_dir)
if read_raw_data_for_training:
raw_train_and_eval_patterns = (train_data_file, test_data_file)
transformed_train_and_eval_patterns = None
else:
train_pattern = os.path.join(
working_dir,
census_example_common.TRANSFORMED_TRAIN_DATA_FILEBASE + '*')
eval_pattern = os.path.join(
working_dir,
census_example_common.TRANSFORMED_TEST_DATA_FILEBASE + '*')
raw_train_and_eval_patterns = None
transformed_train_and_eval_patterns = (train_pattern, eval_pattern)
output_dir = os.path.join(working_dir,
census_example_common.EXPORTED_MODEL_DIR)
results = census_example_v2.train_and_evaluate(
raw_train_and_eval_patterns,
transformed_train_and_eval_patterns,
output_dir,
working_dir,
num_train_instances=1000,
num_test_instances=1000)
self.assertGreaterEqual(results[1], 0.7)
# Removing the tf.Transform output directory in order to show that the
# exported model is hermetic.
shutil.rmtree(os.path.join(working_dir, 'transform_fn'))
model_path = os.path.join(working_dir,
census_example_common.EXPORTED_MODEL_DIR)
actual_model_path = os.path.join(model_path, '1')
tf.keras.backend.clear_session()
model = tf.keras.models.load_model(actual_model_path)
model.summary()
example = text_format.Parse(_PREDICT_TF_EXAMPLE_TEXT_PB, tf.train.Example())
prediction = model.signatures['serving_default'](
tf.constant([example.SerializeToString()], tf.string))
self.assertAllEqual([['0', '1']], prediction['classes'])
self.assertAllClose([[0, 1]], prediction['scores'], atol=0.001)
# This is required in order to support the classify API for this Keras
# model.
updater = tf.compat.v1.saved_model.signature_def_utils.MethodNameUpdater(
actual_model_path)
updater.replace_method_name(
signature_key='serving_default',
method_name='tensorflow/serving/classify',
tags=['serve'])
updater.save()
if local_model_server.local_model_server_supported():
with local_model_server.start_server(_MODEL_NAME, model_path) as address:
ascii_classification_request = _CLASSIFICATION_REQUEST_TEXT_PB
results = local_model_server.make_classification_request(
address, ascii_classification_request)
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].classes), 2)
self.assertEqual(results[0].classes[0].label, '0')
self.assertLess(results[0].classes[0].score, 0.01)
self.assertEqual(results[0].classes[1].label, '1')
self.assertGreater(results[0].classes[1].score, 0.99)
def test_main_runs(self):
census_example_v2.main(
self._get_data_dir(),
self._get_working_dir(),
read_raw_data_for_training=False,
num_train_instances=10,
num_test_instances=10)
def test_main_runs_raw_data(self):
census_example_v2.main(
self._get_data_dir(),
self._get_working_dir(),
read_raw_data_for_training=True,
num_train_instances=10,
num_test_instances=10)
if __name__ == '__main__':
tf.test.main()
|
Shnatsel/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/win/gyptest-link-update-manifest.py
|
226
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure binary is relinked when manifest settings are changed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
  Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
gyp_template = '''
{
'targets': [
{
'target_name': 'test_update_manifest',
'type': 'executable',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'EnableUAC': 'true',
'UACExecutionLevel': '%(uac_execution_level)d',
},
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': '%(additional_manifest_files)s',
},
},
},
],
}
'''
gypfile = 'update-manifest.gyp'
def WriteAndUpdate(uac_execution_level, additional_manifest_files, do_build):
with open(os.path.join(CHDIR, gypfile), 'wb') as f:
f.write(gyp_template % {
'uac_execution_level': uac_execution_level,
'additional_manifest_files': additional_manifest_files,
})
test.run_gyp(gypfile, chdir=CHDIR)
if do_build:
test.build(gypfile, chdir=CHDIR)
exe_file = test.built_file_path('test_update_manifest.exe', chdir=CHDIR)
return extract_manifest(exe_file, 1)
manifest = WriteAndUpdate(0, '', True)
test.fail_test('asInvoker' not in manifest)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' in manifest)
# Make sure that updating .gyp and regenerating doesn't cause a rebuild.
WriteAndUpdate(0, '', False)
test.up_to_date(gypfile, test.ALL, chdir=CHDIR)
# But make sure that changing a manifest property does cause a relink.
manifest = WriteAndUpdate(2, '', True)
test.fail_test('requireAdministrator' not in manifest)
# Adding a manifest causes a rebuild.
manifest = WriteAndUpdate(2, 'extra.manifest', True)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in manifest)
|
themiurgo/csvkit
|
refs/heads/csvsplit
|
csvkit/__init__.py
|
21
|
#!/usr/bin/env python
"""
This module contains csvkit's superpowered replacement for the builtin :mod:`csv` module. For Python 2 users, the greatest improvement over the standard library is full unicode support. Python 3's :mod:`csv` module supports unicode internally, so this module is provided primarily for compatibility purposes.
* Python 2: :mod:`csvkit.py2`.
* Python 3: :mod:`csvkit.py3`.
"""
import six
if six.PY2:
from csvkit import py2
CSVKitReader = py2.CSVKitReader
CSVKitWriter = py2.CSVKitWriter
CSVKitDictReader = py2.CSVKitDictReader
CSVKitDictWriter = py2.CSVKitDictWriter
reader = py2.reader
writer = py2.writer
DictReader = py2.CSVKitDictReader
DictWriter = py2.CSVKitDictWriter
else:
from csvkit import py3
CSVKitReader = py3.CSVKitReader
CSVKitWriter = py3.CSVKitWriter
CSVKitDictReader = py3.CSVKitDictReader
CSVKitDictWriter = py3.CSVKitDictWriter
reader = py3.reader
writer = py3.writer
DictReader = py3.CSVKitDictReader
DictWriter = py3.CSVKitDictWriter
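# --- Usage sketch (added for illustration; not part of the upstream package) ---
# Because the names exported above mirror the stdlib csv module, callers can
# use ``import csvkit as csv`` without changing the rest of their code. The
# file name below is a hypothetical placeholder.
def _example_round_trip(path='people.csv'):
    with open(path) as f:
        rows = list(reader(f))
    with open(path + '.copy', 'w') as f:
        out = writer(f)
        for row in rows:
            out.writerow(row)
    return rows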
|
edunham/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/old-tests/webdriver/command_contexts/window_size_test.py
|
141
|
import os
import sys
import random
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions
class WindowSizeTest(base_test.WebDriverBaseTest):
def test_set_and_get_window_size(self):
self.driver.get(self.webserver.where_is("command_contexts/res/first-page.html"))
initial_dimensions = self.driver.get_window_size()
new_dimensions = {
"height": initial_dimensions["height"] - 100,
"width": initial_dimensions["width"] - 100}
try:
self.driver.set_window_size(new_dimensions["height"], new_dimensions["width"])
actual_dimensions = self.driver.get_window_size()
self.assertDictEqual(new_dimensions, actual_dimensions)
except exceptions.UnsupportedOperationException:
pass
if __name__ == "__main__":
unittest.main()
|
RMKD/networkx
|
refs/heads/master
|
examples/drawing/labels_and_colors.py
|
44
|
#!/usr/bin/env python
"""
Draw a graph with matplotlib, color by degree.
You must have matplotlib for this to work.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
import matplotlib.pyplot as plt
import networkx as nx
G=nx.cubical_graph()
pos=nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G,pos,
nodelist=[0,1,2,3],
node_color='r',
node_size=500,
alpha=0.8)
nx.draw_networkx_nodes(G,pos,
nodelist=[4,5,6,7],
node_color='b',
node_size=500,
alpha=0.8)
# edges
nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)
nx.draw_networkx_edges(G,pos,
edgelist=[(0,1),(1,2),(2,3),(3,0)],
width=8,alpha=0.5,edge_color='r')
nx.draw_networkx_edges(G,pos,
edgelist=[(4,5),(5,6),(6,7),(7,4)],
width=8,alpha=0.5,edge_color='b')
# some math labels
labels={}
labels[0]=r'$a$'
labels[1]=r'$b$'
labels[2]=r'$c$'
labels[3]=r'$d$'
labels[4]=r'$\alpha$'
labels[5]=r'$\beta$'
labels[6]=r'$\gamma$'
labels[7]=r'$\delta$'
nx.draw_networkx_labels(G,pos,labels,font_size=16)
plt.axis('off')
plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
|
chrisdembia/googletest
|
refs/heads/master
|
test/gtest_xml_outfiles_test.py
|
2526
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
jtattermusch/grpc
|
refs/heads/master
|
src/python/grpcio_reflection/grpc_reflection/v1alpha/_async.py
|
8
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The AsyncIO version of the reflection servicer."""
from typing import AsyncIterable
import grpc
from grpc_reflection.v1alpha import reflection_pb2 as _reflection_pb2
from grpc_reflection.v1alpha._base import BaseReflectionServicer
class ReflectionServicer(BaseReflectionServicer):
"""Servicer handling RPCs for service statuses."""
async def ServerReflectionInfo(
self, request_iterator: AsyncIterable[
_reflection_pb2.ServerReflectionRequest], unused_context
) -> AsyncIterable[_reflection_pb2.ServerReflectionResponse]:
async for request in request_iterator:
if request.HasField('file_by_filename'):
yield self._file_by_filename(request.file_by_filename)
elif request.HasField('file_containing_symbol'):
yield self._file_containing_symbol(
request.file_containing_symbol)
elif request.HasField('file_containing_extension'):
yield self._file_containing_extension(
request.file_containing_extension.containing_type,
request.file_containing_extension.extension_number)
elif request.HasField('all_extension_numbers_of_type'):
yield self._all_extension_numbers_of_type(
request.all_extension_numbers_of_type)
elif request.HasField('list_services'):
yield self._list_services()
else:
yield _reflection_pb2.ServerReflectionResponse(
error_response=_reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1].
encode(),
))
__all__ = [
"ReflectionServicer",
]
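# Hedged usage sketch (not part of this module): in sufficiently recent
# grpcio-reflection releases, this asyncio servicer is what
# grpc_reflection.v1alpha.reflection.enable_server_reflection() registers on
# a grpc.aio server. The wiring looks roughly like the following; my_pb2 and
# 'MyService' are hypothetical names for the application's own service.
#
#   from grpc_reflection.v1alpha import reflection
#   server = grpc.aio.server()
#   service_names = (
#       my_pb2.DESCRIPTOR.services_by_name['MyService'].full_name,
#       reflection.SERVICE_NAME,
#   )
#   reflection.enable_server_reflection(service_names, server)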
|
davidharrigan/django
|
refs/heads/master
|
tests/template_tests/test_response.py
|
55
|
from __future__ import unicode_literals
import pickle
import time
from datetime import datetime
from django.conf import settings
from django.template import engines
from django.template.response import (
ContentNotRenderedError, SimpleTemplateResponse, TemplateResponse,
)
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from .utils import TEMPLATE_DIR
def test_processor(request):
return {'processors': 'yes'}
test_processor_name = 'template_tests.test_response.test_processor'
# A test middleware that installs a temporary URLConf
class CustomURLConfMiddleware(object):
def process_request(self, request):
request.urlconf = 'template_tests.alternate_urls'
class SimpleTemplateResponseTest(SimpleTestCase):
def _response(self, template='foo', *args, **kwargs):
template = engines['django'].from_string(template)
return SimpleTemplateResponse(template, *args, **kwargs)
def test_template_resolving(self):
response = SimpleTemplateResponse('first/test.html')
response.render()
self.assertEqual(response.content, b'First template\n')
templates = ['foo.html', 'second/test.html', 'first/test.html']
response = SimpleTemplateResponse(templates)
response.render()
self.assertEqual(response.content, b'Second template\n')
response = self._response()
response.render()
self.assertEqual(response.content, b'foo')
def test_explicit_baking(self):
# explicit baking
response = self._response()
self.assertFalse(response.is_rendered)
response.render()
self.assertTrue(response.is_rendered)
def test_render(self):
# response is not re-rendered without the render call
response = self._response().render()
self.assertEqual(response.content, b'foo')
# rebaking doesn't change the rendered content
template = engines['django'].from_string('bar{{ baz }}')
response.template_name = template
response.render()
self.assertEqual(response.content, b'foo')
# but rendered content can be overridden by manually
# setting content
response.content = 'bar'
self.assertEqual(response.content, b'bar')
def test_iteration_unrendered(self):
# unrendered response raises an exception on iteration
response = self._response()
self.assertFalse(response.is_rendered)
def iteration():
for x in response:
pass
self.assertRaises(ContentNotRenderedError, iteration)
self.assertFalse(response.is_rendered)
def test_iteration_rendered(self):
# iteration works for rendered responses
response = self._response().render()
res = [x for x in response]
self.assertEqual(res, [b'foo'])
def test_content_access_unrendered(self):
# unrendered response raises an exception when content is accessed
response = self._response()
self.assertFalse(response.is_rendered)
self.assertRaises(ContentNotRenderedError, lambda: response.content)
self.assertFalse(response.is_rendered)
def test_content_access_rendered(self):
# rendered response content can be accessed
response = self._response().render()
self.assertEqual(response.content, b'foo')
def test_set_content(self):
# content can be overridden
response = self._response()
self.assertFalse(response.is_rendered)
response.content = 'spam'
self.assertTrue(response.is_rendered)
self.assertEqual(response.content, b'spam')
response.content = 'baz'
self.assertEqual(response.content, b'baz')
def test_dict_context(self):
response = self._response('{{ foo }}{{ processors }}',
{'foo': 'bar'})
self.assertEqual(response.context_data, {'foo': 'bar'})
response.render()
self.assertEqual(response.content, b'bar')
def test_kwargs(self):
response = self._response(content_type='application/json', status=504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
def test_args(self):
response = SimpleTemplateResponse('', {}, 'application/json', 504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
@require_jinja2
def test_using(self):
response = SimpleTemplateResponse('template_tests/using.html').render()
self.assertEqual(response.content, b'DTL\n')
response = SimpleTemplateResponse('template_tests/using.html', using='django').render()
self.assertEqual(response.content, b'DTL\n')
response = SimpleTemplateResponse('template_tests/using.html', using='jinja2').render()
self.assertEqual(response.content, b'Jinja2\n')
def test_post_callbacks(self):
"Rendering a template response triggers the post-render callbacks"
post = []
def post1(obj):
post.append('post1')
def post2(obj):
post.append('post2')
response = SimpleTemplateResponse('first/test.html', {})
response.add_post_render_callback(post1)
response.add_post_render_callback(post2)
# When the content is rendered, all the callbacks are invoked, too.
response.render()
self.assertEqual(response.content, b'First template\n')
self.assertEqual(post, ['post1', 'post2'])
def test_pickling(self):
# Create a template response. The context is
# known to be unpickleable (e.g., a function).
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
# But if we render the response, we can pickle it.
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.content, response.content)
self.assertEqual(unpickled_response['content-type'], response['content-type'])
self.assertEqual(unpickled_response.status_code, response.status_code)
# ...and the unpickled response doesn't have the
# template-related attributes, so it can't be re-rendered
template_attrs = ('template_name', 'context_data', '_post_render_callbacks')
for attr in template_attrs:
self.assertFalse(hasattr(unpickled_response, attr))
# ...and requesting any of those attributes raises an exception
for attr in template_attrs:
with self.assertRaises(AttributeError):
getattr(unpickled_response, attr)
def test_repickling(self):
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
pickle.dumps(unpickled_response)
def test_pickling_cookie(self):
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
response.cookies['key'] = 'value'
response.render()
pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.cookies['key'].value, 'value')
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'OPTIONS': {
'context_processors': [test_processor_name],
},
}])
class TemplateResponseTest(SimpleTestCase):
def setUp(self):
self.factory = RequestFactory()
def _response(self, template='foo', *args, **kwargs):
self._request = self.factory.get('/')
template = engines['django'].from_string(template)
return TemplateResponse(self._request, template, *args, **kwargs)
def test_render(self):
response = self._response('{{ foo }}{{ processors }}').render()
self.assertEqual(response.content, b'yes')
def test_render_with_requestcontext(self):
response = self._response('{{ foo }}{{ processors }}',
{'foo': 'bar'}).render()
self.assertEqual(response.content, b'baryes')
def test_context_processor_priority(self):
# context processors should be overridden by passed-in context
response = self._response('{{ foo }}{{ processors }}',
{'processors': 'no'}).render()
self.assertEqual(response.content, b'no')
def test_kwargs(self):
response = self._response(content_type='application/json',
status=504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
def test_args(self):
response = TemplateResponse(self.factory.get('/'), '', {},
'application/json', 504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
@require_jinja2
def test_using(self):
request = self.factory.get('/')
response = TemplateResponse(request, 'template_tests/using.html').render()
self.assertEqual(response.content, b'DTL\n')
response = TemplateResponse(request, 'template_tests/using.html', using='django').render()
self.assertEqual(response.content, b'DTL\n')
response = TemplateResponse(request, 'template_tests/using.html', using='jinja2').render()
self.assertEqual(response.content, b'Jinja2\n')
def test_pickling(self):
# Create a template response. The context is
# known to be unpickleable (e.g., a function).
response = TemplateResponse(self.factory.get('/'),
'first/test.html', {
'value': 123,
'fn': datetime.now,
}
)
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
# But if we render the response, we can pickle it.
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.content, response.content)
self.assertEqual(unpickled_response['content-type'], response['content-type'])
self.assertEqual(unpickled_response.status_code, response.status_code)
# ...and the unpickled response doesn't have the
# template-related attributes, so it can't be re-rendered
template_attrs = (
'template_name',
'context_data',
'_post_render_callbacks',
'_request',
)
for attr in template_attrs:
self.assertFalse(hasattr(unpickled_response, attr))
# ...and requesting any of those attributes raises an exception
for attr in template_attrs:
with self.assertRaises(AttributeError):
getattr(unpickled_response, attr)
def test_repickling(self):
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
pickle.dumps(unpickled_response)
@override_settings(
MIDDLEWARE_CLASSES=settings.MIDDLEWARE_CLASSES + [
'template_tests.test_response.CustomURLConfMiddleware'
],
ROOT_URLCONF='template_tests.urls',
)
class CustomURLConfTest(SimpleTestCase):
def test_custom_urlconf(self):
response = self.client.get('/template_response_view/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is where you can find the snark: /snark/')
@override_settings(
CACHE_MIDDLEWARE_SECONDS=2.0,
MIDDLEWARE_CLASSES=settings.MIDDLEWARE_CLASSES + [
'django.middleware.cache.FetchFromCacheMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
],
ROOT_URLCONF='template_tests.alternate_urls',
)
class CacheMiddlewareTest(SimpleTestCase):
def test_middleware_caching(self):
response = self.client.get('/template_response_view/')
self.assertEqual(response.status_code, 200)
time.sleep(1.0)
response2 = self.client.get('/template_response_view/')
self.assertEqual(response2.status_code, 200)
self.assertEqual(response.content, response2.content)
time.sleep(2.0)
# Let the cache expire and test again
response2 = self.client.get('/template_response_view/')
self.assertEqual(response2.status_code, 200)
self.assertNotEqual(response.content, response2.content)
|
40223112/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/browser/indexed_db.py
|
632
|
class EventListener:
def __init__(self, events=[]):
self._events=events
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
raise NotImplementedError("Your browser doesn't support indexedDB")
return
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
#if onerror is None:
def onerror(e):
print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, object, onsuccess=None, onerror=None):
_result=func(object)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, obj, key=None, onsuccess=None, onerror=None):
_r = self._objectStore.put(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
def add(self, obj, key, onsuccess=None, onerror=None):
_r = self._objectStore.add(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
#self._helper(self._objectStore.add, object, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
def onsuccess(event):
cursor=event.target.result
if cursor is not None:
self._data.append(cursor.value)
getattr(cursor,"continue")() # cursor.continue() is illegal
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
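# Hedged usage sketch (Brython in a browser only; not part of this module):
# everything is callback driven because the underlying IndexedDB API is
# asynchronous. The database and store names below are hypothetical.
#
#   db = IndexedDB()
#
#   def when_open(event):
#       store = db.transaction(['notes'], 'readwrite').objectStore('notes')
#       store.put({'text': 'hello'}, 1)
#       store.get(1, onsuccess=lambda e: print(e.target.result))
#
#   db.open('demo_db', when_open, version=1.0)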
|
dlazz/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_gtm_monitor_external.py
|
21
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_monitor_external import ApiParameters
from library.modules.bigip_gtm_monitor_external import ModuleParameters
from library.modules.bigip_gtm_monitor_external import ModuleManager
from library.modules.bigip_gtm_monitor_external import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_monitor_external import ApiParameters
from ansible.modules.network.f5.bigip_gtm_monitor_external import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_monitor_external import ModuleManager
from ansible.modules.network.f5.bigip_gtm_monitor_external import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='parent',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
partition='Common'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.ip == '10.10.10.10'
assert p.type == 'external'
assert p.port == 80
assert p.destination == '10.10.10.10:80'
assert p.interval == 20
assert p.timeout == 30
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
try:
self.p1 = patch('library.modules.bigip_gtm_monitor_external.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_gtm_monitor_external.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
def tearDown(self):
self.p1.stop()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
parent='parent',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
partition='Common',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['parent'] == '/Common/parent'
|
papados/ordersys
|
refs/heads/master
|
Lib/site-packages/django/contrib/gis/db/models/proxy.py
|
220
|
"""
The GeometryProxy object allows for lazy geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
from django.contrib.gis import memoryview
from django.utils import six
class GeometryProxy(object):
def __init__(self, klass, field):
"""
Proxy initializes on the given Geometry class (not an instance) and
the GeometryField.
"""
self._field = field
self._klass = klass
def __get__(self, obj, type=None):
"""
This accessor retrieves the geometry, initializing it using the geometry
class specified during initialization and the HEXEWKB value of the field.
Currently, only GEOS or OGR geometries are supported.
"""
if obj is None:
# Accessed on a class, not an instance
return self
# Getting the value of the field.
geom_value = obj.__dict__[self._field.attname]
if isinstance(geom_value, self._klass):
geom = geom_value
elif (geom_value is None) or (geom_value==''):
geom = None
else:
# Otherwise, a Geometry object is built using the field's contents,
# and the model's corresponding attribute is set.
geom = self._klass(geom_value)
setattr(obj, self._field.attname, geom)
return geom
def __set__(self, obj, value):
"""
This accessor sets the proxied geometry with the geometry class
specified during initialization. Values of None, HEXEWKB, or WKT may
be used to set the geometry as well.
"""
# The OGC Geometry type of the field.
gtype = self._field.geom_type
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
# Assigning the SRID to the geometry.
if value.srid is None: value.srid = self._field.srid
elif value is None or isinstance(value, six.string_types + (memoryview,)):
# Set with None, WKT, HEX, or WKB
pass
else:
raise TypeError('cannot set %s GeometryProxy with value of type: %s' % (obj.__class__.__name__, type(value)))
# Setting the objects dictionary with the value, and returning.
obj.__dict__[self._field.attname] = value
return value
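# Hedged illustration (not part of Django): GeometryProxy is a descriptor, so
# the geometry field machinery attaches an instance of it to the model class
# under the field's attribute name, conceptually like
#
#   Place.geom = GeometryProxy(GEOSGeometry, geom_field)   # hypothetical names
#
# after which `place.geom` lazily builds a GEOSGeometry from the stored
# HEXEWKB value, and `place.geom = 'POINT(0 0)'` accepts an existing geometry,
# WKT/HEX strings, or None, as implemented in __set__ above.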
|
gmassei/wfrog
|
refs/heads/master
|
wfrender/renderer/wunderground.py
|
4
|
## Copyright 2010 Jordi Puigsegur <jordi.puigsegur@gmail.com>
## derived from PyWeather by Patrick C. McGinty
##
## This file is part of wfrog
##
## wfrog is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import logging
import sys
import time
import wfcommon.database
from wfcommon.formula.base import LastFormula
from wfcommon.formula.base import SumFormula
try:
from wfrender.datasource.accumulator import AccumulatorDatasource
except ImportError, e:
from datasource.accumulator import AccumulatorDatasource
from wfcommon.units import HPaToInHg
from wfcommon.units import CToF
from wfcommon.units import MmToIn
from wfcommon.units import MpsToMph
class WeatherUndergroundPublisher(object):
"""
    Renderer and publisher for Weather Underground. It is a wrapper
    around PyWeather, so that package must be installed on your
    system, version 0.9.1 or later. (sudo easy_install weather)
[ Properties ]
id [string]:
Weather Underground station ID.
password [string]:
Weather Underground password.
period [numeric]:
The update period in seconds.
storage:
The storage service.
real_time [boolean] (optional):
If true then uses real time server. period must be < 30 secs.
Default value is false.
"""
id = None
password = None
publisher = None
real_time = False
storage = None
alive = False
logger = logging.getLogger("renderer.wunderground")
def render(self, data={}, context={}):
try:
assert self.id is not None, "'wunderground.id' must be set"
assert self.password is not None, "'wunderground.password' must be set"
assert self.period is not None, "'wunderground.period' must be set"
self.real_time = self.real_time and self.period < 30
rtfreq = None
if self.real_time: rtfreq = self.period
self.logger.info("Initializing Wunderground publisher (station %s)" % self.id)
import weather.services
self.publisher = weather.services.Wunderground(self.id, self.password, rtfreq)
self.alive = True
if not self.real_time:
accu = AccumulatorDatasource()
accu.slice = 'day'
accu.span = 1
accu.storage = self.storage
accu.formulas = {'current': {
'temp' : LastFormula('temp'),
'dew_point': LastFormula('dew_point'),
'hum' : LastFormula('hum'),
'pressure' : LastFormula('pressure'),
'wind' : LastFormula('wind'),
'wind_deg' : LastFormula('wind_dir'),
'gust' : LastFormula('wind_gust'),
'gust_deg' : LastFormula('wind_gust_dir'),
'rain_rate' : LastFormula('rain_rate'),
'rain_fall' : SumFormula('rain'),
'utctime' : LastFormula('utctime') } }
while self.alive:
try:
data = accu.execute()['current']['series']
index = len(data['lbl'])-1
params = {
# <float> pressure: in inches of Hg
'pressure' : HPaToInHg(data['pressure'][index]),
# <float> dewpoint: in Fahrenheit
'dewpoint' : CToF(data['dew_point'][index]),
# <float> humidity: between 0.0 and 100.0 inclusive
'humidity' : data['hum'][index],
# <float> tempf: in Fahrenheit
'tempf' : CToF(data['temp'][index]),
# <float> rainin: inches/hour of rain
'rainin' : MmToIn(data['rain_rate'][index]),
# <float> rainday: total rainfall in day (localtime)
'rainday' : MmToIn(data['rain_fall'][index]),
# <string> dateutc: date "YYYY-MM-DD HH:MM:SS" in GMT timezone
'dateutc' : data['utctime'][index].strftime('%Y-%m-%d %H:%M:%S'),
# <float> windspeed: in mph
'windspeed' : MpsToMph(data['wind'][index]),
# <float> winddir: in degrees, between 0.0 and 360.0
'winddir' : data['wind_deg'][index],
# <float> windgust: in mph
'windgust' : MpsToMph(data['gust'][index]),
# <float> windgustdir: in degrees, between 0.0 and 360.0
'windgustdir' : data['gust_deg'][index] }
# Do not send parameters that are null (None).
# from above only dateutc is a mandatory parameter.
params = dict(filter(lambda (p,v): v, [(p,v) for p,v in params.iteritems()]))
self.logger.info("Publishing Wunderground data (normal server): %s " % str(params))
self.publisher.set(**params)
response = self.publisher.publish()
self.logger.info('Result Wunderground publisher: %s' % str(response))
except Exception, e:
self.logger.exception(e)
time.sleep(self.period)
else:
self.logger.error("Wunderground real time server not yet supported")
# while self.alive:
# self.logger.debug("Publishing weather underground data (real time server).")
#
# time.sleep(self.period)
except Exception, e:
self.logger.exception(e)
raise
def close(self):
self.alive = False
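# Hedged usage sketch (not part of wfrog): in practice the publisher is wired
# up from wfrog's YAML configuration, but programmatically it only needs the
# documented properties set before render() is called. The values below are
# hypothetical; `my_storage` stands for any wfcommon storage service.
#
#   publisher = WeatherUndergroundPublisher()
#   publisher.id = 'KXXYYYYY1'        # Weather Underground station ID
#   publisher.password = 'secret'
#   publisher.period = 300            # seconds between uploads
#   publisher.storage = my_storage
#   publisher.render()                # loops, publishing every `period` seconds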
|
yl565/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/panel/panel_short.py
|
25
|
# -*- coding: utf-8 -*-
"""Panel data analysis for short T and large N
Created on Sat Dec 17 19:32:00 2011
Author: Josef Perktold
License: BSD-3
starting from scratch before looking at references again
just a stub to get the basic structure for group handling
target outsource as much as possible for reuse
Notes
-----
this is the basic version using a loop over individuals, which will be more
widely applicable. Depending on the special cases, there will be faster
implementations possible (sparse, Kronecker, ...)
the only two group-specific methods are get_within_cov and whiten
"""
import numpy as np
from statsmodels.regression.linear_model import OLS, GLS
from statsmodels.tools.grouputils import Group, GroupSorted
#not used
class Unit(object):
    def __init__(self, endog, exog):
self.endog = endog
self.exog = exog
def sum_outer_product_loop(x, group_iter):
'''sum outerproduct dot(x_i, x_i.T) over individuals
loop version
'''
mom = 0
for g in group_iter():
x_g = x[g]
#print 'x_g.shape', x_g.shape
mom += np.outer(x_g, x_g)
return mom
def sum_outer_product_balanced(x, n_groups):
'''sum outerproduct dot(x_i, x_i.T) over individuals
where x_i is (nobs_i, 1), and result is (nobs_i, nobs_i)
reshape-dot version, for x.ndim=1 only
'''
xrs = x.reshape(-1, n_groups, order='F')
return np.dot(xrs, xrs.T) #should be (nobs_i, nobs_i)
#x.reshape(n_groups, nobs_i, k_vars) #, order='F')
#... ? this is getting 3-dimensional dot, tensordot?
#needs (n_groups, k_vars, k_vars) array with sum over groups
#NOT
#I only need this for x is 1d, i.e. residual
def whiten_individuals_loop(x, transform, group_iter):
'''apply linear transform for each individual
loop version
'''
#Note: figure out dimension of transformed variable
#so we can pre-allocate
x_new = []
for g in group_iter():
x_g = x[g]
x_new.append(np.dot(transform, x_g))
return np.concatenate(x_new) #np.vstack(x_new) #or np.array(x_new) #check shape
class ShortPanelGLS2(object):
'''Short Panel with general intertemporal within correlation
assumes data is stacked by individuals, panel is balanced and
within correlation structure is identical across individuals.
It looks like this can just inherit GLS and overwrite whiten
'''
def __init__(self, endog, exog, group):
self.endog = endog
self.exog = exog
self.group = GroupSorted(group)
self.n_groups = self.group.n_groups
#self.nobs_group = #list for unbalanced?
def fit_ols(self):
self.res_pooled = OLS(self.endog, self.exog).fit()
return self.res_pooled #return or not
def get_within_cov(self, resid):
#central moment or not?
mom = sum_outer_product_loop(resid, self.group.group_iter)
return mom / self.n_groups #df correction ?
def whiten_groups(self, x, cholsigmainv_i):
#from scipy import sparse #use sparse
wx = whiten_individuals_loop(x, cholsigmainv_i, self.group.group_iter)
return wx
def fit(self):
res_pooled = self.fit_ols() #get starting estimate
sigma_i = self.get_within_cov(res_pooled.resid)
self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
wendog = self.whiten_groups(self.endog, self.cholsigmainv_i)
wexog = self.whiten_groups(self.exog, self.cholsigmainv_i)
#print wendog.shape, wexog.shape
self.res1 = OLS(wendog, wexog).fit()
return self.res1
class ShortPanelGLS(GLS):
'''Short Panel with general intertemporal within correlation
assumes data is stacked by individuals, panel is balanced and
within correlation structure is identical across individuals.
It looks like this can just inherit GLS and overwrite whiten
'''
def __init__(self, endog, exog, group, sigma_i=None):
self.group = GroupSorted(group)
self.n_groups = self.group.n_groups
#self.nobs_group = #list for unbalanced?
nobs_i = len(endog) / self.n_groups #endog might later not be an ndarray
#balanced only for now,
#which is a requirement anyway in this case (full cov)
#needs to change for parameterized sigma_i
#
if sigma_i is None:
sigma_i = np.eye(int(nobs_i))
self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
#super is taking care of endog, exog and sigma
super(self.__class__, self).__init__(endog, exog, sigma=None)
def get_within_cov(self, resid):
#central moment or not?
mom = sum_outer_product_loop(resid, self.group.group_iter)
return mom / self.n_groups #df correction ?
def whiten_groups(self, x, cholsigmainv_i):
#from scipy import sparse #use sparse
wx = whiten_individuals_loop(x, cholsigmainv_i, self.group.group_iter)
return wx
def _fit_ols(self):
        #used as starting estimate in old explicit version
self.res_pooled = OLS(self.endog, self.exog).fit()
return self.res_pooled #return or not
def _fit_old(self):
#old explicit version
res_pooled = self._fit_ols() #get starting estimate
sigma_i = self.get_within_cov(res_pooled.resid)
self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
wendog = self.whiten_groups(self.endog, self.cholsigmainv_i)
wexog = self.whiten_groups(self.exog, self.cholsigmainv_i)
self.res1 = OLS(wendog, wexog).fit()
return self.res1
def whiten(self, x):
#whiten x by groups, will be applied to endog and exog
wx = whiten_individuals_loop(x, self.cholsigmainv_i, self.group.group_iter)
return wx
#copied from GLSHet and adjusted (boiler plate?)
def fit_iterative(self, maxiter=3):
"""
Perform an iterative two-step procedure to estimate the GLS model.
Parameters
----------
maxiter : integer, optional
the number of iterations
Notes
-----
maxiter=1: returns the estimated based on given weights
maxiter=2: performs a second estimation with the updated weights,
this is 2-step estimation
maxiter>2: iteratively estimate and update the weights
TODO: possible extension stop iteration if change in parameter
estimates is smaller than x_tol
Repeated calls to fit_iterative, will do one redundant pinv_wexog
calculation. Calling fit_iterative(maxiter) once does not do any
redundant recalculations (whitening or calculating pinv_wexog).
"""
        #Note: in contrast to GLSHet, we don't have an auxiliary regression here
# might be needed if there is more structure in cov_i
#because we only have the loop we are not attaching the ols_pooled
#initial estimate anymore compared to original version
if maxiter < 1:
raise ValueError('maxiter needs to be at least 1')
import collections
self.history = collections.defaultdict(list) #not really necessary
for i in range(maxiter):
#pinv_wexog is cached, delete it to force recalculation
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
#fit with current cov, GLS, i.e. OLS on whitened endog, exog
results = self.fit()
self.history['self_params'].append(results.params)
if not i == maxiter-1: #skip for last iteration, could break instead
#print 'ols',
self.results_old = results #store previous results for debugging
#get cov from residuals of previous regression
sigma_i = self.get_within_cov(results.resid)
self.cholsigmainv_i = np.linalg.cholesky(np.linalg.pinv(sigma_i)).T
#calculate new whitened endog and exog
self.initialize()
#note results is the wrapper, results._results is the results instance
#results._results.results_residual_regression = res_resid
return results
if __name__ == '__main__':
pass
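# Hedged usage sketch (not part of the module): with a balanced panel stacked
# by individual, the GLS version would be used roughly like
#
#   import numpy as np
#   n_groups, nobs_i = 50, 5
#   groups = np.repeat(np.arange(n_groups), nobs_i)
#   exog = np.column_stack((np.ones(n_groups * nobs_i),
#                           np.random.randn(n_groups * nobs_i)))
#   endog = exog.dot([1.0, 0.5]) + np.random.randn(n_groups * nobs_i)
#   mod = ShortPanelGLS(endog, exog, groups)
#   res = mod.fit_iterative(maxiter=2)   # 2-step estimation
#   print(res.params)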
|
buckiracer/data-science-from-scratch
|
refs/heads/master
|
RefMaterials/code/clustering.py
|
60
|
from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
    img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
#
# hierarchical clustering
#
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
|
lifeinoppo/littlefishlet-scode
|
refs/heads/master
|
RES/REF/python_sourcecode/ipython-master/IPython/core/display_trap.py
|
22
|
# encoding: utf-8
"""
A context manager for handling sys.displayhook.
Authors:
* Robert Kern
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from traitlets.config.configurable import Configurable
from traitlets import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DisplayTrap(Configurable):
"""Object to manage sys.displayhook.
This came from IPython.core.kernel.display_hook, but is simplified
(no callbacks or formatters) until more of the core is refactored.
"""
hook = Any()
def __init__(self, hook=None):
super(DisplayTrap, self).__init__(hook=hook, config=None)
self.old_hook = None
# We define this to track if a single BuiltinTrap is nested.
# Only turn off the trap when the outermost call to __exit__ is made.
self._nested_level = 0
def __enter__(self):
if self._nested_level == 0:
self.set()
self._nested_level += 1
return self
def __exit__(self, type, value, traceback):
if self._nested_level == 1:
self.unset()
self._nested_level -= 1
# Returning False will cause exceptions to propagate
return False
def set(self):
"""Set the hook."""
if sys.displayhook is not self.hook:
self.old_hook = sys.displayhook
sys.displayhook = self.hook
def unset(self):
"""Unset the hook."""
sys.displayhook = self.old_hook
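if __name__ == '__main__':
    # Hedged demo (not part of IPython): install a custom displayhook for the
    # duration of a block, then restore the previous hook on exit.
    def _demo_hook(value):
        if value is not None:
            print('trapped: %r' % value)

    with DisplayTrap(hook=_demo_hook):
        # Inside the block sys.displayhook is _demo_hook.
        sys.displayhook('hello')
    # On exiting the block the original displayhook is back in place.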
|
russellb/nova
|
refs/heads/master
|
nova/tests/api/openstack/compute/contrib/__init__.py
|
99
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
quxiaolong1504/django
|
refs/heads/master
|
django/template/engine.py
|
199
|
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Template
from .context import _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
_context_instance_undefined = object()
_dictionary_undefined = object()
_dirs_undefined = object()
class Engine(object):
default_builtins = [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
]
def __init__(self, dirs=None, app_dirs=False,
allowed_include_roots=None, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8', libraries=None, builtins=None):
if dirs is None:
dirs = []
if allowed_include_roots is None:
allowed_include_roots = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ['django.template.loaders.filesystem.Loader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined.")
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
if isinstance(allowed_include_roots, six.string_types):
raise ImproperlyConfigured(
"allowed_include_roots must be a tuple, not a string.")
self.dirs = dirs
self.app_dirs = app_dirs
self.allowed_include_roots = allowed_include_roots
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
@staticmethod
@lru_cache.lru_cache()
def get_default():
"""
When only one DjangoTemplates backend is configured, returns it.
Raises ImproperlyConfigured otherwise.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
django_engines = [engine for engine in engines.all()
if isinstance(engine, DjangoTemplates)]
if len(django_engines) == 1:
# Unwrap the Engine instance inside DjangoTemplates
return django_engines[0].engine
elif len(django_engines) == 0:
raise ImproperlyConfigured(
"No DjangoTemplates backend is configured.")
else:
raise ImproperlyConfigured(
"Several DjangoTemplates backends are configured. "
"You must select one explicitly.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
args = list(loader[1:])
loader = loader[0]
else:
args = []
if isinstance(loader, six.string_types):
loader_class = import_string(loader)
if getattr(loader_class, '_accepts_engine_in_init', False):
args.insert(0, self)
else:
warnings.warn(
"%s inherits from django.template.loader.BaseLoader "
"instead of django.template.loaders.base.Loader. " %
loader, RemovedInDjango110Warning, stacklevel=2)
return loader_class(*args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
if loader.supports_recursion:
try:
template = loader.get_template(
name, template_dirs=dirs, skip=skip,
)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
else:
# RemovedInDjango20Warning: Use old api for non-recursive
# loaders.
try:
return loader(name, dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name, dirs=_dirs_undefined):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
if dirs is _dirs_undefined:
dirs = None
else:
warnings.warn(
"The dirs argument of get_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
template, origin = self.find_template(template_name, dirs)
if not hasattr(template, 'render'):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
return template
# This method was originally a function defined in django.template.loader.
# It was moved here in Django 1.8 when encapsulating the Django template
# engine in this Engine class. It's still called by deprecated code but it
# will be removed in Django 1.10. It's superseded by a new render_to_string
# function in django.template.loader.
def render_to_string(self, template_name, context=None,
context_instance=_context_instance_undefined,
dirs=_dirs_undefined,
dictionary=_dictionary_undefined):
if context_instance is _context_instance_undefined:
context_instance = None
else:
warnings.warn(
"The context_instance argument of render_to_string is "
"deprecated.", RemovedInDjango110Warning, stacklevel=2)
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in select_template or get_template.
pass
else:
warnings.warn(
"The dirs argument of render_to_string is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if dictionary is _dictionary_undefined:
dictionary = None
else:
warnings.warn(
"The dictionary argument of render_to_string was renamed to "
"context.", RemovedInDjango110Warning, stacklevel=2)
context = dictionary
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name, dirs)
else:
t = self.get_template(template_name, dirs)
if not context_instance:
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context))
if not context:
return t.render(context_instance)
# Add the context to the context stack, ensuring it gets removed again
# to keep the context_instance in the same state it started in.
with context_instance.push(context):
return t.render(context_instance)
def select_template(self, template_name_list, dirs=_dirs_undefined):
"""
Given a list of template names, returns the first that can be loaded.
"""
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in get_template.
pass
else:
warnings.warn(
"The dirs argument of select_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name, dirs)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
|
vincentbetro/NACA-SIM
|
refs/heads/master
|
imagescripts/tecplotmigrate.py
|
1
|
#!/usr/bin/env python
import os,sys
import glob
def mysystem(s):
print(s)
retval = os.system(s)
return retval
def main():
alphas = range(-8,9)
orders = [1,2]
machs = [0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25]
refinelevels = [0,1,2]
#now, we need to recursively move everybody back
for order in orders:
for mach in machs:
for alpha in alphas:
for refinelevel in refinelevels:
mysystem('mv TecplotEuler_%d_%+03d_%0.2f_%02d.png /home/vbetro/online_edu/images/order%d/mach%0.2f/alpha%+03d'%(order,alpha,mach,refinelevel,order,mach,alpha));
if __name__ == "__main__":
main()
|
ganeshrn/ansible
|
refs/heads/devel
|
test/units/module_utils/common/test_network.py
|
14
|
# -*- coding: utf-8 -*-
# (c) 2017 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.network import (
to_bits,
to_masklen,
to_netmask,
to_subnet,
to_ipv6_network,
is_masklen,
is_netmask
)
def test_to_masklen():
assert 24 == to_masklen('255.255.255.0')
def test_to_masklen_invalid():
with pytest.raises(ValueError):
to_masklen('255')
def test_to_netmask():
assert '255.0.0.0' == to_netmask(8)
assert '255.0.0.0' == to_netmask('8')
def test_to_netmask_invalid():
with pytest.raises(ValueError):
to_netmask(128)
def test_to_subnet():
result = to_subnet('192.168.1.1', 24)
assert '192.168.1.0/24' == result
result = to_subnet('192.168.1.1', 24, dotted_notation=True)
assert '192.168.1.0 255.255.255.0' == result
def test_to_subnet_invalid():
with pytest.raises(ValueError):
to_subnet('foo', 'bar')
def test_is_masklen():
assert is_masklen(32)
assert not is_masklen(33)
assert not is_masklen('foo')
def test_is_netmask():
assert is_netmask('255.255.255.255')
assert not is_netmask(24)
assert not is_netmask('foo')
def test_to_ipv6_network():
assert '2001:db8::' == to_ipv6_network('2001:db8::')
assert '2001:0db8:85a3::' == to_ipv6_network('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
assert '2001:0db8:85a3::' == to_ipv6_network('2001:0db8:85a3:0:0:8a2e:0370:7334')
def test_to_bits():
assert to_bits('0') == '00000000'
assert to_bits('1') == '00000001'
assert to_bits('2') == '00000010'
assert to_bits('1337') == '10100111001'
assert to_bits('127.0.0.1') == '01111111000000000000000000000001'
assert to_bits('255.255.255.255') == '11111111111111111111111111111111'
assert to_bits('255.255.255.0') == '11111111111111111111111100000000'
|
PhotonX-Networks/OfdPy
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='ofdpy',
version='0.1a0',
description='Broadcom OF-DPA utilities',
author='Karel van de Plassche',
author_email='karelvandeplassche@gmail.com',
classifiers=['License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)'],
packages=['ofdpy'],
)
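# Hedged note (not part of the original file): with this distutils setup
# script in the project root, a local install is typically either
#   python setup.py install
# or, using pip,
#   pip install .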
|
mrtexaznl/p2pool
|
refs/heads/master
|
p2pool/bitcoin/sha256.py
|
285
|
from __future__ import division
import struct
k = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
]
def process(state, chunk):
def rightrotate(x, n):
return (x >> n) | (x << 32 - n) % 2**32
w = list(struct.unpack('>16I', chunk))
for i in xrange(16, 64):
s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ (w[i-15] >> 3)
s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ (w[i-2] >> 10)
w.append((w[i-16] + s0 + w[i-7] + s1) % 2**32)
a, b, c, d, e, f, g, h = start_state = struct.unpack('>8I', state)
for k_i, w_i in zip(k, w):
t1 = (h + (rightrotate(e, 6) ^ rightrotate(e, 11) ^ rightrotate(e, 25)) + ((e & f) ^ (~e & g)) + k_i + w_i) % 2**32
a, b, c, d, e, f, g, h = (
(t1 + (rightrotate(a, 2) ^ rightrotate(a, 13) ^ rightrotate(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))) % 2**32,
a, b, c, (d + t1) % 2**32, e, f, g,
)
return struct.pack('>8I', *((x + y) % 2**32 for x, y in zip(start_state, [a, b, c, d, e, f, g, h])))
initial_state = struct.pack('>8I', 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
class sha256(object):
digest_size = 256//8
block_size = 512//8
def __init__(self, data='', _=(initial_state, '', 0)):
self.state, self.buf, self.length = _
self.update(data)
def update(self, data):
state = self.state
buf = self.buf + data
chunks = [buf[i:i + self.block_size] for i in xrange(0, len(buf) + 1, self.block_size)]
for chunk in chunks[:-1]:
state = process(state, chunk)
self.state = state
self.buf = chunks[-1]
self.length += 8*len(data)
def copy(self, data=''):
return self.__class__(data, (self.state, self.buf, self.length))
def digest(self):
state = self.state
buf = self.buf + '\x80' + '\x00'*((self.block_size - 9 - len(self.buf)) % self.block_size) + struct.pack('>Q', self.length)
for chunk in [buf[i:i + self.block_size] for i in xrange(0, len(buf), self.block_size)]:
state = process(state, chunk)
return state
def hexdigest(self):
return self.digest().encode('hex')
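if __name__ == '__main__':
    # Hedged self-test (not part of p2pool): cross-check this pure Python
    # implementation against the standard library on a few known inputs.
    # Like the rest of the module, this is Python 2 code.
    import hashlib
    for test_data in ['', 'abc', 'a' * 1000]:
        assert sha256(test_data).hexdigest() == hashlib.sha256(test_data).hexdigest()
    print 'sha256 self-test passed'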
|
subhadram/insilico
|
refs/heads/master
|
examples/NeuronSAHPVGCCNetwork/peaki.py
|
1
|
from pylab import *
import numpy as np
from matplotlib import pyplot
matplotlib.rcParams.update({'font.size': 24})
locator_params(axis='y', nbins=3)
locator_params(axis='x', nbins=4)
# Get peak of data and store it in file
dat = genfromtxt('ccc.dat')
#dat = genfromtxt('0.dat')
data = dat[5000:]
tt = []
dt1 = []
dt2 = []
for i in range(1,len(data)-1):
if (((data[i,1]>=data[i-1,1])&(data[i,1]>=data[i+1,1]))&(data[i,1]>23.0)):
tt.append(data[i,:])
tt = np.array(tt)
for j in range(2,len(tt)):
x = float(tt[j,0]) - float(tt[j-1,0])
y = float(tt[j-1,0]) - float(tt[j-2,0])
#print tt[i]
if x > 100.0 :
#print x
dt1.append(tt[j,:])
dt2.append(tt[j-1,:])
print tt
dt1 = np.array(dt1)
dt2 = np.array(dt2)
t = np.array(dat[:,1])
sp = np.fft.fft(t)
freq = np.fft.fftfreq(t.shape[-1])
plt.plot(freq, sp.real, freq, sp.imag)
#plot(data[:,0],data[:,1],label = 'Voltage')
#plot(tt[:,0],tt[:,1],'ro',label = 'Inter-spike interval')
#plot(dt2[:,0],dt2[:,1],'go',label = 'Inter-burst interval')
#plot(dt1[:,0],dt1[:,1],'go')
xlim(0,.1)
#ylim(-100,100)
#xlabel("Time (msec)")
#ylabel("Voltage (mV)")
#plot(dt1[:,0],dt1[:,1],'go',label = 'Peak')
legend(loc='best',fontsize = 20)
show()
"""
# Save peak time to file
#tt = tt[:,3]
#print tt[5]
np.savetxt('dt.dat', tt, fmt='%.3f')
# Time difference
#tt = genfromtxt('noise0_tt.dat')
g = np.mean(tt)
print g
dt = []
for j in range(1,len(tt)):
x = float(tt[j]) - float(tt[j-1])
#print tt[i]
if x > 50.0 :
#print x
dt.append(x)
#print dt
dt = np.array(dt)
np.savetxt('switchdt.dat', dt, fmt='%.2f')
meen = np.mean(dt)
vari = np.var(dt)
print meen ,vari
th = genfromtxt('dt.dat')
dh = []
for i in range(1,len(th)):
dh.append(th[i]-th[i-1])
mn = np.mean(dh)
st = np.std(dh)
ts = genfromtxt('5.dat')
ds = []
for i in range(1,len(ts)):
ds.append(ts[i]-ts[i-1])
mn1 = np.mean(ds)
st1 = np.std(ds)
print mn,st
print mn1,st1
tu = genfromtxt('1.dat')
du = []
for i in range(1,len(tu)):
du.append(tu[i]-tu[i-1])
tt = genfromtxt('detf.dat')
dt = []
for i in range(1,len(tu)):
dt.append(tt[i]-tt[i-1])
binBoundaries = np.linspace(0,250,100)
# Histogram of time difference
#n, bins, patches = pyplot.hist(dt, log = True, alpha = 0.3, bins = binBoundaries, facecolor='k',label = "Infinity")
n, bins, patches = pyplot.hist(dh, log = False, alpha = 0.3, bins = binBoundaries, facecolor='r',label = "N = 1000")
n, bins, patches = pyplot.hist(ds, log = False, alpha = 0.3, bins = binBoundaries, facecolor='b',label = "N = 800")
n, bins, patches = pyplot.hist(du, log = True, alpha = 0.3, bins = binBoundaries, facecolor='g',label = "N = 400")
pyplot.legend(loc='upper right')
pyplot.ylabel("#Counts")
pyplot.xlabel("Time difference in consecutive action potentials(msec)")
#plt.hist(numpy.log2(data), log=True, bins=bins)
#freq, bins = np.histogram(ValList,bins)
#dat = zip(*np.histogram(dt,50))
#np.savetxt('hist40.dat', dat, delimiter=' ')
#legend("0.4")
#ylim (1,50)
#xlim (700, 1300)
"""
#plot(range(len(tt)), dt, '.')
#plt.yscale('log')
show()
#savefig('peaks.jpg', format='jpg', dpi=150)
|
1c7/Python-ask-answer-website-practice
|
refs/heads/master
|
5-Database:SQLite3/6. Table_Struct.py
|
1
|
import sqlite3
con = sqlite3.connect('posts.db')
c = con.cursor()
c.execute('''
SELECT name FROM sqlite_master WHERE type='table'
''')
for one in c.fetchall():
print (one)
c.close()
|
ESOedX/edx-platform
|
refs/heads/master
|
lms/djangoapps/grades/migrations/0009_auto_20170111_1507.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0008_persistentsubsectiongrade_first_attempted'),
]
operations = [
migrations.AlterIndexTogether(
name='persistentcoursegrade',
index_together=set([('passed_timestamp', 'course_id'), ('modified', 'course_id')]),
),
migrations.AlterIndexTogether(
name='persistentsubsectiongrade',
index_together=set([('modified', 'course_id', 'usage_key')]),
),
]
|
KECB/learn
|
refs/heads/master
|
computer_vision/image_gradients.py
|
1
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
# bicycle_img = cv2.imread('images/bicycle.png', 0)
#
# sobelx = cv2.Sobel(bicycle_img, cv2.CV_64F, 1, 0, ksize=5)
# sobely = cv2.Sobel(bicycle_img, cv2.CV_64F, 0, 1, ksize=5)
#
#
# scharrx = cv2.Scharr(bicycle_img, cv2.CV_64F, 1, 0)
# scharry = cv2.Scharr(bicycle_img, cv2.CV_64F, 0, 1)
#
#
#
# # apply the laplacian filter to the image using OpenCV
# laplacian = cv2.Laplacian(bicycle_img, cv2.CV_64F)
#
# # show the original, sobel, scharr and
# plt.figure(figsize=(12, 8))
# plt.subplot(231),plt.imshow(bicycle_img, cmap = 'gray')
# plt.title('Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(232),plt.imshow(sobelx, cmap = 'gray')
# plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
# plt.subplot(233),plt.imshow(sobely, cmap = 'gray')
# plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
#
# # scharr
# plt.subplot(234),plt.imshow(scharrx, cmap = 'gray')
# plt.title('Scharr X'), plt.xticks([]), plt.yticks([])
# plt.subplot(235),plt.imshow(scharry, cmap = 'gray')
# plt.title('Scharr Y'), plt.xticks([]), plt.yticks([])
#
# # laplacian
# plt.subplot(236),plt.imshow(laplacian, cmap = 'gray')
# plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
#
# plt.suptitle('Sobel, Scharr & Laplacian Edge Detection', fontsize=16)
# plt.tight_layout()
# plt.show()
img = cv2.imread('images/android.png', 0 )
sobelx = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=5)
sobely = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=5)
sobel_combine = sobelx + sobely
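# Note (illustrative): a plain sum lets opposite-signed gradients cancel each other;
# the gradient magnitude np.sqrt(sobelx**2 + sobely**2) (or cv2.magnitude(sobelx, sobely))
# is a common alternative for combining the two directions.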
plt.subplot(131), plt.imshow(sobelx, cmap='gray')
plt.subplot(132), plt.imshow(sobely, cmap='gray')
plt.subplot(133), plt.imshow(sobel_combine, cmap='gray')
plt.show()
|
ajjohnso/ajjohnso-byte1-attempt56
|
refs/heads/master
|
lib/werkzeug/local.py
|
310
|
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
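# Illustrative usage sketch (the application object is an assumption, not part of this module):
#   local_manager = LocalManager([some_local])
#   application = local_manager.make_middleware(application)
# After wrapping, the data stored in `some_local` is released at the end of each request.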
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
|
timduru/platform-external-chromium_org
|
refs/heads/katkiss-4.4
|
tools/win/split_link/graph_dependencies.py
|
145
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def main():
if len(sys.argv) != 2:
print 'usage: %s <output.html>' % sys.argv[0]
return 1
env = os.environ.copy()
env['GYP_GENERATORS'] = 'dump_dependency_json'
print 'Dumping dependencies...'
popen = subprocess.Popen(
['python', 'build/gyp_chromium'],
shell=True, env=env)
popen.communicate()
if popen.returncode != 0:
return popen.returncode
print 'Finding problems...'
popen = subprocess.Popen(
['python', 'tools/gyp-explain.py', '--dot',
'chrome.gyp:browser#', 'core.gyp:webcore#'],
stdout=subprocess.PIPE,
shell=True)
out, _ = popen.communicate()
if popen.returncode != 0:
return popen.returncode
# Break into pairs to uniq to make graph less of a mess.
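  # e.g. a path "a -> b -> c;" contributes the deduplicated edges "a -> b;" and "b -> c;".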
print 'Simplifying...'
deduplicated = set()
lines = out.splitlines()[2:-1]
for line in lines:
line = line.strip('\r\n ;')
pairs = line.split(' -> ')
for i in range(len(pairs) - 1):
deduplicated.add('%s -> %s;' % (pairs[i], pairs[i + 1]))
graph = 'strict digraph {\n' + '\n'.join(sorted(deduplicated)) + '\n}'
print 'Writing report to %s...' % sys.argv[1]
path_count = len(out.splitlines())
with open(os.path.join(BASE_DIR, 'viz.js', 'viz.js')) as f:
viz_js = f.read()
with open(sys.argv[1], 'w') as f:
f.write(PREFIX % path_count)
f.write(graph)
f.write(SUFFIX % viz_js)
print 'Done.'
PREFIX = r'''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Undesirable Dependencies</title>
</head>
<body>
<h1>Undesirable Dependencies</h1>
<h2>browser → webcore</h2>
<h3>%d paths</h3>
<script type="text/vnd.graphviz" id="graph">
'''
SUFFIX = r'''
</script>
<script>%s</script>
<div id="output">Rendering...</div>
<script>
setTimeout(function() {
document.getElementById("output").innerHTML =
Viz(document.getElementById("graph").innerHTML, "svg");
}, 1);
</script>
</body>
</html>
'''
if __name__ == '__main__':
sys.exit(main())
|
gx1997/chrome-loongson
|
refs/heads/master
|
chrome/tools/build/win/scan_server_dlls.py
|
79
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script used to scan for server DLLs at build time and build a header
included by setup.exe. This header contains an array of the names of
the DLLs that need registering at install time.
"""
import ConfigParser
import glob
import optparse
import os
import sys
CHROME_DIR = "Chrome-bin"
SERVERS_DIR = "servers"
GENERATED_DLL_INCLUDE_FILE_NAME = "registered_dlls.h"
GENERATED_DLL_INCLUDE_FILE_CONTENTS = """
// This file is automatically generated by scan_server_dlls.py.
// It contains the list of COM server dlls that need registering at
// install time.
#include "base/basictypes.h"
namespace {
const wchar_t* kDllsToRegister[] = { %s };
const int kNumDllsToRegister = %d;
}
"""
def Readconfig(output_dir, input_file):
"""Reads config information from input file after setting default value of
  global variables.
"""
variables = {}
variables['ChromeDir'] = CHROME_DIR
# Use a bogus version number, we don't really care what it is, we just
# want to find the files that would get picked up from chrome.release,
# and don't care where the installer archive task ends up putting them.
variables['VersionDir'] = os.path.join(variables['ChromeDir'],
'0.0.0.0')
config = ConfigParser.SafeConfigParser(variables)
print "Reading input_file: " + input_file
config.read(input_file)
return config
def CreateRegisteredDllIncludeFile(registered_dll_list, header_output_dir):
""" Outputs the header file included by the setup project that
contains the names of the DLLs to be registered at installation
time.
"""
output_file = os.path.join(header_output_dir, GENERATED_DLL_INCLUDE_FILE_NAME)
dll_array_string = ""
for dll in registered_dll_list:
    dll = dll.replace("\\", "\\\\")
if dll_array_string:
dll_array_string += ', '
dll_array_string += "L\"%s\"" % dll
if len(registered_dll_list) == 0:
contents = GENERATED_DLL_INCLUDE_FILE_CONTENTS % ("L\"\"", 0)
else:
contents = GENERATED_DLL_INCLUDE_FILE_CONTENTS % (dll_array_string,
len(registered_dll_list))
# Don't rewrite the header file if we don't need to.
try:
old_file = open(output_file, 'r')
except EnvironmentError:
old_contents = None
else:
old_contents = old_file.read()
old_file.close()
if contents != old_contents:
print 'Updating server dll header: ' + str(output_file)
open(output_file, 'w').write(contents)
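# Illustrative example (DLL names are made up): for two server DLLs the generated
# header would contain:
#   const wchar_t* kDllsToRegister[] = { L"foo.dll", L"bar.dll" };
#   const int kNumDllsToRegister = 2;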
def ScanServerDlls(config, distribution, output_dir):
"""Scans for DLLs in the specified section of config that are in the
subdirectory of output_dir named SERVERS_DIR. Returns a list of only the
filename components of the paths to all matching DLLs.
"""
print "Scanning for server DLLs in " + output_dir
registered_dll_list = []
ScanDllsInSection(config, 'GENERAL', output_dir, registered_dll_list)
if distribution:
if len(distribution) > 1 and distribution[0] == '_':
distribution = distribution[1:]
ScanDllsInSection(config, distribution.upper(), output_dir,
registered_dll_list)
return registered_dll_list
def ScanDllsInSection(config, section, output_dir, registered_dll_list):
"""Scans for DLLs in the specified section of config that are in the
subdirectory of output_dir named SERVERS_DIR. Appends the file name of all
matching dlls to registered_dll_list.
"""
for option in config.options(section):
if option.endswith('dir'):
continue
dst = config.get(section, option)
(x, src_folder) = os.path.split(dst)
for file in glob.glob(os.path.join(output_dir, option)):
if option.startswith(SERVERS_DIR):
(x, file_name) = os.path.split(file)
if file_name.lower().endswith('.dll'):
print "Found server DLL file: " + file_name
registered_dll_list.append(file_name)
def RunSystemCommand(cmd):
if (os.system(cmd) != 0):
    raise Exception("Error while running cmd: %s" % cmd)
def main():
"""Main method that reads input file, scans <build_output>\servers for
matches to files described in the input file. A header file for the
setup project is then generated.
"""
option_parser = optparse.OptionParser()
option_parser.add_option('-o', '--output_dir', help='Build Output directory')
option_parser.add_option('-x', '--header_output_dir',
help='Location where the generated header file will be placed.')
option_parser.add_option('-i', '--input_file', help='Input file')
option_parser.add_option('-d', '--distribution',
help='Name of Chromium Distribution. Optional.')
options, args = option_parser.parse_args()
config = Readconfig(options.output_dir, options.input_file)
registered_dll_list = ScanServerDlls(config, options.distribution,
options.output_dir)
CreateRegisteredDllIncludeFile(registered_dll_list,
options.header_output_dir)
return 0
if '__main__' == __name__:
sys.exit(main())
|
DJMelonz/basic-blog
|
refs/heads/master
|
django/contrib/gis/db/models/fields.py
|
400
|
from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if not connection.alias in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if not srid in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
# Geodetic units.
geodetic_units = ('Decimal Degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
        # Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
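    # Illustrative usage sketch (model and field names are assumptions, not part of this module):
    #   from django.contrib.gis.db import models
    #   class City(models.Model):
    #       point = models.PointField(srid=4326, spatial_index=True)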
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection) in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, basestring) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name):
super(GeometryField, self).contribute_to_class(cls, name)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class' : forms.GeometryField,
'null' : self.null,
'geom_type' : self.geom_type,
'srid' : self.srid,
}
defaults.update(kwargs)
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
if lookup_type in connection.ops.gis_terms:
# special case for isnull lookup
if lookup_type == 'isnull':
return []
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if lookup_type in connection.ops.distance_functions:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, SQLEvaluator):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if value is None:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
description = _("Geometry collection")
|
wfxiang08/django178
|
refs/heads/master
|
tests/template_tests/alternate_urls.py
|
26
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
# View returning a template response
(r'^template_response_view/$', views.template_response_view),
# A view that can be hard to find...
url(r'^snark/', views.snark, name='snark'),
)
|
repotvsupertuga/repo
|
refs/heads/master
|
plugin.video.specto/resources/lib/libraries/cloudflare2.py
|
19
|
#
# Copyright (C) 2015 tknorris (Derived from Mikey1234's & Lambda's)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
# This code is a derivative of the YouTube plugin for XBMC and associated works
# released under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3
import re
import urllib2
import urllib
import urlparse
#import xbmc
import time
from resources.lib.libraries import control
MAX_TRIES = 3
class NoRedirection(urllib2.HTTPErrorProcessor):
def http_response(self, request, response):
control.log('Stopping Redirect')
return response
https_response = http_response
def solve_equation(equation):
try:
offset = 1 if equation[0] == '+' else 0
return int(eval(equation.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0').replace('(', 'str(')[offset:]))
except:
pass
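# Illustrative example (the challenge fragment is an assumption): an obfuscated term such as
# "(!+[]+!![]+!![])" becomes "str(1+1+1)" after the replacements above and evaluates to 3.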
def solve(url, cj, user_agent=None, wait=True):
if user_agent is None: user_agent = control.USER_AGENT
headers = {'User-Agent': user_agent, 'Referer': url}
if cj is not None:
try: cj.load(ignore_discard=True)
except: pass
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
request = urllib2.Request(url)
for key in headers: request.add_header(key, headers[key])
try:
response = urllib2.urlopen(request)
html = response.read()
except urllib2.HTTPError as e:
html = e.read()
tries = 0
while tries < MAX_TRIES:
solver_pattern = 'var t,r,a,f,\s*([^=]+)={"([^"]+)":([^}]+)};.+challenge-form\'\);.*?\n.*?;(.*?);a\.value'
vc_pattern = 'input type="hidden" name="jschl_vc" value="([^"]+)'
pass_pattern = 'input type="hidden" name="pass" value="([^"]+)'
init_match = re.search(solver_pattern, html, re.DOTALL)
vc_match = re.search(vc_pattern, html)
pass_match = re.search(pass_pattern, html)
if not init_match or not vc_match or not pass_match:
#control.log("Couldn't find attribute: init: |%s| vc: |%s| pass: |%s| No cloudflare check?" % (init_match, vc_match, pass_match))
return False
init_dict, init_var, init_equation, equations = init_match.groups()
vc = vc_match.group(1)
password = pass_match.group(1)
# control.log("VC is: %s" % (vc), xbmc.LOGDEBUG)
varname = (init_dict, init_var)
result = int(solve_equation(init_equation.rstrip()))
#control.log('Initial value: |%s| Result: |%s|' % (init_equation, result))
for equation in equations.split(';'):
equation = equation.rstrip()
if equation[:len('.'.join(varname))] != '.'.join(varname):
control.log('Equation does not start with varname |%s|' % (equation))
else:
equation = equation[len('.'.join(varname)):]
expression = equation[2:]
operator = equation[0]
if operator not in ['+', '-', '*', '/']:
control.log('Unknown operator: |%s|' % (equation))
continue
result = int(str(eval(str(result) + operator + str(solve_equation(expression)))))
#control.log('intermediate: %s = %s' % (equation, result))
scheme = urlparse.urlparse(url).scheme
domain = urlparse.urlparse(url).hostname
result += len(domain)
#control.log('Final Result: |%s|' % (result))
if wait:
#control.log('Sleeping for 5 Seconds')
time.sleep(5)
url = '%s://%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s&pass=%s' % (scheme, domain, vc, result, urllib.quote(password))
#control.log('url: %s' % (url))
request = urllib2.Request(url)
for key in headers: request.add_header(key, headers[key])
try:
opener = urllib2.build_opener(NoRedirection)
urllib2.install_opener(opener)
response = urllib2.urlopen(request)
while response.getcode() in [301, 302, 303, 307]:
if cj is not None:
cj.extract_cookies(response, request)
request = urllib2.Request(response.info().getheader('location'))
for key in headers: request.add_header(key, headers[key])
if cj is not None:
cj.add_cookie_header(request)
response = urllib2.urlopen(request)
final = response.read()
if 'cf-browser-verification' in final:
#control.log('CF Failure: html: %s url: %s' % (html, url))
tries += 1
html = final
else:
break
except urllib2.HTTPError as e:
control.log('CloudFlare Error: %s on url: %s' % (e.code, url))
return False
if cj is not None:
cj.extract_cookies(response, request)
cj.save()
return final
|
rosmo/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/dellos10/dellos10_config.py
|
42
|
#!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2017 Dell Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos10_config
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Manage Dell EMC Networking OS10 configuration sections
description:
- OS10 configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with OS10 configuration sections in
a deterministic way.
extends_documentation_fragment: dellos10
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser. This argument is mutually exclusive with I(src).
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When you set this argument to
I(merge), the configuration changes merge with the current
device running configuration. When you set this argument to I(check)
the configuration updates are determined but not actually configured
on the remote device.
default: merge
choices: ['merge', 'check']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: 'no'
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
          - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- dellos10_config:
lines: ['hostname {{ inventory_hostname }}']
- dellos10_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
- 50 permit ip host 5.5.5.5 any log
parents: ['ip access-list test']
before: ['no ip access-list test']
match: exact
- dellos10_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
parents: ['ip access-list test']
before: ['no ip access-list test']
replace: block
- dellos10_config:
lines: ['hostname {{ inventory_hostname }}']
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device.
returned: always
type: list
sample: ['hostname foo', 'router bgp 1', 'router-id 1.1.1.1']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router bgp 1', 'router-id 1.1.1.1']
saved:
description: Returns whether the configuration is saved to the startup
configuration or not.
returned: When not check_mode.
type: bool
sample: True
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/dellos10_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos10.dellos10 import get_config, get_sublevel_config
from ansible.module_utils.network.dellos10.dellos10 import dellos10_argument_spec, check_args
from ansible.module_utils.network.dellos10.dellos10 import load_config, run_commands
from ansible.module_utils.network.dellos10.dellos10 import WARNING_PROMPTS_RE
from ansible.module_utils.network.common.config import NetworkConfig, dumps
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
commands = module.params['lines'][0]
if (isinstance(commands, dict)) and (isinstance((commands['command']), list)):
candidate.add(commands['command'], parents=parents)
elif (isinstance(commands, dict)) and (isinstance((commands['command']), str)):
candidate.add([commands['command']], parents=parents)
else:
candidate.add(module.params['lines'], parents=parents)
return candidate
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return contents
def main():
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
src=dict(type='path'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line',
choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
update=dict(choices=['merge', 'check'], default='merge'),
save=dict(type='bool', default=False),
config=dict(),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec)
)
argument_spec.update(dellos10_argument_spec)
mutually_exclusive = [('lines', 'src')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
parents = module.params['parents'] or list()
match = module.params['match']
replace = module.params['replace']
warnings = list()
check_args(module, warnings)
result = dict(changed=False, saved=False, warnings=warnings)
if module.params['backup']:
if not module.check_mode:
result['__backup__'] = get_config(module)
commands = list()
candidate = get_candidate(module)
if any((module.params['lines'], module.params['src'])):
if match != 'none':
config = get_running_config(module)
if parents:
contents = get_sublevel_config(config, module)
config = NetworkConfig(contents=contents, indent=1)
else:
config = NetworkConfig(contents=config, indent=1)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands')
if ((isinstance((module.params['lines']), list)) and
(isinstance((module.params['lines'][0]), dict)) and
(set(['prompt', 'answer']).issubset(module.params['lines'][0]))):
cmd = {'command': commands,
'prompt': module.params['lines'][0]['prompt'],
'answer': module.params['lines'][0]['answer']}
commands = [module.jsonify(cmd)]
else:
commands = commands.split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
if not module.check_mode and module.params['update'] == 'merge':
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
result['updates'] = commands
if module.params['save']:
result['changed'] = True
if not module.check_mode:
cmd = {r'command': 'copy running-config startup-config',
r'prompt': r'\[confirm yes/no\]:\s?$', 'answer': 'yes'}
run_commands(module, [cmd])
result['saved'] = True
else:
module.warn('Skipping command `copy running-config startup-config`'
'due to check_mode. Configuration not copied to '
'non-volatile storage')
module.exit_json(**result)
if __name__ == '__main__':
main()
|
anatm/administrator
|
refs/heads/master
|
git-1.8.1/git-remote-testgit.py
|
12
|
#!/usr/bin/env python
# This command is a simple remote-helper, that is used both as a
# testcase for the remote-helper functionality, and as an example to
# show remote-helper authors one possible implementation.
#
# This is a Git <-> Git importer/exporter, that simply uses git
# fast-import and git fast-export to consume and produce fast-import
# streams.
#
# To understand better the way things work, one can activate debug
# traces by setting (to any value) the environment variables
# GIT_TRANSPORT_HELPER_DEBUG and GIT_DEBUG_TESTGIT, to see messages
# from the transport-helper side, or from this example remote-helper.
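#
# Illustrative invocation (the path is an assumption): running
#   git clone testgit::/path/to/repo
# makes git spawn this helper, which answers the "capabilities" command with "import",
# "export" and a refspec, as implemented in do_capabilities() below.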
# hashlib is only available in python >= 2.5
try:
import hashlib
_digest = hashlib.sha1
except ImportError:
import sha
_digest = sha.new
import sys
import os
import time
sys.path.insert(0, os.getenv("GITPYTHONLIB","."))
from git_remote_helpers.util import die, debug, warn
from git_remote_helpers.git.repo import GitRepo
from git_remote_helpers.git.exporter import GitExporter
from git_remote_helpers.git.importer import GitImporter
from git_remote_helpers.git.non_local import NonLocalGit
def get_repo(alias, url):
"""Returns a git repository object initialized for usage.
"""
repo = GitRepo(url)
repo.get_revs()
repo.get_head()
hasher = _digest()
hasher.update(repo.path)
repo.hash = hasher.hexdigest()
repo.get_base_path = lambda base: os.path.join(
base, 'info', 'fast-import', repo.hash)
prefix = 'refs/testgit/%s/' % alias
debug("prefix: '%s'", prefix)
repo.gitdir = os.environ["GIT_DIR"]
repo.alias = alias
repo.prefix = prefix
repo.exporter = GitExporter(repo)
repo.importer = GitImporter(repo)
repo.non_local = NonLocalGit(repo)
return repo
def local_repo(repo, path):
    """Returns a git repository object initialized for usage.
"""
local = GitRepo(path)
local.non_local = None
local.gitdir = repo.gitdir
local.alias = repo.alias
local.prefix = repo.prefix
local.hash = repo.hash
local.get_base_path = repo.get_base_path
local.exporter = GitExporter(local)
local.importer = GitImporter(local)
return local
def do_capabilities(repo, args):
"""Prints the supported capabilities.
"""
print "import"
print "export"
print "refspec refs/heads/*:%s*" % repo.prefix
dirname = repo.get_base_path(repo.gitdir)
if not os.path.exists(dirname):
os.makedirs(dirname)
path = os.path.join(dirname, 'testgit.marks')
print "*export-marks %s" % path
if os.path.exists(path):
print "*import-marks %s" % path
print # end capabilities
def do_list(repo, args):
"""Lists all known references.
Bug: This will always set the remote head to master for non-local
repositories, since we have no way of determining what the remote
head is at clone time.
"""
for ref in repo.revs:
debug("? refs/heads/%s", ref)
print "? refs/heads/%s" % ref
if repo.head:
debug("@refs/heads/%s HEAD" % repo.head)
print "@refs/heads/%s HEAD" % repo.head
else:
debug("@refs/heads/master HEAD")
print "@refs/heads/master HEAD"
print # end list
def update_local_repo(repo):
"""Updates (or clones) a local repo.
"""
if repo.local:
return repo
path = repo.non_local.clone(repo.gitdir)
repo.non_local.update(repo.gitdir)
repo = local_repo(repo, path)
return repo
def do_import(repo, args):
"""Exports a fast-import stream from testgit for git to import.
"""
if len(args) != 1:
die("Import needs exactly one ref")
if not repo.gitdir:
die("Need gitdir to import")
ref = args[0]
refs = [ref]
while True:
line = sys.stdin.readline()
if line == '\n':
break
if not line.startswith('import '):
die("Expected import line.")
        # strip off leading 'import '
ref = line[7:].strip()
refs.append(ref)
repo = update_local_repo(repo)
repo.exporter.export_repo(repo.gitdir, refs)
print "done"
def do_export(repo, args):
"""Imports a fast-import stream from git to testgit.
"""
if not repo.gitdir:
die("Need gitdir to export")
update_local_repo(repo)
changed = repo.importer.do_import(repo.gitdir)
if not repo.local:
repo.non_local.push(repo.gitdir)
for ref in changed:
print "ok %s" % ref
print
COMMANDS = {
'capabilities': do_capabilities,
'list': do_list,
'import': do_import,
'export': do_export,
}
def sanitize(value):
"""Cleans up the url.
"""
if value.startswith('testgit::'):
value = value[9:]
return value
def read_one_line(repo):
"""Reads and processes one command.
"""
sleepy = os.environ.get("GIT_REMOTE_TESTGIT_SLEEPY")
if sleepy:
debug("Sleeping %d sec before readline" % int(sleepy))
time.sleep(int(sleepy))
line = sys.stdin.readline()
cmdline = line
if not cmdline:
warn("Unexpected EOF")
return False
cmdline = cmdline.strip().split()
if not cmdline:
# Blank line means we're about to quit
return False
cmd = cmdline.pop(0)
debug("Got command '%s' with args '%s'", cmd, ' '.join(cmdline))
if cmd not in COMMANDS:
die("Unknown command, %s", cmd)
func = COMMANDS[cmd]
func(repo, cmdline)
sys.stdout.flush()
return True
def main(args):
"""Starts a new remote helper for the specified repository.
"""
if len(args) != 3:
die("Expecting exactly three arguments.")
sys.exit(1)
if os.getenv("GIT_DEBUG_TESTGIT"):
import git_remote_helpers.util
git_remote_helpers.util.DEBUG = True
alias = sanitize(args[1])
url = sanitize(args[2])
if not alias.isalnum():
warn("non-alnum alias '%s'", alias)
alias = "tmp"
args[1] = alias
args[2] = url
repo = get_repo(alias, url)
debug("Got arguments %s", args[1:])
more = True
sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
while (more):
more = read_one_line(repo)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Kwanghyuk-Kim/WATT
|
refs/heads/master
|
tools/WebIDLBinder/third_party/ply/example/BASIC/basparse.py
|
166
|
# An implementation of Dartmouth BASIC (1964)
#
from ply import *
import basiclex
tokens = basiclex.tokens
precedence = (
('left', 'PLUS','MINUS'),
('left', 'TIMES','DIVIDE'),
('left', 'POWER'),
('right','UMINUS')
)
#### A BASIC program is a series of statements. We represent the program as a
#### dictionary of tuples indexed by line number.
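#### For example (illustrative), the single statement "10 GOTO 20" parses to {10: ('GOTO', 20)}.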
def p_program(p):
'''program : program statement
| statement'''
if len(p) == 2 and p[1]:
p[0] = { }
line,stat = p[1]
p[0][line] = stat
elif len(p) ==3:
p[0] = p[1]
if not p[0]: p[0] = { }
if p[2]:
line,stat = p[2]
p[0][line] = stat
#### This catch-all rule is used for any catastrophic errors. In this case,
#### we simply return nothing
def p_program_error(p):
'''program : error'''
p[0] = None
p.parser.error = 1
#### Format of all BASIC statements.
def p_statement(p):
'''statement : INTEGER command NEWLINE'''
if isinstance(p[2],str):
print("%s %s %s" % (p[2],"AT LINE", p[1]))
p[0] = None
p.parser.error = 1
else:
lineno = int(p[1])
p[0] = (lineno,p[2])
#### Interactive statements.
def p_statement_interactive(p):
'''statement : RUN NEWLINE
| LIST NEWLINE
| NEW NEWLINE'''
p[0] = (0, (p[1],0))
#### Blank line number
def p_statement_blank(p):
'''statement : INTEGER NEWLINE'''
p[0] = (0,('BLANK',int(p[1])))
#### Error handling for malformed statements
def p_statement_bad(p):
'''statement : INTEGER error NEWLINE'''
print("MALFORMED STATEMENT AT LINE %s" % p[1])
p[0] = None
p.parser.error = 1
#### Blank line
def p_statement_newline(p):
'''statement : NEWLINE'''
p[0] = None
#### LET statement
def p_command_let(p):
'''command : LET variable EQUALS expr'''
p[0] = ('LET',p[2],p[4])
def p_command_let_bad(p):
'''command : LET variable EQUALS error'''
p[0] = "BAD EXPRESSION IN LET"
#### READ statement
def p_command_read(p):
'''command : READ varlist'''
p[0] = ('READ',p[2])
def p_command_read_bad(p):
'''command : READ error'''
p[0] = "MALFORMED VARIABLE LIST IN READ"
#### DATA statement
def p_command_data(p):
'''command : DATA numlist'''
p[0] = ('DATA',p[2])
def p_command_data_bad(p):
'''command : DATA error'''
p[0] = "MALFORMED NUMBER LIST IN DATA"
#### PRINT statement
def p_command_print(p):
'''command : PRINT plist optend'''
p[0] = ('PRINT',p[2],p[3])
def p_command_print_bad(p):
'''command : PRINT error'''
p[0] = "MALFORMED PRINT STATEMENT"
#### Optional ending on PRINT. Either a comma (,) or semicolon (;)
def p_optend(p):
'''optend : COMMA
| SEMI
|'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = None
#### PRINT statement with no arguments
def p_command_print_empty(p):
'''command : PRINT'''
p[0] = ('PRINT',[],None)
#### GOTO statement
def p_command_goto(p):
'''command : GOTO INTEGER'''
p[0] = ('GOTO',int(p[2]))
def p_command_goto_bad(p):
'''command : GOTO error'''
p[0] = "INVALID LINE NUMBER IN GOTO"
#### IF-THEN statement
def p_command_if(p):
'''command : IF relexpr THEN INTEGER'''
p[0] = ('IF',p[2],int(p[4]))
def p_command_if_bad(p):
'''command : IF error THEN INTEGER'''
p[0] = "BAD RELATIONAL EXPRESSION"
def p_command_if_bad2(p):
'''command : IF relexpr THEN error'''
p[0] = "INVALID LINE NUMBER IN THEN"
#### FOR statement
def p_command_for(p):
'''command : FOR ID EQUALS expr TO expr optstep'''
p[0] = ('FOR',p[2],p[4],p[6],p[7])
def p_command_for_bad_initial(p):
'''command : FOR ID EQUALS error TO expr optstep'''
p[0] = "BAD INITIAL VALUE IN FOR STATEMENT"
def p_command_for_bad_final(p):
'''command : FOR ID EQUALS expr TO error optstep'''
p[0] = "BAD FINAL VALUE IN FOR STATEMENT"
def p_command_for_bad_step(p):
'''command : FOR ID EQUALS expr TO expr STEP error'''
p[0] = "MALFORMED STEP IN FOR STATEMENT"
#### Optional STEP qualifier on FOR statement
def p_optstep(p):
'''optstep : STEP expr
| empty'''
if len(p) == 3:
p[0] = p[2]
else:
p[0] = None
#### NEXT statement
def p_command_next(p):
'''command : NEXT ID'''
p[0] = ('NEXT',p[2])
def p_command_next_bad(p):
'''command : NEXT error'''
p[0] = "MALFORMED NEXT"
#### END statement
def p_command_end(p):
'''command : END'''
p[0] = ('END',)
#### REM statement
def p_command_rem(p):
'''command : REM'''
p[0] = ('REM',p[1])
#### STOP statement
def p_command_stop(p):
'''command : STOP'''
p[0] = ('STOP',)
#### DEF statement
def p_command_def(p):
'''command : DEF ID LPAREN ID RPAREN EQUALS expr'''
p[0] = ('FUNC',p[2],p[4],p[7])
def p_command_def_bad_rhs(p):
'''command : DEF ID LPAREN ID RPAREN EQUALS error'''
p[0] = "BAD EXPRESSION IN DEF STATEMENT"
def p_command_def_bad_arg(p):
'''command : DEF ID LPAREN error RPAREN EQUALS expr'''
p[0] = "BAD ARGUMENT IN DEF STATEMENT"
#### GOSUB statement
def p_command_gosub(p):
'''command : GOSUB INTEGER'''
p[0] = ('GOSUB',int(p[2]))
def p_command_gosub_bad(p):
'''command : GOSUB error'''
p[0] = "INVALID LINE NUMBER IN GOSUB"
#### RETURN statement
def p_command_return(p):
'''command : RETURN'''
p[0] = ('RETURN',)
#### DIM statement
def p_command_dim(p):
'''command : DIM dimlist'''
p[0] = ('DIM',p[2])
def p_command_dim_bad(p):
'''command : DIM error'''
p[0] = "MALFORMED VARIABLE LIST IN DIM"
#### List of variables supplied to DIM statement
def p_dimlist(p):
'''dimlist : dimlist COMMA dimitem
| dimitem'''
if len(p) == 4:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
#### DIM items
def p_dimitem_single(p):
'''dimitem : ID LPAREN INTEGER RPAREN'''
p[0] = (p[1],eval(p[3]),0)
def p_dimitem_double(p):
'''dimitem : ID LPAREN INTEGER COMMA INTEGER RPAREN'''
p[0] = (p[1],eval(p[3]),eval(p[5]))
#### Arithmetic expressions
def p_expr_binary(p):
'''expr : expr PLUS expr
| expr MINUS expr
| expr TIMES expr
| expr DIVIDE expr
| expr POWER expr'''
p[0] = ('BINOP',p[2],p[1],p[3])
def p_expr_number(p):
'''expr : INTEGER
| FLOAT'''
p[0] = ('NUM',eval(p[1]))
def p_expr_variable(p):
'''expr : variable'''
p[0] = ('VAR',p[1])
def p_expr_group(p):
'''expr : LPAREN expr RPAREN'''
p[0] = ('GROUP',p[2])
def p_expr_unary(p):
'''expr : MINUS expr %prec UMINUS'''
p[0] = ('UNARY','-',p[2])
#### Relational expressions
def p_relexpr(p):
'''relexpr : expr LT expr
| expr LE expr
| expr GT expr
| expr GE expr
| expr EQUALS expr
| expr NE expr'''
p[0] = ('RELOP',p[2],p[1],p[3])
#### Variables
def p_variable(p):
'''variable : ID
| ID LPAREN expr RPAREN
| ID LPAREN expr COMMA expr RPAREN'''
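    # A variable is represented as a tuple (name, dim1, dim2); dimensions are None when absent.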
if len(p) == 2:
p[0] = (p[1],None,None)
elif len(p) == 5:
p[0] = (p[1],p[3],None)
else:
p[0] = (p[1],p[3],p[5])
#### Builds a list of variable targets as a Python list
def p_varlist(p):
'''varlist : varlist COMMA variable
| variable'''
if len(p) > 2:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
#### Builds a list of numbers as a Python list
def p_numlist(p):
'''numlist : numlist COMMA number
| number'''
if len(p) > 2:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
#### A number. May be an integer or a float
def p_number(p):
'''number : INTEGER
| FLOAT'''
p[0] = eval(p[1])
#### A signed number.
def p_number_signed(p):
'''number : MINUS INTEGER
| MINUS FLOAT'''
p[0] = eval("-"+p[2])
#### List of targets for a print statement
#### Returns a list of tuples (label,expr)
def p_plist(p):
'''plist : plist COMMA pitem
| pitem'''
if len(p) > 3:
p[0] = p[1]
p[0].append(p[3])
else:
p[0] = [p[1]]
def p_item_string(p):
'''pitem : STRING'''
p[0] = (p[1][1:-1],None)
def p_item_string_expr(p):
'''pitem : STRING expr'''
p[0] = (p[1][1:-1],p[2])
def p_item_expr(p):
'''pitem : expr'''
p[0] = ("",p[1])
#### Empty
def p_empty(p):
'''empty : '''
#### Catastrophic error handler
def p_error(p):
if not p:
print("SYNTAX ERROR AT EOF")
bparser = yacc.yacc()
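#### Parses a BASIC program string; returns a dict mapping line numbers to statements,
#### or None if a syntax error was encountered.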
def parse(data,debug=0):
bparser.error = 0
p = bparser.parse(data,debug=debug)
if bparser.error: return None
return p
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/streamtube/colorbar/_tickvals.py
|
1
|
import _plotly_utils.basevalidators
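# Validator for the 'tickvals' data-array property of streamtube.colorbar.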
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="tickvals", parent_name="streamtube.colorbar", **kwargs
):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
manthey/girder
|
refs/heads/master
|
plugins/autojoin/plugin_tests/autojoin_test.py
|
3
|
from girder.constants import AccessType
from girder.models.group import Group
from girder.models.user import User
from tests import base
import json
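# Enable the autojoin plugin and start/stop the embedded Girder server for this test module.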
def setUpModule():
base.enabledPlugins.append('autojoin')
base.startServer()
def tearDownModule():
base.stopServer()
class AutoJoinTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@u.com' % num)
for num in [0, 1]]
    def testAutoJoin(self):
admin, user = self.users
# create some groups
g1 = Group().createGroup('g1', admin)
g2 = Group().createGroup('g2', admin)
g3 = Group().createGroup('g3', admin)
# set auto join rules
rules = [
{
'pattern': '@test.com',
'groupId': str(g1['_id']),
'level': AccessType.ADMIN
},
{
'pattern': '@example.com',
'groupId': str(g2['_id']),
'level': AccessType.READ
},
{
'pattern': '@example.com',
'groupId': str(g3['_id']),
'level': AccessType.WRITE
},
]
params = {
'list': json.dumps([{'key': 'autojoin', 'value': rules}])
}
resp = self.request('/system/setting', user=admin, method='PUT', params=params)
self.assertStatusOk(resp)
# create users
user1 = User().createUser('user1', 'password', 'John', 'Doe', 'user1@example.com')
user2 = User().createUser('user2', 'password', 'John', 'Doe', 'user2@test.com')
user3 = User().createUser('user3', 'password', 'John', 'Doe', 'user3@test.co')
# check correct groups were joined
self.assertEqual(user1['groups'], [g2['_id'], g3['_id']])
self.assertEqual(user2['groups'], [g1['_id']])
self.assertEqual(user3['groups'], [])
# check correct access levels
g1 = Group().load(g1['_id'], force=True)
g3 = Group().load(g3['_id'], force=True)
self.assertIn(
{'id': user2['_id'], 'level': AccessType.ADMIN, 'flags': []},
g1['access']['users'])
self.assertIn(
{'id': user1['_id'], 'level': AccessType.WRITE, 'flags': []},
g3['access']['users'])
|