repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
lidiamcfreitas/FenixScheduleMaker | refs/heads/master | oldFiles/project-env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py | 2057 | try:
    # Python 3.2+: the stdlib ssl module provides both names natively.
    from ssl import CertificateError, match_hostname
except ImportError:
    try:
        # Backport of the function from a pypi module
        from backports.ssl_match_hostname import CertificateError, match_hostname
    except ImportError:
        # Our vendored copy
        from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
|
jrauch/eventnotifier | refs/heads/master | dn.py | 1 | import time
from Foundation import NSObject, NSAppleScript, NSBundle, NSDistributedNotificationCenter
from AppKit import NSWorkspace, NSWorkspaceDidWakeNotification, NSWorkspaceDidLaunchApplicationNotification, NSWorkspaceDidTerminateApplicationNotification
from PyObjCTools import AppHelper


class NotificationHandler(NSObject):
    # Objective-C selector ("handler:") invoked for every notification
    # delivered to this observer; just prints the notification's name.
    def handler_(self, aNotification):
        print aNotification.name()
        return


# Observe ALL distributed notifications (name=None, object=None) and run the
# console event loop forever, printing each notification name as it arrives.
dnc = NSDistributedNotificationCenter.defaultCenter()
no = NotificationHandler.new()
dnc.addObserver_selector_name_object_(no, "handler:", None, None)
AppHelper.runConsoleEventLoop()
|
MoroGasper/client | refs/heads/master | client/patch.py | 1 | #!/usr/bin/env python
# encoding: utf-8
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import sys
import time
import stat
import yaml
import gevent
import struct
import binascii
import tarfile
import subprocess
import hashlib
import traceback
import tempfile
import glob
import bz2
import json
import requests
import bsdiff4
import shutil
from gevent import Timeout
from cStringIO import StringIO
from gevent.pool import Group
from gevent.event import Event
from gevent.lock import Semaphore
from gevent.threadpool import ThreadPool
from Crypto.PublicKey import DSA
from requests.exceptions import ConnectionError
from collections import defaultdict
from dulwich.repo import Repo
from dulwich.client import get_transport_and_path
from . import current, logger, input, settings, reconnect, db, interface, loader, event
from .plugintools import Url
from .config import globalconfig
from .scheme import transaction, Table, Column, filter_objects_callback
from .api import proto
# Patch subsystem configuration defaults and module-level state.
config = globalconfig.new('patch')
config.default('branch', 'stable', str)                       # release channel
config.default('patchtest', False, bool)                      # developer test mode
config.default('restart', None, str, allow_none=True)         # remembered restart decision
config.default('patch_check_interval', 3600, int)             # seconds between checks
log = logger.get("patch")
patch_group = Group()            # greenlets currently running patch jobs
patch_all_lock = Semaphore()     # serializes concurrent patch_all() runs
git_threadpool = ThreadPool(10)  # pool for blocking git operations
test_mode = False
module_initialized = Event()     # set once module init has completed
# update patch interval
@event.register('config:loaded')
def on_config_loaded(e):
    # Reset the check interval to one hour whenever the config is (re)loaded.
    config.patch_check_interval = 3600
# get our platform
# NOTE: the module-level name `platform` ends up holding a short platform
# string; inside the linux2 branch the stdlib `platform` module briefly
# shadows that name before it is reassigned to the final string.
platform = sys.platform
if platform == 'linux2':
    import platform
    if platform.machine() == 'x86_64':
        platform = 'linux-amd64'
    else:
        platform = 'linux-i386'
elif platform == "darwin":
    platform = "macos"
# events
@config.register('branch')
def branch_changed(value, old):
    # Switching the release channel triggers a full patch run shortly after.
    if value != old:
        gevent.spawn_later(5, patch_all)
# signature functions
def check_signature(source, data):
    """Verify the DSA signature appended to ``data`` and return the payload.

    Trailer layout (from the end): ... payload | sig_part1 | sig_part2 |
    len1 | len2, where the last two bytes give the sizes of the two
    signature components. Raises RuntimeError when verification fails.
    """
    sizex, sizey = struct.unpack_from('BB', buffer(data, len(data)-2))
    end = len(data) - sizex - sizey-2
    # Extract the two signature integers from their hex byte strings.
    signature = [int(binascii.hexlify(i), 16) for i in struct.unpack_from('{}s{}s'.format(sizex, sizey), buffer(data, end, sizex+sizey))]
    data = buffer(data, 0, end)
    checksum = hashlib.sha384(data).digest()
    if not source.dsa_key.verify(checksum, signature):
        raise RuntimeError("External signature could not be verified.")
    return data
# bindiff patch (outdated?!)
def patch_source_bindiff(source, patch):
    """Apply a custom bindiff patch to ``source`` and return the result.

    Patch header: crc(source), crc(dest), #control-triples, len(diff),
    len(dest), len(extra); followed by the diff block, extra block and the
    control triples consumed by ``bsdiff4.core.patch``. CRCs of input and
    output are asserted.
    """
    start = "iiQQQQ"
    size = struct.calcsize(start)
    mv = buffer(patch)
    crcsource, crcdest, lencontrol, lendiff, lendest, lenextra = struct.unpack_from(start, mv)
    unpacked = struct.unpack_from("{}s{}s".format(lendiff, lenextra) + "q"*lencontrol, mv, size)
    fc = unpacked[2:]
    # Group the flat list of q-values into (x, y, z) control triples.
    control = [tuple(fc[i:i+3]) for i in xrange(0, lencontrol, 3)]
    assert binascii.crc32(source) == crcsource
    dest = bsdiff4.core.patch(source, lendest, control, unpacked[0], unpacked[1])
    assert binascii.crc32(dest) == crcdest
    return dest
# http request functions
def patch_get(url, *args, **options):
    """HTTP GET on ``url`` (fragment stripped, trailing slash removed) with
    each of ``args`` appended as an additional path segment."""
    base = url.split('#', 1)[0].rstrip('/')
    segments = [base] + [str(arg) for arg in args]
    return requests.get("/".join(segments), **options)
def patch_post(url, *args, **options):
    """HTTP POST on ``url`` (fragment stripped, trailing slash removed) with
    each of ``args`` appended as an additional path segment."""
    base = url.split('#', 1)[0].rstrip('/')
    segments = [base] + [str(arg) for arg in args]
    return requests.post("/".join(segments), **options)
# the big bad patch process
def rmtree_onerror(func, path, exc_info):
    """Error handler for ``shutil.rmtree``.

    Based on http://www.voidspace.org.uk/downloads/pathutils.py
    If the failure was caused by a missing write permission, the permission
    is added and the failed operation retried; any other error is re-raised.
    Usage : ``shutil.rmtree(path, onerror=rmtree_onerror)``
    """
    if os.access(path, os.W_OK):
        # Not a permission problem -> propagate the original exception.
        raise
    os.chmod(path, stat.S_IWUSR)
    func(path)
def really_clean_repo(path):
    """Delete ``path`` recursively, retrying up to 10 times.

    A missing path (errno 2 / ENOENT) counts as success. If all attempts
    fail, the last captured exception is re-raised.
    """
    last_e = None
    for i in xrange(10):
        try:
            shutil.rmtree(path, onerror=rmtree_onerror)
            break
        except OSError as e:
            if e.errno == 2:
                # already gone -> nothing left to do
                break
            last_e = e
        except BaseException as e:
            last_e = e
        # brief pause before retrying (files may still be held open)
        gevent.sleep(0.1)
    else:
        raise last_e
class BasicPatchWorker(object):
    """Base class for patch workers operating on a single source."""

    def __init__(self, source):
        # Files that could not be handled in place, grouped by action name
        # (e.g. 'replace', 'delete'); processed externally after restart.
        self.external = defaultdict(list)
        self.source = source
class GitWorker(BasicPatchWorker):
    """Patch worker that updates a source by fetching its git repository."""

    def __init__(self, source):
        BasicPatchWorker.__init__(self, source)

    def patch(self):
        # A git "patch" is simply a fetch of the remote refs.
        return self.fetch()

    def fetch(self, retry=False):
        """Fetch the remote repository; returns True when the version changed.

        On a first failure the local repo is assumed broken, deleted and the
        fetch retried once. If deletion itself fails, a fresh ``-tmpN`` base
        path is chosen instead.
        """
        def on_error(e):
            self.source.log.exception('failed fetching repository')
            with transaction:
                self.source.last_error = 'failed fetching repository: {}'.format(e)
            return False
        old_version = self.source.version
        try:
            repo = self.source._open_repo()
            if repo is None:
                # no usable repo yet -> initialize a bare one at basepath
                p = self.source.basepath
                if not os.path.exists(p):
                    os.makedirs(p)
                repo = Repo.init_bare(p)
            client, host_path = get_transport_and_path(self.source.url)
            remote_refs = client.fetch(host_path, repo)
            repo["HEAD"] = remote_refs["HEAD"]
        except (KeyboardInterrupt, SystemExit, gevent.GreenletExit):
            self.source.unlink() # it is possible that the clone process is broken when the operation was interrupted
            raise
        except BaseException as e:
            if retry:
                # second failure -> give up and record the error
                return on_error(e)
            self.source.log.exception('failed fetching repository; deleting repo')
            del repo
            try:
                really_clean_repo(self.source.basepath)
            except:
                # Could not delete the broken repo; fall back to an unused
                # "<basepath>-tmpN" directory and continue there.
                m = re.match('^(.+)-tmp(\d+)$', self.source.basepath)
                if m:
                    basepath = m.group(1)
                    tmp = int(m.group(2)) + 1
                else:
                    basepath = self.source.basepath
                    tmp = 1
                while True:
                    p = '{}-tmp{}'.format(basepath, tmp)
                    if not os.path.exists(p):
                        break
                    tmp += 1
                self.source.log.error('failed deleting broken repo, trying alternative base path {}'.format(p))
                self.source.basepath = p
            return self.fetch(True)
            #except BaseException as e:
            #    return on_error(e)
        else:
            self.source.log.debug('fetch complete; fetched ({})'.format(', '.join(remote_refs)))
            new_version = self.source.version
            if old_version == new_version:
                return False
            self.source.log.info('updated branch {} from {} to {}'.format(self.source.get_branch(), old_version, new_version))
            return True
class PatchWorker(BasicPatchWorker):
    """Patch worker driven by the HTTP patch server.

    Downloads a signed, bz2-compressed tar archive describing the update and
    applies it file by file (delete lists, binary diffs, new files). Files
    that cannot be modified in place are queued in ``self.external`` for the
    post-restart rename scripts.
    """

    def __init__(self, source):
        BasicPatchWorker.__init__(self, source)
        self.new_version = None   # version string reported by the server
        self.patch_data = None    # decompressed tar payload, once downloaded
        self.backups = list()     # paths of created ``*.old`` backup files

    def patch(self):
        """Fetch and apply an update; True when a patch was applied."""
        try:
            self.get()
            if self.patch_data:
                self.apply()
                return True
        except ConnectionError as e:
            self.source.log.error('patch error: {}'.format(e))
        return False

    def get(self, retry=1):
        """Ask the server for a patch from the current version to HEAD.

        Polls up to 4 times (status != 201 means "patch not ready yet").
        Verifies the signature and decompresses the payload into
        ``self.patch_data``.
        """
        resp = patch_get(self.source.url, "check", self.source.id, self.source.get_branch(), self.source.version)
        resp.raise_for_status()
        if resp.content == "HEAD":
            # already up to date
            return
        self.old_version = self.source.version
        if '<html>' in resp.content:
            raise ValueError('invalid response from patch server')
        self.new_version = resp.content
        self.source.log.info('found new version: {}, current version: {}'.format(self.new_version, self.old_version))
        resp = patch_get(self.source.url, "download", "patch", self.source.id, self.source.version, self.new_version, allow_redirects=False)
        resp.raise_for_status()
        if resp.status_code == 200 and resp.headers["content-type"] == "application/force-download":
            data = resp.content
        else:
            if resp.status_code != 201:
                self.source.log.debug("patch not ready (retry {}): {}".format(retry, resp.content))
                if retry <= 4: # wait at least 10 seconds for the patch
                    gevent.sleep(retry)
                    return self.get(retry + 1)
                return
            # 201: patch was built and is available at the redirect target
            resp = requests.get(resp.headers["Location"])
            resp.raise_for_status()
            data = resp.content
        self.patch_data = check_signature(self.source, data)
        self.patch_data = bz2.decompress(self.patch_data)

    # apply functions
    def apply(self):
        """Walk the downloaded tar and dispatch each member to a handler."""
        buf = StringIO()
        buf.write(self.patch_data)
        buf.seek(0)
        tar = tarfile.open(None, "r", buf)
        for info in tar.getmembers():
            data = tar.extractfile(info)
            if data is None:
                # directory entries have no file object
                continue
            data = data.read()
            if info.name == ".delete":
                self.apply_delete(data)
            elif info.name.startswith("patch/"):
                self.apply_patch(data, info.name)
            else:
                self.apply_new(data, info.name)
        self.source.log.info("updated from {} to {}".format(self.old_version, self.new_version[:7]))
        with transaction:
            self.source.version = self.new_version[:7]

    def add_external_new(self, file, data):
        """Write ``<file>.new`` and schedule an external replace on restart.

        Returns True on a fatal error (fallback file could not be written).
        """
        try:
            with open(file+'.new', 'wb') as f:
                f.write(data)
        except (IOError, OSError):
            self.source.log.exception('error writing fallback file for {}'.format(file))
            # FATAL
            return True
        self.external['replace'].append(file)

    def add_external_delete(self, file):
        # Schedule the file for deletion by the external restart script.
        self.external['delete'].append(file)

    def create_backup(self, file, data):
        # create backup
        try:
            with open(file+".old", "wb") as f:
                f.write(data)
        except (OSError, IOError):
            self.source.log.warning("error creating backup for {}".format(file))
        self.backups.append(file+'.old')

    def apply_delete(self, data):
        """Delete every file named in ``data`` (one path per line)."""
        # files to be deleted
        for name in data.splitlines():
            path = self.source.relpath(name.strip())
            if path is None:
                continue
            # also match compiled siblings, e.g. foo.py -> foo.py?
            files = [path] + glob.glob(path+"?")
            for file in files:
                try:
                    if not os.path.exists(file):
                        continue
                    self.source.log.info("deleting file {}".format(file))
                    # delete .py? files
                    if file.endswith('.py'):
                        for f in glob.glob(file+'?'):
                            try:
                                os.unlink(f)
                            except:
                                self.add_external_delete(f)
                    # create backup
                    try:
                        with open(file, 'rb') as f:
                            old_data = f.read()
                        self.create_backup(file, old_data)
                    except:
                        traceback.print_exc()
                        self.source.log.warning("error in backup creation of {} will be ignored.".format(file))
                    # delete file
                    os.unlink(file)
                except (IOError, OSError) as e:
                    if e.errno == 2:
                        self.source.log.warning("file to delete not found: {}".format(name))
                    else:
                        self.source.log.critical("error deleting file {}".format(name))
                        self.add_external_delete(file)

    def apply_new(self, data, name):
        """Create or overwrite the file ``name`` with ``data``."""
        file = self.source.relpath(name)
        if file is None:
            return False
        # create directories
        path = os.path.dirname(file)
        if not os.path.exists(path):
            os.makedirs(path)
        # create backup
        try:
            with open(file, 'rb') as f:
                old_data = f.read()
        except (IOError, OSError):
            # no previous version -> nothing to back up
            pass
        else:
            self.create_backup(file, old_data)
        # create new file
        try:
            self.source.log.info("creating file {}".format(file))
            with open(file, "wb") as f:
                f.write(data)
        except (IOError, OSError):
            self.source.log.exception("error writing file {}".format(file))
            self.add_external_new(file, data)

    def apply_original(self, file):
        """replaces a corrupt file with the original
        """
        if file.startswith("patch/"):
            file = file[6:]
        resp = patch_get(self.source.url, 'getfile', self.source.id, self.new_version, params=dict(name=file))
        resp.raise_for_status()
        data = check_signature(self.source, resp.content)
        return self.apply_new(data, file)

    def apply_patch(self, patch_data, name):
        """Apply a binary diff (``.bindiff`` or ``.bsdiff4``) to one file.

        Falls back to downloading the full original file when the local
        source is missing or a CRC check fails.
        """
        patch_file, format = os.path.splitext(name)
        file = self.source.relpath(patch_file)
        if file is None:
            return
        if format == ".bindiff":
            patch_func = patch_source_bindiff
        elif format == ".bsdiff4":
            patch_func = bsdiff4.patch
        else:
            self.source.log.error("unknown patch format {} for file {}".format(format, name))
            return True
        try:
            with open(file, "rb") as f:
                old_data = f.read()
        except (OSError, IOError) as e:
            self.source.log.exception("error opening source file {}".format(file))
            return self.apply_original(patch_file)
        # create backup
        self.create_backup(file, old_data)
        try:
            new_data = patch_func(old_data, patch_data)
        except AssertionError:
            self.source.log.error("crc check of source or destination file {} failed. downloading original".format(name))
            return self.apply_original(patch_file)
        # the file may be temporarily locked; retry twice before giving up
        for i in xrange(1, 3):
            try:
                self.source.log.info("apply patch (retry {}): {}".format(i, file))
                with open(file, 'wb') as f:
                    f.write(new_data)
                break
            except (IOError, OSError) as e:
                self.source.log.warning("error replacing {}: {}".format(file, e))
                gevent.sleep(0.1)
        else:
            self.add_external_new(file, new_data)
# patch tasks
def patch_one(patches, source, timeout=180):
    """Run one source's patch worker; append it to ``patches`` on success.

    All failures are logged; unexpected exceptions are stored on the source
    as ``last_error``. The whole operation is bounded by ``timeout`` seconds.
    """
    with source.lock:
        if source.get_config_url() is not None:
            source.get_config_url().update()
            # the config update may have deleted this source
            if source._table_deleted:
                return
        try:
            p = source.get_worker()
            with Timeout(timeout):
                if p.patch():
                    patches.append(p)
        except Timeout:
            source.log.error('patch function timed out')
        except requests.ConnectionError as e:
            source.log.error('patch error: {}'.format(e))
        except (KeyboardInterrupt, SystemExit, gevent.GreenletExit):
            raise
        except BaseException as e:
            with transaction:
                source.last_error = str(e)
            source.log.exception('patch exception')
def patch_all(timeout=180, external_loaded=True, source_complete_callback=None):
    """Update all enabled sources concurrently, then finalize the patches.

    First refreshes every distinct config URL, then spawns one greenlet per
    enabled source. ``source_complete_callback`` (if given) is invoked for
    each source when its patch attempt finishes, success or not.
    """
    with patch_all_lock:
        # check config urls
        log.debug('checking config urls')
        todo = list()
        for source in sources.values():
            config_url = source.get_config_url()
            if config_url is not None and config_url not in todo:
                todo.append(config_url)
        group = Group()
        for config_url in todo:
            g = group.spawn(config_url.update)
            patch_group.add(g)
        group.join()
        log.debug('updating repos')
        # check for updates
        patches = list()
        for source in sources.values():
            if source.enabled:
                def _patch(patches, source, timeout):
                    try:
                        patch_one(patches, source, timeout)
                    finally:
                        if source_complete_callback is not None:
                            source_complete_callback(source)
                g = group.spawn(_patch, patches, source, timeout)
                patch_group.add(g)
        group.join()
    finalize_patches(patches, external_loaded=external_loaded)
def patch_loop():
    """Background loop: periodically run ``patch_all``.

    Skips a cycle while a reconnect is in progress. Shutdown-style
    exceptions are re-raised; everything else is logged and the loop
    continues.
    """
    while True:
        gevent.sleep(config.patch_check_interval)
        try:
            if not reconnect.manager.reconnecting:
                patch_all()
        # fix: gevent.GreenletExit was listed twice in this tuple
        except (KeyboardInterrupt, SystemExit, gevent.GreenletExit):
            raise
        except:
            log.unhandled_exception('patch_loop')
# Files the running process could not modify; handled by the external
# restart scripts (see _external_rename_bat/_external_rename_sh).
pending_external = dict(replace=list(), delete=list(), deltree=list())
def finalize_patches(patches, external_loaded=True):
    """Collect external changes from applied patches and trigger a restart.

    NOTE(review): the original docstring claimed "returns True when app
    needs a restart", but the function always returns None; the restart is
    triggered directly via restart_app()/execute_restart().
    """
    if not patches:
        return
    for p in patches:
        pending_external['replace'] += p.external['replace']
        pending_external['delete'] += p.external['delete']
        pending_external['deltree'] += p.external['deltree']
    if pending_external['replace'] or pending_external['delete'] or pending_external['deltree'] or external_loaded or any(isinstance(p.source, CoreSource) for p in patches):
        log.info('applied patches need a restart')
        if external_loaded:
            # app is running. ask the user if we should restart
            restart_app()
        else:
            # currently in bootstrap. instant restart
            execute_restart()
    else:
        log.info('patchs applied on the fly')
# restart functions
def restart_app():
    """Decide how/when to restart the app after patches were applied.

    Honors a remembered decision in ``config.restart``; otherwise asks the
    user (now / later / after downloads / never). 'later' re-schedules this
    function in 10 minutes.
    """
    from . import download, core
    if download.strategy.has('patch'):
        # a restart is already scheduled after downloads finish
        return
    while True:
        if not core.global_status.files_working:
            # nothing is downloading -> restart immediately
            result = "now"
            break
        if config.restart is not None:
            if config.restart != "never":
                result = config.restart
                break
        elements = list()
        elements += [input.Text('Some new updates were installed. You have to restart Download.am to apply them.')]
        elements += [input.Text('Restart now?')]
        elements.append(input.Input('remember', 'checkbox', default=False, label='Remember decision?'))
        elements += [input.Choice('answer', choices=[
            {"value": 'now', "content": "Yes"},
            {"value": 'later', "content": "Ask me later"},
            {"value": 'after_download', "content": "When downloads are complete"},
            {"value": 'never', "content": "No"}
        ])]
        try:
            r = input.get(elements, type='patch', timeout=120)
            result = r.get('answer', 'later')
            if r.get('remember', False):
                config.restart = result
        except input.InputTimeout:
            log.warning('input timed out')
            result = 'later'
        except input.InputError:
            log.exception('input was aborted')
            result = 'later'
        except BaseException as e:
            log.exception('input was aborted due to exception: {}'.format(e))
            result = 'later'
        break
    if result == 'never':
        log.info('will not restart to apply the update')
    elif result == 'later':
        gevent.spawn_later(600, restart_app)
    elif result == 'now':
        gevent.sleep(0.5)
        execute_restart()
    elif result == 'after_download':
        if not download.strategy.has('patch'):
            # downloads need time to move to complete folder so sleep a short time
            download.strategy.off('patch', gevent.spawn_later, 10, execute_restart)
    else:
        raise NotImplementedError()
def execute_restart():
    """Restart the application, applying pending external file changes.

    When files still need to be replaced/deleted, a platform-specific helper
    script is generated that performs the changes and relaunches the app;
    otherwise the process is replaced directly.
    """
    replace = pending_external and pending_external['replace'] or list()
    delete = pending_external and pending_external['delete'] or list()
    deltree = pending_external and pending_external['deltree'] or list()
    if replace or delete or deltree:
        if platform == "win32":
            return _external_rename_bat(replace, delete, deltree)
        else:
            return _external_rename_sh(replace, delete, deltree)
    else:
        if platform == "macos":
            replace_app(sys.executable, *sys.argv)
        elif platform.startswith("linux"):
            replace_app(sys.executable, ' '.join(sys.argv))
        else:
            # windows: relaunch via cmd so the current process can exit
            cmd = 'cmd /c start "" "' + sys.executable + '"'
            argv = sys.argv[1:]
            if not module_initialized.is_set():
                # restarting during bootstrap: suppress browser and splash
                if '--no-browser' not in argv:
                    argv.append('--no-browser')
                if '--disable-splash' not in argv:
                    argv.append('--disable-splash')
            cmd += ' "' + '" "'.join(argv) + '"'
            replace_app(cmd)
def _external_rename_bat(replace, delete, deltree):
    """Write and launch a Windows .bat that applies pending file changes,
    relaunches the app and deletes itself."""
    code = list()
    code.append('@echo off')
    code.append('ping -n 3 127.0.0.1 >NUL') # dirty method to sleep 2 seconds
    for file in replace:
        code.append('move /y "{}" "{}"'.format(file+'.new', file))
    for file in delete:
        code.append('del /Q "{}"'.format(file))
    for file in deltree:
        code.append('del /S/Q "{}"'.format(file))
    argv = sys.argv[1:]
    if '--no-browser' not in argv:
        argv.append('--no-browser')
    if '--disable-splash' not in argv:
        argv.append('--disable-splash')
    cmd = '"' + '" "'.join([sys.executable] + argv) + '"'
    if not sys.__stdout__.isatty():
        # GUI build: detach via `start` so the bat window closes
        cmd = 'start "" '+cmd
    code.append(cmd)
    # the bat removes itself as its last action
    code.append('del /Q "%0"')
    print '\r\n'.join(code)
    tmp = tempfile.NamedTemporaryFile(suffix=".bat", delete=False)
    tmp.write('\r\n'.join(code))
    tmp.close()
    replace_app(tmp.name)
def _external_rename_sh(replace, delete, deltree):
    """Write and launch a POSIX shell script that applies pending file
    changes, relaunches the app and deletes itself."""
    code = list()
    code.append('sleep 2')
    for file in replace:
        file = file.replace("'", "\\'")
        code.append("mv '{}' '{}'".format(file+'.new', file))
    for file in delete:
        file = file.replace("'", "\\'")
        code.append("rm -f '{}'".format(file))
    for file in deltree:
        file = file.replace("'", "\\'")
        code.append("rm -rf '{}'".format(file))
    # the script removes itself before relaunching
    code.append('rm "$0"')
    if platform == "macos":
        code.append('open {}'.format(settings.app_dir))
    else:
        # this code is only tested with console version
        code.append("'{}' '{}'".format(sys.executable, "' '".join(sys.argv)))
    print '\n'.join(code)
    with tempfile.NamedTemporaryFile(suffix=".sh", delete=False) as tmp:
        tmp.write('\n'.join(code))
    replace_app("/bin/sh", tmp.name)
def replace_app(cmd, *args):
    """Terminate this process and hand control to ``cmd``.

    On win32 the new process is spawned detached (CREATE_NO_WINDOW,
    0x08000000) and we exit; on other platforms the process image is
    replaced via ``os.execl``. Runs the loader/exit hooks first.
    """
    args = list(args)
    if platform == 'macos':
        from PyObjCTools import AppHelper
        AppHelper.stopEventLoop()
        # prefer the py2app __boot__ script over the loader entry point
        aboot = args[0].replace('loader_darwin', '__boot__')
        if os.path.exists(aboot):
            args[0] = aboot
    elif platform == 'linux':
        os.chdir(settings.app_dir)
    try:
        if platform != "macos":
            loader.terminate()
    finally:
        # run atexit-style cleanup before replacing the process
        if hasattr(sys, 'exitfunc'):
            sys.exitfunc()
        #os.chdir(settings.app_dir)
        if platform == 'win32':
            subprocess.Popen(cmd, creationflags=0x08000000)
        else:
            os.execl(cmd, cmd, *args)
        sys.exit(0)
# file iterator classes
class HddIterator(object):
    """Iterate files below ``path`` on disk, yielding HddFile objects.

    A regular-file ``path`` yields exactly that file; otherwise the
    directory entries are listed and, when ``walk`` is true, subdirectories
    are descended recursively.
    """

    def __init__(self, path, walk):
        self.path = path
        self.walk = walk

    def __iter__(self):
        if os.path.isfile(self.path):
            dirname, basename = os.path.split(self.path)
            yield HddFile(dirname, basename)
            return
        for entry in os.listdir(self.path):
            full = os.path.join(self.path, entry)
            if self.walk and os.path.isdir(full):
                for item in HddIterator(full, True):
                    yield item
            elif os.path.isfile(full):
                yield HddFile(self.path, entry)
class HddFile(object):
    """A file on disk, addressed by its directory and file name."""

    def __init__(self, path, name):
        self.name = name
        self.path = path

    def get_contents(self):
        """Return the raw file bytes (yields to other greenlets first)."""
        gevent.sleep(0)
        with open(os.path.join(self.path, self.name), 'rb') as f:
            return f.read()
class GitIterator(object):
    """Iterate the files of a git tree, yielding GitFile objects.

    Only entries whose on-disk path starts with ``startswith`` are yielded;
    contents are read directly from the object store (no checkout needed).
    """

    def __init__(self, repo, tree, startswith):
        self.repo = repo
        self.tree = tree
        self.startswith = startswith

    def __iter__(self):
        for entry in self.repo.object_store.iter_tree_contents(self.tree):
            path = entry.in_path(self.repo.path).path
            if platform == 'win32':
                # dulwich paths use '/', normalize for prefix comparison
                path = path.replace('/', os.sep)
            if not path.startswith(self.startswith):
                continue
            # cooperative yield; reading blobs can be slow
            gevent.sleep(0)
            yield GitFile(path, self.repo[entry.sha].as_raw_string())
class GitFile(object):
    """An in-memory file extracted from a git tree."""

    def __init__(self, path, contents):
        self.contents = contents
        self.path, self.name = os.path.split(path)

    def get_contents(self):
        """Return the raw blob data."""
        return self.contents
# source classes
sources = dict()      # source id -> source instance (Table collection)
config_urls = dict()  # config url -> ConfigUrl instance
class ConfigUrl(object):
    """A remote YAML document listing patch sources (name -> repo url).

    ``update()`` synchronizes the local source registry with the document:
    new sources are added, changed URLs are applied, and sources that
    disappeared from the document are erased.
    """

    def __init__(self, url):
        self.url = url
        self.lock = Semaphore()
        self.last_update = None  # timestamp of the last update attempt

    @property
    def log(self):
        # lazily created child logger named after this url
        if not hasattr(self, '_log'):
            self._log = logger.get('patch.config_url.{}'.format(self.url))
        return self._log

    def update(self):
        """Fetch and apply the config document, at most once per minute."""
        locked = self.lock.locked()
        with self.lock:
            if locked:
                # another greenlet just finished this update; don't repeat it
                return
            if self.last_update is not None and time.time() - self.last_update < 60:
                return
            try:
                self._update()
            except requests.ConnectionError as e:
                self.log.error('update error: {}'.format(e))
            finally:
                self.last_update = time.time()

    def _update(self):
        found_sources = list()
        resp = requests.get(self.url, stream=True)
        try:
            resp.raise_for_status()
            # NOTE(review): yaml.load on remote data is unsafe (arbitrary
            # object construction); yaml.safe_load would be preferable.
            data = yaml.load(resp.raw)
        finally:
            resp.close()
        assert len(data.keys()) > 0
        group = Group()
        def _add_source(url):
            try:
                source = add_source(url, self.url)
            except:
                self.log.warning('error adding new repo {}'.format(url))
            else:
                found_sources.append(source)
        for name, url in data.iteritems():
            try:
                Url(url)
            except:
                self.log.warning('invalid patch source entry: {}'.format(url))
            try:
                source = sources[name]
            except KeyError:
                self.log.info('adding new repo {}'.format(url))
                group.spawn(_add_source, url)
            else:
                found_sources.append(source)
                if source.url != url:
                    # url changed: update the record and drop the local copy
                    source.log.info('changing url to {}'.format(url))
                    with transaction:
                        source.url = url
                    source.unlink()
        group.join()
        # erase sources owned by this config url that vanished from the doc
        for source in sources.values():
            if source.config_url == self.url and source not in found_sources:
                source.log.info('erasing repo')
                source.delete(True)
class BasicSource(Table):
    """Abstract base record for a patchable source.

    Persisted in the ``patch_source`` table and registered in the global
    ``sources`` collection. A source living in a directory that contains a
    ``.git`` checkout is considered to be in development mode and is never
    auto-patched.
    """
    _table_name = "patch_source"
    _table_collection = sources

    id = Column()
    enabled = Column(always_use_getter=True)
    last_error = Column(always_use_getter=True)

    def __init__(self, enabled=True, config_url=None, **kwargs):
        self.enabled = enabled
        self.config_url = config_url
        for k, v in kwargs.iteritems():
            setattr(self, k, v)
        self.last_error = None
        self.basepath = os.path.join(settings.external_plugins, self.id)
        self.lock = Semaphore()
        # ensure the ConfigUrl object exists for this source
        self.get_config_url()

    @property
    def log(self):
        # lazily created child logger named after this source id
        if not hasattr(self, '_log'):
            self._log = logger.get('patch.source.{}'.format(self.id))
        return self._log

    def get_config_url(self):
        """Return (and cache globally) the ConfigUrl of this source, or None."""
        if self.config_url is None:
            return None
        if self.config_url not in config_urls:
            config_urls[self.config_url] = ConfigUrl(self.config_url)
        return config_urls[self.config_url]

    def on_get_enabled(self, value):
        # development checkouts (a .git directory) are never auto-patched
        if os.path.exists(os.path.join(self.basepath, '.git')):
            return False
        return value

    def on_get_last_error(self, value):
        if value is not None:
            return value
        if os.path.exists(os.path.join(self.basepath, '.git')):
            return 'developement mode'
        return None

    def get_branch(self):
        """Pick the best matching branch: id-prefixed configured branch,
        plain configured branch, id-prefixed master, then master."""
        branches = self.branches
        for branch in (('{}-{}'.format(self.id, config.branch), config.branch), (config.branch, config.branch), ('{}-master'.format(self.id), 'master'), ('master', 'master')):
            if branch[0] in branches:
                return branch[1]
        return "master"
        #raise ValueError("repo has no master branch")

    # --- abstract interface, implemented by subclasses ---

    def iter_files(self, path=None):
        raise NotImplementedError()

    def get_repo_url(self):
        raise NotImplementedError()

    def get_worker(self):
        raise NotImplementedError()

    def check(self):
        raise NotImplementedError()

    def send_error(self, id, name, type, message, content):
        raise NotImplementedError()

    def unlink(self):
        raise NotImplementedError()

    def delete(self, erase):
        raise NotImplementedError()
class PublicSource(object):
    # Marker mixin used by PatchSource and GitSource; presumably flags
    # sources that are exposed via the API layer — confirm against callers.
    pass
class BasicPatchSource(BasicSource):
    """Source updated via the HTTP patch server (signed binary patches)."""

    def __init__(self, branches=None, version=None, **kwargs):
        BasicSource.__init__(self, **kwargs)
        self.branches = branches if branches else dict()
        # '0000000' means "never patched"
        self.version = version or '0'*7
        self.sent_errors = set()  # error ids already reported to the backend

    @property
    def dsa_key(self):
        # lazily construct the DSA public key from the stored signature tuple
        if not hasattr(self, '_dsa_key'):
            self._dsa_key = DSA.construct(self.sig)
        return self._dsa_key

    def get_repo_url(self):
        return '{}#{}'.format(self.url, self.id)

    def relpath(self, path):
        """Map an archive member name to an absolute path under basepath."""
        if path.startswith("patch/"):
            path = path[6:]
        return os.path.join(self.basepath, path)

    def get_worker(self):
        return PatchWorker(self)

    def check(self):
        """Refresh signature, contact and branch info from the server.

        A changed signature asks the user whether to trust it; declining
        disables the source and raises ValueError.
        """
        with self.lock:
            if not self.enabled:
                return
            with transaction:
                self.last_error = None
            try:
                resp = patch_get(self.url, 'expose', self.id)
                resp.raise_for_status()
                data = resp.json()
                data['sig'] = [long(n) for n in data['sig']]
                if data["sig"] != self.sig:
                    self.log.error("signature changed! this could be bad")
                    result = self.on_invalid_sig()
                    if result == "disable":
                        with transaction:
                            self.enabled = False
                        raise ValueError('invalid signature')
                    with transaction:
                        self.sig = data['sig']
                with transaction:
                    self.contact = data['contact']
                    self.branches = data['repo']
            except (KeyboardInterrupt, SystemExit, gevent.GreenletExit):
                raise
            except BaseException as e:
                with transaction:
                    self.last_error = str(e)
                raise

    def on_invalid_sig(self):
        """Ask the user how to react to a changed signature.

        Returns 'ignore' or 'disable'; timeouts/aborts default to 'disable'.
        """
        elements = list()
        elements += [input.Text('Signature of source {} changed.'.format(self.url))]
        elements += [input.Text('This could be an attempt to attack your computer. Please contact the author {}'.format(self.contact))]
        elements += [input.Choice('answer', choices=[
            {"value": 'ignore', "content": "Ignore and update signature?"},
            {"value": 'disable', "content": "Disable source? (very recommended)"},
        ])]
        try:
            r = input.get(elements, type='patch', timeout=120)
            return r['answer']
        except input.InputTimeout:
            self.log.warning('input timed out')
            return 'disable'
        except input.InputError:
            self.log.error('input was aborted')
            return 'disable'

    def send_error(self, id, name, type, message, content):
        """Report an error to the patch backend, retrying up to 10 times.

        Each error id is only ever sent once; DEV builds never report.
        """
        if self.version == 'DEV':
            return
        with self.lock:
            if id in self.sent_errors:
                return
            self.sent_errors.add(id)
            data = dict(
                id=id,
                version='{}:{}@{}'.format(platform, self.get_branch(), str(self.version)[:7]),
                name=name,
                type=type,
                message=message,
                content=content)
            for i in xrange(10):
                try:
                    resp = patch_post(self.url, 'log_error', data=data)
                    resp.raise_for_status()
                    self.log.info('sent error {} to backend'.format(id))
                    return
                except (KeyboardInterrupt, SystemExit, gevent.GreenletExit):
                    raise
                except requests.ConnectionError as e:
                    self.log.error('error sending error: {}'.format(e))
                except BaseException:
                    self.log.exception('error while sending error {} to backend'.format(id))
                gevent.sleep(10)
            self.log.error('giving up sending error {} to backend'.format(id))
class CoreSource(BasicPatchSource):
    """The application itself as a patch source (lives in settings.app_dir).

    Its version is derived from ``current.current`` and cannot be set; it is
    disabled entirely for DEV builds and can never be deleted.
    """
    version = Column(always_use_getter=True)

    def __init__(self, **kwargs):
        BasicPatchSource.__init__(self, **kwargs)
        self.basepath = settings.app_dir

    def on_get_version(self, value):
        # version is always the (shortened) current revision
        return current.current[:7]

    def on_set_version(self, value):
        # just ignore this call. version is set over current.py
        return None

    def on_get_enabled(self, value):
        if current.current == 'DEV':
            return False
        return BasicPatchSource.on_get_enabled(self, value)

    def get_branch(self):
        return config.branch

    def relpath(self, p):
        """Map an archive path to a local path inside the distribution.

        Paths outside the shipped "download.am" bundle are rejected (None).
        NOTE(review): ``sys.frozen`` only exists in frozen builds; on an
        unfrozen win32 run this raises AttributeError — confirm intended.
        """
        if self.id == "win32" and sys.frozen:
            if "download.am/" not in p:
                log.error("path {} in patchfile is not part of distribution.".format(p))
                return None
            return os.path.join(settings.app_dir, p.split("download.am/", 1)[1].replace("/", os.sep))
        elif self.id == "macos":
            if "download.am.app/" not in p:
                log.error("path {} in patchfile is not part of distribution.".format(p))
                return None
            after = p.rsplit("download.am.app/", 1)[1]
            return os.path.join(settings.app_dir, after)
        elif config["patchtest"]:
            return os.path.join(settings.app_dir, p.split("download.am/", 1)[1].replace("/", os.sep))
        else:
            # xxx should use git directly probably when using undistributed environment
            return None

    def delete(self, erase):
        raise RuntimeError('not allowed to delete core source')
class PatchSource(BasicPatchSource, PublicSource):
    """An external plugin source updated via the HTTP patch server."""
    id = Column(('db', 'api'))
    url = Column(('db', 'api'))
    config_url = Column(('db', 'api'))
    sig = Column('db')
    contact = Column(('db', 'api'))
    branches = Column(('db', 'api'))
    version = Column(('db', 'api'))
    enabled = Column(('db', 'api'), read_only=False, always_use_getter=True)
    last_error = Column(('db', 'api'))

    def __init__(self, **kwargs):
        BasicPatchSource.__init__(self, **kwargs)

    def on_get_version(self, value):
        # a missing basepath means nothing was ever installed
        if not os.path.exists(self.basepath):
            return '0'*7
        return value

    def iter_files(self, path=None, walk=False):
        path = self.basepath if path is None else os.path.join(self.basepath, path)
        return HddIterator(path, walk)

    def unlink(self):
        """Remove the local files; on failure defer to the restart script."""
        try:
            really_clean_repo(self.basepath)
        except:
            pending_external['deltree'].append(self.basepath)

    def delete(self, erase):
        if erase:
            self.unlink()
        with transaction:
            self.table_delete()
        self.log.info('deleted')
        gevent.spawn_later(1, restart_app)

    def send_error(self, id, name, type, message, content):
        # NOTE(review): if core_source.send_error is a bound method, passing
        # `self` as the first positional argument shifts all parameters by
        # one (self would land in `id`) — verify against the definition of
        # `core_source`, which is not visible in this chunk.
        return core_source.send_error(self, id, name, type, message, content)
class GitSource(BasicSource, PublicSource):
    """A source updated by fetching a (bare) git repository via dulwich.

    A ``.git`` directory under basepath indicates a development checkout;
    in that case ``_open_repo`` deliberately returns None so the source is
    treated as unmanaged (files are read straight from disk instead).
    """
    id = Column(('db', 'api'))
    url = Column(('db', 'api'))
    config_url = Column(('db', 'api'))
    branches = Column('api', always_use_getter=True)
    version = Column('api', always_use_getter=True)
    enabled = Column(('db', 'api'), read_only=False, always_use_getter=True)
    last_error = Column(('db', 'api'))

    def __init__(self, **kwargs):
        BasicSource.__init__(self, **kwargs)

    def _open_repo(self):
        """Open the managed bare repo, or None (dev checkout / no repo)."""
        if os.path.exists(os.path.join(self.basepath, '.git')):
            return None
        try:
            return Repo(self.basepath)
        except:
            return None

    def on_get_branches(self, value):
        repo = self._open_repo()
        if repo is None:
            return list()
        # strip the "refs/heads/" prefix, keep only the branch names
        return list(i.rsplit("/", 1)[1] for i in repo.get_refs() if i.startswith("refs/heads/"))

    def on_get_version(self, value):
        repo = self._open_repo()
        if repo is None:
            return '0'*7
        try:
            x = repo.get_refs()["refs/heads/" + self.get_branch()]
            return x[:7]
        except KeyError:
            return '0'*7

    def get_worker(self):
        return GitWorker(self)

    def get_repo_url(self):
        return self.url

    def check(self):
        # git sources have no signature/expose handshake
        pass

    # git sources cannot report errors to a backend
    send_error = None

    def iter_files(self, path='', walk=False):
        if os.path.exists(os.path.join(self.basepath, '.git')):
            # development checkout: iterate the working tree on disk
            path = self.basepath if path == '' else os.path.join(self.basepath, path)
            return HddIterator(path, walk)
        else:
            repo = self._open_repo()
            if repo is None:
                return list()
            tree = repo["refs/heads/"+self.get_branch()].tree
            return GitIterator(repo, tree, startswith=os.path.join(self.basepath, path))

    def unlink(self):
        """Remove the local repo; on failure defer to the restart script."""
        try:
            really_clean_repo(self.basepath)
        except:
            pending_external['deltree'].append(self.basepath)

    def delete(self, erase):
        with self.lock:
            if erase:
                self.unlink()
            with transaction:
                self.table_delete()
            self.log.info('deleted')
            gevent.spawn_later(1, restart_app)
############### get source file iterators
def get_file_iterator(source_name, path=None, walk=False):
    """Return a file iterator for the source named `source_name`.

    :param source_name: key into the module-level `sources` registry; when
        unknown, falls back to iterating the matching directory below
        ``settings.external_plugins`` directly from disk.
    :param path: optional sub path to iterate (default: the source root).
    :param walk: when True, iterate recursively.
    :returns: a file iterator (HddIterator, or whatever the source's
        ``iter_files`` returns).
    """
    kwargs = dict(walk=walk)
    if path is not None:
        kwargs['path'] = path
    try:
        source = sources[source_name]
    except KeyError:
        # Unknown source: iterate the on-disk plugin directory.
        # Bug fix: `path` may be None (the default) -- os.path.join(None)
        # raised TypeError here; only append it when actually given.
        parts = [settings.external_plugins, source_name]
        if path is not None:
            parts.append(path)
        kwargs['path'] = os.path.join(*parts)
        files = HddIterator(**kwargs)
    else:
        files = source.iter_files(**kwargs)
    return files
############### add sources
def add_config_source(url, config_url=None):
    """Register (or return the already registered) config source for `url`.

    Downloads and parses the YAML document to validate it before
    registering the url.

    :param url: location of a dlam-config.yaml document.
    :param config_url: must be None; config sources carry no separate
        config url.
    :raises ValueError: if `config_url` is given.
    :returns: the ConfigUrl object registered for `url`.
    """
    if config_url is not None:
        raise ValueError('config url not allowed on config sources')
    resp = requests.get(url, stream=True)
    try:
        resp.raise_for_status()
        data = yaml.load(resp.raw)
    except:
        # Bug fix: previously the error was only logged and control fell
        # through with `data` unbound, crashing below with a misleading
        # NameError.  Log, then propagate the real error to the caller.
        log.exception('error adding config source')
        raise
    finally:
        resp.close()
    assert len(data.keys()) > 0
    if url in config_urls:
        # already registered; deliberately not re-checking for updates here
        #config_urls[url].update()
        pass
    else:
        config_urls[url] = ConfigUrl(url)
        config_urls[url].update()
    return config_urls[url]
def add_git_source(url, config_url=None):
    """Register a new git-backed source.

    The source id is derived from the final path component of the repo
    url with its extension stripped (``.../myrepo.git`` -> ``myrepo``).

    :param url: git repository url.
    :param config_url: optional config url stored with the source.
    :raises ValueError: if no id can be derived or the name is taken.
    :returns: the newly created GitSource.
    """
    parsed = Url(url)
    try:
        filename = os.path.split(parsed.path)[1]
        name = os.path.splitext(filename)[0]
    except:
        raise ValueError('not a git url')
    if name in sources:
        raise ValueError('source with name {} already exists'.format(name))
    with transaction:
        return GitSource(id=name, url=url, config_url=config_url)
def add_patch_source(url, config_url=None):
    """Register one patch-server repo, or every repo of a server.

    When `url` carries no ``#fragment`` the server's repo list is fetched
    via its 'expose' endpoint and each repo is added recursively as
    ``url#repo_id`` (the 'win32'/'macos' platform repos are handled by
    the core source and skipped here).

    :param url: patch server url, optionally with ``#repo_id`` fragment.
    :param config_url: optional config url stored with the source.
    :raises ValueError: on invalid repo urls or duplicate source names.
    :returns: the new PatchSource, or None when nothing was added.
    """
    # normalize: ensure an http scheme and a trailing slash on bare hosts
    if not url.startswith('http'):
        url = 'http://{}'.format(url)
    if '#' not in url and not url.endswith('/'):
        url = '{}/'.format(url)
    u = Url(url)
    if not u.fragment:
        # try to add all repos from this server
        resp = patch_get(url.rstrip('/'), 'expose')
        resp.raise_for_status()
        try:
            data = resp.json()
            for id in data['repos']:
                if id not in sources and id not in ('win32', 'macos'):
                    try:
                        add_patch_source(url+'#'+id, False)
                    except BaseException as e:
                        log.error('failed adding repo {}: {}'.format(id, e))
            return
        except:
            raise ValueError('invalid repo url')
    if u.fragment in sources:
        raise ValueError('source with name {} already exists'.format(u.fragment))
    id = u.fragment
    try:
        resp = patch_get(url, 'expose', id)
        resp.raise_for_status()
        data = resp.json()
    except (ConnectionError, requests.HTTPError) as e:
        # unreachable or unknown repo: log and silently give up
        log.info("error checking for repo {} at {}: {}".format(id, url, e))
        return
    # the server may report a canonical name differing from the fragment
    id = data['name']
    if id in sources:
        raise ValueError('source with name {} already exists'.format(id))
    data = json.loads(resp.content)
    with transaction:
        return PatchSource(id=id, url=url, config_url=config_url, sig=data['sig'], contact=data['contact'], branches=data['repo'])
# Dispatch table: source type name -> factory function (used by add_source()).
source_types = dict(
    git=add_git_source,
    patch=add_patch_source,
    config=add_config_source)
def identify_source(url):
    """Guess the source type ('git', 'patch' or 'config') for `url` by probing it.

    :param url: user supplied url, possibly missing scheme or path.
    :returns: tuple ``(type, url)`` where `url` may have been rewritten;
        `type` is None when the url is unreachable or unidentifiable.
    """
    # repair url
    if '://' not in url:
        url = 'http://'+url
    if not Url(url).path:
        url = url+'/'
    # make deep request
    try:
        resp = requests.get(url, allow_redirects=False)
        resp.raise_for_status()
    except:
        # Bug fix: this returned bare None, but the caller tuple-unpacks
        # the result (`type, url = identify_source(url)`) and crashed with
        # TypeError.  Always return a 2-tuple.
        return None, url
    else:
        # check for git
        if url.endswith('.git'):
            return 'git', url
        # check for patch
        if '<h2>Add to Download.am</h2>' in resp.text:
            return 'patch', url
        # check for config
        if 'dlam-config.yaml' in url:
            return 'config', url
    # check direct dlam-config url
    if 'dlam-config.yaml' not in url:
        u = url.rstrip('/')+'/dlam-config.yaml'
        try:
            resp = requests.get(u, stream=True)
            try:
                resp.raise_for_status()
                data = yaml.load(resp.raw)
            finally:
                resp.close()
            assert len(data.keys()) > 0
            return 'config', u
        except:
            traceback.print_exc()
            pass
    # check patch subdomain
    u = Url(url)
    if not u.host.startswith('repo.'):
        u.host = 'repo.{}'.format(u.host)
        try:
            resp = requests.get(u.to_string())
            resp.raise_for_status()
            if '<h2>Add to Download.am</h2>' in resp.text:
                return 'patch', u.to_string()
        except:
            pass
    # NOTE(review): the message mentions "default git" but type None is
    # returned, which makes add_source() give up -- confirm intent.
    log.warning('could not identify source type. using default git')
    return None, url
def add_source(url, config_url=None, type=None):
    """Add a source, auto-detecting its type when none is given.

    :param url: repository / config url.
    :param config_url: optional config url handed to the factory.
    :param type: one of the `source_types` keys, or None to auto-detect.
    :returns: the created source object, or None when detection failed.
    """
    if type is None:
        type, url = identify_source(url)
        if type is None:
            return None
    factory = source_types[type]
    return factory(url, config_url)
# startup/shutdown
patch_loop_greenlet = None  # greenlet running the periodic patch loop; set in init()
core_source = None  # CoreSource for the client core itself; set in init()
def init():
    """Initialize the patch subsystem (generator yielding progress strings).

    Creates the core source, restores persisted sources from the database,
    removes stale external repos, adds default sources, runs one full
    update pass (restarting the app if required) and finally starts the
    background patch loop.
    """
    global patch_loop_greenlet
    global core_source
    # add core source
    # RSA public key components used to verify patches of the core source.
    sig = [
        14493609762890313342166277786717882067186706504725349899906780741747713356290787356528733464152980047783620946593111196306463577744063955815402148552860145629259653950818107505393643383587083768290613402372295707034951885912924020308782786221888333312179957359121890467597304281160325135791414295786807436357,
        1836340799499544967344676626569366761238237327637553699677615341837866857178638560803752775147141401436473176143062386392930849127511639810150938435062071285028855634164277748937448362731305104091415548874264676030905340846245037152836818535938439214826659048244377315288514582697466079356264083762738266643,
        89884656743115795873895609296394864029741047392531316591432509289601210992615631812974174607675153482641606235553368183778569185786977952044726620763937252233940116059625337686768538445873713070762889839480360220508177637118657209098549890835520224254015051271431737736621385544038152276933973262030194906397,
        1224239220300762038953555488069442663256999688439
    ]
    with transaction:
        core_source = CoreSource(id=platform, url=settings.patchserver, sig=sig, contact='contact@download.am')
    # load sources
    with transaction, db.Cursor() as c:
        aa = c.execute("SELECT * FROM patch_source")
        for a in aa.fetchall():
            try:
                id = json.loads(a['id'])
                data = json.loads(a['data'])
                # update old repo urls
                if 'url' in data and data['url'].startswith('http://patch.download.am'):
                    data['url'] = data['url'].replace('http://patch.download.am', 'http://repo.download.am')
                # git urls get a GitSource, anything else a PatchSource
                if 'url' in data and data['url'].endswith('.git'):
                    source = GitSource(id=id, **data)
                else:
                    source = PatchSource(id=id, **data)
                if source.enabled:
                    patch_group.spawn(source.check)
            except TypeError:
                log.critical("broken row: {}".format(a))
                traceback.print_exc()
    # delete useless repos
    # i.e. on-disk plugin dirs with no enabled source and no git checkout
    for extern in os.listdir(settings.external_plugins):
        if extern not in sources or not sources[extern].enabled:
            path = os.path.join(settings.external_plugins, extern)
            if os.path.isdir(path) and not os.path.exists(os.path.join(path, '.git')):
                log.info('deleting useless external repo {}'.format(path))
                try:
                    really_clean_repo(path)
                except:
                    pass
    default_sources = dict(
        downloadam='http://community.download.am/dlam-config.yaml'
    )
    if not test_mode:
        for id, url in default_sources.iteritems():
            if id not in sources and url not in config_urls:
                yield 'adding default repo {}'.format(id)
                try:
                    source = add_source(url)
                    if source is None:
                        continue
                except:
                    traceback.print_exc()
                else:
                    if isinstance(source, BasicSource) and source.enabled:
                        patch_group.spawn(source.check)
    # check and apply updates
    from gevent.queue import JoinableQueue
    y = JoinableQueue()
    complete = list()
    def source_complete_callback(source):
        # called once per finished source; wakes the progress loop below
        # when the last one is done
        complete.append(source)
        if len(complete) == len(sources):
            y.put('updating {} / {}'.format(len(complete), len(sources)))
    gevent.spawn(patch_all, 30, False, source_complete_callback=source_complete_callback)
    gevent.sleep(0.2)
    yield 'updating {} / {}'.format(len(complete), len(sources))
    # keep yielding progress while any check/update greenlet is still alive
    while len(patch_group):
        try:
            x = y.get(timeout=1)
        except:
            continue
        yield x
    patch_group.join()
    execute_restart()
    # start the patch loop
    patch_loop_greenlet = gevent.spawn(patch_loop)
def terminate():
    """Shut down the patch subsystem.

    Waits for running source checks, then kills the background patch loop
    (ignoring the AssertionError gevent raises for not-yet-started
    greenlets).
    """
    patch_group.join()
    if not patch_loop_greenlet:
        return
    try:
        patch_loop_greenlet.kill()
    except AssertionError:
        pass
# interface
@interface.register
class ExternalSource(interface.Interface):
    """RPC interface exposing source management under the "patch" namespace.

    NOTE: the functions below are interface handlers, not regular methods
    (no `self`).  Names such as ``add_source`` or ``patch_all`` used inside
    the bodies resolve to the *module-level* functions at call time --
    Python function bodies do not see the enclosing class scope, so these
    are delegations, not recursion.
    """
    name = "patch"
    @interface.protected
    def add_source(url=None):
        """Add a new source; on success schedule a patch run."""
        if add_source(url):
            gevent.spawn_later(1, patch_all)
            return True
        return False
    @interface.protected
    def modify_source(update=None, **filter):
        """Apply `update` to all public sources matching `filter`."""
        with transaction:
            filter_objects_callback([s for s in sources.values() if isinstance(s, PublicSource)], filter, lambda obj: obj.modify_table(update))
    def check_source(**filter):
        """Enable matching public sources and check them for updates."""
        def check(obj):
            with transaction:
                obj.enabled = True
            gevent.spawn(obj.check)
        filter_objects_callback([s for s in sources.values() if isinstance(s, PublicSource)], filter, check)
    def patch_all():
        """Trigger a full update run (delegates to module-level patch_all)."""
        patch_all()
    @interface.protected
    def remove_source(erase=True, **filter):
        """Delete matching public sources; erase their files when `erase`."""
        filter_objects_callback([s for s in sources.values() if isinstance(s, PublicSource)], filter, lambda obj: obj.delete(erase))
    @interface.protected
    def sync_sources(clients=None):
        """Push this client's source urls to the given sibling clients."""
        for source in sources.values():
            data = dict(url=source.get_repo_url())
            for client in clients:
                if client == settings.app_uuid:
                    continue
                proto.send('client', 'patch.add_source', payload=data, channel=client)
    def version():
        """Return the core source version."""
        return dict(version=core_source.version)
|
jamiebull1/geomeppy | refs/heads/master | geomeppy/geom/polygons.py | 1 | """Heavy lifting geometry for IDF surfaces."""
from collections import MutableSequence
from itertools import product
from math import atan2, pi
from typing import Any, List, Optional, Tuple, Union # noqa
from eppy.geometry.surface import area
from eppy.idf_msequence import Idf_MSequence # noqa
import numpy as np
from shapely import wkt
from shapely.geometry.polygon import Polygon as SPoly
from shapely.geometry.polygon import orient
from six.moves import zip
from .clippers import Clipper2D, Clipper3D
from .segments import Segment
from .transformations import align_face, invert_align_face
from .vectors import Vector2D, Vector3D
from ..utilities import almostequal
class Polygon(Clipper2D, MutableSequence):
    """Base class for 2D and 3D polygons.

    An ordered, mutable sequence of vertices plus shared geometry helpers.
    Subclasses provide `n_dims`, `vector_class`, `normal_vector` and `zs`.
    """

    @property
    def n_dims(self):
        """Number of coordinate dimensions (2 or 3); set by subclasses."""
        pass

    @property
    def vector_class(self):
        """Vertex type (Vector2D or Vector3D); set by subclasses."""
        pass

    @property
    def normal_vector(self):
        """Outward unit normal vector; implemented by subclasses."""
        pass

    def __init__(self, vertices):
        # type: (Any) -> None
        super(Polygon, self).__init__()
        self.vertices = [self.vector_class(*v) for v in vertices]
        self.as_2d = Polygon2D  # 2D counterpart class used by the clipper mixin

    def __repr__(self):
        # type: () -> str
        class_name = type(self).__name__
        return "{}({!r})".format(class_name, self.vertices)

    def __len__(self):
        # type: () -> int
        return len(self.vertices)

    def __delitem__(self, key):
        del self.vertices[key]

    def __getitem__(self, key):
        # type: (Union[int, slice]) -> Any
        return self.vertices[key]

    def __setitem__(self, key, value):
        self.vertices[key] = value

    def __add__(self, other):  # type: (Polygon) -> Union[None, Polygon]
        """Vertex-wise addition of two equal polygons, or translation by a vector."""
        if len(self) == len(other) and hasattr(other[0], "__len__"):
            # add together two equal polygons
            vertices = [v1 + v2 for v1, v2 in zip(self, other)]
        elif len(self[0]) == len(other):
            # translate by a vector
            vertices = [v + other for v in self]
        else:
            raise ValueError("Incompatible objects: %s + %s" % (self, other))
        return self.__class__(vertices)

    def __sub__(self, other):
        """Vertex-wise subtraction of two equal polygons, or translation by -vector."""
        if len(self) == len(other) and hasattr(other[0], "__len__"):
            # subtract two equal polygons
            vertices = [v1 - v2 for v1, v2 in zip(self, other)]
        elif len(self[0]) == len(other):
            # translate by a vector
            vertices = [v - other for v in self]
        else:
            # bug fix: the message used to print '+' for a subtraction
            raise ValueError("Incompatible objects: %s - %s" % (self, other))
        return self.__class__(vertices)

    def insert(self, key, value):
        self.vertices.insert(key, value)

    @property
    def area(self):
        # type: () -> np.float64
        """Surface area of the polygon (delegates to eppy's area())."""
        return area(self)

    @property
    def bounding_box(self):
        # type: () -> Polygon
        """Axis-aligned bounding box in the polygon's own plane.

        The face is rotated into an axis-aligned frame, boxed there, and the
        box is rotated back so it lies in the original plane.
        """
        aligned = align_face(self)
        top_left = Vector3D(min(aligned.xs), max(aligned.ys), max(aligned.zs))
        bottom_left = Vector3D(min(aligned.xs), min(aligned.ys), min(aligned.zs))
        bottom_right = Vector3D(max(aligned.xs), min(aligned.ys), min(aligned.zs))
        top_right = Vector3D(max(aligned.xs), max(aligned.ys), max(aligned.zs))
        bbox = Polygon3D([top_left, bottom_left, bottom_right, top_right])
        return invert_align_face(self, bbox)

    def buffer(self, distance=None, join_style=2):
        # type: (Optional[float], Optional[int]) -> Polygon2D
        """Returns a representation of all points within a given distance of the polygon.

        :param distance: Buffer distance (negative values shrink the polygon).
        :param join_style: The styles of joins between offset segments: 1 (round), 2 (mitre), and 3 (bevel).
        """
        s_poly = SPoly(self.vertices)
        # orient(..., sign=1.0) guarantees a counterclockwise exterior ring
        core = orient(s_poly.buffer(distance=distance, join_style=join_style), sign=1.0)
        return Polygon2D(core.boundary.coords)

    @property
    def centroid(self):
        # type: () -> Vector2D
        """The centroid of a polygon (mean of the vertex coordinates)."""
        return self.vector_class(
            sum(self.xs) / len(self), sum(self.ys) / len(self), sum(self.zs) / len(self)
        )

    @property
    def edges(self):
        # type: () -> List[Segment]
        """A list of edges represented as Segment objects."""
        vertices = self.vertices
        edges = [
            Segment(vertices[i], vertices[(i + 1) % len(self)])
            for i in range(len(self))
        ]
        return edges

    def invert_orientation(self):
        # type: () -> Polygon
        """Reverse the order of the vertices.

        This can be used to create a matching surface, e.g. the other side of a wall.

        :returns: A polygon.
        """
        return self.__class__(reversed(self.vertices))

    @property
    def is_convex(self):
        """True if the polygon's vertex list describes a strictly convex shape."""
        # bug fix: an unreachable `return False` used to follow this statement
        return is_convex_polygon(self.vertices_list)

    @property
    def points_matrix(self):
        # type: () -> np.ndarray
        """Matrix representing the points in a polygon, one row per vertex.

        Format::

            [[x1, y1, z1],
             [x2, y2, z2],
             ...
             [xn, yn, zn]]  # z column absent for 2D polygons

        """
        points = np.zeros((len(self.vertices), self.n_dims))
        for i, v in enumerate(self.vertices):
            points[i, :] = v.as_array(dims=self.n_dims)
        return points

    @property
    def vertices_list(self):
        # type: () -> List[Tuple[float, float, Optional[float]]]
        """A list of the vertices in the format required by pyclipper.

        :returns: A list of tuples like [(x1, y1), (x2, y2),... (xn, yn)].
        """
        return [pt.as_tuple(dims=self.n_dims) for pt in self.vertices]

    @property
    def xs(self):
        # type: () -> List[float]
        return [pt.x for pt in self.vertices]

    @property
    def ys(self):
        # type: () -> List[float]
        return [pt.y for pt in self.vertices]

    @property
    def zs(self):
        """Z coordinates; implemented by subclasses."""
        pass
class Polygon2D(Polygon):
    """Two-dimensional polygon."""
    n_dims = 2
    vector_class = Vector2D
    def __eq__(self, other):
        """Equality that also treats rotated vertex orderings as equal."""
        if self.__dict__ == other.__dict__:  # try the simple case first
            return True
        else:  # also cover same shape in different rotation
            # no leftover area after clipping means the outlines coincide;
            # matching normals then distinguish winding direction
            if self.difference(other):
                return False
            if almostequal(self.normal_vector, other.normal_vector):
                return True
        return False
    @property
    def normal_vector(self):
        # type: () -> Vector3D
        """Normal of the polygon as embedded in the z=0 plane."""
        as_3d = Polygon3D((v.x, v.y, 0) for v in self)
        return as_3d.normal_vector
    def project_to_3D(self, example3d):
        # type: (Polygon3D) -> Polygon3D
        """Project the 2D polygon rotated into 3D space.

        This is used to return a previously rotated 3D polygon back to its original orientation, or to put polygons
        generated from pyclipper into the desired orientation.

        :param example3d: A 3D polygon in the desired plane.
        :returns: A 3D polygon.
        """
        points = self.points_matrix
        proj_axis = example3d.projection_axis
        a = example3d.distance
        v = example3d.normal_vector
        projected_points = project_to_3D(points, proj_axis, a, v)
        return Polygon3D(projected_points)
    @property
    def zs(self):
        # type: () -> List[float]
        """All-zero z coordinates (2D polygons live in the z=0 plane)."""
        return [0.0] * len(self.vertices)
class Polygon3D(Clipper3D, Polygon):
    """Three-dimensional polygon."""
    n_dims = 3
    vector_class = Vector3D
    def __eq__(self, other):
        """Equality: same plane (normal and distance) and full 2D overlap."""
        # check they're in the same plane
        if not almostequal(self.normal_vector, other.normal_vector):
            return False
        if not almostequal(self.distance, other.distance):
            return False
        # if they are in the same plane, check they completely overlap in 2D
        return self.project_to_2D() == other.project_to_2D()
    @property
    def zs(self):
        # type: () -> List[float]
        return [pt.z for pt in self.vertices]
    @property
    def normal_vector(self):
        """Unit normal vector perpendicular to the polygon in the outward direction.

        We use Newell's Method since the cross-product of two edge vectors is not valid for concave polygons.
        https://www.opengl.org/wiki/Calculating_a_Surface_Normal#Newell.27s_Method

        """
        n = [0.0, 0.0, 0.0]
        for i, v_curr in enumerate(self.vertices):
            v_next = self.vertices[(i + 1) % len(self.vertices)]
            n[0] += (v_curr.y - v_next.y) * (v_curr.z + v_next.z)
            n[1] += (v_curr.z - v_next.z) * (v_curr.x + v_next.x)
            n[2] += (v_curr.x - v_next.x) * (v_curr.y + v_next.y)
        return Vector3D(*n).normalize()
    @property
    def distance(self):
        # type: () -> np.float64
        """Distance from the origin to the polygon.

        Where v[0] * x + v[1] * y + v[2] * z = a is the equation of the plane containing the polygon (and where v
        is the polygon normal vector).

        :returns: The distance from the origin to the polygon.
        """
        v = self.normal_vector
        pt = self.points_matrix[0]  # arbitrary point in the polygon
        d = np.dot(v, pt)
        return d
    @property
    def projection_axis(self):
        # type: () -> int
        """An axis which will not lead to a degenerate surface.

        Chosen as the axis with the largest absolute normal component.

        :returns: The axis index.
        """
        proj_axis = max(range(3), key=lambda i: abs(self.normal_vector[i]))
        return proj_axis
    @property
    def is_horizontal(self):
        # type: () -> bool
        """Check if polygon is in the xy plane.

        :returns: True if the polygon is in the xy plane, else False.
        """
        # horizontal iff all z coordinates are (numerically) identical
        return bool(np.array(self.zs).std() < 1e-8)
    def is_clockwise(self, viewpoint):
        # type: (Vector3D) -> np.bool_
        """Check if vertices are ordered clockwise

        This function checks the vertices as seen from the viewpoint.

        :param viewpoint: A point from which to view the polygon.
        :returns: True if vertices are ordered clockwise when observed from the given viewpoint.
        """
        arbitrary_pt = self.vertices[0]
        v = arbitrary_pt - viewpoint
        n = self.normal_vector
        # positive dot product: the normal points away from the viewpoint
        sign = np.dot(v, n)
        return sign > 0
    def is_coplanar(self, other):
        # type: (Polygon3D) -> bool
        """Check if polygon is in the same plane as another polygon.

        This includes the same plane but opposite orientation.

        :param other: Another polygon.
        :returns: True if the two polygons are coplanar, else False.
        """
        n1 = self.normal_vector
        n2 = other.normal_vector
        d1 = self.distance
        d2 = other.distance
        if almostequal(n1, n2) and almostequal(d1, d2):
            return True
        elif almostequal(n1, -n2) and almostequal(d1, -d2):
            # same plane, opposite orientation
            return True
        else:
            return False
    def outside_point(self, entry_direction="counterclockwise"):
        # type: (str) -> Vector3D
        """Return a point outside the zone to which the surface belongs.

        The point will be outside the zone, respecting the global geometry rules
        for vertex entry direction.

        :param entry_direction: Either "clockwise" or "counterclockwise", as seen from outside the space.
        :returns: A point vector.
        """
        entry_direction = entry_direction.lower()
        if entry_direction == "clockwise":
            inside = self.vertices[0] - self.normal_vector
        elif entry_direction == "counterclockwise":
            inside = self.vertices[0] + self.normal_vector
        else:
            raise ValueError("invalid value for entry_direction '%s'" % entry_direction)
        return inside
    def order_points(self, starting_position):
        # type: (str) -> Polygon3D
        """Reorder the vertices based on a starting position rule.

        Rotates the vertex list so it starts at the vertex closest to the
        chosen corner of the bounding box.

        :param starting_position: The string that defines vertex starting position in EnergyPlus.
        :returns: The reordered polygon.
        """
        if starting_position == "upperleftcorner":
            bbox_corner = self.bounding_box[0]
        elif starting_position == "lowerleftcorner":
            bbox_corner = self.bounding_box[1]
        elif starting_position == "lowerrightcorner":
            bbox_corner = self.bounding_box[2]
        elif starting_position == "upperrightcorner":
            bbox_corner = self.bounding_box[3]
        else:
            raise ValueError("%s is not a valid starting position" % starting_position)
        start_index = self.index(bbox_corner.closest(self))
        new_vertices = [self[(start_index + i) % len(self)] for i in range(len(self))]
        return Polygon3D(new_vertices)
    def project_to_2D(self):
        # type: () -> Polygon2D
        """Project the 3D polygon into 2D space.

        This is so that we can perform operations on it using pyclipper library.

        Project onto either the xy, yz, or xz plane. (We choose the one that
        avoids degenerate configurations, which is the purpose of proj_axis.)

        :returns: A 2D polygon.
        """
        points = self.points_matrix
        projected_points = project_to_2D(points, self.projection_axis)
        return Polygon2D([pt[:2] for pt in projected_points])
    def normalize_coords(self, ggr):
        """Order points, respecting the global geometry rules

        :param ggr: EnergyPlus GlobalGeometryRules object.
        :returns: The normalized polygon.
        """
        try:
            entry_direction = ggr.Vertex_Entry_Direction
        except AttributeError:
            entry_direction = "counterclockwise"
        outside_point = self.outside_point(entry_direction)
        # delegates to the module-level normalize_coords() function
        return normalize_coords(self, outside_point, ggr)
    def from_wkt(self, wkt_poly):
        # type: (str) -> Polygon3D
        """Convert a wkt representation of a polygon to GeomEppy.

        This also accounts for the possible presence of inner rings by linking them to the outer ring.

        :param wkt_poly: A text representation of a polygon in well known text (wkt) format.
        :returns: A polygon.
        """
        poly = wkt.loads(wkt_poly)
        exterior = Polygon3D(poly.exterior.coords)
        if poly.interiors:
            # make the exterior into a geomeppy poly
            for inner_ring in poly.interiors:
                # make the interior into a geomeppy poly
                interior = Polygon3D(inner_ring.coords)
                # find the nearest points on the exterior and interior
                links = list(product(interior, exterior))
                links = sorted(links, key=lambda x: x[0].relative_distance(x[1]))
                on_interior = links[0][0]
                on_exterior = links[0][1]
                # join them up
                # rotate both rings so they start/end at the linking vertices
                exterior = Polygon3D(
                    exterior[exterior.index(on_exterior) :]
                    + exterior[: exterior.index(on_exterior) + 1]
                )
                interior = Polygon3D(
                    interior[interior.index(on_interior) :]
                    + interior[: interior.index(on_interior) + 1]
                )
                exterior = Polygon3D(exterior[:] + interior[:])
        return exterior
def break_polygons(poly, hole):
    # type: (Polygon, Polygon) -> List[Polygon]
    """Break up a surface with a hole in it.

    This produces two surfaces, neither of which have a hole in them.

    :param poly: The surface with a hole in.
    :param hole: The hole.
    :returns: Two Polygon3D objects.
    """
    # take the two closest points on the surface perimeter
    links = list(product(poly, hole))
    links = sorted(
        links, key=lambda x: x[0].relative_distance(x[1])
    )  # fast distance check
    # cut along the closest and second-closest perimeter/hole vertex pairs
    first_on_poly = links[0][0]
    last_on_poly = links[1][0]
    first_on_hole = links[1][1]
    last_on_hole = links[0][1]
    # walk the perimeter one way and the hole the other way; doubling the
    # vertex lists lets section() wrap past the starting vertex
    new_poly = section(first_on_poly, last_on_poly, poly[:] + poly[:]) + section(
        first_on_hole, last_on_hole, reversed(hole[:] + hole[:])
    )
    new_poly = Polygon3D(new_poly)
    # the second piece is what remains of poly after removing the first
    # piece merged with the hole
    union = hole.union(new_poly)[0]
    new_poly2 = poly.difference(union)[0]
    # keep both pieces facing the same way as the original surface
    if not almostequal(new_poly.normal_vector, poly.normal_vector):
        new_poly = new_poly.invert_orientation()
    if not almostequal(new_poly2.normal_vector, poly.normal_vector):
        new_poly2 = new_poly2.invert_orientation()
    return [new_poly, new_poly2]
def section(first, last, coords):
    """Collect the run of `coords` from `first` up to and including `last`.

    Collection begins when `first` is first seen; iteration stops
    (inclusively) at the first `last` encountered, even if collection has
    not started yet -- so a `last` appearing before `first` yields [].
    """
    collected = []
    for point in coords:
        if point == first or collected:
            collected.append(point)
        if point == last:
            break
    return collected
def project(pt, proj_axis):
    # type: (np.ndarray, int) -> Any
    """Drop the `proj_axis` coordinate of `pt`, projecting onto a plane.

    Projects onto either the xy, yz, or xz plane; `proj_axis` is chosen
    elsewhere so the projection avoids degenerate configurations.

    See http://stackoverflow.com/a/39008641/1706564
    """
    kept = []
    for axis, coord in enumerate(pt):
        if axis != proj_axis:
            kept.append(coord)
    return tuple(kept)
def project_inv(
    pt, proj_axis, a, v
):  # type: (np.ndarray, int, np.float64, Vector3D) -> Any
    """Lift a 2D point back onto the plane v . w = a.

    Returns the vector w in the surface's plane such that project(w)
    equals pt.  See http://stackoverflow.com/a/39008641/1706564

    :param pt: A two-dimensional point.
    :param proj_axis: The axis to project into.
    :param a: Distance to the origin for the plane to project into.
    :param v: Normal vector of the plane to project into.
    :returns: The transformed point.
    """
    lifted = list(pt)
    lifted.insert(proj_axis, 0.0)
    # solve v . w = a for the dropped coordinate
    missing = a
    for axis in range(3):
        missing -= lifted[axis] * v[axis]
    missing /= v[proj_axis]
    lifted[proj_axis] = missing
    return tuple(lifted)
def project_to_2D(vertices, proj_axis):
    # type: (np.ndarray, int) -> List[Tuple[np.float64, np.float64]]
    """Project 3D vertices into 2D by dropping the `proj_axis` coordinate.

    :param vertices: The three-dimensional vertices of the polygon.
    :param proj_axis: The axis to project into.
    :returns: The transformed vertices.
    """
    return [project(vertex, proj_axis) for vertex in vertices]
def project_to_3D(
    vertices, proj_axis, a, v
):  # type: (np.ndarray, int, np.float64, Vector3D) -> List[Tuple[np.float64, np.float64, np.float64]]
    """Lift 2D vertices back into the plane v . w = a.

    :param vertices: The two-dimensional vertices of the polygon.
    :param proj_axis: The axis to project into.
    :param a: Distance to the origin for the plane to project into.
    :param v: Normal vector of the plane to project into.
    :returns: The transformed vertices.
    """
    return [project_inv(vertex, proj_axis, a, v) for vertex in vertices]
def normalize_coords(
    poly, outside_pt, ggr=None
):  # type: (Polygon3D, Vector3D, Union[List, None, Idf_MSequence]) -> Polygon3D
    """Put coordinates into the correct format for EnergyPlus, following the Global Geometry Rules (GGR).

    :param poly: Polygon with new coordinates, not yet checked for GGR compliance.
    :param outside_pt: An outside point of the new polygon.
    :param ggr: EnergyPlus GlobalGeometryRules object.
    :returns: The normalized polygon.
    """
    # first orient the vertex loop, then rotate it to the required corner
    oriented = set_entry_direction(poly, outside_pt, ggr)
    return set_starting_position(oriented, ggr)
def set_entry_direction(poly, outside_pt, ggr=None):
    """Orient a polygon's vertices to match the GGR entry direction.

    :param poly: A polygon.
    :param outside_pt: A point beyond the outside face of the polygon.
    :param ggr: EnergyPlus global geometry rules (None -> EnergyPlus
        default of "counterclockwise").
    :return: A polygon with the vertices correctly oriented.
    """
    if ggr:
        entry_direction = ggr.Vertex_Entry_Direction.lower()
    else:
        entry_direction = "counterclockwise"  # EnergyPlus default
    clockwise = poly.is_clockwise(outside_pt)
    if entry_direction == "counterclockwise" and clockwise:
        poly = poly.invert_orientation()
    elif entry_direction == "clockwise" and not clockwise:
        poly = poly.invert_orientation()
    return poly
def set_starting_position(poly, ggr=None):
    """Rotate a polygon's vertices to start at the GGR starting corner.

    :param poly: A polygon.
    :param ggr: EnergyPlus global geometry rules (None -> EnergyPlus
        default of "upperleftcorner").
    :return: The reordered polygon.
    """
    if ggr:
        starting_position = ggr.Starting_Vertex_Position.lower()
    else:
        starting_position = "upperleftcorner"  # EnergyPlus default
    return poly.order_points(starting_position)
def intersect(poly1, poly2):
    # type: (Polygon, Polygon) -> List[Polygon]
    """Calculate the polygons to represent the intersection of two polygons.

    :param poly1: The first polygon.
    :param poly2: The second polygon.
    :returns: A list of unique polygons.
    """
    polys = []  # type: List[Polygon]
    # the shared area, seen from each polygon's side
    polys.extend(poly1.intersect(poly2))
    polys.extend(poly2.intersect(poly1))
    # a hole cannot be represented as a single EnergyPlus surface, so the
    # surface containing it is split into two hole-free polygons instead
    if is_hole(poly1, poly2):
        polys.extend(break_polygons(poly1, poly2))
    elif is_hole(poly2, poly1):
        polys.extend(break_polygons(poly2, poly1))
    else:
        # the parts of each polygon not covered by the other
        polys.extend(poly1.difference(poly2))
        polys.extend(poly2.difference(poly1))
    return polys
def is_hole(surface, possible_hole):
    # type: (Polygon, Polygon) -> bool
    """Identify if an intersection is a hole in the surface.

    If the intersection shares no collinear edge with the surface boundary
    it lies fully inside and represents a hole, which needs further
    processing into valid EnergyPlus surfaces.

    :param surface: The first surface.
    :param possible_hole: The intersection into the surface.
    :returns: True if the possible hole is a hole in the surface.
    """
    if surface.area < possible_hole.area:
        return False
    for surface_edge, hole_edge in product(surface.edges, possible_hole.edges):
        if surface_edge._is_collinear(hole_edge):
            return False
    return True
def bounding_box(polygons):
    """The bounding box which encompasses all of the polygons in the x,y plane.

    :param polygons: A list of polygons (each exposing a `coords` sequence).
    :return: A 2D polygon ordered [top_left, bottom_left, bottom_right,
        top_right].
    """
    # Collect each coordinate sweep once; the previous version re-scanned
    # every coordinate of every polygon eight times.
    xs = [c[0] for f in polygons for c in f.coords]
    ys = [c[1] for f in polygons for c in f.coords]
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    return Polygon2D(
        [(min_x, max_y), (min_x, min_y), (max_x, min_y), (max_x, max_y)]
    )
def is_convex_polygon(polygon):  # noqa
    """Return True if the sequence of 2D points is 'strictly convex'.

    Strictly convex means: points are valid, side lengths are non-zero,
    interior angles are strictly between zero and a straight angle, and
    the polygon does not intersect itself.

    See: https://stackoverflow.com/a/45372025/1706564

    Algorithm: the signed direction-change angle from each side to the
    next must keep the same sign throughout, and the changes must sum to
    plus-or-minus exactly one full turn (2 pi radians).  Too few, invalid,
    or repeated points are rejected along the way; zero interior angles
    are covered by the same checks (including the `len < 3` test).
    """
    full_turn = 2 * pi
    try:  # any bad points or direction changes mean "not convex"
        if len(polygon) < 3:
            return False
        # seed with the closing side so the loop sees every direction change
        prev_x, prev_y = polygon[-2]
        curr_x, curr_y = polygon[-1]
        direction = atan2(curr_y - prev_y, curr_x - prev_x)
        total_turn = 0.0
        orientation = 0.0
        for index, point in enumerate(polygon):
            prev_x, prev_y, prev_direction = curr_x, curr_y, direction
            curr_x, curr_y = point
            direction = atan2(curr_y - prev_y, curr_x - prev_x)
            if curr_x == prev_x and curr_y == prev_y:
                return False  # repeated consecutive points
            # normalize the turn into the half-open interval (-pi, pi]
            turn = direction - prev_direction
            if turn <= -pi:
                turn += full_turn
            elif turn > pi:
                turn -= full_turn
            if index == 0:
                # the first turn fixes the winding orientation
                if turn == 0.0:
                    return False
                orientation = 1.0 if turn > 0.0 else -1.0
            elif orientation * turn <= 0.0:
                # sign flip or straight angle: not strictly convex
                return False
            total_turn += turn
        # exactly one full turn, in either direction
        return abs(round(total_turn / full_turn)) == 1
    except (ArithmeticError, TypeError, ValueError):
        return False  # any exception means not a proper convex polygon
|
Mazecreator/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/preprocessing/categorical_vocabulary.py | 63 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical vocabulary classes to map categories to indexes.
Can be used for categorical variables, sparse variables and words.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
class CategoricalVocabulary(object):
  """Categorical variables vocabulary class.

  Accumulates and provides mapping from classes to indexes.
  Can be easily used for words.
  """

  def __init__(self, unknown_token="<UNK>", support_reverse=True):
    """Initializes the vocabulary.

    Args:
      unknown_token: category mapped to id 0 and returned for unseen
        categories once the vocabulary is frozen.
      support_reverse: whether to maintain the id -> category mapping
        required by `reverse`.
    """
    self._unknown_token = unknown_token
    self._mapping = {unknown_token: 0}
    self._support_reverse = support_reverse
    if support_reverse:
      self._reverse_mapping = [unknown_token]
    self._freq = collections.defaultdict(int)
    self._freeze = False

  def __len__(self):
    """Returns total count of mappings. Including unknown token."""
    return len(self._mapping)

  def freeze(self, freeze=True):
    """Freezes the vocabulary, after which new words return unknown token id.

    Args:
      freeze: True to freeze, False to unfreeze.
    """
    self._freeze = freeze

  def get(self, category):
    """Returns word's id in the vocabulary.

    If category is new, creates a new id for it.

    Args:
      category: string or integer to lookup in vocabulary.

    Returns:
      integer, id in the vocabulary.
    """
    if category not in self._mapping:
      if self._freeze:
        return 0
      self._mapping[category] = len(self._mapping)
      if self._support_reverse:
        self._reverse_mapping.append(category)
    return self._mapping[category]

  def add(self, category, count=1):
    """Adds count of the category to the frequency table.

    Args:
      category: string or integer, category to add frequency to.
      count: optional integer, how many to add.
    """
    category_id = self.get(category)
    if category_id <= 0:
      # Unknown token (or a frozen-out category): don't track frequency.
      return
    self._freq[category] += count

  def trim(self, min_frequency, max_frequency=-1):
    """Trims vocabulary for minimum frequency.

    Remaps ids from 1..n in sort frequency order.
    where n - number of elements left.

    Args:
      min_frequency: minimum frequency to keep.
      max_frequency: optional, maximum frequency to keep.
        Useful to remove very frequent categories (like stop words).
    """
    # Sort by alphabet then reversed frequency.
    # (dict.items() works on both Python 2 and 3; sorted() materializes
    # the pairs either way, so six.iteritems was unnecessary indirection.)
    self._freq = sorted(
        sorted(
            self._freq.items(),
            key=lambda x: (isinstance(x[0], str), x[0])),
        key=lambda x: x[1],
        reverse=True)
    self._mapping = {self._unknown_token: 0}
    if self._support_reverse:
      self._reverse_mapping = [self._unknown_token]
    idx = 1
    kept = []  # (category, count) pairs that survive trimming.
    for category, count in self._freq:
      if max_frequency > 0 and count >= max_frequency:
        # Too frequent: drop, but keep scanning for rarer entries.
        continue
      if count <= min_frequency:
        # Frequencies are sorted descending: everything left is too rare.
        break
      self._mapping[category] = idx
      idx += 1
      if self._support_reverse:
        self._reverse_mapping.append(category)
      kept.append((category, count))
    # Bug fix: the old `dict(self._freq[:idx - 1])` sliced the *sorted
    # list*, so when max_frequency skipped entries at its head the slice
    # wrongly retained those skipped entries and dropped an equal number
    # of kept ones.  Rebuild the table from the kept pairs instead.
    self._freq = dict(kept)

  def reverse(self, class_id):
    """Given class id reverse to original class name.

    Args:
      class_id: Id of the class.

    Returns:
      Class name.

    Raises:
      ValueError: if this vocabulary wasn't initialized with support_reverse.
    """
    if not self._support_reverse:
      raise ValueError("This vocabulary wasn't initialized with "
                       "support_reverse to support reverse() function.")
    return self._reverse_mapping[class_id]
|
liorvh/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py | 121 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.mocktool import MockTool
from webkitpy.common.net.statusserver_mock import MockStatusServer
from webkitpy.port.test import TestPort
class BotInfoTest(unittest.TestCase):

    def test_summary_text(self):
        # Wire a mock tool to a status server with a known bot id, then
        # check the one-line summary rendered by BotInfo.
        tool = MockTool()
        tool.status_server = MockStatusServer("MockBotId")
        summary = BotInfo(tool, 'port-name').summary_text()
        self.assertEqual(summary, "Bot: MockBotId Port: port-name Platform: MockPlatform 1.0")
|
Note-2/android_kernel_samsung_smdk4412 | refs/heads/cm-14.1 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage string printed when the script is invoked with bad arguments.
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";

# Optional filters: restrict the report to one command name or one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    # A numeric argument is treated as a pid, anything else as a comm name.
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Nested autovivifying dict: syscalls[comm][pid][syscall_id][errno] -> count.
syscalls = autodict()
def trace_begin():
    # Called by perf before any events are processed.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the aggregated report.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    # Per-event handler for raw_syscalls:sys_exit. Counts only failed
    # syscalls (negative return), honoring the optional comm/pid filters
    # parsed at module load.
    if (for_comm and common_comm != for_comm) or \
        (for_pid and common_pid != for_pid ):
        return

    if ret < 0:
        # autodict creates intermediate dicts on first access, but the leaf
        # counter starts out as a dict too, so the first += raises TypeError;
        # seed the counter with 1 in that case.
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Print failure counts grouped by comm, pid, syscall and errno.
    # Trailing commas suppress the newline (Python 2 print statements).
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort errno buckets by count, most frequent first.
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
|
zorroblue/scikit-learn | refs/heads/master | doc/datasets/conftest.py | 7 | from os.path import exists
from os.path import join
import numpy as np
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_network
from sklearn.datasets import get_data_home
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def setup_labeled_faces():
    """Skip the LFW doctests unless the dataset is already on disk."""
    lfw_path = join(get_data_home(), 'lfw_home')
    if not exists(lfw_path):
        raise SkipTest("Skipping dataset loading doctests")
def setup_mldata():
    """Install a mock of mldata.org so doctests run without network access."""
    mock_datasets = {
        'mnist-original': {
            'data': np.empty((70000, 784)),
            'label': np.repeat(np.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': np.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
    }
    install_mldata_mock(mock_datasets)
def teardown_mldata():
    # Remove the mldata.org mock installed by setup_mldata().
    uninstall_mldata_mock()
def setup_rcv1():
    """Skip the RCV1 doctests unless the dataset was already downloaded."""
    check_skip_network()
    if not exists(join(get_data_home(), "RCV1")):
        raise SkipTest("Download RCV1 dataset to run this test.")
def setup_twenty_newsgroups():
    """Skip the 20-newsgroups doctests unless the dataset is on disk."""
    if not exists(join(get_data_home(), '20news_home')):
        raise SkipTest("Skipping dataset loading doctests")
def setup_working_with_text_data():
    # These doctests download data; skip the file when the network is off.
    check_skip_network()
def pytest_runtest_setup(item):
    """Run the per-file setup hook for each dataset doctest file."""
    setup_by_suffix = (
        ('datasets/labeled_faces.rst', setup_labeled_faces),
        ('datasets/mldata.rst', setup_mldata),
        ('datasets/rcv1.rst', setup_rcv1),
        ('datasets/twenty_newsgroups.rst', setup_twenty_newsgroups),
        ('datasets/working_with_text_data.rst', setup_working_with_text_data),
    )
    fname = item.fspath.strpath
    # At most one suffix matches; mirror the original if/elif chain.
    for suffix, setup in setup_by_suffix:
        if fname.endswith(suffix):
            setup()
            break
def pytest_runtest_teardown(item):
    """Undo per-file setup after a dataset doctest file finishes."""
    # Only the mldata doctest installs global state that needs removal.
    if item.fspath.strpath.endswith('datasets/mldata.rst'):
        teardown_mldata()
|
sungkim11/mhargadh | refs/heads/master | django/contrib/flatpages/models.py | 410 | from django.db import models
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
class FlatPage(models.Model):
    """Database-backed "flat" page served at `url` on the sites in `sites`."""

    # NOTE: Django relies on field declaration order; do not reorder.
    url = models.CharField(_('URL'), max_length=100, db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)
    enable_comments = models.BooleanField(_('enable comments'))
    # Optional template override; empty means 'flatpages/default.html'.
    template_name = models.CharField(_('template name'), max_length=70, blank=True,
        help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'."))
    registration_required = models.BooleanField(_('registration required'), help_text=_("If this is checked, only logged-in users will be able to view the page."))
    # A flat page can be published on any number of sites.
    sites = models.ManyToManyField(Site)

    class Meta:
        db_table = 'django_flatpage'
        verbose_name = _('flat page')
        verbose_name_plural = _('flat pages')
        ordering = ('url',)

    def __unicode__(self):
        # Display form used e.g. in the admin: "<url> -- <title>".
        return u"%s -- %s" % (self.url, self.title)

    def get_absolute_url(self):
        return self.url
|
justinfinkle/pydiffexp | refs/heads/master | scripts/mcf10a_analyze_sim_results.py | 1 | import ast
import numpy as np
import pandas as pd
from pydiffexp import get_scores, DEResults
from pydiffexp.analyze import pairwise_corr
from scipy import stats
# Toggle: recompute the expensive pairwise correlation, or load the cached
# result from disk.
calc_correlation = True

# Simulation statistics: 3-level row MultiIndex and 2-level columns.
sim_data = pd.read_csv('intermediate_data/sim_stats_mcf10a_censoredtimes.tsv', sep='\t', index_col=[0, 1, 2], header=[0,1])
dea = pd.read_pickle('intermediate_data/GSE69822_dea.pkl')
der = dea.results['pten-wt']  # type: DEResults
sim_data.fillna(0, inplace=True)

# Remove clusters that have no dynamic DE (i.e. all 1, -1, 0)
p_results = pd.read_pickle('intermediate_data/GSE69822_ptest.pkl')
scores = p_results.loc[(der.top_table()['adj_pval'] < 0.001) & (p_results['p_value'] < 0.001)]
# Keep genes whose discrete trajectory visits more than one state.
interesting = scores.loc[scores.Cluster.apply(ast.literal_eval).apply(set).apply(len) > 1]
filtered_data = dea.data.loc[interesting.index, ['pten', 'ko']]
print(filtered_data.shape)

if calc_correlation:
    print('Computing pairwise')
    # Z-score per-condition mean expression so correlation compares shape,
    # not magnitude.
    gene_mean = filtered_data.groupby(level=['condition', 'time'], axis=1).mean()
    gene_mean_grouped = gene_mean.groupby(level='condition', axis=1)
    mean_z = gene_mean_grouped.transform(stats.zscore, ddof=1).fillna(0)

    # Correlate zscored means for each gene with each node in every simulation
    sim_means = sim_data.loc[:, ['ko_mean', 'wt_mean']]
    sim_mean_z = sim_means.groupby(level='stat', axis=1).transform(stats.zscore, ddof=1).fillna(0)
    pcorr = pairwise_corr(sim_mean_z, mean_z, axis=1).T
    print(pcorr.shape)
    print('Done')
    pcorr.to_hdf('intermediate_data/mcf10a_sim_corr.hdf', 'mydata')
else:
    pcorr = pd.read_hdf('intermediate_data/mcf10a_sim_corr.hdf', "mydata")

# Cluster and rank the simulations
# Weight each simulated log-fold-change by its confidence (1 - p-value).
weighted_lfc = ((1 - sim_data.loc[:, 'lfc_pvalue']) * sim_data.loc[:, 'lfc'])
sim_discrete = sim_data.loc[:, 'lfc'].apply(np.sign).fillna(0).astype(int)
# Zero out signs that are not significant at p < 0.05 before clustering.
sim_clusters = der.cluster_discrete((sim_discrete*(sim_data.loc[:, 'lfc_pvalue']<0.05)))
sim_g = sim_clusters.groupby('Cluster')
sim_scores = get_scores(sim_g, sim_data.loc[:, 'lfc'], weighted_lfc).sort_index()
sim_interesting = sim_scores.loc[sim_scores.Cluster.apply(ast.literal_eval).apply(set).apply(len) > 1]
# print(sim_interesting.sort_values(['Cluster', 'score'], ascending=False))
sim_interesting.set_index(['x_perturbation', 'id'], append=True, inplace=True)
sim_interesting = sim_interesting.swaplevel(i='id', j='gene')
sim_interesting.sort_index(inplace=True)
pcorr.sort_index(inplace=True)

pd.set_option('display.width', 250)
idx = pd.IndexSlice
matching_results = pd.DataFrame()
# For each interesting measured gene, rank candidate simulations sharing its
# cluster by the average of the cluster score and the correlation.
for gene, row in interesting.iterrows():
    candidate_nets = sim_interesting.loc[sim_interesting.Cluster == row.Cluster]
    ranking = pd.concat([candidate_nets, pcorr.loc[candidate_nets.index, gene]], axis=1)
    ranking['mean'] = (ranking['score'] + ranking[gene])/2
    # Keep rows whose third index level is 'y' — presumably the simulated
    # network's output node; TODO confirm against the simulation schema.
    ranking = ranking.loc[ranking.index.get_level_values(2) == 'y']
    ranking['true_gene'] = gene
    matching_results = pd.concat([matching_results, ranking.reset_index()], ignore_index=True, join='inner')

# Save matching results
matching_results.to_hdf('intermediate_data/mcf10a_motif_match.hdf', 'mydata')
soellman/copernicus | refs/heads/master | cpc/lib/benchmark/sleep.py | 2 | import logging
import os
import time
import math
from cpc.dataflow import IntValue, FloatValue, StringValue
import cpc.command
from cpc.lib.gromacs import iterate
log=logging.getLogger(__name__)
def sleep(inp):
    """Benchmark function: queue a sleep command, then report timings.

    The first invocation (inp.cmd is None) records the start timestamp and
    queues the 'benchmark/sleep' command; the follow-up invocation, after
    the command has run, emits start/end/roundtrip outputs.
    """
    if inp.testing():
        return
    pers = cpc.dataflow.Persistence(os.path.join(inp.getPersistentDir(),
                                                 "starttime.dat"))
    fo = inp.getFunctionOutput()
    sleepTime = inp.getInput('sleep_time')
    if inp.cmd is None:
        # First pass: remember when we started and enqueue the real work.
        pers.set("startTime", int(time.time()))
        cmd = cpc.command.Command(inp.getPersistentDir(), "benchmark/sleep",
                                  [sleepTime])
        fo.addCommand(cmd)
    else:
        # Second pass: the queued command finished; publish the timings.
        endTime = int(time.time())
        startTime = pers.get("startTime")
        fo.setOut("exec_time.end_timestamp", IntValue(endTime))
        fo.setOut("exec_time.start_timestamp", IntValue(startTime))
        fo.setOut("exec_time.roundtrip_time", IntValue(endTime - startTime))
    pers.write()
    return fo
def collectResults(inp):
    """Aggregate timing results across sleep samples.

    Records the wall-clock start time the first time 'sleep_time_array'
    updates, then recomputes total and average times on every call.
    """
    if (inp.testing()):
        return
    fo = inp.getFunctionOutput()
    pers = cpc.dataflow.Persistence(os.path.join(inp.getPersistentDir(),
                                                 "persistent.dat"))
    # init is persisted across calls: 0 = not started, 1 = timing in progress.
    init = 0
    if (pers.get("init")):
        init = pers.get("init")
    startTime = None
    log.debug("init is %s" % init)
    if (inp.getInputValue('sleep_time_array').isUpdated()):
        if (init == 0):
            # First update: record the start timestamp.
            log.debug("DOING INIT")
            startTime = int(time.time())
            fo.setOut('start_time', IntValue(startTime))
            pers.set('startTime', startTime)
            init = 1
    if (init == 1):
        num_samples = inp.getInput('num_samples')
        log.debug("Calculating")
        # calculating results all the time
        endTime = int(time.time())
        if startTime == None:
            # Not the initializing call: recover the persisted start time.
            startTime = pers.get('startTime')
        log.debug("start %s end %s" % (startTime, endTime))
        fo.setOut('end_time', IntValue(endTime))
        totalTime = endTime - startTime
        fo.setOut('total_time', IntValue(totalTime))
        averageTime = float(endTime - startTime) / float(num_samples)
        # averageTime = math.ceil(averageTime)
        fo.setOut("csv_result", StringValue("%s,%s,%s" % (num_samples, totalTime,
                                                          averageTime)))
        fo.setOut("average_time", FloatValue(averageTime))
    pers.set("init", init)
    pers.write()
    return fo
aweisberg/cassandra-dtest | refs/heads/master | tools/data.py | 8 | import time
import logging
from cassandra import ConsistencyLevel
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import SimpleStatement
from . import assertions
from dtest import create_cf, DtestTimeoutError
from tools.funcutils import get_rate_limited_function
logger = logging.getLogger(__name__)
def create_c1c2_table(tester, session, read_repair=None):
    # Create the standard 'cf' table with text columns c1/c2.
    # `tester` is unused but kept for call-site compatibility with the
    # other helpers in this module.
    create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}, read_repair=read_repair)
def insert_c1c2(session, keys=None, n=None, consistency=ConsistencyLevel.QUORUM):
    """Insert rows 'k<i>' with fixed c1/c2 values.

    Exactly one of `keys` (an iterable of ints) or `n` (row count, keys
    0..n-1) must be given.
    """
    if (keys is None) == (n is None):
        raise ValueError("Expected exactly one of 'keys' or 'n' arguments to not be None; "
                         "got keys={keys}, n={n}".format(keys=keys, n=n))
    if n:
        keys = list(range(n))
    prepared = session.prepare("INSERT INTO cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    prepared.consistency_level = consistency
    execute_concurrent_with_args(session, prepared,
                                 [['k{}'.format(k)] for k in keys])
def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM, tolerate_missing=False, must_be_missing=False):
    """Read row 'k<key>' and validate its c1/c2 values.

    With tolerate_missing an absent row is accepted; with must_be_missing
    the row is required to be absent.
    """
    stmt = SimpleStatement('SELECT c1, c2 FROM cf WHERE key=\'k%d\'' % key,
                           consistency_level=consistency)
    rows = list(session.execute(stmt))
    if not tolerate_missing:
        assertions.assert_length_equal(rows, 1)
        first = rows[0]
        assert len(first) == 2 and first[0] == 'value1' and first[1] == 'value2', first
    if must_be_missing:
        assertions.assert_length_equal(rows, 0)
def insert_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
    """Batch-insert `columns_count` columns for key 'k<key>'.

    Column indices start at offset * columns_count.
    """
    start = offset * columns_count
    upds = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%06d\'" % (i, key, i)
            for i in range(start, start + columns_count)]
    batch = 'BEGIN BATCH %s; APPLY BATCH' % '; '.join(upds)
    session.execute(SimpleStatement(batch, consistency_level=consistency))
def query_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
    """Range-read `columns_count` columns for 'k<key>' starting at `offset`
    and check each value."""
    stmt = SimpleStatement('SELECT c, v FROM cf WHERE key=\'k%s\' AND c >= \'c%06d\' AND c <= \'c%06d\''
                           % (key, offset, columns_count + offset - 1),
                           consistency_level=consistency)
    rows = list(session.execute(stmt))
    assertions.assert_length_equal(rows, columns_count)
    for pos, row in enumerate(rows):
        assert row[1] == 'value{}'.format(pos + offset)
# Simple puts and get (on one row), testing both reads by names and by slice,
# with overwrites and flushes between inserts to make sure we hit multiple
# sstables on reads
def putget(cluster, session, cl=ConsistencyLevel.QUORUM):
    """Write one row in several overwrite/flush rounds, then read it back
    with a slice query and validate the final column values."""
    _put_with_overwrite(cluster, session, 1, cl)
    # NOTE: reads-by-name (IN queries) are not exercised here because proper
    # IN support was not available; only the full-row slice read is checked.
    stmt = SimpleStatement('SELECT * FROM cf WHERE key=\'k0\'',
                           consistency_level=cl)
    _validate_row(cluster, list(session.execute(stmt)))
def _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):
    """Write each key's columns in three overwrite rounds, flushing after
    each round so reads later hit multiple sstables.

    Rounds: (column count, column-index step, value multiplier).
    """
    rounds = (
        (100, 1, 1),    # c = i,  v = 'value<i>'
        (50, 2, 4),     # c = 2i, v = 'value<4i>'
        (20, 5, 20),    # c = 5i, v = 'value<20i>'
    )
    for count, col_step, val_step in rounds:
        for k in range(0, nb_keys):
            kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'"
                   % (i * val_step, k, i * col_step) for i in range(0, count)]
            query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs),
                                    consistency_level=cl)
            session.execute(query)
            time.sleep(.01)
        cluster.flush()
def _validate_row(cluster, res):
    """Check the 100 columns produced by _put_with_overwrite.

    Columns divisible by 5 were overwritten in round three (value 4i),
    other even columns in round two (value 2i), the rest keep round one's
    value i. `cluster` is unused but kept for call-site compatibility.
    """
    assertions.assert_length_equal(res, 100)
    for col in range(0, 100):
        if col % 5 == 0:
            factor = 4
        elif col % 2 == 0:
            factor = 2
        else:
            factor = 1
        expected = 'value{}'.format(col * factor)
        assert res[col][2] == expected, 'for {}, expecting {}, got {}'.format(col, expected, res[col][2])
# Simple puts and range gets, with overwrites and flushes between inserts to
# make sure we hit multiple sstables on reads
def range_putget(cluster, session, cl=ConsistencyLevel.QUORUM):
    """Write 100 keys with overwrites/flushes, then range-read everything
    back and validate each key's 100 columns."""
    keys = 100
    _put_with_overwrite(cluster, session, keys, cl)

    rows = [row for row in session.execute('SELECT * FROM cf LIMIT 10000000')]
    assertions.assert_length_equal(rows, keys * 100)
    # Validate in per-key chunks of 100 columns.
    for start in range(0, keys * 100, 100):
        _validate_row(cluster, rows[start:start + 100])
def get_keyspace_metadata(session, keyspace_name):
    # Force a refresh so the returned metadata reflects the current schema.
    cluster = session.cluster
    cluster.refresh_keyspace_metadata(keyspace_name)
    return cluster.metadata.keyspaces[keyspace_name]
def get_schema_metadata(session):
    # Force a full schema refresh and return the cluster-wide metadata.
    cluster = session.cluster
    cluster.refresh_schema_metadata()
    return cluster.metadata
def get_table_metadata(session, keyspace_name, table_name):
    # Force a refresh of one table's metadata and return it.
    cluster = session.cluster
    cluster.refresh_table_metadata(keyspace_name, table_name)
    return cluster.metadata.keyspaces[keyspace_name].tables[table_name]
def rows_to_list(rows):
    """Convert an iterable of result rows into a list of plain lists."""
    return list(map(list, rows))
def index_is_built(node, session, keyspace, table_name, idx_name):
    # checks if an index has been built
    # Pre-3.0 Cassandra stores index names qualified as '<table>.<index>'.
    full_idx_name = idx_name if node.get_cassandra_version() > '3.0' else '{}.{}'.format(table_name, idx_name)
    # NOTE(review): the system."IndexInfo".table_name column is filled with
    # the *keyspace* here (see the .format arguments) — apparently intended.
    index_query = """SELECT * FROM system."IndexInfo" WHERE table_name = '{}' AND index_name = '{}'""".format(keyspace, full_idx_name)
    return len(list(session.execute(index_query))) == 1
def block_until_index_is_built(node, session, keyspace, table_name, idx_name):
    """
    Waits up to 30 seconds for a secondary index to be built, and raises
    DtestTimeoutError if it is not.
    """
    start = time.time()
    # Log at most once every 5 seconds while polling.
    rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
    while time.time() < start + 30:
        rate_limited_debug_logger("waiting for index to build")
        time.sleep(1)
        if index_is_built(node, session, keyspace, table_name, idx_name):
            break
    else:
        # while-loop exhausted without the index appearing.
        raise DtestTimeoutError()
|
shhui/nova | refs/heads/master | nova/console/rpcapi.py | 12 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the console RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova import rpc
# Option controlling which topic the console proxy nodes listen on.
rpcapi_opts = [
    cfg.StrOpt('console_topic',
               default='console',
               help='The topic console proxy nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)

# Optional version cap ([upgrade_levels] console=...) used to pin outgoing
# RPC message versions during rolling upgrades.
rpcapi_cap_opt = cfg.StrOpt('console',
        help='Set a version cap for messages sent to console services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConsoleAPI(object):
    '''Client side of the console rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Added get_backdoor_port()

        ... Grizzly and Havana support message version 1.1.  So, any changes
        to existing methods in 1.x after that point should be done such that
        they can handle the version_cap being set to 1.1.

        2.0 - Major API rev for Icehouse
    '''

    # Release names mapped to the newest message version they support.
    VERSION_ALIASES = {
        'grizzly': '1.1',
        'havana': '1.1',
    }

    def __init__(self, topic=None, server=None):
        super(ConsoleAPI, self).__init__()
        target = messaging.Target(topic=topic or CONF.console_topic,
                                  server=server, version='2.0')
        # The cap may be a release alias ('havana') or a literal version.
        cap = CONF.upgrade_levels.console
        version_cap = self.VERSION_ALIASES.get(cap, cap)
        self.client = rpc.get_client(target, version_cap=version_cap)

    def _get_compat_version(self, current, havana_compat):
        # Fall back to the Havana-era version when the cap forbids `current`.
        return current if self.client.can_send_version(current) else havana_compat

    def add_console(self, ctxt, instance_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('2.0', '1.0')
        self.client.prepare(version=version).cast(
            ctxt, 'add_console', instance_id=instance_id)

    def remove_console(self, ctxt, console_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('2.0', '1.0')
        self.client.prepare(version=version).cast(
            ctxt, 'remove_console', console_id=console_id)
|
shinpeimuraoka/ryu | refs/heads/master | ryu/services/protocols/bgp/speaker.py | 9 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
BGP protocol implementation.
"""
import logging
import socket
import struct
import traceback
from socket import IPPROTO_TCP, TCP_NODELAY
from eventlet import semaphore
from ryu.lib.packet import bgp
from ryu.lib.packet.bgp import AS_TRANS
from ryu.lib.packet.bgp import BGPMessage
from ryu.lib.packet.bgp import BGPOpen
from ryu.lib.packet.bgp import BGPUpdate
from ryu.lib.packet.bgp import BGPKeepAlive
from ryu.lib.packet.bgp import BGPNotification
from ryu.lib.packet.bgp import BGP_MSG_OPEN
from ryu.lib.packet.bgp import BGP_MSG_UPDATE
from ryu.lib.packet.bgp import BGP_MSG_KEEPALIVE
from ryu.lib.packet.bgp import BGP_MSG_NOTIFICATION
from ryu.lib.packet.bgp import BGP_MSG_ROUTE_REFRESH
from ryu.lib.packet.bgp import BGP_CAP_FOUR_OCTET_AS_NUMBER
from ryu.lib.packet.bgp import BGP_CAP_ENHANCED_ROUTE_REFRESH
from ryu.lib.packet.bgp import BGP_CAP_MULTIPROTOCOL
from ryu.lib.packet.bgp import BGP_ERROR_HOLD_TIMER_EXPIRED
from ryu.lib.packet.bgp import BGP_ERROR_SUB_HOLD_TIMER_EXPIRED
from ryu.lib.packet.bgp import get_rf
from ryu.services.protocols.bgp.base import Activity
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import CORE_ERROR_CODE
from ryu.services.protocols.bgp.constants import BGP_FSM_CONNECT
from ryu.services.protocols.bgp.constants import BGP_FSM_OPEN_CONFIRM
from ryu.services.protocols.bgp.constants import BGP_FSM_OPEN_SENT
from ryu.services.protocols.bgp.constants import BGP_VERSION_NUM
from ryu.services.protocols.bgp.protocol import Protocol
LOG = logging.getLogger('bgpspeaker.speaker')
# BGP min. and max. message lengths as per RFC.
BGP_MIN_MSG_LEN = 19
BGP_MAX_MSG_LEN = 4096
# Keep-alive singleton.
_KEEP_ALIVE = BGPKeepAlive()
# Registers error code/sub-code metadata so this exception maps onto the
# core BGPS error numbering.
@add_bgp_error_metadata(code=CORE_ERROR_CODE, sub_code=2,
                        def_desc='Unknown error occurred related to Speaker.')
class BgpProtocolException(BGPSException):
    """Base exception related to peer connection management.
    """
    pass
def notification_factory(code, subcode):
    """Build a `BGPNotification` for the given error code/sub-code pair.

    Parameters:
        - `code`: (int) BGP error code
        - `subcode`: (int) BGP error sub-code

    Raises ValueError when the pair does not map to a known reason.
    """
    msg = BGPNotification(code, subcode)
    if msg.reason:
        return msg
    raise ValueError('Invalid code/sub-code.')
class BgpProtocol(Protocol, Activity):
"""Protocol that handles BGP messages.
"""
MESSAGE_MARKER = (b'\xff\xff\xff\xff\xff\xff\xff\xff'
b'\xff\xff\xff\xff\xff\xff\xff\xff')
def __init__(self, socket, signal_bus, is_reactive_conn=False):
    """Wrap an already-connected TCP `socket` in the BGP wire protocol.

    Args:
        socket: connected socket to the peer (required).
        signal_bus: bus used to publish protocol events.
        is_reactive_conn: True when the peer initiated the connection
            (passive/incoming), False for an actively opened one.
    """
    # Validate input.
    if socket is None:
        raise ValueError('Invalid arguments passed.')
    self._remotename = self.get_remotename(socket)
    self._localname = self.get_localname(socket)
    activity_name = ('BgpProtocol %s, %s, %s' % (is_reactive_conn,
                                                 self._remotename,
                                                 self._localname))
    Activity.__init__(self, name=activity_name)
    # Initialize instance variables.
    self._peer = None                  # set later by _run()
    self._recv_buff = b''              # accumulates unparsed wire bytes
    self._socket = socket
    # Disable Nagle so small control messages go out immediately.
    self._socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    self._sendlock = semaphore.Semaphore()
    self._signal_bus = signal_bus
    # Negotiated timers; None until session establishment sets them.
    self._holdtime = None
    self._keepalive = None
    self._expiry = None
    # Add socket to Activity's socket container for managing it.
    if is_reactive_conn:
        self._asso_socket_map['passive_conn'] = self._socket
    else:
        self._asso_socket_map['active_conn'] = self._socket
    self._open_msg = None
    self.state = BGP_FSM_CONNECT
    self._is_reactive = is_reactive_conn
    # OPEN messages exchanged with the peer (set during session setup).
    self.sent_open_msg = None
    self.recv_open_msg = None
    self._is_bound = False
    # True once the 4-octet AS number capability is seen for this side.
    self.cap_four_octet_as_number = False
@property
def is_reactive(self):
    # True if this protocol wraps a passively-accepted (incoming) connection.
    return self._is_reactive
@property
def holdtime(self):
    # Hold time in seconds; None until assigned during session setup
    # (assignment happens outside this view — TODO confirm).
    return self._holdtime
@property
def keepalive(self):
    # Keepalive interval; None until assigned during session setup
    # (assignment happens outside this view — TODO confirm).
    return self._keepalive
def is_colliding(self, other_protocol):
    """Return True when `other_protocol` connects the same address pair.

    Two protocol instances collide when both the remote and local host
    addresses match; used for BGP connection-collision detection.
    """
    if not isinstance(other_protocol, BgpProtocol):
        raise ValueError('Currently only support comparing with '
                         '`BgpProtocol`')
    # Compare protocol connection end point's addresses.
    return (self._remotename[0] == other_protocol._remotename[0] and
            self._localname[0] == other_protocol._localname[0])
def is_local_router_id_greater(self):
    """Compares *True* if local router id is greater when compared to peer
    bgp id.

    Should only be called after protocol has reached OpenConfirm state.
    """
    # Local import avoids a circular dependency at module load time.
    from ryu.services.protocols.bgp.utils.bgp import from_inet_ptoi

    if not self.state == BGP_FSM_OPEN_CONFIRM:
        raise BgpProtocolException(desc='Can access remote router id only'
                                        ' after open message is received')
    remote_id = self.recv_open_msg.bgp_identifier
    local_id = self.sent_open_msg.bgp_identifier
    # Compare router ids numerically (dotted-quad converted to int).
    return from_inet_ptoi(local_id) > from_inet_ptoi(remote_id)
def is_enhanced_rr_cap_valid(self):
    """Return True when enhanced route refresh is enabled for this session.

    The capability is in effect only when *both* the sent and the received
    `Open` messages advertised BGP_CAP_ENHANCED_ROUTE_REFRESH.
    """
    if not self.recv_open_msg:
        raise ValueError('Did not yet receive peers open message.')

    local_adv = any(cap.cap_code == BGP_CAP_ENHANCED_ROUTE_REFRESH
                    for cap in self.sent_open_msg.opt_param)
    peer_adv = any(cap.cap_code == BGP_CAP_ENHANCED_ROUTE_REFRESH
                   for cap in self.recv_open_msg.opt_param)
    return local_adv and peer_adv
def _check_route_fmly_adv(self, open_msg, route_family):
    """Return True when `open_msg` advertised MP-BGP capability for this
    exact (afi, safi) pair."""
    return any(cap.cap_code == BGP_CAP_MULTIPROTOCOL and
               cap.afi == route_family.afi and
               cap.safi == route_family.safi
               for cap in open_msg.opt_param)
def is_route_family_adv(self, route_family):
    """Checks if `route_family` was advertised to peer as per MP_BGP cap.

    Returns:
        - True: if given address family was advertised.
        - False: if given address family was not advertised.
    """
    # Checks the OPEN message *we sent*.
    return self._check_route_fmly_adv(self.sent_open_msg, route_family)
def is_route_family_adv_recv(self, route_family):
    """Checks if `route_family` was advertised by peer as per MP_BGP cap.

    Returns:
        - True: if given address family was advertised.
        - False: if given address family was not advertised.
    """
    # Checks the OPEN message *we received*.
    return self._check_route_fmly_adv(self.recv_open_msg, route_family)
@property
def negotiated_afs(self):
    """Route families advertised by *both* sides via MP-BGP capabilities.

    Returns a list of route-family objects for every (afi, safi) pair that
    appears in both the sent and the received OPEN message.
    """
    def _mp_families(open_msg):
        # All (afi, safi) pairs advertised as MP-BGP capabilities.
        return set((cap.afi, cap.safi) for cap in open_msg.opt_param
                   if cap.cap_code == BGP_CAP_MULTIPROTOCOL)

    local_families = _mp_families(self.sent_open_msg)
    remote_families = _mp_families(self.recv_open_msg)

    # Both sides must advertise MP-BGP for any family to be negotiated.
    if local_families and remote_families:
        afi_safi = local_families & remote_families
    else:
        afi_safi = set()

    return [get_rf(afi, safi) for afi, safi in afi_safi]
def is_mbgp_cap_valid(self, route_family):
    """Returns True if both sides of this protocol have advertise
    capability for this address family.
    """
    return (self.is_route_family_adv(route_family) and
            self.is_route_family_adv_recv(route_family))
def is_four_octet_as_number_cap_valid(self):
    """Returns True if both sides of this protocol have Four-Octet
    AS number capability."""
    # Both our flag and the peer's must be set for the capability to apply.
    return (self.cap_four_octet_as_number and
            self._peer.cap_four_octet_as_number)
def _run(self, peer):
    """Sends open message to peer and handles received messages.

    Parameters:
        - `peer`: the peer to which this protocol instance is connected to.
    """
    # We know the peer we are connected to, we send open message.
    self._peer = peer
    self.connection_made()

    # We wait for peer to send messages.
    self._recv_loop()
def data_received(self, next_bytes):
    """Feed received bytes to the parser, translating protocol errors.

    On a `BgpExc`: if the error requests notification (SEND_ERROR) a
    NOTIFICATION message is sent to the peer, otherwise the socket is
    closed; the exception is re-raised in both cases.
    """
    try:
        self._data_received(next_bytes)
    except bgp.BgpExc as exc:
        LOG.error(
            "BGPExc Exception while receiving data: "
            "%s \n Traceback %s \n"
            % (str(exc), traceback.format_exc())
        )
        if exc.SEND_ERROR:
            self.send_notification(exc.CODE, exc.SUB_CODE)
        else:
            self._socket.close()
        raise exc
@staticmethod
def parse_msg_header(buff):
    """Unpack the fixed BGP message header from `buff`.

    Returns a (marker, length, type) tuple decoded from the
    network-order layout '!16sHB'.
    """
    marker, length, msg_type = struct.unpack('!16sHB', buff)
    return marker, length, msg_type
    def _data_received(self, next_bytes):
        """Maintains buffer of bytes received from peer and extracts bgp
        message from this buffer if enough data is received.

        Validates bgp message marker, length, type and data and constructs
        appropriate bgp message instance and calls handler.

        Raises `bgp.NotSync` on an invalid marker and `bgp.BadLen` when the
        length field is inconsistent with the message type.

        :Parameters:
            - `next_bytes`: next set of bytes received from peer.
        """
        # Append buffer with received bytes.
        self._recv_buff += next_bytes

        # Loop: there may be several complete messages in the buffer.
        while True:
            # If current buffer size is less then minimum bgp message size, we
            # return as we do not have a complete bgp message to work with.
            if len(self._recv_buff) < BGP_MIN_MSG_LEN:
                return

            # Parse message header into elements.
            auth, length, ptype = BgpProtocol.parse_msg_header(
                self._recv_buff[:BGP_MIN_MSG_LEN])

            # Check if we have valid bgp message marker.
            # We should get default marker since we are not supporting any
            # authentication.
            if (auth != BgpProtocol.MESSAGE_MARKER):
                LOG.error('Invalid message marker received: %s', auth)
                raise bgp.NotSync()

            # Check if we have valid bgp message length.
            check = (length < BGP_MIN_MSG_LEN or length > BGP_MAX_MSG_LEN)
            # RFC says: The minimum length of the OPEN message is 29
            # octets (including the message header).
            check2 = (ptype == BGP_MSG_OPEN and length < BGPOpen._MIN_LEN)
            # RFC says: A KEEPALIVE message consists of only the
            # message header and has a length of 19 octets.
            check3 = (ptype == BGP_MSG_KEEPALIVE and
                      length != BGPKeepAlive._MIN_LEN)
            # RFC says: The minimum length of the UPDATE message is 23
            # octets.
            check4 = (ptype == BGP_MSG_UPDATE and
                      length < BGPUpdate._MIN_LEN)

            if any((check, check2, check3, check4)):
                raise bgp.BadLen(ptype, length)

            # If we have partial message we wait for rest of the message.
            if len(self._recv_buff) < length:
                return
            msg, _, rest = BGPMessage.parser(self._recv_buff)
            self._recv_buff = rest

            # If we have a valid bgp message we call message handler.
            self._handle_msg(msg)
    def send_notification(self, code, subcode):
        """Utility to send notification message.

        Closes the socket after sending the message.
        :Parameters:
            - `code`: (int) - BGP Notification code
            - `subcode`: (int) - BGP Notification sub-code

        RFC ref: http://tools.ietf.org/html/rfc4486
        http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml
        """
        notification = BGPNotification(code, subcode)
        reason = notification.reason
        self._send_with_lock(notification)
        # Publish the error on the signal bus before tearing the TCP
        # session down, so listeners see it while peer info is still valid.
        self._signal_bus.bgp_error(self._peer, code, subcode, reason)
        # Log only when the local socket name is non-empty.
        if len(self._localname):
            LOG.error('Sent notification to %r >> %s', self._localname,
                      notification)
        self._socket.close()
    def _send_with_lock(self, msg):
        # Serialize the message onto the wire while holding the send lock,
        # so concurrent senders cannot interleave bytes.
        self._sendlock.acquire()
        try:
            self._socket.sendall(msg.serialize())
        except socket.error:
            # A failed write means the TCP session is gone.
            self.connection_lost('failed to write to socket')
        finally:
            self._sendlock.release()
    def send(self, msg):
        """Send a BGP message to the peer (guarded by the send lock).

        Raises BgpProtocolException when this protocol instance is not
        (or no longer) in started state.
        """
        if not self.started:
            raise BgpProtocolException('Tried to send message to peer when '
                                       'this protocol instance is not started'
                                       ' or is no longer is started state.')
        self._send_with_lock(msg)

        # NOTIFICATIONs are logged at error level and announced on the
        # signal bus; all other messages are only debug-logged.
        if msg.type == BGP_MSG_NOTIFICATION:
            LOG.error('Sent notification to %s >> %s', self._remotename, msg)
            self._signal_bus.bgp_notification_sent(self._peer, msg)
        else:
            LOG.debug('Sent msg to %s >> %s', self._remotename, msg)
    def stop(self):
        # Delegate shutdown to the Activity base class implementation.
        Activity.stop(self)
    def _validate_open_msg(self, open_msg):
        """Validates BGP OPEN message according from application context.

        Parsing modules takes care of validating OPEN message that need no
        context. But here we validate it according to current application
        settings. RTC or RR/ERR are MUST capability if peer does not support
        either one of them we have to end session.

        Raises `bgp.BadPeerAs` on an AS-number mismatch (or AS_TRANS without
        the Four-Octet capability) and `bgp.UnsupportedVersion` on a BGP
        version mismatch.
        """
        assert open_msg.type == BGP_MSG_OPEN
        opt_param_cap_map = open_msg.opt_param_cap_map

        # Validate remote AS number.
        remote_as = open_msg.my_as
        # Try to get AS number from Four-Octet AS number capability.
        cap4as = opt_param_cap_map.get(BGP_CAP_FOUR_OCTET_AS_NUMBER, None)
        if cap4as is None:
            if remote_as == AS_TRANS:
                # Raise Bad Peer AS error message, if my_as is AS_TRANS
                # and without Four-Octet AS number capability.
                raise bgp.BadPeerAs()
            self.cap_four_octet_as_number = False
        else:
            # Note: Even if the peer has Four-Octet AS number capability,
            # keep the local capability setting
            remote_as = cap4as.as_number
            self.cap_four_octet_as_number = True

        # Validate remote AS number with local setting.
        if remote_as != self._peer.remote_as:
            raise bgp.BadPeerAs()

        # Validate bgp version number.
        if open_msg.version != BGP_VERSION_NUM:
            raise bgp.UnsupportedVersion(BGP_VERSION_NUM)
    def _handle_msg(self, msg):
        """When a BGP message is received, send it to peer.

        Open messages are validated here. Peer handler is called to handle each
        message except for *Open* and *Notification* message. On receiving
        *Notification* message we close connection with peer.

        Raises `bgp.FiniteStateMachineError` for an out-of-order OPEN and
        `bgp.CollisionResolution` when binding this protocol to the peer
        fails (connection collision).
        """
        LOG.debug('Received msg from %s << %s', self._remotename, msg)

        # If we receive open message we try to bind to protocol
        if msg.type == BGP_MSG_OPEN:
            if self.state == BGP_FSM_OPEN_SENT:
                # Validate open message.
                self._validate_open_msg(msg)
                self.recv_open_msg = msg
                self.state = BGP_FSM_OPEN_CONFIRM
                self._peer.state.bgp_state = self.state

                # Try to bind this protocol to peer.
                self._is_bound = self._peer.bind_protocol(self)

                # If this protocol failed to bind to peer.
                if not self._is_bound:
                    # Failure to bind to peer indicates connection collision
                    # resolution choose different instance of protocol and this
                    # instance has to close. Before closing it sends
                    # appropriate notification msg. to peer.
                    raise bgp.CollisionResolution()

                # If peer sends Hold Time as zero, then according to RFC we do
                # not set Hold Time and Keep Alive timer.
                if msg.hold_time == 0:
                    LOG.info('The Hold Time sent by the peer is zero, hence '
                             'not setting any Hold Time and Keep Alive'
                             ' timers.')
                else:
                    # Start Keep Alive timer considering Hold Time preference
                    # of the peer.
                    self._start_timers(msg.hold_time)
                    self._send_keepalive()

                # Peer does not see open message.
                return
            else:
                # If we receive a Open message out of order
                LOG.error('Open message received when current state is not '
                          'OpenSent')
                # Received out-of-order open message
                # We raise Finite state machine error
                raise bgp.FiniteStateMachineError()

        elif msg.type == BGP_MSG_NOTIFICATION:
            if self._peer:
                self._signal_bus.bgp_notification_received(self._peer, msg)
            # If we receive notification message
            LOG.error('Received notification message, hence closing '
                      'connection %s', msg)
            self._socket.close()
            return

        # If we receive keepalive or update message, we reset expire timer.
        if (msg.type == BGP_MSG_KEEPALIVE or
                msg.type == BGP_MSG_UPDATE):
            if self._expiry:
                self._expiry.reset()

        # Call peer message handler for appropriate messages.
        if (msg.type in
                (BGP_MSG_UPDATE, BGP_MSG_KEEPALIVE, BGP_MSG_ROUTE_REFRESH)):
            self._peer.handle_msg(msg)

        # We give chance to other threads to run.
        self.pause(0)
def _start_timers(self, peer_holdtime):
"""Starts keepalive and expire timers.
Hold time is set to min. of peer and configured/default hold time.
Starts keep alive timer and expire timer based on this value.
"""
neg_timer = min(self._holdtime, peer_holdtime)
if neg_timer < self._holdtime:
LOG.info('Negotiated hold time (%s) is lower then '
'configured/default (%s).', neg_timer, self._holdtime)
# We use negotiated timer value.
self._holdtime = neg_timer
self._keepalive = self._create_timer('Keepalive Timer',
self._send_keepalive)
interval = self._holdtime // 3
self._keepalive.start(interval, now=False)
# Setup the expire timer.
self._expiry = self._create_timer('Holdtime Timer', self._expired)
self._expiry.start(self._holdtime, now=False)
LOG.debug('Started keep-alive and expire timer for negotiated hold'
'time %s', self._holdtime)
def _expired(self):
"""Hold timer expired event handler.
"""
LOG.info('Negotiated hold time %s expired.', self._holdtime)
code = BGP_ERROR_HOLD_TIMER_EXPIRED
subcode = BGP_ERROR_SUB_HOLD_TIMER_EXPIRED
self.send_notification(code, subcode)
self.connection_lost('Negotiated hold time %s expired.' %
self._holdtime)
self.stop()
    def _send_keepalive(self):
        # Fired by the keep-alive timer (see `_start_timers`) and once right
        # after the OPEN exchange in `_handle_msg`.
        self.send(_KEEP_ALIVE)
    def _recv_loop(self):
        """Sits in tight loop collecting data received from peer and
        processing it.

        Runs until the peer closes the connection or an error occurs; the
        loss reason is always reported via `connection_lost`.
        """
        required_len = BGP_MIN_MSG_LEN
        conn_lost_reason = "Connection lost as protocol is no longer active"
        try:
            while True:
                next_bytes = self._socket.recv(required_len)
                # Zero-length read means the peer performed an orderly close.
                if len(next_bytes) == 0:
                    conn_lost_reason = 'Peer closed connection'
                    break
                self.data_received(next_bytes)
        except socket.error as err:
            conn_lost_reason = 'Connection to peer lost: %s.' % err
        except bgp.BgpExc as ex:
            conn_lost_reason = 'Connection to peer lost, reason: %s.' % ex
        except Exception as e:
            LOG.debug(traceback.format_exc())
            conn_lost_reason = str(e)
        finally:
            self.connection_lost(conn_lost_reason)
    def connection_made(self):
        """Connection to peer handler.

        We send bgp open message to peer and initialize related attributes.
        """
        assert self.state == BGP_FSM_CONNECT
        # We have a connection with peer we send open message.
        open_msg = self._peer.create_open_msg()
        self._holdtime = open_msg.hold_time
        self.state = BGP_FSM_OPEN_SENT
        # NOTE(review): the peer's FSM state is mirrored only when this
        # connection is not reactive (presumably an outgoing connection --
        # confirm against Peer.bind_protocol).
        if not self.is_reactive:
            self._peer.state.bgp_state = self.state
        self.sent_open_msg = open_msg
        self.send(open_msg)
        self._peer.connection_made()
def connection_lost(self, reason):
"""Stops all timers and notifies peer that connection is lost.
"""
if self._peer:
state = self._peer.state.bgp_state
if self._is_bound or state == BGP_FSM_OPEN_SENT:
self._peer.connection_lost(reason)
self._peer = None
if reason:
LOG.info(reason)
else:
LOG.info('Connection to peer closed for unknown reasons.')
|
glennlive/gnuradio-wg-grc | refs/heads/master | gr-vocoder/examples/ulaw_audio_loopback.py | 58 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
def build_graph(sample_rate=8000):
    """Build the u-law audio loopback flowgraph.

    audio source -> scale to 16-bit -> float->short -> u-law encode
    -> u-law decode -> short->float -> scale back -> audio sink

    Args:
        sample_rate: audio sample rate in Hz for both source and sink.
            Defaults to 8000, the classic u-law telephony rate, so existing
            callers are unaffected.

    Returns:
        The assembled (not yet started) gr.top_block.
    """
    tb = gr.top_block()
    src = audio.source(sample_rate)
    # Scale [-1.0, 1.0] float samples up to the 16-bit integer range.
    src_scale = blocks.multiply_const_ff(32767)
    f2s = blocks.float_to_short()
    enc = vocoder.ulaw_encode_sb()
    dec = vocoder.ulaw_decode_bs()
    s2f = blocks.short_to_float()
    # Scale the decoded shorts back down to float audio.
    sink_scale = blocks.multiply_const_ff(1.0/32767.)
    sink = audio.sink(sample_rate)
    tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
    return tb
if __name__ == '__main__':
    tb = build_graph()
    tb.start()
    # Python 2 style prompt; blocks until the user presses Enter.
    raw_input ('Press Enter to exit: ')
    tb.stop()
    tb.wait()
|
leesavide/pythonista-docs | refs/heads/master | Documentation/matplotlib/mpl_examples/images_contours_and_fields/streamplot_demo_masking.py | 6 | """
Demo of the streamplot function with masking.

This example shows how streamlines created by the streamplot function skip
masked regions and NaN values.
"""
import numpy as np
import matplotlib.pyplot as plt

w = 3
Y, X = np.mgrid[-w:w:100j, -w:w:100j]
# Simple analytic vector field over the grid.
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)

# Mask out a square block in the middle of the field...
mask = np.zeros(U.shape, dtype=bool)
mask[40:60, 40:60] = 1
U = np.ma.array(U, mask=mask)
# ...and poison one corner with NaNs; streamplot avoids both regions.
U[:20, :20] = np.nan

plt.streamplot(X, Y, U, V, color='r')
# Draw the masked region as a dark square behind the streamlines.
plt.imshow(~mask, extent=(-w, w, -w, w), alpha=0.5,
           interpolation='nearest', cmap=plt.cm.gray)
plt.show()
|
ytjiang/django | refs/heads/master | django/contrib/gis/db/models/sql/aggregates.py | 76 | from django.db.models.sql import aggregates
import warnings

# The GIS aggregate names listed in __all__ are re-exported from the new
# location; without this import a wildcard import of this module would fail.
from django.contrib.gis.db.models.aggregates import *  # NOQA
from django.db.models.sql.aggregates import *  # NOQA
# `warnings` and `RemovedInDjango20Warning` were referenced below but never
# imported, making this module raise NameError on import.
from django.utils.deprecation import RemovedInDjango20Warning

__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union'] + aggregates.__all__

warnings.warn(
    "django.contrib.gis.db.models.sql.aggregates is deprecated. Use "
    "django.contrib.gis.db.models.aggregates instead.",
    RemovedInDjango20Warning, stacklevel=2)
|
qpleple/online-lda-vb | refs/heads/master | mdhoffma/onlineldavb.py | 1 | # onlineldavb.py: Package of functions for fitting Latent Dirichlet
# Allocation (LDA) with online variational Bayes (VB).
#
# Copyright (C) 2010 Matthew D. Hoffman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, re, time, string
import numpy as n
from scipy.special import gammaln, psi

# Fixed seed so repeated runs produce identical random initializations.
n.random.seed(100000001)

# Convergence threshold for the per-document E step: stop iterating once the
# mean absolute change in gamma falls below this value.
meanchangethresh = 0.001
def dirichlet_expectation(alpha):
    """
    For a vector theta ~ Dir(alpha), computes E[log(theta)] given alpha.

    Accepts either a single parameter vector or a matrix whose rows are
    independent Dirichlet parameter vectors.
    """
    if alpha.ndim == 1:
        # Vector case: normalize against the single total.
        return psi(alpha) - psi(n.sum(alpha))
    # Matrix case: subtract the digamma of each row's total.
    row_totals = n.sum(alpha, 1)
    return psi(alpha) - psi(row_totals)[:, n.newaxis]
def parse_doc_list(docs, vocab):
    """
    Parse a document into a list of word ids and a list of counts,
    or parse a set of documents into two lists of lists of word ids
    and counts.

    Arguments:
    docs:  List of D documents. Each document must be represented as
           a single string. (Word order is unimportant.) Any
           words not in the vocabulary will be ignored.
    vocab: Dictionary mapping from words to integer ids.

    Returns a pair of lists of lists.

    The first, wordids, says what vocabulary tokens are present in
    each document. wordids[i][j] gives the jth unique token present in
    document i. (Don't count on these tokens being in any particular
    order.)

    The second, wordcts, says how many times each vocabulary token is
    present. wordcts[i][j] is the number of times that the token given
    by wordids[i][j] appears in document i.
    """
    if isinstance(docs, str):
        # Allow a single document to be passed as a bare string.
        docs = [docs]

    D = len(docs)

    wordids = list()
    wordcts = list()
    for d in range(0, D):
        # Normalize: lowercase, hyphens become spaces, drop everything that
        # is not a letter or space, collapse runs of spaces.
        docs[d] = docs[d].lower()
        docs[d] = re.sub(r'-', ' ', docs[d])
        docs[d] = re.sub(r'[^a-z ]', '', docs[d])
        docs[d] = re.sub(r' +', ' ', docs[d])
        # str.split() instead of string.split(): the `string` module function
        # was removed in Python 3; the method works on both 2 and 3.
        words = docs[d].split()
        ddict = dict()
        for word in words:
            if word in vocab:
                wordtoken = vocab[word]
                if wordtoken not in ddict:
                    ddict[wordtoken] = 0
                ddict[wordtoken] += 1
        # Wrap in list() so callers get real lists under Python 3, where
        # dict.keys()/values() return views.
        wordids.append(list(ddict.keys()))
        wordcts.append(list(ddict.values()))

    return((wordids, wordcts))
class OnlineLDA:
    """
    Implements online VB for LDA as described in (Hoffman et al. 2010).
    """

    def __init__(self, vocab, K, D, alpha, eta, tau0, kappa):
        """
        Arguments:
        K: Number of topics
        vocab: A set of words to recognize. When analyzing documents, any word
           not in this set will be ignored.
        D: Total number of documents in the population. For a fixed corpus,
           this is the size of the corpus. In the truly online setting, this
           can be an estimate of the maximum number of documents that
           could ever be seen.
        alpha: Hyperparameter for prior on weight vectors theta
        eta: Hyperparameter for prior on topics beta
        tau0: A (positive) learning parameter that downweights early iterations
        kappa: Learning rate: exponential decay rate---should be between
             (0.5, 1.0] to guarantee asymptotic convergence.

        Note that if you pass the same set of D documents in every time and
        set kappa=0 this class can also be used to do batch VB.
        """
        # Normalize vocabulary entries the same way parse_doc_list normalizes
        # document words, and assign each a dense integer id.
        self._vocab = dict()
        for word in vocab:
            word = word.lower()
            word = re.sub(r'[^a-z]', '', word)
            self._vocab[word] = len(self._vocab)

        self._K = K
        self._W = len(self._vocab)
        self._D = D
        self._alpha = alpha
        self._eta = eta
        self._tau0 = tau0 + 1
        self._kappa = kappa
        # Number of mini-batch updates performed so far (drives rhot decay).
        self._updatect = 0

        # Initialize the variational distribution q(beta|lambda)
        self._lambda = 1*n.random.gamma(100., 1./100., (self._K, self._W))
        self._Elogbeta = dirichlet_expectation(self._lambda)
        self._expElogbeta = n.exp(self._Elogbeta)

    def do_e_step(self, docs):
        """
        Given a mini-batch of documents, estimates the parameters
        gamma controlling the variational distribution over the topic
        weights for each document in the mini-batch.

        Arguments:
        docs:  List of D documents. Each document must be represented
               as a string. (Word order is unimportant.) Any
               words not in the vocabulary will be ignored.

        Returns a tuple containing the estimated values of gamma,
        as well as sufficient statistics needed to update lambda.
        """
        # This is to handle the case where someone just hands us a single
        # document, not in a list.
        # NOTE(review): type(docs).__name__ is 'str' for strings, never
        # 'string', so this branch appears unreachable -- confirm intent
        # (parse_doc_list performs the same wrapping anyway).
        if (type(docs).__name__ == 'string'):
            temp = list()
            temp.append(docs)
            docs = temp

        (wordids, wordcts) = parse_doc_list(docs, self._vocab)
        batchD = len(docs)

        # Initialize the variational distribution q(theta|gamma) for
        # the mini-batch
        gamma = 1*n.random.gamma(100., 1./100., (batchD, self._K))
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = n.exp(Elogtheta)

        sstats = n.zeros(self._lambda.shape)
        # Now, for each document d update that document's gamma and phi
        it = 0
        meanchange = 0
        for d in range(0, batchD):
            # These are mostly just shorthand (but might help cache locality)
            ids = wordids[d]
            cts = wordcts[d]
            gammad = gamma[d, :]
            Elogthetad = Elogtheta[d, :]
            expElogthetad = expElogtheta[d, :]
            expElogbetad = self._expElogbeta[:, ids]
            # The optimal phi_{dwk} is proportional to
            # expElogthetad_k * expElogbetad_w. phinorm is the normalizer.
            phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100
            # Iterate between gamma and phi until convergence
            for it in range(0, 100):
                lastgamma = gammad
                # We represent phi implicitly to save memory and time.
                # Substituting the value of the optimal phi back into
                # the update for gamma gives this update. Cf. Lee&Seung 2001.
                gammad = self._alpha + expElogthetad * \
                    n.dot(cts / phinorm, expElogbetad.T)
                Elogthetad = dirichlet_expectation(gammad)
                expElogthetad = n.exp(Elogthetad)
                phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100
                # If gamma hasn't changed much, we're done.
                meanchange = n.mean(abs(gammad - lastgamma))
                if (meanchange < meanchangethresh):
                    break
            gamma[d, :] = gammad
            # Contribution of document d to the expected sufficient
            # statistics for the M step.
            sstats[:, ids] += n.outer(expElogthetad.T, cts/phinorm)

        # This step finishes computing the sufficient statistics for the
        # M step, so that
        # sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
        # = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
        sstats = sstats * self._expElogbeta

        return((gamma, sstats))

    def update_lambda(self, docs):
        """
        First does an E step on the mini-batch given in wordids and
        wordcts, then uses the result of that E step to update the
        variational parameter matrix lambda.

        Arguments:
        docs:  List of D documents. Each document must be represented
               as a string. (Word order is unimportant.) Any
               words not in the vocabulary will be ignored.

        Returns gamma, the parameters to the variational distribution
        over the topic weights theta for the documents analyzed in this
        update.

        Also returns an estimate of the variational bound for the
        entire corpus for the OLD setting of lambda based on the
        documents passed in. This can be used as a (possibly very
        noisy) estimate of held-out likelihood.
        """
        # rhot will be between 0 and 1, and says how much to weight
        # the information we got from this mini-batch.
        rhot = pow(self._tau0 + self._updatect, -self._kappa)
        self._rhot = rhot
        # Do an E step to update gamma, phi | lambda for this
        # mini-batch. This also returns the information about phi that
        # we need to update lambda.
        (gamma, sstats) = self.do_e_step(docs)
        # Estimate held-out likelihood for current values of lambda.
        ## bound = self.approx_bound(docs, gamma)
        # NOTE(review): the bound computation is disabled; callers always
        # receive 0 here despite the docstring above.
        bound = 0
        # Update lambda based on documents.
        self._lambda = self._lambda * (1-rhot) + \
            rhot * (self._eta + self._D * sstats / len(docs))
        self._Elogbeta = dirichlet_expectation(self._lambda)
        self._expElogbeta = n.exp(self._Elogbeta)
        self._updatect += 1

        return (gamma, bound)

    def approx_bound(self, docs, gamma):
        """
        Estimates the variational bound over *all documents* using only
        the documents passed in as "docs." gamma is the set of parameters
        to the variational distribution q(theta) corresponding to the
        set of documents passed in.

        The output of this function is going to be noisy, but can be
        useful for assessing convergence.
        """
        # This is to handle the case where someone just hands us a single
        # document, not in a list.
        if (type(docs).__name__ == 'string'):
            temp = list()
            temp.append(docs)
            docs = temp

        (wordids, wordcts) = parse_doc_list(docs, self._vocab)
        batchD = len(docs)

        score = 0
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = n.exp(Elogtheta)

        # E[log p(docs | theta, beta)]
        for d in range(0, batchD):
            gammad = gamma[d, :]
            ids = wordids[d]
            cts = n.array(wordcts[d])
            phinorm = n.zeros(len(ids))
            for i in range(0, len(ids)):
                # Log-sum-exp trick for numerical stability.
                temp = Elogtheta[d, :] + self._Elogbeta[:, ids[i]]
                tmax = max(temp)
                phinorm[i] = n.log(sum(n.exp(temp - tmax))) + tmax
            score += n.sum(cts * phinorm)
            # oldphinorm = phinorm
            # phinorm = n.dot(expElogtheta[d, :], self._expElogbeta[:, ids])
            # print oldphinorm
            # print n.log(phinorm)
            # score += n.sum(cts * n.log(phinorm))

        # E[log p(theta | alpha) - log q(theta | gamma)]
        score += n.sum((self._alpha - gamma)*Elogtheta)
        score += n.sum(gammaln(gamma) - gammaln(self._alpha))
        score += sum(gammaln(self._alpha*self._K) - gammaln(n.sum(gamma, 1)))

        # Compensate for the subsampling of the population of documents
        score = score * self._D / len(docs)

        # E[log p(beta | eta) - log q (beta | lambda)]
        score = score + n.sum((self._eta-self._lambda)*self._Elogbeta)
        score = score + n.sum(gammaln(self._lambda) - gammaln(self._eta))
        score = score + n.sum(gammaln(self._eta*self._W) -
                              gammaln(n.sum(self._lambda, 1)))

        return(score)
|
apixandru/intellij-community | refs/heads/master | python/testData/inspections/PyAttributeOutsideInitInspection/truePositive.py | 83 | __author__ = 'ktisha'
# NOTE: PyCharm inspection-test fixture. The <weak_warning> tag marks the
# text range where the inspection is expected to fire; it is consumed by the
# test harness and is not valid Python syntax.
class A:
    def __init__(self):
        self.a = 1

    def foo(self):
        <weak_warning descr="Instance attribute b defined outside __init__">self.b</weak_warning>= 1
|
saitodisse/zulip | refs/heads/master | zerver/lib/session_user.py | 125 | from __future__ import absolute_import
from django.contrib.auth import SESSION_KEY, get_user_model
def get_session_dict_user(session_dict):
    """Return the user primary key stored in a decoded session dict.

    Returns None when no user is recorded in the session. Mirrors
    django.contrib.auth._get_user_session_key.
    """
    pk_field = get_user_model()._meta.pk
    try:
        return pk_field.to_python(session_dict[SESSION_KEY])
    except KeyError:
        return None
def get_session_user(session):
    """Return the user primary key for a (still encoded) Session object."""
    decoded_session = session.get_decoded()
    return get_session_dict_user(decoded_session)
|
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Source/Python/Lib/python27/test/test_typechecks.py | 137 | """Unit tests for __instancecheck__ and __subclasscheck__."""
import unittest
from test import test_support
class ABC(type):
    # Metaclass implementing the PEP 3119 instance/subclass hooks, with a
    # simple "virtual subclass" registry stored in a __subclass__ set.

    def __instancecheck__(cls, inst):
        """Implement isinstance(inst, cls)."""
        # Check both the real type and the (possibly overridden) __class__.
        return any(cls.__subclasscheck__(c)
                   for c in set([type(inst), inst.__class__]))

    def __subclasscheck__(cls, sub):
        """Implement issubclass(sub, cls)."""
        # `sub` counts as a subclass when any class in its MRO is either
        # cls itself or registered in cls.__subclass__.
        candidates = cls.__dict__.get("__subclass__", set()) | set([cls])
        return any(c in candidates for c in sub.mro())
# Python 2 style: __metaclass__ opts this (otherwise old-style) class into
# the ABC metaclass above and registers int as a virtual subclass.
class Integer:
    __metaclass__ = ABC
    __subclass__ = set([int])
# Real (inheritance-based) subclass of the virtual ABC above.
class SubInt(Integer):
    pass
class TypeChecksTest(unittest.TestCase):
    # Exercises the __instancecheck__/__subclasscheck__ hooks both directly
    # and through the isinstance()/issubclass() builtins.

    def testIsSubclassInternal(self):
        self.assertEqual(Integer.__subclasscheck__(int), True)
        self.assertEqual(Integer.__subclasscheck__(float), False)

    def testIsSubclassBuiltin(self):
        self.assertEqual(issubclass(int, Integer), True)
        self.assertEqual(issubclass(int, (Integer,)), True)
        self.assertEqual(issubclass(float, Integer), False)
        self.assertEqual(issubclass(float, (Integer,)), False)

    def testIsInstanceBuiltin(self):
        self.assertEqual(isinstance(42, Integer), True)
        self.assertEqual(isinstance(42, (Integer,)), True)
        self.assertEqual(isinstance(3.14, Integer), False)
        self.assertEqual(isinstance(3.14, (Integer,)), False)

    def testIsInstanceActual(self):
        self.assertEqual(isinstance(Integer(), Integer), True)
        self.assertEqual(isinstance(Integer(), (Integer,)), True)

    def testIsSubclassActual(self):
        self.assertEqual(issubclass(Integer, Integer), True)
        self.assertEqual(issubclass(Integer, (Integer,)), True)

    def testSubclassBehavior(self):
        self.assertEqual(issubclass(SubInt, Integer), True)
        self.assertEqual(issubclass(SubInt, (Integer,)), True)
        self.assertEqual(issubclass(SubInt, SubInt), True)
        self.assertEqual(issubclass(SubInt, (SubInt,)), True)
        self.assertEqual(issubclass(Integer, SubInt), False)
        self.assertEqual(issubclass(Integer, (SubInt,)), False)
        self.assertEqual(issubclass(int, SubInt), False)
        self.assertEqual(issubclass(int, (SubInt,)), False)
        self.assertEqual(isinstance(SubInt(), Integer), True)
        self.assertEqual(isinstance(SubInt(), (Integer,)), True)
        self.assertEqual(isinstance(SubInt(), SubInt), True)
        self.assertEqual(isinstance(SubInt(), (SubInt,)), True)
        self.assertEqual(isinstance(42, SubInt), False)
        self.assertEqual(isinstance(42, (SubInt,)), False)

    def test_oldstyle(self):
        # These should just be ignored.
        class X:
            def __instancecheck__(self, inst):
                return True
            def __subclasscheck__(self, cls):
                return True
        class Sub(X): pass
        self.assertNotIsInstance(3, X)
        self.assertIsInstance(X(), X)
        self.assertFalse(issubclass(int, X))
        self.assertTrue(issubclass(Sub, X))
def test_main():
    # Standard CPython 2.x test-suite entry point.
    test_support.run_unittest(TypeChecksTest)

if __name__ == "__main__":
    unittest.main()
|
chidea/GoPythonDLLWrapper | refs/heads/master | bin/lib/_sitebuiltins.py | 137 | """
The objects used by the site module to add custom builtins.
"""
# Those objects are almost immortal and they keep a reference to their module
# globals. Defining them in the site module would keep too many references
# alive.
# Note this means this module should also avoid keep things alive in its
# globals.
import sys
class Quitter(object):
    """Callable bound to the ``exit``/``quit`` builtins.

    Printing the object yields a usage hint; calling it closes stdin and
    raises SystemExit with the supplied exit code.
    """

    def __init__(self, name, eof):
        self.name = name
        self.eof = eof

    def __repr__(self):
        return 'Use {}() or {} to exit'.format(self.name, self.eof)

    def __call__(self, code=None):
        # Shells like IDLE catch the SystemExit, but listen when their
        # stdin wrapper is closed. Closing stdin is strictly best-effort,
        # hence the deliberately bare except.
        try:
            sys.stdin.close()
        except:
            pass
        raise SystemExit(code)
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    # Page size used when paginating long texts interactively.
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        import os
        self.__name = name
        self.__data = data
        self.__lines = None
        # Candidate on-disk locations for the text; the `data` string is the
        # fallback when none of them can be read.
        self.__filenames = [os.path.join(dir, filename)
                            for dir in dirs
                            for filename in files]

    def __setup(self):
        # Lazily (and only once) load the text from the first readable
        # candidate file, falling back to the built-in data string.
        if self.__lines:
            return
        data = None
        for filename in self.__filenames:
            try:
                with open(filename, "r") as fp:
                    data = fp.read()
                break
            except OSError:
                pass
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        self.__setup()
        # Short texts are shown inline; long ones only advertise the call.
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                # IndexError past the last line terminates the pager.
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help that provides a helpful message
when 'help' is typed at the Python interactive prompt.
Calling help() at the Python prompt starts an interactive help session.
Calling help(thing) prints help for the python object 'thing'.
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
|
fertozudo/umatoo | refs/heads/master | lib/django/middleware/csrf.py | 118 | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import is_same_domain
from django.utils.six.moves.urllib.parse import urlparse
logger = logging.getLogger('django.request')

# Human-readable rejection reasons passed to the CSRF failure view and logs.
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure."

# Length (in characters) of a CSRF key/token.
CSRF_KEY_LENGTH = 32
def _get_failure_view():
    """
    Returns the view to be used for CSRF rejections
    """
    # Resolve the dotted-path CSRF_FAILURE_VIEW setting lazily so overrides
    # (e.g. in tests) take effect.
    return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
    # Random alphanumeric string of the standard CSRF key length.
    return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
    """
    Returns the CSRF token required for a POST form. The token is an
    alphanumeric value. A new token is created if one is not already set.

    A side effect of calling this function is to make the csrf_protect
    decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
    header to the outgoing response. For this reason, you may need to use this
    function lazily, as is done by the csrf context processor.
    """
    meta = request.META
    if "CSRF_COOKIE" not in meta:
        meta["CSRF_COOKIE"] = _get_new_csrf_key()
    # Marking the cookie as used is what triggers the response-side work.
    meta["CSRF_COOKIE_USED"] = True
    return meta["CSRF_COOKIE"]
def rotate_token(request):
    """
    Changes the CSRF token in use for a request - should be done on login
    for security purposes.
    """
    request.META["CSRF_COOKIE_USED"] = True
    request.META["CSRF_COOKIE"] = _get_new_csrf_key()
def _sanitize_token(token):
    """Return `token` reduced to alphanumerics, or a fresh key when invalid.

    Overlong cookies and cookies that sanitize down to nothing (e.g. a
    truncated cookie) are replaced with a brand-new CSRF key.
    """
    if len(token) > CSRF_KEY_LENGTH:
        return _get_new_csrf_key()
    cleaned = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
    # In case the cookie has been truncated to nothing at some point.
    return cleaned if cleaned else _get_new_csrf_key()
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
    def _accept(self, request):
        """Mark the request as CSRF-checked and let it through."""
        # Avoid checking the request twice by adding a custom attribute to
        # request. This will be relevant when both decorator and middleware
        # are used.
        request.csrf_processing_done = True
        return None
    def _reject(self, request, reason):
        """Log the rejection and delegate to the configured failure view."""
        logger.warning('Forbidden (%s): %s', reason, request.path,
            extra={
                'status_code': 403,
                'request': request,
            }
        )
        return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = force_text(
request.META.get('HTTP_REFERER'),
strings_only=True,
errors='replace'
)
if referer is None:
return self._reject(request, REASON_NO_REFERER)
referer = urlparse(referer)
# Make sure we have a valid URL for Referer.
if '' in (referer.scheme, referer.netloc):
return self._reject(request, REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if referer.scheme != 'https':
return self._reject(request, REASON_INSECURE_REFERER)
# If there isn't a CSRF_COOKIE_DOMAIN, assume we need an exact
# match on host:port. If not, obey the cookie rules.
if settings.CSRF_COOKIE_DOMAIN is None:
# request.get_host() includes the port.
good_referer = request.get_host()
else:
good_referer = settings.CSRF_COOKIE_DOMAIN
server_port = request.META['SERVER_PORT']
if server_port not in ('443', '80'):
good_referer = '%s:%s' % (good_referer, server_port)
# Here we generate a list of all acceptable HTTP referers,
# including the current host since that has been validated
# upstream.
good_hosts = list(settings.CSRF_TRUSTED_ORIGINS)
good_hosts.append(good_referer)
if not any(is_same_domain(referer.netloc, host) for host in good_hosts):
reason = REASON_BAD_REFERER % referer.geturl()
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
except IOError:
# Handle a broken connection before we've completed reading
# the POST data. process_view shouldn't raise any
# exceptions, so we'll ignore and serve the user a 403
# (assuming they're still listening, which they probably
# aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
if not constant_time_compare(request_csrf_token, csrf_token):
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
|
apechimp/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/navigation/forwardToNothing.py | 142 | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class ForwardToNothingTest(base_test.WebDriverBaseTest):
    """Verify that history.forward() with no forward entry is a no-op."""

    def test_forwardToNothing(self):
        # Load a static page whose body must be unchanged by forward().
        page = self.webserver.where_is('navigation/forwardStart.html')
        self.driver.get(page)
        before = self.driver.find_element_by_css_selector("body").text
        self.driver.forward()
        after = self.driver.find_element_by_css_selector("body").text
        self.assertEqual(before, after)
# Allow running this test file directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
|
arnaud-morvan/QGIS | refs/heads/master | tests/src/python/test_qgssinglesymbolrenderer.py | 17 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssinglesymbolrenderer.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsSingleSymbolRenderer,
QgsFillSymbol,
QgsFeatureRequest
)
from qgis.testing import unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
class TestQgsSingleSymbolRenderer(unittest.TestCase):
    """Rendering tests for QgsSingleSymbolRenderer, covering order-by behaviour."""

    def setUp(self):
        """Load a polygon layer, attach a single-symbol renderer and prepare map settings."""
        self.iface = get_iface()
        myShpFile = os.path.join(TEST_DATA_DIR, 'polys_overlapping.shp')
        layer = QgsVectorLayer(myShpFile, 'Polys', 'ogr')
        QgsProject.instance().addMapLayer(layer)

        # Create rulebased style
        sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})

        self.renderer = QgsSingleSymbolRenderer(sym1)
        layer.setRenderer(self.renderer)

        rendered_layers = [layer]
        self.mapsettings = self.iface.mapCanvas().mapSettings()
        self.mapsettings.setOutputSize(QSize(400, 400))
        self.mapsettings.setOutputDpi(96)
        self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
        self.mapsettings.setLayers(rendered_layers)

    def testOrderBy(self):
        """Renders must match the reference image with order-by enabled, then disabled."""
        self.renderer.setOrderBy(QgsFeatureRequest.OrderBy([QgsFeatureRequest.OrderByClause('Value', False)]))
        self.renderer.setOrderByEnabled(True)

        # Setup rendering check
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(self.mapsettings)
        renderchecker.setControlName('expected_singlesymbol_orderby')
        self.assertTrue(renderchecker.runTest('singlesymbol_orderby'))

        # disable order by and retest
        self.renderer.setOrderByEnabled(False)
        self.assertTrue(renderchecker.runTest('single'))
# Run the renderer tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
ferreiro/Python | refs/heads/master | 4_SQL/fifthQuery_Completed.py | 3 |
def fithQuery2(cursor):
#idea find all id of student and for all student search on the aplication tabla and obtain only the carrera
#if the Distincs carrera for each student is more than 2 borra ello
aplications = []
try:
cursor.execute('Select ID FROM Students')
lineList =[] #list of all id of students
for line in cursor:
lineList.append(line[0]) #find a way to pass the id of the student that are in lineList to serch for all the student
for line in lineList:
i=0
cursor.execute("SELECT DISTINCT Carrera FROM Aplications WHERE Aplications.ID= %s",lineList[i])
#cursor.execute("SELECT DISTINCT Carrera FROM Aplications WHERE Aplications.ID='345'")
for line in cursor:
aplications.append(line[0])
print str(line[0])
if(len(aplications)>2):
print str(len(aplications))
#Student that have request more than 2 different carreras
cursor.execute("DELETE From Aplications WHERE Aplications.ID='lineList[i]'")
#cursor.execute("DELETE From Aplications WHERE Aplications.ID='345'")
print "cancel from database"
conn.commit()
i=+1
#cursor.execute('Select ID,Nombre_Univ,Carrera,Decision FROM Aplications')
#print "last query result:"
#for line in cursor:
# print str(line[0]) + ", " + line[1]+ ", " + line[2]+ ", " + line[3]
print "Fith query completed successfully...[OK]"
except:
print "Fith query: problems deleting database entries... [NOT DELETED]" |
coreyoconnor/NeatX | refs/heads/master | lib/session.py | 3 | #
#
# Copyright (C) 2009 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module for sessions"""
import errno
import logging
import os
import os.path
import random
import time
# md5 module is deprecated in python2.6, hashlib is the replacement
try:
import hashlib as md5
except ImportError:
import md5
from neatx import constants
from neatx import serializer
from neatx import errors
from neatx import utils
def NewUniqueId(_data=None):
  """Generate a new, unique ID of 32 hexadecimal characters.

  @rtype: str
  @return: New ID

  """
  seed = _data
  if seed is None:
    # No explicit seed supplied: draw 1024 cryptographically strong bits.
    seed = random.SystemRandom().getrandbits(1024)
  digest = md5.md5(str(seed))
  return digest.hexdigest().upper()
class SessionBase(object):
  """Data structure for session.

  Instances use __slots__; attributes not explicitly assigned read as None
  via __getattr__ below.

  """
  __slots__ = [
    "cookie",
    "display",
    "fullscreen",
    "geometry",
    "hostname",
    "id",
    "name",
    "options",
    "port",
    "rootless",
    "screeninfo",
    "ssl",
    "state",
    "subscription",
    "type",
    "username",
    "virtualdesktop",
    ]

  def __init__(self, sessid, hostname, display, username):
    """Initializes this class.

    @type sessid: str
    @param sessid: Session ID
    @type hostname: str
    @param hostname: Local hostname
    @type display: str
    @param display: Display number
    @type username: str
    @param username: Username

    """
    # Set default values. Everything else listed in __slots__ is None unless
    # set otherwise.
    self.id = sessid
    self.hostname = hostname
    self.display = display
    self.username = username

    self.cookie = NewUniqueId()
    self.state = constants.SESS_STATE_CREATED
    self.subscription = constants.DEFAULT_SUBSCRIPTION

  def Serialize(self):
    """Serialize instance data.

    @rtype: C{dict}
    @return: Instance attributes and values

    """
    state = {}

    for name in self.__slots__:
      if hasattr(self, name):
        state[name] = getattr(self, name)

    return state

  def __getattr__(self, name):
    # Only called when normal attribute lookup fails, i.e. for slots that
    # were never assigned.
    if name in self.__slots__:
      # Known attributes default to None. See
      # http://docs.python.org/reference/datamodel.html#object.__getattr__ for
      # more details.
      return None

    raise AttributeError, name

  def __setattr__(self, name, value):
    # Reject transitions to unknown session states at assignment time.
    if name == "state" and value not in constants.VALID_SESS_STATES:
      raise errors.InvalidSessionState()

    return object.__setattr__(self, name, value)

  def _GetFullId(self):
    # Globally unique identifier: host, display and session ID combined.
    assert self.hostname
    assert self.display
    assert self.id

    return "%s-%s-%s" % (self.hostname, self.display, self.id)

  def _GetWindowName(self):
    # Human-readable title shown for the session window.
    return ("Neatx - %s@%s:%s - %s" %
            (self.username, self.hostname, self.display, self.name))

  # Read-only attributes
  full_id = property(fget=_GetFullId)
  windowname = property(fget=_GetWindowName)
class NxSession(SessionBase):
  """Session variant that can only be created via L{Restore}."""

  def __init__(self, *args, **kwargs):
    # Direct construction is intentionally unsupported; use Restore().
    raise NotImplementedError()

  @classmethod
  def Restore(cls, state):
    """Restore session from serialized state.

    @type state: C{dict}
    @param state: Serialized state

    """
    # Bypass __init__ (which raises) and fill attributes from the dict.
    obj = cls.__new__(cls)

    cls._Restore(obj, state)

    return obj

  @staticmethod
  def _Restore(obj, state):
    """Restore session from serialized state.

    @type state: C{dict}
    @param state: Serialized state

    """
    if not isinstance(state, dict):
      raise ValueError("Invalid data: expected dict, got %s" % type(state))

    # Remove unset attributes
    for name in obj.__slots__:
      if name not in state:
        delattr(obj, name)

    # Copy only known slot names; unknown keys are silently ignored.
    for name, value in state.iteritems():
      if name in obj.__slots__:
        setattr(obj, name, value)
def DeserializeSessionFromString(data):
  """Rebuild an L{NxSession} from its JSON string representation."""
  state = serializer.LoadJson(data)
  return NxSession.Restore(state)
def SerializeSessionToString(session):
  """Serialize a session to JSON, stamping it with the current time."""
  payload = session.Serialize()
  # Record when this snapshot was taken.
  payload["_updated"] = time.time()
  return serializer.DumpJson(payload)
class NxSessionManager(object):
  """Manages the on-disk session database (one directory per session)."""

  def __init__(self, _path=constants.SESSIONS_DIR):
    # Base directory holding one subdirectory per session ID.
    self._path = _path

  def FindSessionsWithFilter(self, username, filter_fn):
    """Find sessions filtered by a function.

    The filter function receives one parameter, the session object. If its
    return value evaluates to True, the session is added to the result list.

    @type username: str or None
    @param username: Wanted session owner
    @type filter_fn: callable or None
    @param filter_fn: Filter function
    @return: A list of L{NxSession} instances for any matching sessions in the
             database. If none are found, the list is empty.

    """
    result = []

    for sessid in utils.ListVisibleFiles(self._path):
      sess = self.LoadSession(sessid)
      # Sessions that vanished meanwhile (LoadSession -> None) are skipped.
      if (sess is not None and
          (username is None or sess.username == username) and
          (filter_fn is None or filter_fn(sess))):
        result.append(sess)

    return result

  def GetSessionDir(self, sessid):
    """Get absolute path for a session.

    """
    # TODO: If sessid is controlled by client this can be a security problem
    assert os.path.sep not in sessid

    return os.path.join(self._path, sessid)

  def GetSessionNodeSocket(self, sessid):
    """Return the path of the per-session node socket file."""
    return os.path.join(self.GetSessionDir(sessid),
                        constants.NODE_SOCKET_NAME)

  def _GetSessionDataFile(self, sessid):
    """Return the path of the serialized session data file."""
    return os.path.join(self.GetSessionDir(sessid),
                        constants.SESSION_DATA_FILE_NAME)

  def LoadSession(self, sessid):
    """Load a session from permanent storage.

    @type sessid: str
    @param sessid: Session ID
    @return: L{NxSession} instance, or None if the file is gone/unreadable

    """
    filename = self._GetSessionDataFile(sessid)

    logging.debug("Loading session %s from %s", sessid, filename)

    try:
      fd = open(filename, "r")
    except IOError, err:
      # Files can disappear
      if err.errno in (errno.ENOENT, errno.EACCES):
        return None
      raise

    try:
      return DeserializeSessionFromString(fd.read())
    finally:
      fd.close()

  def LoadSessionForUser(self, sessid, username):
    """Load a session from permanent storage and check username.

    @type sessid: str
    @param sessid: Session ID
    @type username: str
    @param username: Username
    @return: L{NxSession} instance, or None on mismatch/missing session

    """
    sess = self.LoadSession(sessid)
    if sess:
      if sess.username == username:
        return sess

      # Ownership mismatch is logged but treated like a missing session.
      logging.error("Session %r (owner %r) doesn't belong to user %r",
                    sessid, sess.username, username)

    return None

  def SaveSession(self, sess):
    """Save a session to permanent storage.

    """
    filename = self._GetSessionDataFile(sess.id)
    logging.debug("Writing session %r to %r", sess.id, filename)
    utils.WriteFile(filename, data=SerializeSessionToString(sess))

  def CreateSessionID(self):
    """Create unique session directory.

    @rtype: str
    @return: Session ID

    """
    # Create session directory (catches duplicate session IDs)
    # TODO: Split sessions into several directories (e.g. AB/DEF) to reduce
    # number of subdirectories
    # TODO: Cronjob to remove unused/old session directories
    tries = 0

    while True:
      sessid = NewUniqueId()
      path = self.GetSessionDir(sessid)
      tries += 1

      try:
        # mkdir is atomic, so an existing directory means an ID collision.
        os.mkdir(path, 0700)
      except OSError, err:
        if err.errno != errno.EEXIST:
          raise

        # Give up after 10 retries
        if tries > 10:
          raise errors.GenericError("Unable to create session directory (%r)",
                                    err)

        continue

      return sessid
nhippenmeyer/django | refs/heads/master | django/conf/urls/i18n.py | 310 | import warnings
from django.conf import settings
from django.conf.urls import patterns, url
from django.core.urlresolvers import LocaleRegexURLResolver
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.views.i18n import set_language
def i18n_patterns(prefix, *args):
    """
    Adds the language code prefix to every URL pattern within this
    function. This may only be used in the root URLconf, not in an included
    URLconf.
    """
    legacy_call = isinstance(prefix, six.string_types)
    if legacy_call:
        # Pre-1.8 calling convention: a view-prefix string plus url tuples.
        warnings.warn(
            "Calling i18n_patterns() with the `prefix` argument and with tuples "
            "instead of django.conf.urls.url() instances is deprecated and "
            "will no longer work in Django 1.10. Use a list of "
            "django.conf.urls.url() instances instead.",
            RemovedInDjango110Warning, stacklevel=2
        )
        pattern_list = patterns(prefix, *args)
    else:
        pattern_list = [prefix] + list(args)

    # Without i18n support the patterns are returned unwrapped.
    if settings.USE_I18N:
        return [LocaleRegexURLResolver(pattern_list)]
    return pattern_list
# URLs served under the i18n/ prefix (included by the project URLconf).
urlpatterns = [
    url(r'^setlang/$', set_language, name='set_language'),
]
|
GbalsaC/bitnamiP | refs/heads/master | venv/lib/python2.7/site-packages/south/__init__.py | 2 | """
South - Useable migrations for Django apps
"""
# Package version, exposed for setup.py and runtime introspection.
__version__ = "1.0.1"
# Primary authors of the project.
__authors__ = [
    "Andrew Godwin <andrew@aeracode.org>",
    "Andy McCurdy <andy@andymccurdy.com>"
]
|
allotria/intellij-community | refs/heads/master | python/testData/resolve/pyToJava/Field.py | 83 | from java.lang import System
System.ou<ref>t |
smalls257/VRvisu | refs/heads/master | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/test/test_print.py | 121 | """Test correct operation of the print function.
"""
# In 2.6, this gives us the behavior we want. In 3.0, it has
# no function, but it still must parse correctly.
from __future__ import print_function
import unittest
from test import test_support
from StringIO import StringIO
# Sentinel marking "argument not supplied" (None is a legal print() value).
NotDefined = object()

# A dispatch table all 8 combinations of providing
# sep, end, and file
# I use this machinery so that I'm not just passing default
# values to print, I'm either passing or not passing in the
# arguments
dispatch = {
    (False, False, False):
        lambda args, sep, end, file: print(*args),
    (False, False, True):
        lambda args, sep, end, file: print(file=file, *args),
    (False, True,  False):
        lambda args, sep, end, file: print(end=end, *args),
    (False, True,  True):
        lambda args, sep, end, file: print(end=end, file=file, *args),
    (True,  False, False):
        lambda args, sep, end, file: print(sep=sep, *args),
    (True,  False, True):
        lambda args, sep, end, file: print(sep=sep, file=file, *args),
    (True,  True,  False):
        lambda args, sep, end, file: print(sep=sep, end=end, *args),
    (True,  True,  True):
        lambda args, sep, end, file: print(sep=sep, end=end, file=file, *args),
    }
# Class used to test __str__ and print
class ClassWith__str__:
    """Test helper: str(instance) returns the constructor argument."""

    def __init__(self, x):
        self.x = x

    def __str__(self):
        value = self.x
        return value
class TestPrint(unittest.TestCase):
    """Exercise the print() function's sep/end/file argument combinations."""

    def check(self, expected, args,
              sep=NotDefined, end=NotDefined, file=NotDefined):
        """Call print with only the explicitly supplied keyword arguments
        and assert the captured output equals *expected*."""
        # Capture sys.stdout in a StringIO. Call print with args,
        # and with sep, end, and file, if they're defined. Result
        # must match expected.

        # Look up the actual function to call, based on if sep, end, and file
        # are defined
        fn = dispatch[(sep is not NotDefined,
                       end is not NotDefined,
                       file is not NotDefined)]
        with test_support.captured_stdout() as t:
            fn(args, sep, end, file)

        self.assertEqual(t.getvalue(), expected)

    def test_print(self):
        """Check expected output for many sep/end combinations."""
        def x(expected, args, sep=NotDefined, end=NotDefined):
            # Run the test 2 ways: not using file, and using
            # file directed to a StringIO
            self.check(expected, args, sep=sep, end=end)

            # When writing to a file, stdout is expected to be empty
            o = StringIO()
            self.check('', args, sep=sep, end=end, file=o)

            # And o will contain the expected output
            self.assertEqual(o.getvalue(), expected)

        x('\n', ())
        x('a\n', ('a',))
        x('None\n', (None,))
        x('1 2\n', (1, 2))
        x('1 2\n', (1, ' ', 2))
        x('1*2\n', (1, 2), sep='*')
        x('1 s', (1, 's'), end='')
        x('a\nb\n', ('a', 'b'), sep='\n')
        x('1.01', (1.0, 1), sep='', end='')
        x('1*a*1.3+', (1, 'a', 1.3), sep='*', end='+')
        x('a\n\nb\n', ('a\n', 'b'), sep='\n')
        x('\0+ +\0\n', ('\0', ' ', '\0'), sep='+')

        x('a\n b\n', ('a\n', 'b'))
        x('a\n b\n', ('a\n', 'b'), sep=None)
        x('a\n b\n', ('a\n', 'b'), end=None)
        x('a\n b\n', ('a\n', 'b'), sep=None, end=None)

        x('*\n', (ClassWith__str__('*'),))
        x('abc 1\n', (ClassWith__str__('abc'), 1))

        # 2.x unicode tests
        x(u'1 2\n', ('1', u'2'))
        x(u'u\1234\n', (u'u\1234',))
        x(u' abc 1\n', (' ', ClassWith__str__(u'abc'), 1))

        # errors
        self.assertRaises(TypeError, print, '', sep=3)
        self.assertRaises(TypeError, print, '', end=3)
        self.assertRaises(AttributeError, print, '', file='')

    def test_mixed_args(self):
        """Verify unicode coercion rules when str and unicode args are mixed."""
        # If an unicode arg is passed, sep and end should be unicode, too.
        class Recorder(object):
            # File-like sink that records every write; optionally asserts
            # that each written chunk is unicode.

            def __init__(self, must_be_unicode):
                self.buf = []
                self.force_unicode = must_be_unicode

            def write(self, what):
                if self.force_unicode and not isinstance(what, unicode):
                    raise AssertionError("{0!r} is not unicode".format(what))
                self.buf.append(what)

        buf = Recorder(True)
        print(u'hi', file=buf)
        self.assertEqual(u''.join(buf.buf), 'hi\n')
        del buf.buf[:]
        print(u'hi', u'nothing', file=buf)
        self.assertEqual(u''.join(buf.buf), 'hi nothing\n')
        buf = Recorder(False)
        print('hi', 'bye', end=u'\n', file=buf)
        self.assertIsInstance(buf.buf[1], unicode)
        self.assertIsInstance(buf.buf[3], unicode)
        del buf.buf[:]
        print(sep=u'x', file=buf)
        self.assertIsInstance(buf.buf[-1], unicode)
def test_main():
    # Entry point used by regrtest; runs the whole TestPrint suite.
    test_support.run_unittest(TestPrint)
# Support direct invocation: python test_print.py
if __name__ == "__main__":
    test_main()
|
etherkit/OpenBeacon2 | refs/heads/master | client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt4.QtGui.py | 4 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import qt_plugins_binaries
# Qt4 plugin categories that QtGui may load at run time; their binaries
# must be bundled alongside the frozen application.
_PLUGIN_TYPES = (
    'accessible',
    'iconengines',
    'imageformats',
    'inputmethods',
    'graphicssystems',
)

binaries = []
for _plugin_type in _PLUGIN_TYPES:
    binaries.extend(qt_plugins_binaries(_plugin_type, namespace='PyQt4'))

# QtGui implicitly needs sip and QtCore at run time.
hiddenimports = ['sip', 'PyQt4.QtCore']
|
marcsans/cnn-physics-perception | refs/heads/master | phy/lib/python2.7/site-packages/scipy/io/harwell_boeing/setup.py | 128 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
def configuration(parent_package='',top_path=None):
    # Build the numpy.distutils configuration for the harwell_boeing subpackage.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('harwell_boeing',parent_package,top_path)
    # Ship the tests directory with the installed package.
    config.add_data_dir('tests')
    return config
# Allow building this subpackage standalone: python setup.py build
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
Haikson/django-nginx-uwsgi | refs/heads/master | dnuconfig/__init__.py | 1 | #~*~ coding: utf-8 ~*~
__author__ = 'Kamo Petrosyan' |
ooici/marine-integrations | refs/heads/master | mi/core/unit_test.py | 1 | #! /usr/bin/env python
"""
@file ion/core/unit_test.py
@author Bill French
@brief Base test class for all MI tests. Provides two base classes,
One for pyon tests and one for stand alone MI tests.
We have the stand alone test case for tests that don't require or can't
integrate with the common ION test case.
"""
from mi.core.log import get_logger
log = get_logger()
import unittest
import json
from pyon.util.unit_test import IonUnitTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.util.int_test import IonIntegrationTestCase
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import DataParticleValue
from mi.idk.exceptions import IDKException
class MiUnitTest(unittest.TestCase):
    """
    Base class for non-ion tests. Use only if needed to avoid ion
    test common code.
    """

    def shortDescription(self):
        # Suppress docstring-derived test descriptions in runner output.
        return None
class MiUnitTestCase(IonUnitTestCase):
    """
    Base class for most tests in MI.
    """

    def shortDescription(self):
        # Keep unittest output terse: no docstring descriptions.
        return None

    def test_verify_service(self):
        # Placeholder so the base class is itself runnable as a suite.
        pass
class MiTestCase(PyonTestCase):
    """
    Base class for most tests in MI.
    """

    def shortDescription(self):
        # Keep unittest output terse: no docstring descriptions.
        return None

    def test_verify_service(self):
        # Placeholder so the base class is itself runnable as a suite.
        pass
class MiIntTestCase(IonIntegrationTestCase):
    """
    Base class for most integration tests in MI.
    """

    def shortDescription(self):
        # Keep unittest output terse: no docstring descriptions.
        return None
class ParticleTestMixin(object):
    """
    A class with some methods to test data particles. Intended to be mixed
    into test classes so that particles can be tested in different areas of
    the MI code base.
    """

    def convert_data_particle_to_dict(self, data_particle):
        """
        Convert a data particle object to a dict. This will work for data
        particles as DataParticle object, dictionaries or a string
        @param data_particle data particle
        @return dictionary representation of a data particle
        """
        if (isinstance(data_particle, DataParticle)):
            sample_dict = data_particle.generate_dict()
        elif (isinstance(data_particle, str)):
            # Strings are assumed to be JSON-encoded particles.
            sample_dict = json.loads(data_particle)
        elif (isinstance(data_particle, dict)):
            sample_dict = data_particle
        else:
            raise IDKException("invalid data particle type: %s", type(data_particle))

        return sample_dict

    def get_data_particle_values_as_dict(self, data_particle):
        """
        Return all of the data particle values as a dictionary with the value
        id as the key and the value as the value.  This method will decimate
        the data, in the any characteristics other than value id and value.
        i.e. binary.
        @param data_particle data particle to inspect
        @return return a dictionary with keys and values { value-id: value }
        @throws IDKException when missing values dictionary
        """
        sample_dict = self.convert_data_particle_to_dict(data_particle)

        values = sample_dict.get('values')
        if(not values):
            raise IDKException("Data particle missing values")

        if(not isinstance(values, list)):
            raise IDKException("Data particle values not a list")

        result = {}

        for param in values:
            if(not isinstance(param, dict)):
                raise IDKException("must be a dict")

            key = param.get('value_id')
            if(key == None):
                raise IDKException("value_id not defined")

            # Duplicate value ids would silently shadow each other otherwise.
            if(key in result.keys()):
                raise IDKException("duplicate value detected for %s" % key)

            result[key] = param.get('value')

        return result

    def assert_data_particle_keys(self, data_particle_key, test_config):
        """
        Ensure that the keys defined in the data particle key enum match
        the keys defined in the test configuration.

        @param data_particle_key object that defines all data particle keys.
        @param test_config dictionary containing parameter verification values
        """
        driver_keys = sorted(data_particle_key.list())
        test_config_keys = sorted(test_config.keys())

        self.assertEqual(driver_keys, test_config_keys)

    def assert_data_particle_header(self, data_particle, stream_name, require_instrument_timestamp=False):
        """
        Verify a data particle header is formatted properly
        @param data_particle version 1 data particle
        @param stream_name version 1 data particle
        @param require_instrument_timestamp should we verify the instrument timestamp exists
        """
        sample_dict = self.convert_data_particle_to_dict(data_particle)
        log.debug("SAMPLEDICT: %s", sample_dict)

        self.assertTrue(sample_dict[DataParticleKey.STREAM_NAME], stream_name)
        self.assertTrue(sample_dict[DataParticleKey.PKT_FORMAT_ID], DataParticleValue.JSON_DATA)
        self.assertTrue(sample_dict[DataParticleKey.PKT_VERSION], 1)
        self.assertIsInstance(sample_dict[DataParticleKey.VALUES], list)

        self.assertTrue(sample_dict.get(DataParticleKey.PREFERRED_TIMESTAMP))

        self.assertIsNotNone(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP))
        self.assertIsInstance(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP), float)

        # It is highly unlikely that we should have a particle without a port agent timestamp,
        # at least that's the current assumption.
        self.assertIsNotNone(sample_dict.get(DataParticleKey.PORT_TIMESTAMP))
        self.assertIsInstance(sample_dict.get(DataParticleKey.PORT_TIMESTAMP), float)

        if(require_instrument_timestamp):
            self.assertIsNotNone(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP))
            self.assertIsInstance(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP), float)
|
dol-sen/portage | refs/heads/master | repoman/pym/repoman/metadata.py | 3 | # -*- coding:utf-8 -*-
from __future__ import print_function, unicode_literals
import errno
import logging
import sys
import tempfile
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
# import our initialized portage instance
from repoman._portage import portage
from portage import os
from portage import shutil
from portage.output import green
# Python 3 removed `basestring`; alias it so isinstance() checks keep
# working on both Python 2 and 3.  (This guard was duplicated verbatim in
# the original file; once is enough.)
if sys.hexversion >= 0x3000000:
	basestring = str

# Note: This URI is hardcoded in all metadata.xml files. We can't
# change it without updating all the xml files in the tree.
metadata_dtd_uri = 'http://www.gentoo.org/dtd/metadata.dtd'
metadata_xsd_uri = 'https://www.gentoo.org/xml-schema/metadata.xsd'

# force refetch if the local copy creation time is older than this
metadata_xsd_ctime_interval = 60 * 60 * 24 * 7  # 7 days
def fetch_metadata_xsd(metadata_xsd, repoman_settings):
	"""
	Fetch metadata.xsd if it doesn't exist or the ctime is older than
	metadata_xsd_ctime_interval.

	@param metadata_xsd: destination path for the schema file
	@param repoman_settings: portage config providing FETCHCOMMAND/DISTDIR
	@rtype: bool
	@return: True if successful, otherwise False
	"""
	must_fetch = True
	metadata_xsd_st = None
	current_time = int(time.time())
	try:
		metadata_xsd_st = os.stat(metadata_xsd)
	except EnvironmentError as e:
		if e.errno not in (errno.ENOENT, errno.ESTALE):
			raise
		del e
	else:
		# Trigger fetch if metadata.xsd mtime is old or clock is wrong.
		if abs(current_time - metadata_xsd_st.st_ctime) \
			< metadata_xsd_ctime_interval:
			must_fetch = False

	if must_fetch:
		print()
		print(
			"%s the local copy of metadata.xsd "
			"needs to be refetched, doing that now" % green("***"))
		print()
		# Pick a scheme-specific fetch command if configured, falling back
		# to the generic FETCHCOMMAND.
		parsed_url = urlparse(metadata_xsd_uri)
		setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
		fcmd = repoman_settings.get(setting)
		if not fcmd:
			fcmd = repoman_settings.get('FETCHCOMMAND')
			if not fcmd:
				logging.error("FETCHCOMMAND is unset")
				return False

		destdir = repoman_settings["DISTDIR"]
		# Download into a temp file first so a failed fetch never clobbers
		# an existing schema.
		fd, metadata_xsd_tmp = tempfile.mkstemp(
			prefix='metadata.xsd.', dir=destdir)
		os.close(fd)

		try:
			if not portage.getbinpkg.file_get(
				metadata_xsd_uri, destdir, fcmd=fcmd,
				filename=os.path.basename(metadata_xsd_tmp)):
				logging.error(
					"failed to fetch metadata.xsd from '%s'" % metadata_xsd_uri)
				return False

			try:
				portage.util.apply_secpass_permissions(
					metadata_xsd_tmp,
					gid=portage.data.portage_gid, mode=0o664, mask=0o2)
			except portage.exception.PortageException:
				pass

			shutil.move(metadata_xsd_tmp, metadata_xsd)
		finally:
			try:
				os.unlink(metadata_xsd_tmp)
			except OSError:
				pass

	return True
def get_metadata_xsd(repo_settings):
	'''Locate and or fetch the metadata.xsd file

	@param repo_settings: RepoSettings instance
	@returns: path to the metadata.xsd file
	'''
	# Search the configured trees in reverse order, then the bundled
	# test copy.
	search_dirs = list(reversed(repo_settings.repo_config.eclass_db.porttrees))
	# add the test copy
	search_dirs.append("/usr/lib/portage/cnf/")
	for base in search_dirs:
		candidate = os.path.join(base, 'metadata/xml-schema/metadata.xsd')
		if os.path.exists(candidate):
			return candidate
	# Not found locally: download a copy into DISTDIR and use that path.
	target = os.path.join(
		repo_settings.repoman_settings["DISTDIR"], 'metadata.xsd'
	)
	fetch_metadata_xsd(target, repo_settings.repoman_settings)
	return target
|
adoosii/edx-platform | refs/heads/master | lms/djangoapps/bulk_email/tests/test_err_handling.py | 12 | # -*- coding: utf-8 -*-
"""
Unit tests for handling email sending errors
"""
from itertools import cycle
from celery.states import SUCCESS, RETRY # pylint: disable=no-name-in-module, import-error
from django.conf import settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import DatabaseError
import json
from mock import patch, Mock
from nose.plugins.attrib import attr
from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from bulk_email.models import CourseEmail, SEND_TO_ALL
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
initialize_subtask_info,
SubtaskStatus,
check_subtask_is_valid,
update_subtask_status,
DuplicateTaskException,
MAX_DATABASE_LOCK_RETRIES,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class EmailTestException(Exception):
    """Stand-in exception raised by tests to simulate email-sending failures."""
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestEmailErrors(ModuleStoreTestCase):
    """
    Test that errors from sending email are handled properly.

    Covers SMTP transient/permanent failures (retry vs. fail), bad task
    input (missing email/course/to_option, mismatched course ids), and
    subtask bookkeeping errors (duplicate, retried, locked subtasks).
    """

    def setUp(self):
        super(TestEmailErrors, self).setUp()
        # Non-ASCII title exercises unicode handling in the email pipeline.
        course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
        self.course = CourseFactory.create(display_name=course_title)
        self.instructor = AdminFactory.create()
        self.client.login(username=self.instructor.username, password="test")

        # load initial content (since we don't run migrations as part of tests):
        call_command("loaddata", "course_email_template.json")
        self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.success_content = {
            'course_id': self.course.id.to_deprecated_string(),
            'success': True,
        }

    # NOTE: stacked @patch decorators inject mocks bottom-up, so the
    # innermost decorator maps to the first mock parameter.
    @patch('bulk_email.tasks.get_connection', autospec=True)
    @patch('bulk_email.tasks.send_course_email.retry')
    def test_data_err_retry(self, retry, get_conn):
        """
        Test that celery handles transient SMTPDataErrors by retrying.
        """
        get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded")
        test_email = {
            'action': 'Send email',
            'send_to': 'myself',
            'subject': 'test subject for myself',
            'message': 'test message for myself'
        }
        response = self.client.post(self.send_mail_url, test_email)
        self.assertEquals(json.loads(response.content), self.success_content)

        # Test that we retry upon hitting a 4xx error
        self.assertTrue(retry.called)
        (__, kwargs) = retry.call_args
        exc = kwargs['exc']
        self.assertIsInstance(exc, SMTPDataError)

    @patch('bulk_email.tasks.get_connection', autospec=True)
    @patch('bulk_email.tasks.update_subtask_status')
    @patch('bulk_email.tasks.send_course_email.retry')
    def test_data_err_fail(self, retry, result, get_conn):
        """
        Test that celery handles permanent SMTPDataErrors by failing and not retrying.
        """
        # have every fourth email fail due to blacklisting:
        get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"),
                                                                 None, None, None])
        students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]
        for student in students:
            CourseEnrollmentFactory.create(user=student, course_id=self.course.id)

        test_email = {
            'action': 'Send email',
            'send_to': 'all',
            'subject': 'test subject for all',
            'message': 'test message for all'
        }
        response = self.client.post(self.send_mail_url, test_email)
        self.assertEquals(json.loads(response.content), self.success_content)

        # We shouldn't retry when hitting a 5xx error
        self.assertFalse(retry.called)
        # Test that after the rejected email, the rest still successfully send
        ((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args
        self.assertEquals(subtask_status.skipped, 0)
        # Ceiling division: one failure per started group of four emails.
        expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)
        self.assertEquals(subtask_status.failed, expected_fails)
        self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)

    @patch('bulk_email.tasks.get_connection', autospec=True)
    @patch('bulk_email.tasks.send_course_email.retry')
    def test_disconn_err_retry(self, retry, get_conn):
        """
        Test that celery handles SMTPServerDisconnected by retrying.
        """
        get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting")
        test_email = {
            'action': 'Send email',
            'send_to': 'myself',
            'subject': 'test subject for myself',
            'message': 'test message for myself'
        }
        response = self.client.post(self.send_mail_url, test_email)
        self.assertEquals(json.loads(response.content), self.success_content)

        self.assertTrue(retry.called)
        (__, kwargs) = retry.call_args
        exc = kwargs['exc']
        self.assertIsInstance(exc, SMTPServerDisconnected)

    @patch('bulk_email.tasks.get_connection', autospec=True)
    @patch('bulk_email.tasks.send_course_email.retry')
    def test_conn_err_retry(self, retry, get_conn):
        """
        Test that celery handles SMTPConnectError by retrying.
        """
        get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection")

        test_email = {
            'action': 'Send email',
            'send_to': 'myself',
            'subject': 'test subject for myself',
            'message': 'test message for myself'
        }
        response = self.client.post(self.send_mail_url, test_email)
        self.assertEquals(json.loads(response.content), self.success_content)

        self.assertTrue(retry.called)
        (__, kwargs) = retry.call_args
        exc = kwargs['exc']
        self.assertIsInstance(exc, SMTPConnectError)

    @patch('bulk_email.tasks.SubtaskStatus.increment')
    @patch('bulk_email.tasks.log')
    def test_nonexistent_email(self, mock_log, result):
        """
        Tests retries when the email doesn't exist
        """
        # create an InstructorTask object to pass through
        course_id = self.course.id
        entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
        task_input = {"email_id": -1}
        with self.assertRaises(CourseEmail.DoesNotExist):
            perform_delegate_email_batches(entry.id, course_id, task_input, "action_name")  # pylint: disable=no-member
        ((log_str, __, email_id), __) = mock_log.warning.call_args
        self.assertTrue(mock_log.warning.called)
        self.assertIn('Failed to get CourseEmail with id', log_str)
        self.assertEqual(email_id, -1)
        self.assertFalse(result.called)

    def test_nonexistent_course(self):
        """
        Tests exception when the course in the email doesn't exist
        """
        course_id = SlashSeparatedCourseKey("I", "DONT", "EXIST")
        email = CourseEmail(course_id=course_id)
        email.save()
        entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
        task_input = {"email_id": email.id}  # pylint: disable=no-member
        # (?i) is a regex for ignore case
        with self.assertRaisesRegexp(ValueError, r"(?i)course not found"):
            perform_delegate_email_batches(entry.id, course_id, task_input, "action_name")  # pylint: disable=no-member

    def test_nonexistent_to_option(self):
        """
        Tests exception when the to_option in the email doesn't exist
        """
        email = CourseEmail(course_id=self.course.id, to_option="IDONTEXIST")
        email.save()
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        task_input = {"email_id": email.id}  # pylint: disable=no-member
        with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):
            perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name")  # pylint: disable=no-member

    def test_wrong_course_id_in_task(self):
        """
        Tests exception when the course_id in task is not the same as one explicitly passed in.
        """
        email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL)
        email.save()
        entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor)
        task_input = {"email_id": email.id}  # pylint: disable=no-member
        with self.assertRaisesRegexp(ValueError, 'does not match task value'):
            perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name")  # pylint: disable=no-member

    def test_wrong_course_id_in_email(self):
        """
        Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
        """
        email = CourseEmail(course_id=SlashSeparatedCourseKey("bogus", "course", "id"), to_option=SEND_TO_ALL)
        email.save()
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        task_input = {"email_id": email.id}  # pylint: disable=no-member
        with self.assertRaisesRegexp(ValueError, 'does not match email value'):
            perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name")  # pylint: disable=no-member

    def test_send_email_undefined_subtask(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        subtask_id = "subtask-id-value"
        subtask_status = SubtaskStatus.create(subtask_id)
        email_id = 1001
        with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'):
            send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict())

    def test_send_email_missing_subtask(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        subtask_id = "subtask-id-value"
        initialize_subtask_info(entry, "emailed", 100, [subtask_id])
        # Register one subtask id, then send with a different one.
        different_subtask_id = "bogus-subtask-id-value"
        subtask_status = SubtaskStatus.create(different_subtask_id)
        bogus_email_id = 1001
        with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'):
            send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())

    def test_send_email_completed_subtask(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        subtask_id = "subtask-id-value"
        initialize_subtask_info(entry, "emailed", 100, [subtask_id])
        subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS)
        update_subtask_status(entry_id, subtask_id, subtask_status)
        bogus_email_id = 1001
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        new_subtask_status = SubtaskStatus.create(subtask_id)
        with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'):
            send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())

    def test_send_email_running_subtask(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        subtask_id = "subtask-id-value"
        initialize_subtask_info(entry, "emailed", 100, [subtask_id])
        subtask_status = SubtaskStatus.create(subtask_id)
        update_subtask_status(entry_id, subtask_id, subtask_status)
        check_subtask_is_valid(entry_id, subtask_id, subtask_status)
        bogus_email_id = 1001
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'):
            send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())

    def test_send_email_retried_subtask(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        subtask_id = "subtask-id-value"
        initialize_subtask_info(entry, "emailed", 100, [subtask_id])
        subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2)
        update_subtask_status(entry_id, subtask_id, subtask_status)
        bogus_email_id = 1001
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        # try running with a clean subtask:
        new_subtask_status = SubtaskStatus.create(subtask_id)
        with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
            send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
        # try again, with a retried subtask with lower count:
        new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1)
        with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
            send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())

    def test_send_email_with_locked_instructor_task(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        subtask_id = "subtask-id-locked-model"
        initialize_subtask_info(entry, "emailed", 100, [subtask_id])
        subtask_status = SubtaskStatus.create(subtask_id)
        bogus_email_id = 1001
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        # Simulate a locked row: every save() raises, so the task should
        # give up after MAX_DATABASE_LOCK_RETRIES attempts.
        with patch('instructor_task.subtasks.InstructorTask.save') as mock_task_save:
            mock_task_save.side_effect = DatabaseError
            with self.assertRaises(DatabaseError):
                send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
            self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES)

    def test_send_email_undefined_email(self):
        # test at a lower level, to ensure that the course gets checked down below too.
        entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
        entry_id = entry.id  # pylint: disable=no-member
        to_list = ['test@test.com']
        global_email_context = {'course_title': 'dummy course'}
        subtask_id = "subtask-id-undefined-email"
        initialize_subtask_info(entry, "emailed", 100, [subtask_id])
        subtask_status = SubtaskStatus.create(subtask_id)
        bogus_email_id = 1001
        with self.assertRaises(CourseEmail.DoesNotExist):
            # we skip the call that updates subtask status, since we've not set up the InstructorTask
            # for the subtask, and it's not important to the test.
            with patch('bulk_email.tasks.update_subtask_status'):
                send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
|
gnowxilef/youtube-dl | refs/heads/master | youtube_dl/extractor/eroprofile.py | 61 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
unescapeHTML
)
class EroProfileIE(InfoExtractor):
    """Extractor for videos hosted on eroprofile.com, with optional login
    support (credentials taken from .netrc machine 'eroprofile')."""

    _VALID_URL = r'https?://(?:www\.)?eroprofile\.com/m/videos/view/(?P<id>[^/]+)'
    _LOGIN_URL = 'http://www.eroprofile.com/auth/auth.php?'
    _NETRC_MACHINE = 'eroprofile'
    _TESTS = [{
        'url': 'http://www.eroprofile.com/m/videos/view/sexy-babe-softcore',
        'md5': 'c26f351332edf23e1ea28ce9ec9de32f',
        'info_dict': {
            'id': '3733775',
            'display_id': 'sexy-babe-softcore',
            'ext': 'm4v',
            'title': 'sexy babe softcore',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
        'md5': '1baa9602ede46ce904c431f5418d8916',
        'info_dict': {
            'id': '1133519',
            'ext': 'm4v',
            'title': 'Try It On Pee_cut_2.wmv - 4shared.com - file sharing - download movie file',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'skip': 'Requires login',
    }]

    def _login(self):
        """Log in via the auth endpoint if credentials are configured;
        no-op otherwise."""
        (username, password) = self._get_login_info()
        if username is None:
            return

        # The site accepts credentials as URL query parameters.
        query = compat_urllib_parse_urlencode({
            'username': username,
            'password': password,
            'url': 'http://www.eroprofile.com/',
        })
        login_url = self._LOGIN_URL + query
        login_page = self._download_webpage(login_url, None, False)

        m = re.search(r'Your username or password was incorrect\.', login_page)
        if m:
            raise ExtractorError(
                'Wrong username and/or password.', expected=True)

        self.report_login()
        # A successful login redirects via a <script> URL that must be
        # followed to establish the session cookies.
        redirect_url = self._search_regex(
            r'<script[^>]+?src="([^"]+)"', login_page, 'login redirect url')
        self._download_webpage(redirect_url, None, False)

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        m = re.search(r'You must be logged in to view this video\.', webpage)
        if m:
            self.raise_login_required('This video requires login')

        # The numeric id appears either in the view-count updater call or
        # in the report link; fall back to None if neither matches.
        video_id = self._search_regex(
            [r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
            webpage, 'video id', default=None)

        video_url = unescapeHTML(self._search_regex(
            r'<source src="([^"]+)', webpage, 'video url'))
        title = self._html_search_regex(
            r'Title:</th><td>([^<]+)</td>', webpage, 'title')
        thumbnail = self._search_regex(
            r'onclick="showVideoPlayer\(\)"><img src="([^"]+)',
            webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'age_limit': 18,
        }
|
leppa/home-assistant | refs/heads/dev | homeassistant/components/keba/sensor.py | 1 | """Support for KEBA charging station sensors."""
import logging
from homeassistant.const import DEVICE_CLASS_POWER, ENERGY_KILO_WATT_HOUR
from homeassistant.helpers.entity import Entity
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the KEBA charging station platform.

    Only runs when discovered by the main KEBA integration
    (discovery_info must be set); registers one sensor per metric.
    """
    if discovery_info is None:
        return
    keba = hass.data[DOMAIN]

    sensors = [
        KebaSensor(keba, "Curr user", "Max current", "mdi:flash", "A"),
        KebaSensor(
            keba, "Setenergy", "Energy target", "mdi:gauge", ENERGY_KILO_WATT_HOUR
        ),
        KebaSensor(keba, "P", "Charging power", "mdi:flash", "kW", DEVICE_CLASS_POWER),
        KebaSensor(
            keba, "E pres", "Session energy", "mdi:gauge", ENERGY_KILO_WATT_HOUR
        ),
        KebaSensor(keba, "E total", "Total Energy", "mdi:gauge", ENERGY_KILO_WATT_HOUR),
    ]
    async_add_entities(sensors)
class KebaSensor(Entity):
    """The entity class for KEBA charging stations sensors."""

    def __init__(self, keba, key, name, icon, unit, device_class=None):
        """Initialize the KEBA Sensor.

        :param keba: KebaHandler providing cached station values
        :param key: key used to look values up in the handler cache
        :param name: human-readable sensor name (also part of unique_id)
        :param icon: Material Design icon identifier
        :param unit: unit of measurement for the state value
        :param device_class: optional Home Assistant device class
        """
        self._key = key
        self._keba = keba
        self._name = name
        self._device_class = device_class
        self._icon = icon
        self._unit = unit
        self._state = None
        self._attributes = {}

    @property
    def should_poll(self):
        """Deactivate polling. Data updated by KebaHandler."""
        return False

    @property
    def unique_id(self):
        """Return the unique ID of the binary sensor."""
        return f"{self._keba.device_name}_{self._name}"

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._device_class

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Get the unit of measurement."""
        return self._unit

    @property
    def device_state_attributes(self):
        """Return the state attributes of the binary sensor."""
        return self._attributes

    async def async_update(self):
        """Get latest cached states from the device."""
        self._state = self._keba.get_value(self._key)

        # Expose extra electrical details for selected sensors.
        if self._key == "P":
            self._attributes["power_factor"] = self._keba.get_value("PF")
            self._attributes["voltage_u1"] = str(self._keba.get_value("U1"))
            self._attributes["voltage_u2"] = str(self._keba.get_value("U2"))
            self._attributes["voltage_u3"] = str(self._keba.get_value("U3"))
            self._attributes["current_i1"] = str(self._keba.get_value("I1"))
            self._attributes["current_i2"] = str(self._keba.get_value("I2"))
            self._attributes["current_i3"] = str(self._keba.get_value("I3"))
        elif self._key == "Curr user":
            self._attributes["max_current_hardware"] = self._keba.get_value("Curr HW")

    def update_callback(self):
        """Schedule a state update."""
        self.async_schedule_update_ha_state(True)

    async def async_added_to_hass(self):
        """Add update callback after being added to hass."""
        self._keba.add_update_listener(self.update_callback)
|
malept/gunicorn | refs/heads/master | examples/frameworks/django/testing/testing/apps/someapp/urls.py | 33 | from django.conf.urls import url
from . import views
# Route table for the example app; order matters for URL resolution.
urlpatterns = [
    url(r'^acsv$', views.acsv),
    url(r'^$', views.home),
]
|
openfisca/openfisca-france-data | refs/heads/master | openfisca_france_data/model/common.py | 1 | # -*- coding: utf-8 -*-
from numpy import arange

# The survey-manager statistics helpers are optional dependencies.  When
# they are missing, the names are bound to None so import succeeds and the
# failure (if any) is deferred to the quantile-based formulas below.
try:
    from openfisca_survey_manager.statshelpers import weighted_quantiles
except ImportError:
    weighted_quantiles = None

try:
    from openfisca_survey_manager.statshelpers import mark_weighted_percentiles
except ImportError:
    mark_weighted_percentiles = None

from openfisca_france_data.model.base import *  # noqa analysis:ignore
class assiette_csg_salaire(Variable):
    value_type = float
    entity = Individu
    label = "Assiette CSG salaires"
    definition_period = MONTH

    def formula(individu, period, parameters):
        """CSG base on wages: abated part minus the legal abatement
        (capped by the social security ceiling), plus the non-abated part."""
        assiette_csg_abattue = individu('assiette_csg_abattue', period)
        assiette_csg_non_abattue = individu('assiette_csg_non_abattue', period)
        plafond_securite_sociale = individu('plafond_securite_sociale', period)

        abattement = parameters(period.start).prelevements_sociaux.contributions.csg.activite.deductible.abattement
        assiette = assiette_csg_abattue - abattement.calc(
            assiette_csg_abattue,
            factor = plafond_securite_sociale,
            round_base_decimals = 2,
            ) + assiette_csg_non_abattue
        return assiette
class assiette_csg_retraite(Variable):
    value_type = float
    entity = Individu
    label = "Assiette CSG retraite"
    definition_period = MONTH

    def formula(individu, period, parameters):
        # Pensions enter the CSG base only above the reduced-rate
        # threshold (taux_csg_remplacement >= 2).
        retraite_brute = individu('retraite_brute', period)
        taux_csg_remplacement = individu('taux_csg_remplacement', period)
        return retraite_brute * (taux_csg_remplacement >= 2)
class assiette_csg_chomage(Variable):
    value_type = float
    entity = Individu
    label = "Assiette CSG chomage"
    definition_period = MONTH

    def formula(individu, period, parameters):
        # Unemployment benefits enter the CSG base only above the
        # reduced-rate threshold (taux_csg_remplacement >= 2).
        chomage_brut = individu('chomage_brut', period)
        taux_csg_remplacement = individu('taux_csg_remplacement', period)
        return chomage_brut * (taux_csg_remplacement >= 2)
class decile(Variable):
    value_type = Enum
    possible_values = Deciles
    default_value = Deciles.hors_champs
    entity = Menage
    label = "Décile de niveau de vie disponible"
    definition_period = YEAR

    def formula(menage, period):
        menage_ordinaire = menage('menage_ordinaire', period)
        niveau_de_vie = menage('niveau_de_vie', period)
        wprm = menage('wprm', period)
        labels = arange(1, 11)
        method = 2
        # A single observation cannot be ranked into deciles.
        if len(wprm) == 1:
            return wprm * 0
        decile, values = mark_weighted_percentiles(
            niveau_de_vie, labels, wprm * menage_ordinaire, method, return_quantiles = True)
        del values
        # Non-ordinary households are zeroed out (hors champs).
        return decile * menage_ordinaire
class centile(Variable):
    value_type = Enum
    possible_values = Deciles
    default_value = Deciles.hors_champs
    entity = Menage
    label = "Centile de niveau de vie disponible"
    definition_period = YEAR

    def formula(menage, period):
        menage_ordinaire = menage('menage_ordinaire', period)
        niveau_de_vie = menage('niveau_de_vie', period)
        wprm = menage('wprm', period)
        labels = arange(1, 101)
        method = 2
        # A single observation cannot be ranked into centiles.
        if len(wprm) == 1:
            return wprm * 0
        centile, values = mark_weighted_percentiles(
            niveau_de_vie, labels, wprm * menage_ordinaire, method, return_quantiles = True)
        del values
        # Non-ordinary households are zeroed out (hors champs).
        return centile * menage_ordinaire
class decile_rfr(Variable):
    value_type = Enum
    possible_values = Deciles
    default_value = Deciles.hors_champs
    entity = FoyerFiscal
    label = "Décile de revenu fiscal de référence"
    definition_period = YEAR

    def formula(foyer_fiscal, period):
        rfr = foyer_fiscal('rfr', period)
        weight_foyers = foyer_fiscal('weight_foyers', period)
        menage_ordinaire_foyers_fiscaux = foyer_fiscal('menage_ordinaire_foyers_fiscaux', period)
        labels = arange(1, 11)
        # Alternative method
        # method = 2
        # decile, values = mark_weighted_percentiles(niveau_de_vie, labels, pondmen, method, return_quantiles = True)
        decile, values = weighted_quantiles(rfr, labels, weight_foyers * menage_ordinaire_foyers_fiscaux, return_quantiles = True)
        return decile
class centile_rfr(Variable):
    value_type = Enum
    possible_values = Deciles
    default_value = Deciles.hors_champs
    entity = FoyerFiscal
    label = "Centile de revenu fiscal de référence"
    definition_period = YEAR

    def formula(foyer_fiscal, period):
        # Weighted centile rank (1..100) of the reference tax income.
        rfr = foyer_fiscal('rfr', period)
        weight_foyers = foyer_fiscal('weight_foyers', period)
        menage_ordinaire_foyers_fiscaux = foyer_fiscal('menage_ordinaire_foyers_fiscaux', period)
        labels = arange(1, 101)
        centile, values = weighted_quantiles(rfr, labels, weight_foyers * menage_ordinaire_foyers_fiscaux, return_quantiles = True)
        return centile
class decile_rfr_par_part(Variable):
    value_type = Enum
    possible_values = Deciles
    default_value = Deciles.hors_champs
    entity = FoyerFiscal
    label = "Décile de revenu fiscal de référence par part fiscale"
    definition_period = YEAR

    def formula(foyer_fiscal, period):
        # Same as decile_rfr, but income is normalized by fiscal parts.
        rfr = foyer_fiscal('rfr', period)
        nbptr = foyer_fiscal('nbptr', period)
        weight_foyers = foyer_fiscal('weight_foyers', period)
        menage_ordinaire_foyers_fiscaux = foyer_fiscal('menage_ordinaire_foyers_fiscaux', period)
        labels = arange(1, 11)
        # Alternative method
        # method = 2
        # decile, values = mark_weighted_percentiles(niveau_de_vie, labels, pondmen, method, return_quantiles = True)
        decile, values = weighted_quantiles(
            rfr / nbptr, labels, weight_foyers * menage_ordinaire_foyers_fiscaux, return_quantiles = True)
        return decile
class centile_rfr_par_part(Variable):
    value_type = Enum
    possible_values = Deciles
    default_value = Deciles.hors_champs
    entity = FoyerFiscal
    label = "Centile de revenu fiscal de référence par part fiscale"
    definition_period = YEAR

    def formula(foyer_fiscal, period):
        # Same as centile_rfr, but income is normalized by fiscal parts.
        rfr = foyer_fiscal('rfr', period)
        nbptr = foyer_fiscal('nbptr', period)
        weight_foyers = foyer_fiscal('weight_foyers', period)
        menage_ordinaire_foyers_fiscaux = foyer_fiscal('menage_ordinaire_foyers_fiscaux', period)
        labels = arange(1, 101)
        centile, values = weighted_quantiles(
            rfr / nbptr, labels, weight_foyers * menage_ordinaire_foyers_fiscaux, return_quantiles = True)
        return centile
class pauvre40(Variable):
    value_type = bool
    entity = Menage
    label = "Ménage en dessous du seuil de pauvreté à 40%"
    definition_period = YEAR

    def formula(menage, period):
        menage_ordinaire = menage('menage_ordinaire', period)
        nivvie = menage('nivvie', period)
        wprm = menage('wprm', period)
        labels = arange(1, 3)
        method = 2
        # A single observation: no distribution to take a median from.
        if len(wprm) == 1:
            return wprm * 0
        # values[1] is the weighted median of living standards.
        percentile, values = mark_weighted_percentiles(nivvie, labels, wprm * menage_ordinaire, method, return_quantiles = True)
        threshold = .4 * values[1]
        return (nivvie <= threshold) * menage_ordinaire
class pauvre50(Variable):
    value_type = bool
    entity = Menage
    label = "Ménage en dessous du seuil de pauvreté à 50%"
    definition_period = YEAR

    def formula(menage, period):
        menage_ordinaire = menage('menage_ordinaire', period)
        nivvie = menage('nivvie', period)
        wprm = menage('wprm', period)
        labels = arange(1, 3)
        method = 2
        # A single observation: no distribution to take a median from.
        if len(wprm) == 1:
            return wprm * 0
        # values[1] is the weighted median of living standards.
        percentile, values = mark_weighted_percentiles(nivvie, labels, wprm * menage_ordinaire, method, return_quantiles = True)
        threshold = .5 * values[1]
        return (nivvie <= threshold) * menage_ordinaire
class pauvre60(Variable):
    value_type = bool
    entity = Menage
    label = "Ménage en dessous du seuil de pauvreté à 60%"
    definition_period = YEAR

    def formula(menage, period):
        menage_ordinaire = menage('menage_ordinaire', period)
        nivvie = menage('nivvie', period)
        wprm = menage('wprm', period)
        labels = arange(1, 3)
        method = 2
        # A single observation: no distribution to take a median from.
        if len(wprm) == 1:
            return wprm * 0
        # values[1] is the weighted median of living standards.
        percentile, values = mark_weighted_percentiles(nivvie, labels, wprm * menage_ordinaire, method, return_quantiles = True)
        threshold = .6 * values[1]
        return (nivvie <= threshold) * menage_ordinaire
|
ericholscher/pinax | refs/heads/master | pinax/projects/social_project/tests/smoke_test.py | 8 | ## initially a quick smoke test to see if certain URLs throw exceptions or not
## would have caught a high percentage of recent trunk breakages
## run with ./manage.py runscript tests.smoke_test
def run():
from django.test.client import Client
c = Client()
pages = [
'/',
'/about/',
'/profiles/',
'/blog/',
'/invitations/',
'/notices/',
'/messages/',
'/announcements/',
'/tweets/',
'/tribes/',
'/robots.txt',
'/photos/',
'/bookmarks/',
]
for page in pages:
print page,
try:
x = c.get(page)
if x.status_code in [301, 302]:
print x.status_code, "=>", x["Location"]
else:
print x.status_code
except Exception, e:
print e
|
fausecteam/ctf-gameserver | refs/heads/master | tests/checker/test_integration.py | 1 | import os.path
from unittest import SkipTest
from unittest.mock import patch
import shutil
import sqlite3
import subprocess
import tempfile
import time
from ctf_gameserver.checker.master import MasterLoop
from ctf_gameserver.checker.metrics import DummyQueue
from ctf_gameserver.lib.checkresult import CheckResult
from ctf_gameserver.lib.database import transaction_cursor
from ctf_gameserver.lib.test_util import DatabaseTestCase
class IntegrationTest(DatabaseTestCase):
fixtures = ['tests/checker/fixtures/integration.json']
def setUp(self):
self.check_duration_patch = patch('ctf_gameserver.checker.database.get_check_duration')
check_duration_mock = self.check_duration_patch.start()
check_duration_mock.return_value = None
def tearDown(self):
self.check_duration_patch.stop()
@patch('ctf_gameserver.checker.master.get_monotonic_time')
def test_basic(self, monotonic_mock):
checkerscript_path = os.path.join(os.path.dirname(__file__), 'integration_basic_checkerscript.py')
monotonic_mock.return_value = 10
master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
'0.0.%s.1', b'secret', {}, DummyQueue())
master_loop.supervisor.queue_timeout = 0.01
# Sanity check before any tick
self.assertFalse(master_loop.step())
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
self.assertEqual(cursor.fetchone()[0], 0)
# Start tick
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
' VALUES (1, 2, 0)')
# Checker won't get started because interval is not yet over
self.assertFalse(master_loop.step())
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE placement_start IS NOT NULL')
self.assertEqual(cursor.fetchone()[0], 0)
cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
self.assertEqual(cursor.fetchone()[0], 0)
# Interval is over, Checker Script gets started
monotonic_mock.return_value = 20
# Will return False because no messages yet
self.assertFalse(master_loop.step())
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE placement_start IS NOT NULL')
self.assertEqual(cursor.fetchone()[0], 1)
master_loop.supervisor.queue_timeout = 10
# Handle all messages from Checker Script
while master_loop.step():
pass
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE placement_end IS NOT NULL')
self.assertEqual(cursor.fetchone()[0], 1)
cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
self.assertEqual(cursor.fetchone()[0], 1)
cursor.execute('SELECT status FROM scoring_statuscheck'
' WHERE service_id=1 AND team_id=2 AND tick=0')
self.assertEqual(cursor.fetchone()[0], CheckResult.OK.value)
cursor.execute('SELECT flagid FROM scoring_flag'
' WHERE service_id=1 AND protecting_team_id=2 AND tick=0')
self.assertEqual(cursor.fetchone()[0], 'value identifier')
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_missing_checkerscript(self, monotonic_mock):
        """A nonexistent Checker Script path must leave flag placement unfinished and record no status check."""
        # Deliberately point at a path that does not exist.
        checkerscript_path = os.path.join(os.path.dirname(__file__), 'does not exist')
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        # Checker Script gets started, will return False because no messages yet
        self.assertFalse(master_loop.step())
        master_loop.supervisor.queue_timeout = 10
        # Drain all pending messages until the loop has nothing left to do.
        while master_loop.step():
            pass

        with transaction_cursor(self.connection) as cursor:
            # Placement was started but could never finish, since the script never ran.
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_start IS NOT NULL AND placement_end IS NULL')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 0)
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_exception(self, monotonic_mock):
        """A Checker Script that raises an exception must not finish placement or record any status check."""
        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_exception_checkerscript.py')
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        # Checker Script gets started, will return False because no messages yet
        self.assertFalse(master_loop.step())
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step():
            pass

        with transaction_cursor(self.connection) as cursor:
            # Placement started but never completed because the script crashed.
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_start IS NOT NULL AND placement_end IS NULL')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 0)
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_down(self, monotonic_mock):
        """A Checker Script reporting DOWN must finish flag placement and record the DOWN status."""
        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_down_checkerscript.py')
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        # Checker Script gets started, will return False because no messages yet
        self.assertFalse(master_loop.step())
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step():
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 1)
            # The recorded status must be the DOWN result reported by the script.
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=2 AND tick=0')
            self.assertEqual(cursor.fetchone()[0], CheckResult.DOWN.value)
    @patch('logging.warning')
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_unfinished(self, monotonic_mock, warning_mock):
        """A Checker Script that never finishes must be killed once the next tick begins."""
        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_unfinished_checkerscript.py')
        # The script writes its PID to this file so the test can probe it.
        checkerscript_pidfile = tempfile.NamedTemporaryFile()
        os.environ['CHECKERSCRIPT_PIDFILE'] = checkerscript_pidfile.name
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        # Checker Script gets started, will return False because no messages yet
        self.assertFalse(master_loop.step())
        master_loop.supervisor.queue_timeout = 10
        self.assertTrue(master_loop.step())

        checkerscript_pidfile.seek(0)
        checkerscript_pid = int(checkerscript_pidfile.read())
        # Ensure process is running by sending signal 0
        os.kill(checkerscript_pid, 0)

        master_loop.supervisor.queue_timeout = 0.01
        monotonic_mock.return_value = 50
        self.assertFalse(master_loop.step())
        # Process should still be running
        os.kill(checkerscript_pid, 0)

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=1')
        # Advancing the (mocked) clock past the tick boundary triggers termination.
        monotonic_mock.return_value = 190
        self.assertFalse(master_loop.step())

        # Poll whether the process has been killed
        for _ in range(100):
            try:
                os.kill(checkerscript_pid, 0)
            except ProcessLookupError:
                break
            time.sleep(0.1)
        with self.assertRaises(ProcessLookupError):
            os.kill(checkerscript_pid, 0)

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_start IS NOT NULL AND placement_end IS NULL')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 0)
        warning_mock.assert_called_with('Terminating all %d Runner processes', 1)

        del os.environ['CHECKERSCRIPT_PIDFILE']
        checkerscript_pidfile.close()
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_multi_teams_ticks(self, monotonic_mock):
        """Checks across multiple teams and ticks accumulate the expected flags and per-tick statuses."""
        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_multi_checkerscript.py')
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        # Tick 0
        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            # Also add flags for service 2 (which does not get checked) to make sure it won't get touched
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0), (1, 3, 0), (2, 2, 0), (2, 3, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        self.assertFalse(master_loop.step())
        monotonic_mock.return_value = 100
        master_loop.supervisor.queue_timeout = 10
        # Drain messages and wait for all Checker Scripts of this tick to finish.
        while master_loop.step() or master_loop.get_running_script_count() > 0:
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 2)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 2)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=2 AND tick=0')
            self.assertEqual(cursor.fetchone()[0], CheckResult.FAULTY.value)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=3 AND tick=0')
            self.assertEqual(cursor.fetchone()[0], CheckResult.OK.value)

        # Tick 1
        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=1')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 1), (1, 3, 1), (2, 2, 1), (2, 3, 1)')

        monotonic_mock.return_value = 200
        master_loop.supervisor.queue_timeout = 0.01
        self.assertFalse(master_loop.step())
        monotonic_mock.return_value = 280
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step() or master_loop.get_running_script_count() > 0:
            pass

        with transaction_cursor(self.connection) as cursor:
            # Totals now include both ticks; tick-0 results must be unchanged.
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 4)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 4)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=2 AND tick=0')
            self.assertEqual(cursor.fetchone()[0], CheckResult.FAULTY.value)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=3 AND tick=0')
            self.assertEqual(cursor.fetchone()[0], CheckResult.OK.value)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=2 AND tick=1')
            self.assertEqual(cursor.fetchone()[0], CheckResult.DOWN.value)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=3 AND tick=1')
            self.assertEqual(cursor.fetchone()[0], CheckResult.FAULTY.value)

        # Tick 2
        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=2')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 2), (1, 3, 2), (2, 2, 2), (2, 3, 2)')

        monotonic_mock.return_value = 380
        master_loop.supervisor.queue_timeout = 0.01
        self.assertFalse(master_loop.step())
        monotonic_mock.return_value = 460
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step() or master_loop.get_running_script_count() > 0:
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 6)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 6)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=2 AND tick=2')
            self.assertEqual(cursor.fetchone()[0], CheckResult.RECOVERING.value)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=3 AND tick=2')
            self.assertEqual(cursor.fetchone()[0], CheckResult.OK.value)
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_state(self, monotonic_mock):
        """Checker state stored by the script is persisted per team/service and visible across ticks."""
        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_state_checkerscript.py')
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            # Prepopulate state for the non-checked service to ensure we'll never get this data returned
            data = 'gAN9cQBYAwAAAGZvb3EBWAMAAABiYXJxAnMu'
            cursor.execute('INSERT INTO scoring_checkerstate (team_id, service_id, key, data)'
                           ' VALUES (2, 2, %s, %s), (3, 2, %s, %s)', ('key1', data, 'key2', data))

        # Tick 0
        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0), (1, 3, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        self.assertFalse(master_loop.step())
        monotonic_mock.return_value = 100
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step() or master_loop.get_running_script_count() > 0:
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 2)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck WHERE status=%s',
                           (CheckResult.OK.value,))
            self.assertEqual(cursor.fetchone()[0], 2)
            # In tick 0 the script has no stored state yet, so no flag ID is set.
            cursor.execute('SELECT flagid FROM scoring_flag'
                           ' WHERE service_id=1 AND protecting_team_id=3 AND tick=0')
            self.assertIsNone(cursor.fetchone()[0])

        # Tick 1
        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=1')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 1), (1, 3, 1)')

        monotonic_mock.return_value = 200
        master_loop.supervisor.queue_timeout = 0.01
        self.assertFalse(master_loop.step())
        monotonic_mock.return_value = 280
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step() or master_loop.get_running_script_count() > 0:
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 4)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck WHERE status=%s',
                           (CheckResult.OK.value,))
            self.assertEqual(cursor.fetchone()[0], 4)
            # State written in tick 0 is now visible to the script.
            cursor.execute('SELECT flagid FROM scoring_flag'
                           ' WHERE service_id=1 AND protecting_team_id=3 AND tick=1')
            self.assertEqual(cursor.fetchone()[0], 'value identifier')

        # Tick 2
        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=2')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 2), (1, 3, 2)')

        monotonic_mock.return_value = 380
        master_loop.supervisor.queue_timeout = 0.01
        self.assertFalse(master_loop.step())
        monotonic_mock.return_value = 460
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step() or master_loop.get_running_script_count() > 0:
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 6)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck WHERE status=%s',
                           (CheckResult.OK.value,))
            self.assertEqual(cursor.fetchone()[0], 6)
            cursor.execute('SELECT flagid FROM scoring_flag'
                           ' WHERE service_id=1 AND protecting_team_id=3 AND tick=2')
            self.assertIsNone(cursor.fetchone()[0])
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_shutdown(self, monotonic_mock):
        """While shutting down, the master loop must not launch any new checks."""
        checkerscript_path = '/dev/null'
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, None, 2, 1, 10,
                                 '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        # Simulate a shutdown request before the tick's checks would start.
        master_loop.shutting_down = True
        master_loop.supervisor.queue_timeout = 0.01
        monotonic_mock.return_value = 20
        # Will return False because no messages yet
        self.assertFalse(master_loop.step())

        with transaction_cursor(self.connection) as cursor:
            # No placement was started and no status check was recorded.
            cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE placement_start IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 0)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 0)
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_sudo(self, monotonic_mock):
        """Running the Checker Script through sudo as a dedicated user works end to end."""
        # Skip on systems without the required sudo setup.
        if shutil.which('sudo') is None or not os.path.exists('/etc/sudoers.d/ctf-checker'):
            raise SkipTest('sudo or sudo config not available')

        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_sudo_checkerscript.py')
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, 'ctf-checkerrunner', 2, 1,
                                 10, '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        # Checker Script gets started, will return False because no messages yet
        self.assertFalse(master_loop.step())
        master_loop.supervisor.queue_timeout = 10
        while master_loop.step():
            pass

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE placement_end IS NOT NULL')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT status FROM scoring_statuscheck'
                           ' WHERE service_id=1 AND team_id=2 AND tick=0')
            self.assertEqual(cursor.fetchone()[0], CheckResult.OK.value)
    @patch('logging.warning')
    @patch('ctf_gameserver.checker.master.get_monotonic_time')
    def test_sudo_unfinished(self, monotonic_mock, warning_mock):
        """An unfinished Checker Script running under sudo must be terminated at the next tick."""
        if shutil.which('sudo') is None or not os.path.exists('/etc/sudoers.d/ctf-checker'):
            raise SkipTest('sudo or sudo config not available')

        checkerscript_path = os.path.join(os.path.dirname(__file__),
                                          'integration_unfinished_checkerscript.py')
        checkerscript_pidfile = tempfile.NamedTemporaryFile()
        # World-writable so the script can write its PID as a different user.
        os.chmod(checkerscript_pidfile.name, 0o666)
        os.environ['CHECKERSCRIPT_PIDFILE'] = checkerscript_pidfile.name
        monotonic_mock.return_value = 10
        master_loop = MasterLoop(self.connection, 'service1', checkerscript_path, 'ctf-checkerrunner', 2, 1,
                                 10, '0.0.%s.1', b'secret', {}, DummyQueue())

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET start=NOW()')
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=0')
            cursor.execute('INSERT INTO scoring_flag (service_id, protecting_team_id, tick)'
                           ' VALUES (1, 2, 0)')

        monotonic_mock.return_value = 20
        master_loop.supervisor.queue_timeout = 0.01
        # Checker Script gets started, will return False because no messages yet
        self.assertFalse(master_loop.step())
        master_loop.supervisor.queue_timeout = 10
        self.assertTrue(master_loop.step())

        checkerscript_pidfile.seek(0)
        checkerscript_pid = int(checkerscript_pidfile.read())

        def signal_script():
            # Probe the process as the checker user; raises CalledProcessError if gone.
            subprocess.check_call(['sudo', '--user=ctf-checkerrunner', '--', 'kill', '-0',
                                   str(checkerscript_pid)])

        # Ensure process is running by sending signal 0
        signal_script()

        master_loop.supervisor.queue_timeout = 0.01
        monotonic_mock.return_value = 50
        self.assertFalse(master_loop.step())
        # Process should still be running
        signal_script()

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('UPDATE scoring_gamecontrol SET current_tick=1')
        monotonic_mock.return_value = 190
        self.assertFalse(master_loop.step())

        # Poll whether the process has been killed
        for _ in range(100):
            try:
                signal_script()
            except subprocess.CalledProcessError:
                break
            time.sleep(0.1)
        with self.assertRaises(subprocess.CalledProcessError):
            signal_script()

        with transaction_cursor(self.connection) as cursor:
            cursor.execute('SELECT COUNT(*) FROM scoring_flag'
                           ' WHERE placement_start IS NOT NULL AND placement_end IS NULL')
            self.assertEqual(cursor.fetchone()[0], 1)
            cursor.execute('SELECT COUNT(*) FROM scoring_statuscheck')
            self.assertEqual(cursor.fetchone()[0], 0)
        warning_mock.assert_called_with('Terminating all %d Runner processes', 1)

        del os.environ['CHECKERSCRIPT_PIDFILE']
        checkerscript_pidfile.close()
|
ojengwa/grr | refs/heads/master | lib/rdfvalues/stats.py | 4 | #!/usr/bin/env python
"""RDFValue instances related to the statistics collection."""
import math
import threading
from grr.lib import rdfvalue
from grr.lib import utils
from grr.proto import analysis_pb2
from grr.proto import jobs_pb2
class StatsHistogramBin(rdfvalue.RDFProtoStruct):
  """A single histogram bin, wrapping the corresponding protobuf."""
  protobuf = jobs_pb2.StatsHistogramBin
class StatsHistogram(rdfvalue.RDFProtoStruct):
  """Histogram with a user-provided set of bins."""
  protobuf = jobs_pb2.StatsHistogram

  def __init__(self, initializer=None, **kwargs):
    # A list/tuple initializer is interpreted as the upper bounds of the
    # histogram bins; anything else is passed through to the proto struct.
    if isinstance(initializer, (list, tuple)):
      super(StatsHistogram, self).__init__(initializer=None, **kwargs)
      for histogram_bin in initializer:
        self.bins.Append(StatsHistogramBin(range_max_value=histogram_bin))
    else:
      super(StatsHistogram, self).__init__(initializer=initializer, **kwargs)

  def RegisterValue(self, value):
    """Puts a given value into an appropriate bin."""
    if self.bins:
      # Bins are searched in order; a value belongs to the first bin whose
      # upper bound exceeds it.
      for b in self.bins:
        if b.range_max_value > value:
          b.num += 1
          return

      # Value is >= every bin's upper bound: count it in the last bin.
      self.bins[-1].num += 1
class RunningStats(rdfvalue.RDFProtoStruct):
  """Class for collecting running stats: mean, stdev and histogram data."""
  protobuf = jobs_pb2.RunningStats

  def RegisterValue(self, value):
    """Registers a new sample, updating count, sums and the histogram."""
    self.num += 1
    self.sum += value
    self.sum_sq += value ** 2
    self.histogram.RegisterValue(value)

  @property
  def mean(self):
    """Arithmetic mean of all registered values (0 if no values yet)."""
    if self.num == 0:
      return 0
    else:
      return self.sum / float(self.num)

  @property
  def std(self):
    """Standard deviation of all registered values (0 if no values yet)."""
    if self.num == 0:
      return 0
    else:
      # E[X^2] - E[X]^2 can come out slightly negative due to floating
      # point rounding (e.g. when all values are nearly equal), which
      # would make math.sqrt raise a ValueError. Clamp at zero.
      variance = self.sum_sq / float(self.num) - self.mean ** 2
      return math.sqrt(max(variance, 0.0))
class ClientResourcesStats(rdfvalue.RDFProtoStruct):
  """RDF value representing clients' resources usage statistics for hunts."""
  protobuf = jobs_pb2.ClientResourcesStats

  # Upper bounds for the CPU usage histogram bins.
  CPU_STATS_BINS = [0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5,
                    6, 7, 8, 9, 10, 15, 20]
  # Upper bounds for the network-bytes-sent histogram bins.
  NETWORK_STATS_BINS = [16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192,
                        16384, 32768, 65536, 131072, 262144, 524288, 1048576,
                        2097152]
  # How many of the most expensive clients to keep in worst_performers.
  NUM_WORST_PERFORMERS = 10

  def __init__(self, initializer=None, **kwargs):
    super(ClientResourcesStats, self).__init__(initializer=initializer,
                                               **kwargs)
    # Assigning a list of bounds constructs the histogram bins (see
    # StatsHistogram.__init__).
    self.user_cpu_stats.histogram = self.CPU_STATS_BINS
    self.system_cpu_stats.histogram = self.CPU_STATS_BINS
    self.network_bytes_sent_stats.histogram = self.NETWORK_STATS_BINS

    self.lock = threading.RLock()

  @utils.Synchronized
  def __getstate__(self):
    # Locks cannot be pickled, so drop the lock for serialization.
    to_pickle = self.__dict__.copy()
    to_pickle["lock"] = None
    return to_pickle

  def __setstate__(self, state):
    self.__dict__ = state
    # Recreate the lock discarded in __getstate__.
    self.lock = threading.RLock()

  @utils.Synchronized
  def RegisterResources(self, client_resources):
    """Update stats with info about resources consumed by a single client."""
    self.user_cpu_stats.RegisterValue(
        client_resources.cpu_usage.user_cpu_time)
    self.system_cpu_stats.RegisterValue(
        client_resources.cpu_usage.system_cpu_time)
    self.network_bytes_sent_stats.RegisterValue(
        client_resources.network_bytes_sent)

    self.worst_performers.Append(client_resources)
    # Keep only the NUM_WORST_PERFORMERS clients with the highest total
    # (user + system) CPU time, in descending order.
    new_worst_performers = sorted(
        self.worst_performers,
        key=lambda s: s.cpu_usage.user_cpu_time + s.cpu_usage.system_cpu_time,
        reverse=True)[:self.NUM_WORST_PERFORMERS]
    self.worst_performers = new_worst_performers
class Sample(rdfvalue.RDFProtoStruct):
  """A Graph sample is a single data point."""
  protobuf = analysis_pb2.Sample
class SampleFloat(rdfvalue.RDFProtoStruct):
  """A Graph float data point."""
  protobuf = analysis_pb2.SampleFloat
class Graph(rdfvalue.RDFProtoStruct):
  """A Graph is a collection of sample points."""
  protobuf = analysis_pb2.Graph

  def Append(self, **kwargs):
    # Appends a new data point built from the given keyword arguments.
    self.data.Append(**kwargs)

  def __len__(self):
    return len(self.data)

  def __nonzero__(self):
    # Python 2 truth protocol: a graph is truthy iff it has data points.
    return bool(self.data)

  def __getitem__(self, item):
    # Wrap the raw stored point in a Sample on access.
    return Sample(self.data[item])

  def __iter__(self):
    for x in self.data:
      yield Sample(x)
class GraphFloat(Graph):
  """A Graph that stores sample points as floats."""
  protobuf = analysis_pb2.GraphFloat

  def __getitem__(self, item):
    # Same as Graph.__getitem__, but wraps points as SampleFloat.
    return SampleFloat(self.data[item])

  def __iter__(self):
    for x in self.data:
      yield SampleFloat(x)
class GraphSeries(rdfvalue.RDFValueArray):
  """A sequence of graphs (e.g. evolving over time)."""
  rdf_type = Graph
|
hotpxl/mxnet | refs/heads/master | tools/caffe_converter/compare_layers.py | 5 | """Test converted models layer by layer
"""
import os
import argparse
import logging
import mxnet as mx
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean value per channel (or broadcastable array) to subtract, or None
    :return: loaded image as a (n, c, h, w) array in RGB channel order
    """
    import urllib
    # NOTE(review): urllib.urlretrieve exists only on Python 2; Python 3 would
    # need urllib.request.urlretrieve -- confirm the targeted interpreter.
    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urllib.urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)

    # OpenCV loads BGR; the rest of this module works in RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2)  # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean  # subtract mean

    return img
def _ch_dev(arg_params, aux_params, ctx):
"""
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
"""
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
                                       caffe_mean, mean_diff_allowed, max_diff_allowed):
    """
    Run the layer comparison on a caffe model, given its prototxt, weights and mean.
    The comparison is done by inferring on a given image using both caffe and mxnet model
    :param image_url: image file or url to run inference on
    :param gpu: gpu to use, -1 for cpu
    :param caffe_prototxt_path: path to caffe prototxt
    :param caffe_model_path: path to caffe weights
    :param caffe_mean: path to caffe mean file, or per-channel mean values, or None
    :param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
    :param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
    """
    import caffe
    from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
    from convert_model import convert_model

    if isinstance(caffe_mean, str):
        # A string is treated as a path to a mean file.
        caffe_mean = read_caffe_mean(caffe_mean)
    elif caffe_mean is None:
        pass
    elif len(caffe_mean) == 3:
        # swap channels from Caffe BGR to RGB
        caffe_mean = caffe_mean[::-1]

    # get caffe root location, this is needed to run the upgrade network utility, so we only need
    # to support parsing of latest caffe
    caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
    caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)

    _, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)

    caffe.set_mode_cpu()
    caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)

    # Infer the expected input size (h, w) from the caffe data blob.
    image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]

    logging.info('getting image %s', image_url)
    img_rgb = read_image(image_url, image_dims, caffe_mean)
    # Caffe expects BGR input; flip the channel axis back.
    img_bgr = img_rgb[:, ::-1, :, :]

    caffe_net.blobs['data'].reshape(*img_bgr.shape)
    caffe_net.blobs['data'].data[...] = img_bgr
    _ = caffe_net.forward()

    # read sym and add all outputs
    sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
    sym = sym.get_internals()

    # now mxnet
    if gpu < 0:
        ctx = mx.cpu(0)
    else:
        ctx = mx.gpu(gpu)

    arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
    arg_params["data"] = mx.nd.array(img_rgb, ctx)
    arg_params["prob_label"] = mx.nd.empty((1,), ctx)
    exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
    exe.forward(is_train=False)

    compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
                             top_to_layers, mean_diff_allowed, max_diff_allowed)

    return
def _bfs(root_node, process_node):
"""
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
"""
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
# process current node
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node)
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed):
"""
Compare layer by layer of a caffe network with mxnet network
:param caffe_net: loaded caffe network
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param exe: mxnet model
:param layer_name_to_record: map between caffe layer and information record
:param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
:param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
:param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
"""
import re
log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
compare_layers_from_nets.is_first_convolution = True
def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
diff = np.abs(mx_blob - caf_blob)
diff_mean = diff.mean()
diff_max = diff.max()
logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
'%4.5f' % diff_max, note))
assert diff_mean < mean_diff_allowed
assert diff_max < max_diff_allowed
def _process_layer_parameters(layer):
logging.debug('processing layer %s of type %s', layer.name, layer.type)
normalized_layer_name = re.sub('[-/]', '_', layer.name)
# handle weight and bias of convolution and fully-connected layers
if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
'Deconvolution']:
has_bias = len(caffe_net.params[layer.name]) > 1
mx_name_weight = '{}_weight'.format(normalized_layer_name)
mx_beta = arg_params[mx_name_weight].asnumpy()
# first convolution should change from BGR to RGB
if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
compare_layers_from_nets.is_first_convolution = False
# if RGB or RGBA
if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :]
caf_beta = caffe_net.params[layer.name][0].data
_compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '')
if has_bias:
mx_name_bias = '{}_bias'.format(normalized_layer_name)
mx_gamma = arg_params[mx_name_bias].asnumpy()
caf_gamma = caffe_net.params[layer.name][1].data
_compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '')
elif layer.name in caffe_net.params and layer.type == 'Scale':
if 'scale' in normalized_layer_name:
bn_name = normalized_layer_name.replace('scale', 'bn')
elif 'sc' in normalized_layer_name:
bn_name = normalized_layer_name.replace('sc', 'bn')
else:
assert False, 'Unknown name convention for bn/scale'
beta_name = '{}_beta'.format(bn_name)
gamma_name = '{}_gamma'.format(bn_name)
mx_beta = arg_params[beta_name].asnumpy()
caf_beta = caffe_net.params[layer.name][1].data
_compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'mov_mean', '')
mx_gamma = arg_params[gamma_name].asnumpy()
caf_gamma = caffe_net.params[layer.name][0].data
_compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'mov_var', '')
elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
mean_name = '{}_moving_mean'.format(normalized_layer_name)
var_name = '{}_moving_var'.format(normalized_layer_name)
caf_rescale_factor = caffe_net.params[layer.name][2].data
mx_mean = aux_params[mean_name].asnumpy()
caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
_compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
mx_var = aux_params[var_name].asnumpy()
caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
_compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
'expect 1e-04 change due to cudnn eps')
elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
'Dropout', 'Crop']:
# no parameters to check for these layers
pass
else:
logging.warn('No handling for layer %s of type %s, should we ignore it?', layer.name,
layer.type)
return
def _process_layer_output(caffe_blob_name):
    """Compare one Caffe blob with the matching MXNet output blob.

    Uses the enclosing scope's ``caffe_net``, ``top_to_layers``, ``exe`` and
    ``_compare_blob``. Blob name differences between the two frameworks
    (normalization, scale/bn fusion) are resolved before comparing.
    """
    logging.debug('processing blob %s', caffe_blob_name)

    # skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
    if caffe_blob_name not in top_to_layers:
        return

    caf_blob = caffe_net.blobs[caffe_blob_name].data

    # data should change from BGR to RGB
    if caffe_blob_name == 'data':
        # if RGB or RGBA
        if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
            # Swapping BGR of caffe into RGB in mxnet
            caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
        mx_name = 'data'
    else:
        # get last layer name which outputs this blob name
        last_layer_name = top_to_layers[caffe_blob_name][-1]
        # MXNet names use '_' where Caffe names may use '-' or '/'.
        normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
        mx_name = '{}_output'.format(normalized_last_layer_name)
        # Scale layers are folded into BatchNorm on the MXNet side, so map
        # 'scale'/'sc' output names onto the corresponding 'bn' blob.
        if 'scale' in mx_name:
            mx_name = mx_name.replace('scale', 'bn')
        elif 'sc' in mx_name:
            mx_name = mx_name.replace('sc', 'bn')

    if mx_name not in exe.output_dict:
        logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
        return

    mx_blob = exe.output_dict[mx_name].asnumpy()
    _compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
    return
# check layer parameters
logging.info('\n***** Network Parameters '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
first_layer_name = layer_name_to_record.keys()[0]
_bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
# check layer output
logging.info('\n***** Network Outputs '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
for caffe_blob_name in caffe_net.blobs.keys():
_process_layer_output(caffe_blob_name)
return
def main():
    """Entrypoint for compare_layers.

    Parses command line arguments and runs the layer-by-layer comparison of
    a Caffe model against its MXNet conversion.
    """
    parser = argparse.ArgumentParser(
        description='Tool for testing caffe to mxnet conversion layer by layer')
    parser.add_argument('--image_url', type=str,
                        default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
                        help='input image to test inference, can be either file path or url')
    parser.add_argument('--caffe_prototxt_path', type=str,
                        default='./model.prototxt',
                        help='path to caffe prototxt')
    parser.add_argument('--caffe_model_path', type=str,
                        default='./model.caffemodel',
                        help='path to caffe weights')
    parser.add_argument('--caffe_mean', type=str,
                        default='./model_mean.binaryproto',
                        help='path to caffe mean file')
    # BUG FIX: these thresholds are fractional (defaults 1e-03 / 1e-01), so
    # they must be parsed as floats; type=int would reject any value the
    # user actually passes on the command line (e.g. '0.001').
    parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                        help='mean difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                        help='max difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
    args = parser.parse_args()
    convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
                                       args.caffe_model_path, args.caffe_mean,
                                       args.mean_diff_allowed, args.max_diff_allowed)


if __name__ == '__main__':
    main()
|
monikagrabowska/osf.io | refs/heads/develop | tests/test_guids.py | 3 | # -*- coding: utf-8 -*-
import mock
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from modularodm import Q
from modularodm import fields
from framework.mongo.storage import MongoStorage
from framework.mongo import database
from framework.guid.model import GuidStoredObject
from website import models
class TestGuidStoredObject(OsfTestCase):

    def test_guid_stored_object(self):
        """Saving a GuidStoredObject registers a Guid record pointing back
        at the saved object."""

        class FakeSchema(GuidStoredObject):
            _id = fields.StringField()

            @property
            def deep_url(self):
                return 'http://dinosaurs.sexy'

        FakeSchema.set_storage(MongoStorage(database, 'fakeschema'))

        stored = FakeSchema(_id='fake')
        stored.save()

        matching_guids = models.Guid.find(Q('_id', 'eq', 'fake'))
        assert_equal(matching_guids.count(), 1)

        guid_record = matching_guids[0]
        assert_equal(guid_record.referent, stored)
        assert_equal(guid_record._id, stored._id)
class TestResolveGuid(OsfTestCase):

    def setUp(self):
        super(TestResolveGuid, self).setUp()
        self.node = NodeFactory()

    def test_resolve_guid(self):
        """A GUID URL renders the same page as the full project URL."""
        creator_auth = self.node.creator.auth
        guid_response = self.app.get(
            self.node.web_url_for('node_setting', _guid=True),
            auth=creator_auth,
        )
        full_response = self.app.get(
            self.node.web_url_for('node_setting'),
            auth=creator_auth,
        )
        assert_equal(guid_response.text, full_response.text)

    def test_resolve_guid_no_referent(self):
        """Resolving a GUID whose referent has been cleared returns 404."""
        guid = models.Guid.load(self.node._id)
        guid.referent = None
        guid.save()
        response = self.app.get(
            self.node.web_url_for('node_setting', _guid=True),
            auth=self.node.creator.auth,
            expect_errors=True,
        )
        assert_equal(response.status_code, 404)

    @mock.patch('website.project.model.Node.deep_url', None)
    def test_resolve_guid_no_url(self):
        """Resolving a GUID whose referent has no deep_url returns 404."""
        response = self.app.get(
            self.node.web_url_for('node_setting', _guid=True),
            auth=self.node.creator.auth,
            expect_errors=True,
        )
        assert_equal(response.status_code, 404)
|
gnieboer/gnuradio | refs/heads/android | gr-digital/examples/snr_estimators.py | 46 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
import numpy

from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal with several estimators, and compares the results.

For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data):
    """Compute the sample skewness of *data* in one streaming pass.

    Implements the online higher-order-moment update (see the Wikipedia
    reference in the module docstring): M2 and M3 are the running second
    and third central moments, and the skewness is
    sqrt(N) * M3 / M2**(3/2).

    Fixed: the original used ``scipy.sqrt``/``scipy.power``, deprecated
    NumPy aliases that have been removed from modern SciPy; plain Python
    exponentiation is equivalent and dependency-free.
    """
    n = 0
    mean = 0
    M2 = 0
    M3 = 0

    for x in data:
        n += 1
        delta = x - mean
        delta_n = delta / n
        term1 = delta * delta_n * (n - 1)
        mean = mean + delta_n
        M3 = M3 + term1 * delta_n * (n - 2) - 3 * delta_n * M2
        M2 = M2 + term1

    return (n ** 0.5) * M3 / (M2 ** 1.5)
def snr_est_simple(signal):
    """Simple SNR estimate: mean power over twice the envelope variance.

    Returns a (snr_dB, snr_ratio) tuple.

    Fixed: replaced the deprecated ``scipy.mean``/``scipy.var``/
    ``scipy.log10`` NumPy aliases (removed from modern SciPy) with their
    NumPy equivalents.
    """
    s = numpy.mean(numpy.abs(signal) ** 2)         # signal power estimate
    n = 2 * numpy.var(numpy.abs(signal))           # noise power estimate
    snr_rat = s / n
    return 10.0 * numpy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
    """Skewness-based SNR estimate (uses :func:`online_skewness`).

    Returns a (snr_dB, snr_ratio) tuple.

    Fixed: replaced the deprecated ``scipy.mean``/``scipy.real``/
    ``scipy.log10`` NumPy aliases (removed from modern SciPy) with their
    NumPy equivalents, and dropped the dead commented-out code.
    """
    y1 = numpy.mean(numpy.abs(signal))
    y2 = numpy.mean(numpy.real(signal ** 2))
    y3 = (y1 * y1 - y2)
    y4 = online_skewness(signal.real)

    skw = y4 * y4 / (y2 * y2 * y2)
    s = y1 * y1
    n = 2 * (y3 + skw * s)
    snr_rat = s / n
    return 10.0 * numpy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
    """M2M4 moment-based SNR estimate.

    Returns a (snr_dB, snr_ratio) tuple.

    Fixed: replaced deprecated ``scipy.mean``/``scipy.sqrt``/``scipy.log10``
    NumPy aliases (removed from modern SciPy) with NumPy calls, and hoisted
    the repeated sqrt into a local.
    """
    M2 = numpy.mean(numpy.abs(signal) ** 2)
    M4 = numpy.mean(numpy.abs(signal) ** 4)
    root = numpy.sqrt(2 * M2 * M2 - M4)
    snr_rat = root / (M2 - root)
    return 10.0 * numpy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
    """SVR (signal-to-variation ratio) SNR estimate.

    Accumulates lagged second- and fourth-order moments over adjacent
    samples, then solves for the SNR ratio. Returns (snr_dB, snr_ratio).

    Fixed: ``xrange`` replaced by ``range`` (works on both Python 2 and 3),
    and the deprecated ``scipy.sqrt``/``scipy.log10`` NumPy aliases
    (removed from modern SciPy) replaced with NumPy calls.
    """
    N = len(signal)
    ssum = 0
    msum = 0
    for i in range(1, N):
        ssum += (abs(signal[i]) ** 2) * (abs(signal[i - 1]) ** 2)
        msum += (abs(signal[i]) ** 4)
    savg = (1.0 / (float(N) - 1.0)) * ssum
    mavg = (1.0 / (float(N) - 1.0)) * msum
    beta = savg / (mavg - savg)

    snr_rat = ((beta - 1) + numpy.sqrt(beta * (beta - 1)))
    return 10.0 * numpy.log10(snr_rat), snr_rat
def main():
    """Sweep SNR over a configurable range, estimating each point with the
    chosen estimator both in pure Python and in GNU Radio, then plot the
    estimates against the directly-computed ("known") SNR."""
    # Map the command-line estimator name to the GNU Radio block enum and
    # to the pure-Python implementation above.
    gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
                     "skew": digital.SNR_EST_SKEW,
                     "m2m4": digital.SNR_EST_M2M4,
                     "svr": digital.SNR_EST_SVR}
    py_estimators = {"simple": snr_est_simple,
                     "skew": snr_est_skew,
                     "m2m4": snr_est_m2m4,
                     "svr": snr_est_svr}

    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("", "--snr-min", type="float", default=-5,
                      help="Minimum SNR [default=%default]")
    parser.add_option("", "--snr-max", type="float", default=20,
                      help="Maximum SNR [default=%default]")
    parser.add_option("", "--snr-step", type="float", default=0.5,
                      help="SNR step amount [default=%default]")
    parser.add_option("-t", "--type", type="choice",
                      choices=gr_estimators.keys(), default="simple",
                      help="Estimator type {0} [default=%default]".format(
                          gr_estimators.keys()))
    (options, args) = parser.parse_args ()

    N = options.nsamples
    # Gaussian noise components (real/imag) and random BPSK symbols (+/-1).
    xx = scipy.random.randn(N)
    xy = scipy.random.randn(N)
    bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
    #bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
    #    1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)

    snr_known = list()
    snr_python = list()
    snr_gr = list()

    # when to issue an SNR tag; can be ignored in this example.
    ntag = 10000

    n_cpx = xx + 1j*xy

    py_est = py_estimators[options.type]
    gr_est = gr_estimators[options.type]

    SNR_min = options.snr_min
    SNR_max = options.snr_max
    SNR_step = options.snr_step
    SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
    for snr in SNR_dB:
        SNR = 10.0**(snr/10.0)
        # Noise is scaled so that the per-sample SNR matches the target.
        scale = scipy.sqrt(2*SNR)
        yy = bits + n_cpx/scale
        print "SNR: ", snr

        # "Known" SNR computed directly from the generated signal and noise.
        Sknown = scipy.mean(yy**2)
        Nknown = scipy.var(n_cpx/scale)
        snr0 = Sknown/Nknown
        snr0dB = 10.0*scipy.log10(snr0)
        snr_known.append(float(snr0dB))

        # Pure-Python estimate. NOTE(review): this rebinds the loop
        # variable ``snr``; harmless (it is reassigned next iteration),
        # but easy to misread.
        snrdB, snr = py_est(yy)
        snr_python.append(snrdB)

        # GNU Radio estimate: clean bits through a channel model with the
        # same noise scale, into the SNR estimator block.
        gr_src = blocks.vector_source_c(bits.tolist(), False)
        gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
        gr_chn = channels.channel_model(1.0/scale)
        gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
        tb = gr.top_block()
        tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
        tb.run()

        snr_gr.append(gr_snr.snr())

    # Plot estimated-vs-known SNR for all three sources.
    f1 = pylab.figure(1)
    s1 = f1.add_subplot(1,1,1)
    s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
    s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
    s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
    s1.grid(True)
    s1.set_title('SNR Estimators')
    s1.set_xlabel('SNR (dB)')
    s1.set_ylabel('Estimated SNR')
    s1.legend()

    # Scatter plot of the last (highest-SNR) noisy constellation.
    f2 = pylab.figure(2)
    s2 = f2.add_subplot(1,1,1)
    s2.plot(yy.real, yy.imag, 'o')

    pylab.show()


if __name__ == "__main__":
    main()
|
jiaphuan/models | refs/heads/master | research/rebar/rebar_train.py | 9 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import random
import sys
import os
import numpy as np
import tensorflow as tf
import rebar
import datasets
import logger as L
# Python 2/3 compatibility: provide ``xrange`` on Python 3.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3

# Shorthand for TensorFlow's file I/O module.
gfile = tf.gfile

# Command-line flags: output location, hyperparameter overrides, and how
# often (in epochs) to run evaluation.
tf.app.flags.DEFINE_string("working_dir", "/tmp/rebar",
                           """Directory where to save data, write logs, etc.""")
tf.app.flags.DEFINE_string('hparams', '',
                           '''Comma separated list of name=value pairs.''')
tf.app.flags.DEFINE_integer('eval_freq', 20,
                            '''How often to run the evaluation step.''')
FLAGS = tf.flags.FLAGS
def manual_scalar_summary(name, value):
    """Build a TensorFlow Summary protobuf holding a single scalar."""
    scalar = tf.Summary.Value(tag=name, simple_value=value)
    return tf.Summary(value=[scalar])
def eval(sbn, eval_xs, n_samples=100, batch_size=5):
    """Evaluate ``sbn`` on ``eval_xs`` in mini-batches.

    Calls ``sbn.partial_eval`` on successive ``batch_size`` slices of
    ``eval_xs`` and returns the element-wise mean of the results.

    NOTE: the name shadows the ``eval`` builtin; it is kept for backward
    compatibility with existing callers.
    """
    n = eval_xs.shape[0]
    res = []
    # range() with a step replaces the original manual while-increment loop;
    # slicing clamps at the end, so no explicit min() is needed.
    for start in range(0, n, batch_size):
        batch_xs = eval_xs[start:start + batch_size]
        res.append(sbn.partial_eval(batch_xs, n_samples))
    return np.mean(res, axis=0)
def train(sbn, train_xs, valid_xs, test_xs, training_steps, debug=False):
    """Train ``sbn``, logging metrics and periodically evaluating.

    Runs epochs of shuffled mini-batches until the supervisor stops or
    ``training_steps`` is exceeded; every ``FLAGS.eval_freq`` epochs the
    validation/test sets are scored. Returns the list of per-epoch mean
    training scores.

    Py3-compat fixes (the module already ships an ``xrange`` shim, so it is
    intended to run on Python 3 too): ``random.shuffle`` needs a mutable
    sequence (``list(range(n))``), and ``dict.iteritems()`` does not exist
    on Python 3 (``items()`` works on both).
    """
    # Build a stable string key from the sorted hyperparameters, e.g.
    # "batch_size_24.lr_0.001..." — used for log dirs and row keys.
    hparams = sorted(sbn.hparams.values().items())
    hparams = (map(str, x) for x in hparams)
    hparams = ('_'.join(x) for x in hparams)
    hparams_str = '.'.join(hparams)

    logger = L.Logger()

    # Create the experiment name from the hparams (layer sizes joined by
    # '~' for nonlinear models, '-' for linear ones).
    experiment_name = ([str(sbn.hparams.n_hidden) for i in xrange(sbn.hparams.n_layer)] +
                       [str(sbn.hparams.n_input)])
    if sbn.hparams.nonlinear:
        experiment_name = '~'.join(experiment_name)
    else:
        experiment_name = '-'.join(experiment_name)
    experiment_name = 'SBN_%s' % experiment_name
    rowkey = {'experiment': experiment_name,
              'model': hparams_str}

    # Create summary writer
    summ_dir = os.path.join(FLAGS.working_dir, hparams_str)
    summary_writer = tf.summary.FileWriter(
        summ_dir, flush_secs=15, max_queue=100)

    sv = tf.train.Supervisor(logdir=os.path.join(
        FLAGS.working_dir, hparams_str),
                             save_summaries_secs=0,
                             save_model_secs=1200,
                             summary_op=None,
                             recovery_wait_secs=30,
                             global_step=sbn.global_step)
    with sv.managed_session() as sess:
        # Dump hparams to file
        with gfile.Open(os.path.join(FLAGS.working_dir,
                                     hparams_str,
                                     'hparams.json'),
                        'w') as out:
            json.dump(sbn.hparams.values(), out)

        sbn.initialize(sess)
        batch_size = sbn.hparams.batch_size
        scores = []

        n = train_xs.shape[0]
        # BUG FIX: materialize the range so random.shuffle can shuffle it
        # in place (range objects are immutable on Python 3).
        index = list(range(n))
        while not sv.should_stop():
            lHats = []
            grad_variances = []
            temperatures = []
            random.shuffle(index)
            i = 0
            while i < n:
                batch_index = index[i:min(i+batch_size, n)]
                batch_xs = train_xs[batch_index, :]
                if sbn.hparams.dynamic_b:
                    # Dynamically binarize the batch data
                    batch_xs = (np.random.rand(*batch_xs.shape) < batch_xs).astype(float)
                lHat, grad_variance, step, temperature = sbn.partial_fit(batch_xs,
                                                                         sbn.hparams.n_samples)
                if debug:
                    print(i, lHat)
                    if i > 100:
                        return
                lHats.append(lHat)
                grad_variances.append(grad_variance)
                temperatures.append(temperature)
                i += batch_size
            grad_variances = np.log(np.mean(grad_variances, axis=0)).tolist()
            summary_strings = []
            if isinstance(grad_variances, list):
                # One variance per loss term: log them keyed by loss name.
                grad_variances = dict(zip([k for (k, v) in sbn.losses], map(float, grad_variances)))
                rowkey['step'] = step
                logger.log(rowkey, {'step': step,
                                    'train': np.mean(lHats, axis=0)[0],
                                    'grad_variances': grad_variances,
                                    'temperature': np.mean(temperatures), })
                # BUG FIX: iteritems() -> items() for Python 3 compatibility.
                grad_variances = '\n'.join(map(str, sorted(grad_variances.items())))
            else:
                rowkey['step'] = step
                logger.log(rowkey, {'step': step,
                                    'train': np.mean(lHats, axis=0)[0],
                                    'grad_variance': grad_variances,
                                    'temperature': np.mean(temperatures), })
                summary_strings.append(manual_scalar_summary("log grad variance", grad_variances))
            print('Step %d: %s\n%s' % (step, str(np.mean(lHats, axis=0)), str(grad_variances)))

            # Every few epochs compute test and validation scores
            epoch = int(step / (train_xs.shape[0] / sbn.hparams.batch_size))
            if epoch % FLAGS.eval_freq == 0:
                valid_res = eval(sbn, valid_xs)
                test_res = eval(sbn, test_xs)
                print('\nValid %d: %s' % (step, str(valid_res)))
                print('Test %d: %s\n' % (step, str(test_res)))
                logger.log(rowkey, {'step': step,
                                    'valid': valid_res[0],
                                    'test': test_res[0]})
                logger.flush()  # Flush infrequently

            # Create summaries
            summary_strings.extend([
                manual_scalar_summary("Train ELBO", np.mean(lHats, axis=0)[0]),
                manual_scalar_summary("Temperature", np.mean(temperatures)),
            ])

            for summ_str in summary_strings:
                summary_writer.add_summary(summ_str, global_step=step)
            summary_writer.flush()
            sys.stdout.flush()

            scores.append(np.mean(lHats, axis=0))
            if step > training_steps:
                break

    return scores
def main():
    """Entrypoint: parse hyperparameters, load data, build the model and
    run training."""
    hparams = rebar.default_hparams
    hparams.parse(FLAGS.hparams)
    print(hparams.values())

    train_xs, valid_xs, test_xs = datasets.load_data(hparams)
    # Mean centering is computed on the training split only.
    mean_xs = np.mean(train_xs, axis=0)

    model_cls = getattr(rebar, hparams.model)
    sbn = model_cls(hparams, mean_xs=mean_xs)

    training_steps = 2000000
    train(sbn, train_xs, valid_xs, test_xs,
          training_steps=training_steps, debug=False)

if __name__ == '__main__':
    main()
|
barachka/odoo | refs/heads/master | addons/sale/wizard/sale_line_invoice.py | 61 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import workflow
class sale_order_line_make_invoice(osv.osv_memory):
    # Transient wizard that creates customer invoices from selected sales
    # order lines.
    _name = "sale.order.line.make.invoice"
    _description = "Sale OrderLine Make_invoice"

    def make_invoices(self, cr, uid, ids, context=None):
        """Create customer invoices for the selected sales order lines.

        The lines in ``context['active_ids']`` are grouped by their sales
        order and one draft invoice is created per order. Orders whose
        lines are then all invoiced are pushed through the
        'manual_invoice' workflow transition.

        @param cr: database cursor
        @param uid: id of the user currently logged in
        @param ids: id or list of ids of the wizard record(s)
        @param context: standard context dictionary; honors 'active_ids'
            (the selected sale order lines) and 'open_invoices'

        @return: an action dictionary opening the created invoice when
            'open_invoices' is set, otherwise a window-close action.
        @raise osv.except_osv: if no invoiceable line was selected.
        """
        if context is None: context = {}
        res = False
        invoices = {}

        #TODO: merge with sale.py/make_invoice
        def make_invoice(order, lines):
            """Create one draft 'out_invoice' for ``order`` covering the
            already-created invoice line ids in ``lines``; return its id.

            @param order: sale.order browse record
            @param lines: list of account.invoice.line ids

            @return: id of the newly created account.invoice
            """
            # Receivable account and payment term come from the partner.
            a = order.partner_id.property_account_receivable.id
            if order.partner_id and order.partner_id.property_payment_term.id:
                pay_term = order.partner_id.property_payment_term.id
            else:
                pay_term = False
            inv = {
                'name': order.client_order_ref or '',
                'origin': order.name,
                'type': 'out_invoice',
                'reference': "P%dSO%d" % (order.partner_id.id, order.id),
                'account_id': a,
                'partner_id': order.partner_invoice_id.id,
                'invoice_line': [(6, 0, lines)],
                'currency_id' : order.pricelist_id.currency_id.id,
                'comment': order.note,
                'payment_term': pay_term,
                'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
                'user_id': order.user_id and order.user_id.id or False,
                'company_id': order.company_id and order.company_id.id or False,
                'date_invoice': fields.date.today(),
            }
            inv_id = self.pool.get('account.invoice').create(cr, uid, inv)
            return inv_id

        sales_order_line_obj = self.pool.get('sale.order.line')
        sales_order_obj = self.pool.get('sale.order')
        # Group invoiceable lines by their sale order, creating the invoice
        # lines as we go.
        for line in sales_order_line_obj.browse(cr, uid, context.get('active_ids', []), context=context):
            # Only confirmed, not-yet-invoiced lines are eligible.
            if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
                if not line.order_id in invoices:
                    invoices[line.order_id] = []
                line_id = sales_order_line_obj.invoice_line_create(cr, uid, [line.id])
                for lid in line_id:
                    invoices[line.order_id].append(lid)

        for order, il in invoices.items():
            # NOTE(review): 'res' keeps the id of the LAST invoice created;
            # it is what open_invoices() below will display.
            res = make_invoice(order, il)
            # Link the new invoice to its sale order.
            cr.execute('INSERT INTO sale_order_invoice_rel \
                    (order_id,invoice_id) values (%s,%s)', (order.id, res))
            sales_order_obj.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context)
            flag = True
            sales_order_obj.message_post(cr, uid, [order.id], body=_("Invoice created"), context=context)
            data_sale = sales_order_obj.browse(cr, uid, order.id, context=context)
            for line in data_sale.order_line:
                if not line.invoiced:
                    flag = False
                    break
            # If every line of the order is now invoiced, advance the order
            # through the 'manual_invoice' workflow transition.
            if flag:
                workflow.trg_validate(uid, 'sale.order', order.id, 'manual_invoice', cr)

        if not invoices:
            raise osv.except_osv(_('Warning!'), _('Invoice cannot be created for this Sales Order Line due to one of the following reasons:\n1.The state of this sales order line is either "draft" or "cancel"!\n2.The Sales Order Line is Invoiced!'))
        if context.get('open_invoices', False):
            return self.open_invoices(cr, uid, ids, res, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
        """Return an act_window action opening the given invoice id in the
        standard customer-invoice form/tree views."""
        ir_model_data = self.pool.get('ir.model.data')
        form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_form')
        form_id = form_res and form_res[1] or False
        tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
        tree_id = tree_res and tree_res[1] or False

        return {
            'name': _('Invoice'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'account.invoice',
            'res_id': invoice_ids,
            'view_id': False,
            'views': [(form_id, 'form'), (tree_id, 'tree')],
            'context': {'type': 'out_invoice'},
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
woltage/ansible | refs/heads/devel | lib/ansible/executor/connection_info.py | 4 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pipes
import random
import re
from ansible import constants as C
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.errors import AnsibleError
__all__ = ['ConnectionInformation']
# Localized variants of the "Password" prompt, used to recognize an `su`
# password prompt regardless of the remote host's locale (see the su branch
# of make_become_cmd).
SU_PROMPT_LOCALIZATIONS = [
    'Password',
    '암호',
    'パスワード',
    'Adgangskode',
    'Contraseña',
    'Contrasenya',
    'Hasło',
    'Heslo',
    'Jelszó',
    'Lösenord',
    'Mật khẩu',
    'Mot de passe',
    'Parola',
    'Parool',
    'Pasahitza',
    'Passord',
    'Passwort',
    'Salasana',
    'Sandi',
    'Senha',
    'Wachtwoord',
    'ססמה',
    'Лозинка',
    'Парола',
    'Пароль',
    'गुप्तशब्द',
    'शब्दकूट',
    'సంకేతపదము',
    'හස්පදය',
    '密码',
    '密碼',
]
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the ConnectionInformation
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
    connection       = ('ansible_connection',),
    remote_addr      = ('ansible_ssh_host', 'ansible_host'),
    remote_user      = ('ansible_ssh_user', 'ansible_user'),
    port             = ('ansible_ssh_port', 'ansible_port'),
    password         = ('ansible_ssh_pass', 'ansible_password'),
    private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'),
    shell            = ('ansible_shell_type',),
    become           = ('ansible_become',),
    become_method    = ('ansible_become_method',),
    become_user      = ('ansible_become_user',),
    become_pass      = ('ansible_become_password','ansible_become_pass'),
    become_exe       = ('ansible_become_exe',),
    become_flags     = ('ansible_become_flags',),
    sudo             = ('ansible_sudo',),
    sudo_user        = ('ansible_sudo_user',),
    sudo_pass        = ('ansible_sudo_password', 'ansible_sudo_pass'),
    sudo_exe         = ('ansible_sudo_exe',),
    sudo_flags       = ('ansible_sudo_flags',),
    su               = ('ansible_su',),
    su_user          = ('ansible_su_user',),
    su_pass          = ('ansible_su_password', 'ansible_su_pass'),
    su_exe           = ('ansible_su_exe',),
    su_flags         = ('ansible_su_flags',),
)
# BUG FIX: a second, byte-identical definition of SU_PROMPT_LOCALIZATIONS
# used to follow this mapping; it was removed as redundant. The canonical
# list is defined once, near the top of the module.
class ConnectionInformation:

    '''
    This class is used to consolidate the connection information for
    hosts in a play and child tasks, where the task may override some
    connection/authentication information.

    Precedence (lowest to highest): constructor defaults, command-line
    options, play settings, task/host overrides (see
    set_task_and_host_override).
    '''

    def __init__(self, play=None, options=None, passwords=None):
        # Default to an empty dict so the .get() lookups below are safe.
        if passwords is None:
            passwords = {}

        # connection
        self.connection = None
        self.remote_addr = None
        self.remote_user = None
        self.password = passwords.get('conn_pass','')
        self.port = None
        self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE
        self.timeout = C.DEFAULT_TIMEOUT
        self.shell = None

        # privilege escalation
        self.become = None
        self.become_method = None
        self.become_user = None
        self.become_pass = passwords.get('become_pass','')
        self.become_exe = None
        self.become_flags = None

        # backwards compat (legacy sudo/su settings kept alongside 'become')
        self.sudo_exe = None
        self.sudo_flags = None
        self.sudo_pass = None
        self.su_exe = None
        self.su_flags = None
        self.su_pass = None

        # general flags (should we move out?)
        self.verbosity = 0
        self.only_tags = set()
        self.skip_tags = set()
        self.no_log = False
        self.check_mode = False

        #TODO: just pull options setup to above?
        # set options before play to allow play to override them
        if options:
            self.set_options(options)
        if play:
            self.set_play(play)

    def set_play(self, play):
        '''
        Configures this connection information instance with data from
        the play class. Only fields the play actually sets are copied.
        '''

        if play.connection:
            self.connection = play.connection

        if play.remote_user:
            self.remote_user = play.remote_user

        if play.port:
            self.port = int(play.port)

        if play.become is not None:
            self.become = play.become
        if play.become_method:
            self.become_method = play.become_method
        if play.become_user:
            self.become_user = play.become_user

        # non connection related
        self.no_log = play.no_log
        self.environment = play.environment

    def set_options(self, options):
        '''
        Configures this connection information instance with data from
        options specified by the user on the command line. These have a
        higher precedence than those set on the play or host.
        '''

        if options.connection:
            self.connection = options.connection

        self.remote_user = options.remote_user
        self.private_key_file = options.private_key_file

        # privilege escalation
        self.become = options.become
        self.become_method = options.become_method
        self.become_user = options.become_user

        # general flags (should we move out?)
        if options.verbosity:
            self.verbosity = options.verbosity
        #if options.no_log:
        #    self.no_log = boolean(options.no_log)
        if options.check:
            self.check_mode = boolean(options.check)

        # get the tag info from options, converting a comma-separated list
        # of values into a proper list if need be. We check to see if the
        # options have the attribute, as it is not always added via the CLI
        if hasattr(options, 'tags'):
            if isinstance(options.tags, list):
                self.only_tags.update(options.tags)
            elif isinstance(options.tags, basestring):
                self.only_tags.update(options.tags.split(','))

        # default to running everything when no tag restriction was given
        if len(self.only_tags) == 0:
            self.only_tags = set(['all'])

        if hasattr(options, 'skip_tags'):
            if isinstance(options.skip_tags, list):
                self.skip_tags.update(options.skip_tags)
            elif isinstance(options.skip_tags, basestring):
                self.skip_tags.update(options.skip_tags.split(','))

    def copy(self, ci):
        '''
        Copies the connection info from another connection info object, used
        when merging in data from task overrides.
        '''

        # Containers are copied (not aliased) so later mutation of one
        # object does not leak into the other.
        for field in self._get_fields():
            value = getattr(ci, field, None)
            if isinstance(value, dict):
                setattr(self, field, value.copy())
            elif isinstance(value, set):
                setattr(self, field, value.copy())
            elif isinstance(value, list):
                setattr(self, field, value[:])
            else:
                setattr(self, field, value)

    def set_task_and_host_override(self, task, host):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play. Returns a NEW ConnectionInformation object;
        self is not modified.
        '''

        new_info = ConnectionInformation()
        new_info.copy(self)

        # loop through a subset of attributes on the task object and set
        # connection fields based on their values
        for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val is not None:
                    setattr(new_info, attr, attr_val)

        # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this
        # connection info object with 'magic' variables from inventory
        variables = host.get_vars()
        for (attr, variable_names) in MAGIC_VARIABLE_MAPPING.iteritems():
            for variable_name in variable_names:
                if variable_name in variables:
                    setattr(new_info, attr, variables[variable_name])

        # become legacy updates: fall back to the old sudo/su passwords when
        # no generic become password was provided
        if not new_info.become_pass:
            if new_info.become_method == 'sudo' and new_info.sudo_pass:
                setattr(new_info, 'become_pass', new_info.sudo_pass)
            elif new_info.become_method == 'su' and new_info.su_pass:
                setattr(new_info, 'become_pass', new_info.su_pass)

        return new_info

    def make_become_cmd(self, cmd, executable=C.DEFAULT_EXECUTABLE):
        """ helper function to create privilege escalation commands

        Wraps ``cmd`` for the configured become method and returns a
        ``(command, prompt, success_key)`` tuple. ``prompt`` is either a
        string to look for, a callable that detects the prompt (su), or
        None; ``success_key`` is echoed by the wrapped command so the
        caller can tell escalation succeeded.
        """

        prompt = None
        success_key = None

        if self.become:
            becomecmd = None
            # Random suffix makes the prompt/success marker unique per call.
            randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
            success_key = 'BECOME-SUCCESS-%s' % randbits
            executable = executable or '$SHELL'
            success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))

            if self.become_method == 'sudo':
                # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
                # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
                # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
                # string to the user's shell. We loop reading output until we see the randomly-generated
                # sudo prompt set with the -p option.
                prompt = '[sudo via ansible, key=%s] password: ' % randbits
                exe = self.become_exe or self.sudo_exe or 'sudo'
                flags = self.become_flags or self.sudo_flags or ''
                becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
                            (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd)

            elif self.become_method == 'su':

                def detect_su_prompt(data):
                    # su emits a localized password prompt; match any of them.
                    SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
                    return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))

                prompt = detect_su_prompt
                exe = self.become_exe or self.su_exe or 'su'
                flags = self.become_flags or self.su_flags or ''
                becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)

            elif self.become_method == 'pbrun':
                prompt = 'assword:'
                exe = self.become_exe or 'pbrun'
                flags = self.become_flags or ''
                becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd)

            elif self.become_method == 'pfexec':
                exe = self.become_exe or 'pfexec'
                flags = self.become_flags or ''
                # No user as it uses it's own exec_attr to figure it out
                becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)

            else:
                raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)

            return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key)

        return (cmd, prompt, success_key)

    def _get_fields(self):
        # All public instance attributes (no leading underscore).
        return [i for i in self.__dict__.keys() if i[:1] != '_']

    def post_validate(self, templar):
        '''
        Finalizes templated values which may be set on this objects fields.
        '''

        for field in self._get_fields():
            value = templar.template(getattr(self, field))
            setattr(self, field, value)

    def update_vars(self, variables):
        '''
        Adds 'magic' variables relating to connections to the variable dictionary provided.
        In case users need to access from the play, this is a legacy from runner.
        '''

        #FIXME: remove password? possibly add become/sudo settings
        for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file']:
            if special_var not in variables:
                # Reverse-map the magic variable name back to our attribute.
                for prop, varnames in MAGIC_VARIABLE_MAPPING.items():
                    if special_var in varnames:
                        variables[special_var] = getattr(self, prop)
|
ngoix/OCRF | refs/heads/master | sklearn/utils/tests/test_multiclass.py | 34 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
    """Wrapper convertible to an array via the ``__array__`` protocol.

    Useful to simulate array-like objects (e.g. a Pandas timeseries) that
    are not themselves ndarrays.
    """

    def __init__(self, data):
        # Keep the raw payload; __array__ hands it back untouched.
        self.data = data

    def __array__(self):
        return self.data
# Canonical examples of every target type recognised by type_of_target(),
# keyed by the expected type string. Shared by all the tests below.
EXAMPLES = {
    'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
        # by CSR format when the testing takes place
        csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
        csr_matrix(np.array([[0, 1], [1, 0]])),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
        csr_matrix(np.array([[0, 0], [0, 0]])),
        csr_matrix(np.array([[0, 1]])),
        # Only valid when data is dense
        np.array([[-1, 1], [1, -1]]),
        np.array([[-3, 3], [3, -3]]),
        NotAnArray(np.array([[-3, 3], [3, -3]])),
    ],
    'multiclass': [
        [1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
        np.array([1, 0, 2]),
        np.array([1, 0, 2], dtype=np.int8),
        np.array([1, 0, 2], dtype=np.uint8),
        np.array([1, 0, 2], dtype=np.float),
        np.array([1, 0, 2], dtype=np.float32),
        # Column vector: still multiclass, not multioutput.
        np.array([[1], [0], [2]]),
        NotAnArray(np.array([1, 0, 2])),
        [0, 1, 2],
        ['a', 'b', 'c'],
        np.array([u'a', u'b', u'c']),
        np.array([u'a', u'b', u'c'], dtype=object),
        np.array(['a', 'b', 'c'], dtype=object),
    ],
    'multiclass-multioutput': [
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
        np.array([['a', 'b'], ['c', 'd']]),
        np.array([[u'a', u'b'], [u'c', u'd']]),
        np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
        np.array([[1, 0, 2]]),
        NotAnArray(np.array([[1, 0, 2]])),
    ],
    'binary': [
        [0, 1],
        [1, 1],
        [],
        [0],
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
        np.array([[0], [1]]),
        NotAnArray(np.array([[0], [1]])),
        # Binary does not require the labels to be 0/1.
        [1, -1],
        [3, 5],
        ['a'],
        ['a', 'b'],
        ['abc', 'def'],
        np.array(['abc', 'def']),
        [u'a', u'b'],
        np.array(['abc', 'def'], dtype=object),
    ],
    'continuous': [
        [1e-5],
        [0, .5],
        np.array([[0], [.5]]),
        np.array([[0], [.5]], dtype=np.float32),
    ],
    'continuous-multioutput': [
        np.array([[0, .5], [.5, 0]]),
        np.array([[0, .5], [.5, 0]], dtype=np.float32),
        np.array([[0, .5]]),
    ],
    'unknown': [
        [[]],
        [()],
        # sequence of sequences that weren't supported even before deprecation
        np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
        [np.array([]), np.array([1, 2, 3])],
        [set([1, 2, 3]), set([1, 2])],
        [frozenset([1, 2, 3]), frozenset([1, 2])],
        # and also confusable as sequences of sequences
        [{0: 'a', 1: 'b'}, {0: 'a'}],
        # empty second dimension
        np.array([[], []]),
        # 3d
        np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
    ]
}

# Inputs that are not array-like at all; the APIs under test must reject
# every one of these with ValueError.
NON_ARRAY_LIKE_EXAMPLES = [
    set([1, 2, 3]),
    {0: 'a', 1: 'b'},
    {0: [5], 1: [5]},
    'abc',
    frozenset([1, 2, 3]),
    None,
]

# Legacy "sequence of sequences" multilabel representations; support was
# removed, so type_of_target() must raise a dedicated error on these.
MULTILABEL_SEQUENCES = [
    [[1], [2], [0, 1]],
    # NOTE(review): (2) is just the int 2, not a tuple — presumably (2,)
    # was intended; verify before changing, the test passes either way.
    [(), (2), (0, 1)],
    np.array([[], [1, 2]], dtype='object'),
    NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
    """unique_labels on empty input, multiclass, multilabel, multi-array."""
    # An empty call is rejected outright.
    assert_raises(ValueError, unique_labels)

    # Multiclass problems yield the sorted set of observed labels.
    assert_array_equal(unique_labels(xrange(10)), np.arange(10))
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))

    # Multilabel indicator matrices yield their column indices.
    indicator = np.array([[0, 0, 1],
                          [1, 0, 1],
                          [0, 0, 0]])
    assert_array_equal(unique_labels(indicator), np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))

    # Several label collections may be passed at once.
    assert_array_equal(unique_labels([4, 0, 2], xrange(5)), np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)), np.arange(3))

    # Border line case: mixing an indicator matrix with other formats fails...
    assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
    assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
    # ...but two all-ones indicator matrices agree on the label set.
    assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
                       np.arange(5))
def test_unique_labels_non_specific():
    """Smoke-test unique_labels over the collected example corpus."""
    # Supported formats must not raise.
    for group in ("binary", "multiclass", "multilabel-indicator"):
        for y in EXAMPLES[group]:
            unique_labels(y)

    # Non array-like inputs are rejected.
    for bad_input in NON_ARRAY_LIKE_EXAMPLES:
        assert_raises(ValueError, unique_labels, bad_input)

    # Unsupported target types are rejected too.
    unsupported = ("unknown", "continuous", "continuous-multioutput",
                   "multiclass-multioutput")
    for group in unsupported:
        for bad_input in EXAMPLES[group]:
            assert_raises(ValueError, unique_labels, bad_input)
def test_unique_labels_mixed_types():
    """Mixing multilabel targets with binary/multiclass targets must fail."""
    clf_examples = EXAMPLES["multiclass"] + EXAMPLES["binary"]
    for y_multilabel, y_multiclass in product(
            EXAMPLES["multilabel-indicator"], clf_examples):
        # Argument order must not matter.
        assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
        assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)

    # Mixing string and numeric labels is also an error.
    assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
    assert_raises(ValueError, unique_labels, ["1", 2])
    assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
    assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
    # For every example group, is_multilabel must answer True only for the
    # 'multilabel-indicator' group; the (assert function, expected string)
    # pairs below keep the failure messages readable.
    for group, group_examples in iteritems(EXAMPLES):
        if group in ['multilabel-indicator']:
            dense_assert_, dense_exp = assert_true, 'True'
        else:
            dense_assert_, dense_exp = assert_false, 'False'
        for example in group_examples:
            # Only mark explicitly defined sparse examples as valid sparse
            # multilabel-indicators
            if group == 'multilabel-indicator' and issparse(example):
                sparse_assert_, sparse_exp = assert_true, 'True'
            else:
                sparse_assert_, sparse_exp = assert_false, 'False'

            # Dense 2-d numeric examples (and already-sparse ones) are also
            # converted to every scipy sparse format and re-checked.
            if (issparse(example) or
                (hasattr(example, '__array__') and
                 np.asarray(example).ndim == 2 and
                 np.asarray(example).dtype.kind in 'biuf' and
                 np.asarray(example).shape[1] > 0)):
                examples_sparse = [sparse_matrix(example)
                                   for sparse_matrix in [coo_matrix,
                                                         csc_matrix,
                                                         csr_matrix,
                                                         dok_matrix,
                                                         lil_matrix]]
                for exmpl_sparse in examples_sparse:
                    sparse_assert_(is_multilabel(exmpl_sparse),
                                   msg=('is_multilabel(%r)'
                                        ' should be %s')
                                   % (exmpl_sparse, sparse_exp))

            # Densify sparse examples before testing
            if issparse(example):
                example = example.toarray()

            dense_assert_(is_multilabel(example),
                          msg='is_multilabel(%r) should be %s'
                          % (example, dense_exp))
def test_check_classification_targets():
    """check_classification_targets accepts classification-like targets only."""
    rejected = ("unknown", "continuous", "continuous-multioutput")
    for y_type in EXAMPLES.keys():
        examples = EXAMPLES[y_type]
        if y_type in rejected:
            # Non-classification targets raise with a recognisable message.
            for example in examples:
                assert_raises_regex(ValueError, 'Unknown label type: ',
                                    check_classification_targets, example)
        else:
            # Everything else passes silently.
            for example in examples:
                check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
    """type_of_target classifies every collected example as its group name."""
    for group, group_examples in iteritems(EXAMPLES):
        for example in group_examples:
            assert_equal(type_of_target(example), group,
                         msg=('type_of_target(%r) should be %r, got %r'
                              % (example, group, type_of_target(example))))

    # Non array-like inputs are rejected with a descriptive error.
    msg_regex = 'Expected array-like \(array or non-string sequence\).*'
    for example in NON_ARRAY_LIKE_EXAMPLES:
        assert_raises_regex(ValueError, msg_regex, type_of_target, example)

    # Legacy sequence-of-sequences multilabel data raises a dedicated error.
    legacy_msg = ('You appear to be using a legacy multi-label data '
                  'representation. Sequence of sequences are no longer supported;'
                  ' use a binary array or sparse matrix instead.')
    for example in MULTILABEL_SEQUENCES:
        assert_raises_regex(ValueError, legacy_msg, type_of_target, example)
def test_class_distribution():
    # Multi-output target: 6 samples x 4 outputs.
    y = np.array([[1, 0, 0, 1],
                  [2, 2, 0, 1],
                  [1, 3, 0, 1],
                  [4, 2, 0, 1],
                  [2, 0, 0, 1],
                  [1, 3, 0, 1]])
    # Define the sparse matrix with a mix of implicit and explicit zeros
    data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
    indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
    indptr = np.array([0, 6, 11, 11, 17])
    y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))

    # Dense and sparse representations must produce identical statistics.
    classes, n_classes, class_prior = class_distribution(y)
    classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)

    # Expected per-output label sets, counts and empirical priors.
    classes_expected = [[1, 2, 4],
                        [0, 2, 3],
                        [0],
                        [1]]
    n_classes_expected = [3, 3, 1, 1]
    class_prior_expected = [[3/6, 2/6, 1/6],
                            [1/3, 1/3, 1/3],
                            [1.0],
                            [1.0]]

    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])

        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])

    # Test again with explicit sample weights
    (classes,
     n_classes,
     class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
    # NOTE(review): the "_sp" variant below is recomputed from the *dense* y,
    # not from y_sp — presumably y_sp was intended; verify before changing.
    (classes_sp,
     n_classes_sp,
     class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
    class_prior_expected = [[4/9, 3/9, 2/9],
                            [2/9, 4/9, 3/9],
                            [1.0],
                            [1.0]]

    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])

        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
Chilledheart/googletest | refs/heads/master | googletest/test/gtest_shuffle_test.py | 3023 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# gtest filter expression used for the "filtered" scenarios below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Lazily-populated caches of 'TestCaseName.TestName' strings, one list per
# flag/sharding scenario; filled at most once by CalculateTestLists().
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
    """Return the flag that makes gtest run DISABLED_ tests as well."""
    return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
    """Return a --gtest_filter flag selecting *test_filter*."""
    return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
    """Return a --gtest_repeat flag repeating the tests *n* times."""
    return '--gtest_repeat={0}'.format(n)
def ShuffleFlag():
    """Return the flag that enables test shuffling."""
    return '--gtest_shuffle'
def RandomSeedFlag(n):
    """Return a --gtest_random_seed flag with seed *n*."""
    return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
    """Run the test binary with extra env vars and flags; return its output."""
    child_env = os.environ.copy()
    child_env.update(extra_env)
    return gtest_test_utils.Subprocess([COMMAND] + args, env=child_env).output
def GetTestsForAllIterations(extra_env, args):
    """Runs the test program and returns a list of test lists.

    Args:
      extra_env: a map from environment variables to their values
      args: command line flags to pass to gtest_shuffle_test_

    Returns:
      A list where the i-th element is the list of tests run in the i-th
      test iteration.
    """
    iterations = []
    output = RunAndReturnOutput(extra_env, args)
    for raw_line in output.split('\n'):
        if raw_line.startswith('----'):
            # A dashed separator line starts a new iteration.
            iterations.append([])
        else:
            stripped = raw_line.strip()
            if stripped:
                iterations[-1].append(stripped)  # 'TestCaseName.TestName'
    return iterations
def GetTestCases(tests):
    """Returns a list of test cases in the given full test names.

    Args:
      tests: a list of full test names

    Returns:
      The distinct test case names from 'tests', in order of first
      appearance (duplicates dropped).
    """
    seen_cases = []
    for full_name in tests:
        case_name = full_name.split('.')[0]
        if case_name not in seen_cases:
            seen_cases.append(case_name)
    return seen_cases
def CalculateTestLists():
    """Calculates the list of tests run under different flags."""
    # Each module-level list is a memo: populate it only on first call so the
    # child test binary is invoked at most once per flag/sharding scenario.
    if not ALL_TESTS:
        ALL_TESTS.extend(
            GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

    if not ACTIVE_TESTS:
        ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

    if not FILTERED_TESTS:
        FILTERED_TESTS.extend(
            GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

    if not SHARDED_TESTS:
        SHARDED_TESTS.extend(
            GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                      SHARD_INDEX_ENV_VAR: '1'},
                                     [])[0])

    # Shuffled variants all use random seed 1 so results are reproducible.
    if not SHUFFLED_ALL_TESTS:
        SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
            {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

    if not SHUFFLED_ACTIVE_TESTS:
        SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

    if not SHUFFLED_FILTERED_TESTS:
        SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

    if not SHUFFLED_SHARDED_TESTS:
        SHUFFLED_SHARDED_TESTS.extend(
            GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                      SHARD_INDEX_ENV_VAR: '1'},
                                     [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
    """Tests test shuffling."""

    def setUp(self):
        # Populates (once) the module-level test-list caches used below.
        CalculateTestLists()

    def testShufflePreservesNumberOfTests(self):
        # Shuffling must never add or drop tests.
        self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
        self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
        self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
        self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

    def testShuffleChangesTestOrder(self):
        # With seed 1 the shuffled order must differ from the natural order.
        self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
        self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS,
                     SHUFFLED_ACTIVE_TESTS)
        self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                     SHUFFLED_FILTERED_TESTS)
        self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                     SHUFFLED_SHARDED_TESTS)

    def testShuffleChangesTestCaseOrder(self):
        # The order of the test *cases* must change too, not just the tests.
        self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                     GetTestCases(SHUFFLED_ALL_TESTS))
        self.assert_(
            GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
            GetTestCases(SHUFFLED_ACTIVE_TESTS))
        self.assert_(
            GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
            GetTestCases(SHUFFLED_FILTERED_TESTS))
        self.assert_(
            GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
            GetTestCases(SHUFFLED_SHARDED_TESTS))

    def testShuffleDoesNotRepeatTest(self):
        # Every test appears exactly once in each shuffled list.
        for test in SHUFFLED_ALL_TESTS:
            self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                             '%s appears more than once' % (test,))
        for test in SHUFFLED_ACTIVE_TESTS:
            self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                             '%s appears more than once' % (test,))
        for test in SHUFFLED_FILTERED_TESTS:
            self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                             '%s appears more than once' % (test,))
        for test in SHUFFLED_SHARDED_TESTS:
            self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                             '%s appears more than once' % (test,))

    def testShuffleDoesNotCreateNewTest(self):
        # Shuffled lists are subsets of their unshuffled counterparts.
        for test in SHUFFLED_ALL_TESTS:
            self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
        for test in SHUFFLED_ACTIVE_TESTS:
            self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
        for test in SHUFFLED_FILTERED_TESTS:
            self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
        for test in SHUFFLED_SHARDED_TESTS:
            self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

    def testShuffleIncludesAllTests(self):
        # ...and supersets: together with the above, the same sets of tests.
        for test in ALL_TESTS:
            self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
        for test in ACTIVE_TESTS:
            self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
        for test in FILTERED_TESTS:
            self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
        for test in SHARDED_TESTS:
            self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

    def testShuffleLeavesDeathTestsAtFront(self):
        # Death tests must stay grouped at the front even after shuffling.
        non_death_test_found = False
        for test in SHUFFLED_ACTIVE_TESTS:
            if 'DeathTest.' in test:
                self.assert_(not non_death_test_found,
                             '%s appears after a non-death test' % (test,))
            else:
                non_death_test_found = True

    def _VerifyTestCasesDoNotInterleave(self, tests):
        # Record the case name each time it changes; a case split into two
        # separate runs would then appear twice in test_cases.
        test_cases = []
        for test in tests:
            [test_case, _] = test.split('.')
            if test_cases and test_cases[-1] != test_case:
                test_cases.append(test_case)
                self.assertEqual(1, test_cases.count(test_case),
                                 'Test case %s is not grouped together in %s' %
                                 (test_case, tests))

    def testShuffleDoesNotInterleaveTestCases(self):
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
        self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

    def testShuffleRestoresOrderAfterEachIteration(self):
        # Get the test lists in all 3 iterations, using random seed 1, 2,
        # and 3 respectively.  Google Test picks a different seed in each
        # iteration, and this test depends on the current implementation
        # picking successive numbers.  This dependency is not ideal, but
        # makes the test much easier to write.
        [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
            GetTestsForAllIterations(
                {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

        # Make sure running the tests with random seed 1 gets the same
        # order as in iteration 1 above.
        [tests_with_seed1] = GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1)])
        self.assertEqual(tests_in_iteration1, tests_with_seed1)

        # Make sure running the tests with random seed 2 gets the same
        # order as in iteration 2 above.  Success means that Google Test
        # correctly restores the test order before re-shuffling at the
        # beginning of iteration 2.
        [tests_with_seed2] = GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(2)])
        self.assertEqual(tests_in_iteration2, tests_with_seed2)

        # Make sure running the tests with random seed 3 gets the same
        # order as in iteration 3 above.  Success means that Google Test
        # correctly restores the test order before re-shuffling at the
        # beginning of iteration 3.
        [tests_with_seed3] = GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(3)])
        self.assertEqual(tests_in_iteration3, tests_with_seed3)

    def testShuffleGeneratesNewOrderInEachIteration(self):
        # Consecutive repeat iterations must each use a fresh ordering.
        [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
            GetTestsForAllIterations(
                {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

        self.assert_(tests_in_iteration1 != tests_in_iteration2,
                     tests_in_iteration1)
        self.assert_(tests_in_iteration1 != tests_in_iteration3,
                     tests_in_iteration1)
        self.assert_(tests_in_iteration2 != tests_in_iteration3,
                     tests_in_iteration2)

    def testShuffleShardedTestsPreservesPartition(self):
        # If we run M tests on N shards, the same M tests should be run in
        # total, regardless of the random seeds used by the shards.
        [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                             SHARD_INDEX_ENV_VAR: '0'},
                                            [ShuffleFlag(), RandomSeedFlag(1)])
        [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                             SHARD_INDEX_ENV_VAR: '1'},
                                            [ShuffleFlag(), RandomSeedFlag(20)])
        [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                             SHARD_INDEX_ENV_VAR: '2'},
                                            [ShuffleFlag(), RandomSeedFlag(25)])
        sorted_sharded_tests = tests1 + tests2 + tests3
        sorted_sharded_tests.sort()
        sorted_active_tests = []
        sorted_active_tests.extend(ACTIVE_TESTS)
        sorted_active_tests.sort()
        self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
    # Delegate to gtest's Python test runner.
    gtest_test_utils.Main()
|
openstack/tempest-lib | refs/heads/master | tempest_lib/cmd/check_uuid.py | 1 | #!/usr/bin/env python
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import ast
import importlib
import inspect
import os
import sys
import unittest
import uuid
import six.moves.urllib.parse as urlparse
# Module and decorator name that every Tempest test must carry.
DECORATOR_MODULE = 'test'
DECORATOR_NAME = 'idempotent_id'
DECORATOR_IMPORT = 'tempest.%s' % DECORATOR_MODULE
# Import line inserted into modules that do not yet import the decorator.
IMPORT_LINE = 'from tempest import %s' % DECORATOR_MODULE
# Template for the decorator line itself; the %s receives a fresh UUID.
DECORATOR_TEMPLATE = "@%s.%s('%%s')" % (DECORATOR_MODULE,
                                        DECORATOR_NAME)
# Unit tests are exempt from the idempotent-id requirement.
UNIT_TESTS_EXCLUDE = 'tempest.tests'
class SourcePatcher(object):
    """Lazy patcher for python source files.

    Patches are accumulated and only written out by apply_patches().  Both
    the source text and the patches are URL-quoted while held in memory so
    that str.format() can be used for the final substitution without any
    '{'/'}' from the real source colliding with the format slots.
    """

    def __init__(self):
        self.source_files = None  # {filename: quoted source with format slots}
        self.patches = None       # {patch uuid: quoted patch text}
        self.clear()

    def clear(self):
        """Clear inner state"""
        self.source_files = {}
        self.patches = {}

    @staticmethod
    def _quote(s):
        # URL-quoting escapes '{'/'}' so the source is format()-safe.
        return urlparse.quote(s)

    @staticmethod
    def _unquote(s):
        return urlparse.unquote(s)

    def add_patch(self, filename, patch, line_no):
        """Register *patch* for insertion before 1-based line *line_no*."""
        if filename not in self.source_files:
            with open(filename) as f:
                self.source_files[filename] = self._quote(f.read())
        patch_id = str(uuid.uuid4())
        if not patch.endswith('\n'):
            patch += '\n'
        self.patches[patch_id] = self._quote(patch)
        lines = self.source_files[filename].split(self._quote('\n'))
        # Prefix the target line with a named format slot for this patch.
        lines[line_no - 1] = ''.join(('{%s:s}' % patch_id, lines[line_no - 1]))
        self.source_files[filename] = self._quote('\n').join(lines)

    def _save_changes(self, filename, source):
        # Overwrites the file in place and reports it on stdout.
        print('%s fixed' % filename)
        with open(filename, 'w') as f:
            f.write(source)

    def apply_patches(self):
        """Apply all patches"""
        for filename in self.source_files:
            patched_source = self._unquote(
                self.source_files[filename].format(**self.patches)
            )
            self._save_changes(filename, patched_source)
        self.clear()
class TestChecker(object):
    """Checks every test in a package for an @test.idempotent_id decorator.

    Walks the package, parses each test module with ast, and can report
    missing/duplicate ids or (via fix_tests) insert fresh UUIDs in place.
    """

    def __init__(self, package):
        self.package = package
        # Filesystem root of the package; module paths are derived from it.
        self.base_path = os.path.abspath(os.path.dirname(package.__file__))

    def _path_to_package(self, path):
        # Map a directory under base_path back to its dotted module path.
        relative_path = path[len(self.base_path) + 1:]
        if relative_path:
            return '.'.join((self.package.__name__,) +
                            tuple(relative_path.split('/')))
        else:
            return self.package.__name__

    def _modules_search(self):
        """Recursive search for python modules in base package"""
        modules = []
        for root, dirs, files in os.walk(self.base_path):
            # Skip directories that are not python packages.
            if not os.path.exists(os.path.join(root, '__init__.py')):
                continue
            root_package = self._path_to_package(root)
            for item in files:
                if item.endswith('.py'):
                    module_name = '.'.join((root_package,
                                            os.path.splitext(item)[0]))
                    # Unit tests themselves are exempt.
                    if not module_name.startswith(UNIT_TESTS_EXCLUDE):
                        modules.append(module_name)
        return modules

    @staticmethod
    def _get_idempotent_id(test_node):
        """Return the @test.idempotent_id argument (a string), or None."""
        idempotent_id = None
        for decorator in test_node.decorator_list:
            # Match exactly `@<DECORATOR_MODULE>.<DECORATOR_NAME>(...)`.
            if (hasattr(decorator, 'func') and
                    hasattr(decorator.func, 'attr') and
                    decorator.func.attr == DECORATOR_NAME and
                    hasattr(decorator.func, 'value') and
                    decorator.func.value.id == DECORATOR_MODULE):
                for arg in decorator.args:
                    idempotent_id = ast.literal_eval(arg)
        return idempotent_id

    @staticmethod
    def _is_decorator(line):
        return line.strip().startswith('@')

    @staticmethod
    def _is_def(line):
        return line.strip().startswith('def ')

    def _add_uuid_to_test(self, patcher, test_node, source_path):
        with open(source_path) as src:
            src_lines = src.read().split('\n')
        lineno = test_node.lineno
        insert_position = lineno
        # Scan forward over the decorator block so the new decorator is
        # inserted just before the def (or before a lexically-later one).
        while True:
            if (self._is_def(src_lines[lineno - 1]) or
                    (self._is_decorator(src_lines[lineno - 1]) and
                        (DECORATOR_TEMPLATE.split('(')[0] <=
                            src_lines[lineno - 1].strip().split('(')[0]))):
                insert_position = lineno
                break
            lineno += 1
        patcher.add_patch(
            source_path,
            ' ' * test_node.col_offset + DECORATOR_TEMPLATE % uuid.uuid4(),
            insert_position
        )

    @staticmethod
    def _is_test_case(module, node):
        # A top-level class that resolves to a unittest.TestCase subclass.
        if (node.__class__ is ast.ClassDef and
                hasattr(module, node.name) and
                inspect.isclass(getattr(module, node.name))):
            return issubclass(getattr(module, node.name), unittest.TestCase)

    @staticmethod
    def _is_test_method(node):
        return (node.__class__ is ast.FunctionDef
                and node.name.startswith('test_'))

    @staticmethod
    def _next_node(body, node):
        # NOTE(review): `index(node) < len(body)` is always true, so the last
        # element raises IndexError here — presumably `< len(body) - 1` was
        # intended; verify against callers before changing.
        if body.index(node) < len(body):
            return body[body.index(node) + 1]

    @staticmethod
    def _import_name(node):
        # Dotted name of the first import in an Import/ImportFrom node.
        if type(node) == ast.Import:
            return node.names[0].name
        elif type(node) == ast.ImportFrom:
            return '%s.%s' % (node.module, node.names[0].name)

    def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
        with open(source_path) as f:
            src_lines = f.read().split('\n')
        line_no = 0
        tempest_imports = [node for node in src_parsed.body
                           if self._import_name(node) and
                           'tempest.' in self._import_name(node)]
        if not tempest_imports:
            # No tempest imports at all: add a free-standing import block.
            import_snippet = '\n'.join(('', IMPORT_LINE, ''))
        else:
            # Insert alphabetically among the existing tempest imports.
            for node in tempest_imports:
                if self._import_name(node) < DECORATOR_IMPORT:
                    continue
                else:
                    line_no = node.lineno
                    import_snippet = IMPORT_LINE
                    break
            else:
                # All tempest imports sort before ours: append after the
                # last one, skipping past any continuation lines.
                line_no = tempest_imports[-1].lineno
                while True:
                    if (not src_lines[line_no - 1] or
                            getattr(self._next_node(src_parsed.body,
                                                    tempest_imports[-1]),
                                    'lineno') == line_no or
                            line_no == len(src_lines)):
                        break
                    line_no += 1
                import_snippet = '\n'.join((IMPORT_LINE, ''))
        patcher.add_patch(source_path, import_snippet, line_no)

    def get_tests(self):
        """Get test methods with sources from base package with metadata"""
        tests = {}
        for module_name in self._modules_search():
            tests[module_name] = {}
            module = importlib.import_module(module_name)
            # Always point at the .py file, even if loaded from a .pyc.
            source_path = '.'.join(
                (os.path.splitext(module.__file__)[0], 'py')
            )
            with open(source_path, 'r') as f:
                source = f.read()
            tests[module_name]['source_path'] = source_path
            tests[module_name]['tests'] = {}
            source_parsed = ast.parse(source)
            tests[module_name]['ast'] = source_parsed
            # True when the module already imports the decorator module.
            tests[module_name]['import_valid'] = (
                hasattr(module, DECORATOR_MODULE) and
                inspect.ismodule(getattr(module, DECORATOR_MODULE))
            )
            test_cases = (node for node in source_parsed.body
                          if self._is_test_case(module, node))
            for node in test_cases:
                for subnode in filter(self._is_test_method, node.body):
                    test_name = '%s.%s' % (node.name, subnode.name)
                    tests[module_name]['tests'][test_name] = subnode
        return tests

    @staticmethod
    def _filter_tests(function, tests):
        """Filter tests with condition 'function(test_node) == True'"""
        result = {}
        for module_name in tests:
            for test_name in tests[module_name]['tests']:
                if function(module_name, test_name, tests):
                    if module_name not in result:
                        result[module_name] = {
                            'ast': tests[module_name]['ast'],
                            'source_path': tests[module_name]['source_path'],
                            'import_valid': tests[module_name]['import_valid'],
                            'tests': {}
                        }
                    result[module_name]['tests'][test_name] = \
                        tests[module_name]['tests'][test_name]
        return result

    def find_untagged(self, tests):
        """Filter all tests without uuid in metadata"""
        def check_uuid_in_meta(module_name, test_name, tests):
            idempotent_id = self._get_idempotent_id(
                tests[module_name]['tests'][test_name])
            return not idempotent_id
        return self._filter_tests(check_uuid_in_meta, tests)

    def report_collisions(self, tests):
        """Reports collisions if there are any

        Returns true if collisions exist.
        """
        uuids = {}

        def report(module_name, test_name, tests):
            test_uuid = self._get_idempotent_id(
                tests[module_name]['tests'][test_name])
            if not test_uuid:
                return
            if test_uuid in uuids:
                # Same UUID already seen on another test: print both sides.
                error_str = "%s:%s\n uuid %s collision: %s<->%s\n%s:%s" % (
                    tests[module_name]['source_path'],
                    tests[module_name]['tests'][test_name].lineno,
                    test_uuid,
                    test_name,
                    uuids[test_uuid]['test_name'],
                    uuids[test_uuid]['source_path'],
                    uuids[test_uuid]['test_node'].lineno,
                )
                print(error_str)
                print("cannot automatically resolve the collision, please "
                      "manually remove the duplicate value on the new test.")
                return True
            else:
                uuids[test_uuid] = {
                    'module': module_name,
                    'test_name': test_name,
                    'test_node': tests[module_name]['tests'][test_name],
                    'source_path': tests[module_name]['source_path']
                }
        return bool(self._filter_tests(report, tests))

    def report_untagged(self, tests):
        """Reports untagged tests if there are any

        Returns true if untagged tests exist.
        """
        def report(module_name, test_name, tests):
            error_str = "%s:%s\nmissing @test.idempotent_id('...')\n%s\n" % (
                tests[module_name]['source_path'],
                tests[module_name]['tests'][test_name].lineno,
                test_name
            )
            print(error_str)
            return True
        return bool(self._filter_tests(report, tests))

    def fix_tests(self, tests):
        """Add uuids to all specified in tests and fix it in source files"""
        patcher = SourcePatcher()
        for module_name in tests:
            add_import_once = True
            for test_name in tests[module_name]['tests']:
                # Add the decorator's import once per module if it's missing.
                if not tests[module_name]['import_valid'] and add_import_once:
                    self._add_import_for_test_uuid(
                        patcher,
                        tests[module_name]['ast'],
                        tests[module_name]['source_path']
                    )
                    add_import_once = False
                self._add_uuid_to_test(
                    patcher, tests[module_name]['tests'][test_name],
                    tests[module_name]['source_path'])
        patcher.apply_patches()
def run():
    """CLI entry point: check (and optionally fix) @test.idempotent_id tags."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--package', action='store', dest='package',
                        default='tempest', type=str,
                        help='Package with tests')
    parser.add_argument('--fix', action='store_true', dest='fix_tests',
                        help='Attempt to fix tests without UUIDs')
    args = parser.parse_args()

    # Make the parent directory importable so the target package resolves.
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    checker = TestChecker(importlib.import_module(args.package))

    tests = checker.get_tests()
    untagged = checker.find_untagged(tests)
    errors = checker.report_collisions(tests)

    if args.fix_tests and untagged:
        checker.fix_tests(untagged)
    else:
        errors = checker.report_untagged(untagged) or errors

    if errors:
        sys.exit("@test.idempotent_id existence and uniqueness checks failed\n"
                 "Run 'tox -v -euuidgen' to automatically fix tests with\n"
                 "missing @test.idempotent_id decorators.")
if __name__ == '__main__':
    # Script entry point.
    run()
|
mikedchavez1010/XX-Net | refs/heads/master | python27/1.0/lib/logging/config.py | 61 | # Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import cStringIO
import errno
import io
import logging
import logging.handlers
import os
import re
import socket
import struct
import sys
import traceback
import types
try:
import thread
import threading
except ImportError:
thread = None
from SocketServer import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
RESET_ERROR = errno.ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
    """
    Read the logging configuration from a ConfigParser-format file.

    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).

    :param fname: a filename, or any object with a ``readline`` method
                  (which is then read directly as a config stream).
    :param defaults: passed through to ``ConfigParser.ConfigParser``.
    :param disable_existing_loggers: if true, loggers that existed before
                  this call but are not named in the new configuration are
                  disabled (see ``_install_loggers``).
    """
    import ConfigParser
    cp = ConfigParser.ConfigParser(defaults)
    if hasattr(fname, 'readline'):
        # File-like object: read from it in place.
        cp.readfp(fname)
    else:
        cp.read(fname)
    formatters = _create_formatters(cp)
    # critical section: swapping out handlers/loggers must not race with
    # concurrent logging calls, so it happens under the module-level lock.
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _encoded(s):
return s if isinstance(s, str) else s.encode('utf-8')
def _create_formatters(cp):
    """Create and return a dict of formatters, keyed by the names listed in
    the ``[formatters]`` section's ``keys`` option of ConfigParser *cp*."""
    flist = cp.get("formatters", "keys")
    if not len(flist):
        return {}
    flist = flist.split(",")
    flist = _strip_spaces(flist)
    formatters = {}
    for form in flist:
        sectname = "formatter_%s" % form
        opts = cp.options(sectname)
        if "format" in opts:
            # The trailing 1 is ConfigParser's "raw" flag: don't expand
            # %-interpolations inside the format string itself.
            fs = cp.get(sectname, "format", 1)
        else:
            fs = None
        if "datefmt" in opts:
            dfs = cp.get(sectname, "datefmt", 1)
        else:
            dfs = None
        # Default formatter class; a "class" option overrides it with a
        # dotted path resolved via _resolve().
        c = logging.Formatter
        if "class" in opts:
            class_name = cp.get(sectname, "class")
            if class_name:
                c = _resolve(class_name)
        f = c(fs, dfs)
        formatters[form] = f
    return formatters
def _install_handlers(cp, formatters):
    """Install and return handlers, keyed by the names listed in the
    ``[handlers]`` section's ``keys`` option.

    NOTE: both the handler class and its constructor args are evaluated
    with eval() in the logging namespace -- config files are trusted
    input here.
    """
    hlist = cp.get("handlers", "keys")
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        sectname = "handler_%s" % hand
        klass = cp.get(sectname, "class")
        opts = cp.options(sectname)
        if "formatter" in opts:
            fmt = cp.get(sectname, "formatter")
        else:
            fmt = ""
        try:
            # First try the name as an attribute of the logging module
            # (e.g. "StreamHandler"), else resolve it as a dotted path.
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        args = cp.get(sectname, "args")
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in opts:
            level = cp.get(sectname, "level")
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            if "target" in opts:
                target = cp.get(sectname,"target")
            else:
                target = ""
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def _install_loggers(cp, handlers, disable_existing_loggers):
    """Create and install loggers listed in the ``[loggers]`` section.

    The root logger is configured first from ``[logger_root]``; the others
    are configured from their ``[logger_<name>]`` sections.  Pre-existing
    loggers not mentioned in the new configuration are disabled (when
    *disable_existing_loggers* is true) rather than deleted, since other
    threads may still hold references to them.
    """
    # configure the root first
    llist = cp.get("loggers", "keys")
    llist = llist.split(",")
    llist = list(map(lambda x: x.strip(), llist))
    llist.remove("root")
    sectname = "logger_root"
    root = logging.root
    log = root
    opts = cp.options(sectname)
    if "level" in opts:
        level = cp.get(sectname, "level")
        log.setLevel(logging._levelNames[level])
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = cp.get(sectname, "handlers")
    if len(hlist):
        hlist = hlist.split(",")
        hlist = _strip_spaces(hlist)
        for hand in hlist:
            log.addHandler(handlers[hand])
    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = list(root.manager.loggerDict.keys())
    #The list needs to be sorted so that we can
    #avoid disabling child loggers of explicitly
    #named loggers. With a sorted list it is easier
    #to find the child loggers.
    existing.sort()
    #We'll keep the list of existing loggers
    #which are children of named loggers here...
    child_loggers = []
    #now set up the new ones...
    for log in llist:
        sectname = "logger_%s" % log
        qn = cp.get(sectname, "qualname")
        opts = cp.options(sectname)
        if "propagate" in opts:
            propagate = cp.getint(sectname, "propagate")
        else:
            propagate = 1
        logger = logging.getLogger(qn)
        if qn in existing:
            i = existing.index(qn) + 1 # start with the entry after qn
            prefixed = qn + "."
            pflen = len(prefixed)
            num_existing = len(existing)
            # Collect existing child loggers of this named logger so they
            # are reset (not disabled) in the loop at the end.
            while i < num_existing:
                if existing[i][:pflen] == prefixed:
                    child_loggers.append(existing[i])
                i += 1
            existing.remove(qn)
        if "level" in opts:
            level = cp.get(sectname, "level")
            logger.setLevel(logging._levelNames[level])
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = cp.get(sectname, "handlers")
        if len(hlist):
            hlist = hlist.split(",")
            hlist = _strip_spaces(hlist)
            for hand in hlist:
                logger.addHandler(handlers[hand])
    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    #However, don't disable children of named loggers, as that's
    #probably not what was intended by the user.
    for log in existing:
        logger = root.manager.loggerDict[log]
        if log in child_loggers:
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.propagate = 1
        else:
            logger.disabled = disable_existing_loggers
# Loose identifier check used to filter config keys passed as **kwargs.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a valid Python identifier, else raise ValueError."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
class ConvertingMixin(object):
    """Shared conversion helpers for the Converting* container wrappers."""
    def convert_with_key(self, key, value, replace=True):
        converted = self.configurator.convert(value)
        if converted is value:
            return converted
        # The value changed under conversion: optionally cache it back
        # under *key*, and record provenance on wrapped containers.
        if replace:
            self[key] = converted
        if type(converted) in (ConvertingDict, ConvertingList,
                               ConvertingTuple):
            converted.parent = self
            converted.key = key
        return converted
    def convert(self, value):
        converted = self.configurator.convert(value)
        if converted is not value and \
           type(converted) in (ConvertingDict, ConvertingList,
                               ConvertingTuple):
            converted.parent = self
        return converted
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict, ConvertingMixin):
    """dict subclass that converts values as they are accessed."""
    def __getitem__(self, key):
        raw = dict.__getitem__(self, key)
        return self.convert_with_key(key, raw)
    def get(self, key, default=None):
        raw = dict.get(self, key, default)
        return self.convert_with_key(key, raw)
    def pop(self, key, default=None):
        # A popped value is no longer stored, so never write it back.
        raw = dict.pop(self, key, default)
        return self.convert_with_key(key, raw, replace=False)
class ConvertingList(list, ConvertingMixin):
    """list subclass that converts items as they are accessed."""
    def __getitem__(self, key):
        raw = list.__getitem__(self, key)
        return self.convert_with_key(key, raw)
    def pop(self, idx=-1):
        # A popped item has no stable index, so convert without a key.
        raw = list.pop(self, idx)
        return self.convert(raw)
class ConvertingTuple(tuple, ConvertingMixin):
    """tuple subclass that converts items as they are accessed."""
    def __getitem__(self, key):
        raw = tuple.__getitem__(self, key)
        # Tuples are immutable, so the converted value is never stored back.
        return self.convert_with_key(key, raw, replace=False)
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """
    # Matches "prefix://suffix" values such as "ext://sys.stderr".
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    # Tokens used by cfg_convert to parse paths like "handlers[console].level".
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps URL-style prefixes to the names of the methods that handle them.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = __import__
    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self
        # Issue 12718: winpdb replaces __import__ with a Python function, which
        # ends up being treated as a bound method. To avoid problems, we
        # set the importer on the instance, but leave it defined in the class
        # so existing code doesn't break
        if type(__import__) == types.FunctionType:
            self.importer = __import__
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.

        Raises ValueError (with the original ImportError chained on) when
        the dotted path cannot be resolved.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Not a plain attribute: try importing it as a submodule.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol.

        Walks a dotted/indexed path (e.g. "handlers[console].level") through
        self.config and returns the value found there.
        """
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, basestring): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory.

        The '()' key names the factory (callable or dotted path); the '.'
        key optionally maps attribute names to values set on the result.
        """
        c = config.pop('()')
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.

    Supports two modes: a full reconfiguration (the default), and an
    incremental one (config key 'incremental') which only adjusts levels
    and propagation of already-installed handlers/loggers.
    """
    def configure(self):
        """Do the configuration."""
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        # All mutation of global logging state happens under the module lock.
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                for name in handlers:
                    if name not in logging._handlers:
                        raise ValueError('No handler found with '
                                         'name %r' % name)
                    else:
                        try:
                            handler = logging._handlers[name]
                            handler_config = handlers[name]
                            level = handler_config.get('level', None)
                            if level:
                                handler.setLevel(logging._checkLevel(level))
                        except StandardError as e:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                deferred = []
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        # Sentinel raised by configure_handler when a
                        # MemoryHandler target isn't built yet: retry below.
                        if 'target not configured yet' in str(e):
                            deferred.append(name)
                        else:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                # Now do any that were deferred
                for name in deferred:
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = root.manager.loggerDict.keys()
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    name = _encoded(name)
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result
    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result
    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Either a '()' factory or a 'class' dotted path must be present;
        'formatter', 'level' and 'filters' entries are applied afterwards.
        """
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            cname = config.pop('class')
            klass = self.resolve(cname)
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    th = self.config['handlers'][config['target']]
                    if not isinstance(th, logging.Handler):
                        config['class'] = cname # restore for deferred configuration
                        raise StandardError('target not configured yet')
                    config['target'] = th
                except StandardError as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(logging._checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))
    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(logging._checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)
    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate
    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# Hook point: applications may substitute their own configurator class.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary (the PEP 391 schema)."""
    configurator = dictConfigClass(config)
    configurator.configure()
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError("listen() needs threading to work")
    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    # Keep reading until the whole slen-byte payload arrives.
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    try:
                        # Prefer a JSON payload (dictConfig schema)...
                        import json
                        d =json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        # ...anything else is treated as a ConfigParser-format
                        # file and applied via fileConfig().
                        file = cStringIO.StringIO(chunk)
                        try:
                            fileConfig(file)
                        except (KeyboardInterrupt, SystemExit):
                            raise
                        except:
                            traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except socket.error as e:
                # A peer reset is expected on disconnect; re-raise the rest.
                if e.errno != RESET_ERROR:
                    raise
    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """
        allow_reuse_address = 1
        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready
        def serve_until_stopped(self):
            """Serve requests, polling the abort flag once per timeout."""
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()
    class Server(threading.Thread):
        # Thread that owns the receiver; publishes it via the module-level
        # _listener so stopListening() can reach it.
        def __init__(self, rcvr, hdlr, port):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.ready = threading.Event()
        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready)
            if self.port == 0:
                # Port 0 means "any free port": report the one actually bound.
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()
    return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    logging._acquireLock()
    try:
        if _listener:
            # The serve loop polls this flag once per timeout and exits.
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
|
xinwu/horizon | refs/heads/master | openstack_dashboard/dashboards/project/stacks/resource_types/tables.py | 59 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
class ResourceTypesTable(tables.DataTable):
    """Table of Heat resource types, with implementation/component/resource
    columns derived from the '::'-separated type name."""
    class ResourceColumn(tables.Column):
        # Column whose cell value is one segment of the resource type name;
        # `self.transform` (presumably set from the column's first
        # constructor argument) selects which segment.
        def get_raw_data(self, datum):
            attr_list = ['implementation', 'component', 'resource']
            info_list = datum.resource_type.split('::')
            # e.g. "OS::Nova::Server" -> implementation "OpenStack"
            info_list[0] = info_list[0].replace("OS", "OpenStack")
            if info_list[0] == "AWS":
                info_list[0] = _("AWS compatible")
            # NOTE(review): assumes the type name has three '::' segments;
            # fewer segments would raise KeyError here -- confirm upstream.
            info_dict = dict(zip(attr_list, info_list))
            return info_dict[self.transform]
    name = tables.Column("resource_type",
                         verbose_name=_("Type"),
                         link="horizon:project:stacks.resource_types:details",)
    implementation = ResourceColumn("implementation",
                                    verbose_name=_("Implementation"),)
    component = ResourceColumn("component",
                               verbose_name=_("Component"),)
    resource = ResourceColumn("resource",
                              verbose_name=_("Resource"),)
    def get_object_id(self, resource):
        # Resource types have no numeric id; the type string is the key.
        return resource.resource_type
    class Meta(object):
        name = "resource_types"
        verbose_name = _("Resource Types")
|
KellyChan/Python | refs/heads/master | python/django/elf/elf/src/dream/models.py | 3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
class Dream(models.Model):
    """A user's dream-diary entry."""
    # Author of the entry.
    user = models.ForeignKey(User)
    # NOTE(review): unique=True makes the date globally unique across ALL
    # users, not one-per-user -- confirm this is intended.
    date = models.DateField(unique=True)
    title = models.CharField(max_length=100)
    content = models.TextField()
    # Optional feedback/interpretation, filled in later.
    feedback = models.TextField(null=True, blank=True)
    # Set once, when the row is first saved.
    pubtime = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        # Python 2 display form (admin/shell).
        return u'%d %s %s %s %s %s %s' \
            % (self.id, self.user, self.date, self.title, self.content, self.feedback, self.pubtime)
    class Meta:
        ordering = ['date', 'pubtime']
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/ship/shared_bwing.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the shared B-wing ship template object."""
    ship = Ship()
    ship.template = "object/ship/shared_bwing.iff"
    ship.attribute_template_id = -1
    ship.stfName("","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return ship
jamesliu/mxnet | refs/heads/master | python/mxnet/_cy2/__init__.py | 50 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for cython generated modules for python2"""
|
irin4eto/Bar-Management | refs/heads/master | manager/migrations/0001_initial.py | 4 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration: creates the `manager_manager` table with a
    unique FK to auth.User (i.e. one Manager row per user)."""
    def forwards(self, orm):
        # Adding model 'Manager'
        db.create_table('manager_manager', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('manager', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
        ))
        db.send_create_signal('manager', ['Manager'])
    def backwards(self, orm):
        # Deleting model 'Manager'
        db.delete_table('manager_manager')
    # Frozen ORM definitions South uses to build the `orm` argument above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'manager.manager': {
            'Meta': {'object_name': 'Manager'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }
    complete_apps = ['manager']
RCoon/CodingBat | refs/heads/master | Python/Logic_2/close_far.py | 1 | # Given three ints, a b c, return True if one of b or c is "close" (differing
# from a by at most 1), while the other is "far", differing from both other
# values by 2 or more. Note: abs(num) computes the absolute value of a number.
# close_far(1, 2, 10) --> True
# close_far(1, 2, 3) --> False
# close_far(4, 1, 3) --> True
def close_far(a, b, c):
    """Return True if exactly one of b, c is "close" to a (differs by at
    most 1) while the other is "far" (differs from both other values by
    2 or more).
    """
    def near(x, y):
        # "close": absolute difference of at most 1.
        return abs(x - y) <= 1

    def far(x, y, z):
        # "far": differs from both other values by 2 or more.
        return abs(x - y) >= 2 and abs(x - z) >= 2

    return (near(a, b) and far(c, a, b)) or (near(a, c) and far(b, a, c))
# Exercise the CodingBat examples; expected output: True, False, True.
for sample in ((1, 2, 10), (1, 2, 3), (4, 1, 3)):
    print(close_far(*sample))
|
MQQiang/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/turtledemo/clock.py | 99 | #!/usr/bin/env python3
# -*- coding: cp1252 -*-
""" turtle-example-suite:
tdemo_clock.py
Enhanced clock-program, showing date
and time
------------------------------------
Press STOP to exit the program!
------------------------------------
"""
from turtle import *
from datetime import datetime
def jump(distanz, winkel=0):
    """Move *distanz* units forward without drawing.

    The heading is rotated right by *winkel* degrees before moving and
    rotated back afterwards, so the net heading change is zero.
    """
    penup()
    right(winkel)
    forward(distanz)
    left(winkel)
    pendown()
def hand(laenge, spitze):
    """Draw the outline of one clock hand.

    The hand is a stem (drawn 15% longer than *laenge*) ending in an
    equilateral-triangle tip with side length *spitze*.  Assumes the
    pen is down.
    """
    fd(laenge*1.15)
    rt(90)
    fd(spitze/2.0)
    lt(120)
    fd(spitze)
    lt(120)
    fd(spitze)
    lt(120)
    fd(spitze/2.0)
def make_hand_shape(name, laenge, spitze):
    """Register a polygon turtle shape called *name* shaped like a clock
    hand (see hand()).

    The drawing starts 15% of *laenge* behind the origin, so the
    registered shape rotates around a point inside the stem.
    """
    reset()
    jump(-laenge*0.15)
    begin_poly()
    hand(laenge, spitze)
    end_poly()
    hand_form = get_poly()
    register_shape(name, hand_form)
def clockface(radius):
    """Draw the dial: 60 minute marks around a circle of *radius*.

    Every fifth mark (the hour positions) is a 25-unit stroke; the other
    marks are small dots.  The turtle turns 6 degrees per mark (360/60).
    """
    reset()
    pensize(7)
    for i in range(60):
        jump(radius)
        if i % 5 == 0:
            fd(25)
            jump(-radius-25)
        else:
            dot(3)
            jump(-radius)
        rt(6)
def setup():
    """Create the dial, the three hand turtles and the text writer.

    Binds second_hand, minute_hand, hour_hand and writer as module
    globals so that tick() can update them.
    """
    global second_hand, minute_hand, hour_hand, writer
    mode("logo")  # heading 0 = north, positive angles clockwise (clock-like)
    make_hand_shape("second_hand", 125, 25)
    make_hand_shape("minute_hand", 130, 25)
    make_hand_shape("hour_hand", 90, 25)
    clockface(160)
    second_hand = Turtle()
    second_hand.shape("second_hand")
    second_hand.color("gray20", "gray80")
    minute_hand = Turtle()
    minute_hand.shape("minute_hand")
    minute_hand.color("blue1", "red1")
    hour_hand = Turtle()
    hour_hand.shape("hour_hand")
    hour_hand.color("blue3", "red3")
    # NOTE: the loop variable shadows the module-level hand() function;
    # harmless here since hand() is not called after this point.
    for hand in second_hand, minute_hand, hour_hand:
        hand.resizemode("user")
        hand.shapesize(1, 1, 3)  # stretch outline width by factor 3
        hand.speed(0)
    ht()
    # The writer turtle prints the weekday/date text below the center.
    writer = Turtle()
    #writer.mode("logo")
    writer.ht()
    writer.pu()
    writer.bk(85)
def wochentag(t):
    """Return the English weekday name for the date/datetime *t*."""
    days = ("Monday", "Tuesday", "Wednesday", "Thursday",
            "Friday", "Saturday", "Sunday")
    return days[t.weekday()]
def datum(z):
    """Format *z* as e.g. "Dec. 15 2009" (abbreviated month, day, year)."""
    months = ("Jan.", "Feb.", "Mar.", "Apr.", "May", "June",
              "July", "Aug.", "Sep.", "Oct.", "Nov.", "Dec.")
    return "%s %d %d" % (months[z.month - 1], z.day, z.year)
def tick():
    """Redraw the date/time text and hand headings, then re-arm itself.

    Scheduled every 100 ms via ontimer().  A turtle Terminator raised
    while the demo is being stopped is swallowed so shutdown is quiet.
    """
    t = datetime.today()
    # Fractional units so the hands sweep smoothly between marks.
    sekunde = t.second + t.microsecond*0.000001
    minute = t.minute + sekunde/60.0
    stunde = t.hour + minute/60.0
    try:
        tracer(False)  # Terminator can occur here
        writer.clear()
        writer.home()
        writer.forward(65)
        writer.write(wochentag(t),
                     align="center", font=("Courier", 14, "bold"))
        writer.back(150)
        writer.write(datum(t),
                     align="center", font=("Courier", 14, "bold"))
        writer.forward(85)
        tracer(True)
        # In logo mode heading is degrees clockwise from north, so
        # 6 deg/second, 6 deg/minute, 30 deg/hour map directly.
        second_hand.setheading(6*sekunde)  # or here
        minute_hand.setheading(6*minute)
        hour_hand.setheading(30*stunde)
        tracer(True)  # (second tracer(True) is redundant but harmless)
        ontimer(tick, 100)
    except Terminator:
        pass  # turtledemo user pressed STOP
def main():
    """Build the clock and start the periodic update loop.

    Returns the string "EVENTLOOP" so the turtledemo framework knows it
    must enter its event loop.
    """
    tracer(False)
    setup()
    tracer(True)
    tick()
    return "EVENTLOOP"
if __name__ == "__main__":
    # Standalone run (outside turtledemo): select logo mode, start the
    # clock and hand control to the Tk event loop.
    mode("logo")
    msg = main()
    print(msg)
    mainloop()
|
pixelrebel/st2 | refs/heads/master | st2common/tests/unit/test_logging.py | 4 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.logging.misc import get_logger_name_for_module
from st2reactor.cmd import sensormanager
import python_runner
from st2common import runners
__all__ = [
'LoggingMiscUtilsTestCase'
]
class LoggingMiscUtilsTestCase(unittest2.TestCase):
    def test_get_logger_name_for_module(self):
        """get_logger_name_for_module() derives dotted logger names."""
        # Regular module: full dotted module path.
        sensormanager_name = get_logger_name_for_module(sensormanager)
        self.assertEqual(sensormanager_name, 'st2reactor.cmd.sensormanager')
        # Runner module: only the tail of the dotted path is stable, so
        # assert on the suffix.
        runner_name = get_logger_name_for_module(python_runner)
        self.assertTrue(runner_name.endswith('contrib.runners.python_runner.python_runner'))
        # Package: resolves to the package's __init__ module.
        package_name = get_logger_name_for_module(runners)
        self.assertEqual(package_name, 'st2common.runners.__init__')
|
tlatzko/spmcluster | refs/heads/master | .tox/2.6-nocov/lib/python2.6/site-packages/pip/req/req_file.py | 239 | """
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
# Matches URL-style values (used to decide how nested files resolve).
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
# A '#' comment preceded by start-of-line or whitespace, to end of line.
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
# Option factories that may appear anywhere in a requirements file and
# affect global parsing state / the package finder.
SUPPORTED_OPTIONS = [
    cmdoptions.constraints,
    cmdoptions.editable,
    cmdoptions.requirements,
    cmdoptions.no_index,
    cmdoptions.index_url,
    cmdoptions.find_links,
    cmdoptions.extra_index_url,
    cmdoptions.allow_external,
    cmdoptions.allow_all_external,
    cmdoptions.no_allow_external,
    cmdoptions.allow_unsafe,
    cmdoptions.no_allow_unsafe,
    cmdoptions.use_wheel,
    cmdoptions.no_use_wheel,
    cmdoptions.always_unzip,
    cmdoptions.no_binary,
    cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
    cmdoptions.install_options,
    cmdoptions.global_options
]
# the 'dest' string values (optparse destinations) for per-requirement options
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None, constraint=False, wheel_cache=None):
    """Parse a requirements file and yield InstallRequirement instances.

    :param filename: Path or url of requirements file.
    :param finder: Instance of pip.index.PackageFinder.
    :param comes_from: Origin description of requirements.
    :param options: Global options.
    :param session: Instance of pip.download.PipSession.
    :param constraint: If true, parsing a constraint file rather than
        requirements file.
    :param wheel_cache: Instance of pip.wheel.WheelCache
    :raises TypeError: if *session* is not supplied.
    """
    if session is None:
        raise TypeError(
            "parse_requirements() missing 1 required keyword argument: "
            "'session'"
        )
    # Works for local paths as well as http/https/file URLs.
    _, content = get_file_content(
        filename, comes_from=comes_from, session=session
    )
    # Normalize the raw text: drop comments/blank lines, merge
    # backslash-continued lines, then apply --skip-requirements-regex.
    lines = content.splitlines()
    lines = ignore_comments(lines)
    lines = join_lines(lines)
    lines = skip_regex(lines, options)
    # NOTE(review): line_number counts logical (filtered/joined) lines,
    # so it can differ from the physical line number in the file.
    for line_number, line in enumerate(lines, 1):
        req_iter = process_line(line, filename, line_number, finder,
                                comes_from, options, session, wheel_cache,
                                constraint=constraint)
        for req in req_iter:
            yield req
def process_line(line, filename, line_number, finder=None, comes_from=None,
                 options=None, session=None, wheel_cache=None,
                 constraint=False):
    """Process a single requirements line; This can result in creating/yielding
    requirements, or updating the finder.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all our parsed and
    affect the finder.

    :param line: one logical (already joined/filtered) requirements line.
    :param filename: file the line came from, used for error context and
        for resolving relative nested files / find-links.
    :param line_number: logical line number, used for error context.
    :param constraint: If True, parsing a constraints file.
    """
    parser = build_parser()
    defaults = parser.get_default_values()
    defaults.index_url = None
    if finder:
        # `finder.format_control` will be updated during parsing
        defaults.format_control = finder.format_control
    # Only the option part of the line is shlex'd; args may contain
    # markers that shlex would corrupt (see break_args_options).
    args_str, options_str = break_args_options(line)
    opts, _ = parser.parse_args(shlex.split(options_str), defaults)
    # preserve for the nested code path
    line_comes_from = '%s %s (line %s)' % (
        '-c' if constraint else '-r', filename, line_number)
    # yield a line requirement
    if args_str:
        isolated = options.isolated_mode if options else False
        if options:
            cmdoptions.check_install_build_global(options, opts)
        # get the options that apply to requirements
        req_options = {}
        for dest in SUPPORTED_OPTIONS_REQ_DEST:
            if dest in opts.__dict__ and opts.__dict__[dest]:
                req_options[dest] = opts.__dict__[dest]
        yield InstallRequirement.from_line(
            args_str, line_comes_from, constraint=constraint,
            isolated=isolated, options=req_options, wheel_cache=wheel_cache
        )
    # yield an editable requirement
    elif opts.editables:
        isolated = options.isolated_mode if options else False
        default_vcs = options.default_vcs if options else None
        yield InstallRequirement.from_editable(
            opts.editables[0], comes_from=line_comes_from,
            constraint=constraint, default_vcs=default_vcs, isolated=isolated,
            wheel_cache=wheel_cache
        )
    # parse a nested requirements file
    elif opts.requirements or opts.constraints:
        if opts.requirements:
            req_path = opts.requirements[0]
            nested_constraint = False
        else:
            req_path = opts.constraints[0]
            nested_constraint = True
        # original file is over http
        if SCHEME_RE.search(filename):
            # do a url join so relative paths work
            req_path = urllib_parse.urljoin(filename, req_path)
        # original file and nested file are paths
        elif not SCHEME_RE.search(req_path):
            # do a join so relative paths work
            # (was: recomputed os.path.dirname(filename) and left req_dir unused)
            req_dir = os.path.dirname(filename)
            req_path = os.path.join(req_dir, req_path)
        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
        parser = parse_requirements(
            req_path, finder, comes_from, options, session,
            constraint=nested_constraint, wheel_cache=wheel_cache
        )
        for req in parser:
            yield req
    # set finder options
    elif finder:
        if opts.index_url:
            finder.index_urls = [opts.index_url]
        if opts.use_wheel is False:
            finder.use_wheel = False
            pip.index.fmt_ctl_no_use_wheel(finder.format_control)
        if opts.no_index is True:
            finder.index_urls = []
        if opts.allow_all_external:
            finder.allow_all_external = opts.allow_all_external
        if opts.extra_index_urls:
            finder.index_urls.extend(opts.extra_index_urls)
        if opts.allow_external:
            finder.allow_external |= set(
                [normalize_name(v).lower() for v in opts.allow_external])
        if opts.allow_unverified:
            # Remove after 7.0
            finder.allow_unverified |= set(
                [normalize_name(v).lower() for v in opts.allow_unverified])
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            finder.find_links.append(value)
def break_args_options(line):
    """Break up the line into an args and options string. We only want to shlex
    (and then optparse) the options, not the args. args can contain markers
    which are corrupted by shlex.

    :param line: a single (logical) requirements-file line.
    :returns: an ``(args, options)`` pair of space-joined strings, split
        at the first token that starts with ``-``.
    """
    tokens = line.split(' ')
    args = []
    options = tokens[:]
    for token in tokens:
        # A '-' prefix test also covers '--'; the old extra
        # startswith('--') check was redundant.
        if token.startswith('-'):
            break
        else:
            args.append(token)
            options.pop(0)
    return ' '.join(args), ' '.join(options)
def build_parser():
    """
    Return a parser for parsing requirement lines
    """
    parser = optparse.OptionParser(add_help_option=False)
    # Register every supported option (global ones and per-requirement ones).
    for make_option in SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ:
        parser.add_option(make_option())
    # optparse calls sys.exit() on a parse error by default; raise our
    # own exception instead so callers can handle it.
    def parser_exit(self, msg):
        raise RequirementsFileParseError(msg)
    parser.exit = parser_exit
    return parser
def join_lines(iterator):
    """
    Joins a line ending in '\' with the previous line.

    Yields the logical (joined) lines; lines without a trailing
    backslash pass through unchanged.
    """
    lines = []
    for line in iterator:
        if not line.endswith('\\'):
            if lines:
                lines.append(line)
                yield ''.join(lines)
                lines = []
            else:
                yield line
        else:
            # Drop only the single trailing continuation backslash.
            # (Was line.strip('\\'), which also ate *leading*
            # backslashes, corrupting e.g. UNC-style values.)
            lines.append(line[:-1])
    # TODO: handle space after '\'.
    # TODO: handle '\' on last line.
def ignore_comments(iterator):
    """
    Strips and filters empty or commented lines.
    """
    for raw_line in iterator:
        # Cut off any '#' comment, then trim surrounding whitespace;
        # whatever remains (if anything) is a real content line.
        stripped = COMMENT_RE.sub('', raw_line).strip()
        if stripped:
            yield stripped
def skip_regex(lines, options):
    """
    Optionally exclude lines that match '--skip-requirements-regex'
    """
    pattern = options.skip_requirements_regex if options else None
    if pattern:
        # Drop every line the compiled pattern matches anywhere.
        matcher = re.compile(pattern).search
        lines = filterfalse(matcher, lines)
    return lines
|
gentoo/gentoo-keys | refs/heads/master | gkeys-ldap/gkeyldap/__init__.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Package version; bumped on release.
__version__ = '0.2'
# License identifier for the package.
__license__ = 'GPLv2'
|
lzambella/Qyoutube-dl | refs/heads/master | youtube_dl/extractor/revision3.py | 8 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_iso8601,
unescapeHTML,
qualities,
)
class Revision3IE(InfoExtractor):
    """Extractor for revision3.com and its sister sites (testtube.com,
    animalist.com).

    A URL may address a single episode or a whole show; show URLs are
    expanded into a playlist of per-episode results.
    """
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|testtube|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'id': '73034',
            'display_id': 'technobuffalo/5-google-predictions-for-2016',
            'ext': 'webm',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'duration': 187,
            'uploader': 'TechnoBuffalo',
            'uploader_id': 'technobuffalo',
        }
    }, {
        # Show URL: extraction produces a playlist of episodes.
        'url': 'http://testtube.com/brainstuff',
        'info_dict': {
            'id': '251',
            'title': 'BrainStuff',
            'description': 'Whether the topic is popcorn or particle physics, you can count on the HowStuffWorks team to explore-and explain-the everyday science in the world around us on BrainStuff.',
        },
        'playlist_mincount': 93,
    }, {
        'url': 'https://testtube.com/dnews/5-weird-ways-plants-can-eat-animals?utm_source=FB&utm_medium=DNews&utm_campaign=DNewsSocial',
        'info_dict': {
            'id': '60163',
            'display_id': 'dnews/5-weird-ways-plants-can-eat-animals',
            'duration': 275,
            'ext': 'webm',
            'title': '5 Weird Ways Plants Can Eat Animals',
            'description': 'Why have some plants evolved to eat meat?',
            'upload_date': '20150120',
            'timestamp': 1421763300,
            'uploader': 'DNews',
            'uploader_id': 'dnews',
        },
    }]
    # %s slots: domain, page path, domain again (as a query parameter).
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        """Fetch page metadata and return either a single video info
        dict (episode URL) or a playlist result (show URL)."""
        domain, display_id = re.match(self._VALID_URL, url).groups()
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)
        if page_info['data']['type'] == 'episode':
            # Single episode: query the playlist API for media/format info.
            episode_data = page_info['data']
            video_id = compat_str(episode_data['video']['data']['id'])
            video_data = self._download_json(
                'http://revision3.com/api/getPlaylist.json?api_key=%s&codecs=h264,vp8,theora&video_id=%s' % (self._API_KEY, video_id),
                video_id)['items'][0]
            # One format entry per (codec, quality); 'hls' entries expand
            # into the formats listed in the m3u8 manifest.
            formats = []
            for vcodec, media in video_data['media'].items():
                for quality_id, quality in media.items():
                    if quality_id == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            quality['url'], video_id, 'mp4',
                            'm3u8_native', m3u8_id='hls', fatal=False))
                    else:
                        formats.append({
                            'url': quality['url'],
                            'format_id': '%s-%s' % (vcodec, quality_id),
                            'tbr': int_or_none(quality.get('bitrate')),
                            'vcodec': vcodec,
                        })
            self._sort_formats(formats)
            # Rank thumbnails by their named size, smallest to largest.
            preference = qualities(['mini', 'small', 'medium', 'large'])
            thumbnails = [{
                'url': image_url,
                'id': image_id,
                'preference': preference(image_id)
            } for image_id, image_url in video_data.get('images', {}).items()]
            return {
                'id': video_id,
                'display_id': display_id,
                'title': unescapeHTML(video_data['title']),
                'description': unescapeHTML(video_data.get('summary')),
                'timestamp': parse_iso8601(episode_data.get('publishTime'), ' '),
                'author': episode_data.get('author'),
                'uploader': video_data.get('show', {}).get('name'),
                'uploader_id': video_data.get('show', {}).get('slug'),
                'duration': int_or_none(video_data.get('duration')),
                'thumbnails': thumbnails,
                'formats': formats,
            }
        else:
            # Show page: walk the paginated episode list until the
            # reported total is reached, then emit a playlist.
            show_data = page_info['show']['data']
            episodes_data = page_info['episodes']['data']
            num_episodes = page_info['meta']['totalEpisodes']
            processed_episodes = 0
            entries = []
            page_num = 1
            while True:
                entries.extend([self.url_result(
                    'http://%s/%s/%s' % (domain, display_id, episode['slug'])) for episode in episodes_data])
                processed_episodes += len(episodes_data)
                if processed_episodes == num_episodes:
                    break
                page_num += 1
                episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                    domain, display_id + '/' + compat_str(page_num), domain),
                    display_id)['episodes']['data']
            return self.playlist_result(
                entries, compat_str(show_data['id']),
                show_data.get('name'), show_data.get('summary'))
|
bilgili/nest-simulator | refs/heads/master | topology/pynest/tests/test_dumping.py | 13 | # -*- coding: utf-8 -*-
#
# test_dumping.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for topology hl_api dumping functions.
NOTE: These tests only test whether the code runs, it does not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
import sys
import os
import os.path
class PlottingTestCase(unittest.TestCase):
    """Smoke tests for topo.DumpLayerNodes / topo.DumpLayerConnections.

    These only verify that the dump calls complete without raising; the
    dumped file contents are not validated (see module docstring).
    NOTE(review): the class name says "Plotting" but these are dumping
    tests — possibly copied from the plotting test module.
    """
    def nest_tmpdir(self):
        """Loads temporary directory path from the environment variable, returns current directory otherwise"""
        if 'NEST_DATA_PATH' in os.environ:
            return os.environ['NEST_DATA_PATH']
        else:
            return '.'
    def test_DumpNodes(self):
        """Test dumping nodes."""
        ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
                 'extent': [2., 2.], 'edge_wrap': True}
        nest.ResetKernel()
        l = topo.CreateLayer(ldict)
        topo.DumpLayerNodes(l, os.path.join(self.nest_tmpdir(), 'test_DumpNodes.out.lyr') )
        # Reaching this line without an exception is the actual test.
        self.assertTrue(True)
    def test_DumpNodes2(self):
        """Test dumping nodes, two layers."""
        ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
                 'extent': [2., 2.], 'edge_wrap': True}
        nest.ResetKernel()
        l = topo.CreateLayer(ldict)
        # l*2 repeats the layer id sequence, exercising the
        # multi-layer code path of DumpLayerNodes.
        topo.DumpLayerNodes(l*2, os.path.join(self.nest_tmpdir(), 'test_DumpNodes2.out.lyr') )
        self.assertTrue(True)
    def test_DumpConns(self):
        """Test dumping connections."""
        ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
                 'extent': [2., 2.], 'edge_wrap': True}
        cdict = {'connection_type': 'divergent', 'mask': {'circular': {'radius': 1.}}}
        nest.ResetKernel()
        l = topo.CreateLayer(ldict)
        topo.ConnectLayers(l, l, cdict)
        topo.DumpLayerConnections(l, 'static_synapse', os.path.join(self.nest_tmpdir(), 'test_DumpConns.out.cnn') )
        self.assertTrue(True)
    def test_DumpConns2(self):
        """Test dumping connections, 2 layers."""
        ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
                 'extent': [2., 2.], 'edge_wrap': True}
        cdict = {'connection_type': 'divergent', 'mask': {'circular': {'radius': 1.}}}
        nest.ResetKernel()
        l = topo.CreateLayer(ldict)
        topo.ConnectLayers(l, l, cdict)
        topo.DumpLayerConnections(l*2, 'static_synapse', os.path.join(self.nest_tmpdir(), 'test_DumpConns2.out.cnn') )
        self.assertTrue(True)
def suite():
    """Collect all test* methods of PlottingTestCase into a TestSuite."""
    suite = unittest.makeSuite(PlottingTestCase,'test')
    return suite
if __name__ == "__main__":
    # Run the suite directly; afterwards show any matplotlib figures
    # that were produced (matplotlib is optional).
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
    try:
        import matplotlib.pyplot as plt
        plt.show()
    except ImportError:
        pass
|
lucidbard/NewsBlur | refs/heads/master | vendor/paypal/standard/conf.py | 31 | from django.conf import settings
class PayPalSettingsError(Exception):
    """Raised when the Django PayPal settings are missing or invalid."""
# Address that receives payments; a hard requirement, so plain attribute
# access (not getattr) is deliberate — it raises if the setting is absent.
RECEIVER_EMAIL = settings.PAYPAL_RECEIVER_EMAIL
# API Endpoints.
POSTBACK_ENDPOINT = "https://www.paypal.com/cgi-bin/webscr"
SANDBOX_POSTBACK_ENDPOINT = "https://www.sandbox.paypal.com/cgi-bin/webscr"
# Images
# Button image URLs; each may be overridden by the corresponding
# PAYPAL_* Django setting.
IMAGE = getattr(settings, "PAYPAL_IMAGE", "http://images.paypal.com/images/x-click-but01.gif")
SUBSCRIPTION_IMAGE = getattr(settings, "PAYPAL_SUBSCRIPTION_IMAGE",
                             "https://www.paypal.com/en_US/i/btn/btn_subscribeCC_LG.gif")
DONATION_IMAGE = getattr(settings, "PAYPAL_DONATION_IMAGE", "https://www.paypal.com/en_US/i/btn/btn_donateCC_LG.gif")
SANDBOX_IMAGE = getattr(settings, "PAYPAL_SANDBOX_IMAGE",
                        "https://www.sandbox.paypal.com/en_US/i/btn/btn_buynowCC_LG.gif")
SUBSCRIPTION_SANDBOX_IMAGE = getattr(settings, "PAYPAL_SUBSCRIPTION_SANDBOX_IMAGE",
                                     "https://www.sandbox.paypal.com/en_US/i/btn/btn_subscribeCC_LG.gif")
DONATION_SANDBOX_IMAGE = getattr(settings, "PAYPAL_DONATION_SANDBOX_IMAGE",
                                 "https://www.sandbox.paypal.com/en_US/i/btn/btn_donateCC_LG.gif")
|
zhulin2609/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py | 119 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import datetime
import StringIO
from .bugzilla import Bugzilla, BugzillaQueries, EditUsersParser
from webkitpy.common.config import urls
from webkitpy.common.config.committers import Reviewer, Committer, Contributor, CommitterList
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.web_mock import MockBrowser
from webkitpy.thirdparty.mock import Mock
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BugzillaTest(unittest.TestCase):
_example_attachment = '''
<attachment
isobsolete="1"
ispatch="1"
isprivate="0"
>
<attachid>33721</attachid>
<date>2009-07-29 10:23 PDT</date>
<desc>Fixed whitespace issue</desc>
<filename>patch</filename>
<type>text/plain</type>
<size>9719</size>
<attacher>christian.plesner.hansen@gmail.com</attacher>
<flag name="review"
id="17931"
status="+"
setter="one@test.com"
/>
<flag name="commit-queue"
id="17932"
status="+"
setter="two@test.com"
/>
</attachment>
'''
_expected_example_attachment_parsing = {
'attach_date': datetime.datetime(2009, 07, 29, 10, 23),
'bug_id' : 100,
'is_obsolete' : True,
'is_patch' : True,
'id' : 33721,
'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
'name' : "Fixed whitespace issue",
'type' : "text/plain",
'review' : '+',
'reviewer_email' : 'one@test.com',
'commit-queue' : '+',
'committer_email' : 'two@test.com',
'attacher_email' : 'christian.plesner.hansen@gmail.com',
}
    def test_url_creation(self):
        """None ids must map to None URLs rather than raising."""
        # FIXME: These would be all better as doctests
        bugs = Bugzilla()
        self.assertIsNone(bugs.bug_url_for_bug_id(None))
        self.assertIsNone(bugs.short_bug_url_for_bug_id(None))
        self.assertIsNone(bugs.attachment_url_for_id(None))
    def test_parse_bug_id(self):
        """Round-trip: parse_bug_id() recovers the id from every URL form
        we generate (short, long, and xml)."""
        # Test that we can parse the urls we produce.
        bugs = Bugzilla()
        self.assertEqual(12345, urls.parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
        self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345)))
        self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
_bug_xml = """
<bug>
<bug_id>32585</bug_id>
<creation_ts>2009-12-15 15:17 PST</creation_ts>
<short_desc>bug to test webkit-patch's and commit-queue's failures</short_desc>
<delta_ts>2009-12-27 21:04:50 PST</delta_ts>
<reporter_accessible>1</reporter_accessible>
<cclist_accessible>1</cclist_accessible>
<classification_id>1</classification_id>
<classification>Unclassified</classification>
<product>WebKit</product>
<component>Tools / Tests</component>
<version>528+ (Nightly build)</version>
<rep_platform>PC</rep_platform>
<op_sys>Mac OS X 10.5</op_sys>
<bug_status>NEW</bug_status>
<priority>P2</priority>
<bug_severity>Normal</bug_severity>
<target_milestone>---</target_milestone>
<everconfirmed>1</everconfirmed>
<reporter name="Eric Seidel">eric@webkit.org</reporter>
<assigned_to name="Nobody">webkit-unassigned@lists.webkit.org</assigned_to>
<cc>foo@bar.com</cc>
<cc>example@example.com</cc>
<long_desc isprivate="0">
<who name="Eric Seidel">eric@webkit.org</who>
<bug_when>2009-12-15 15:17:28 PST</bug_when>
<thetext>bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.</thetext>
</long_desc>
<attachment
isobsolete="0"
ispatch="1"
isprivate="0"
>
<attachid>45548</attachid>
<date>2009-12-27 23:51 PST</date>
<desc>Patch</desc>
<filename>bug-32585-20091228005112.patch</filename>
<type>text/plain</type>
<size>10882</size>
<attacher>mjs@apple.com</attacher>
<token>1261988248-dc51409e9c421a4358f365fa8bec8357</token>
<data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09
removed-because-it-was-really-long
ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==
</data>
<flag name="review"
id="27602"
status="?"
setter="mjs@apple.com"
/>
</attachment>
</bug>
"""
_single_bug_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd">
<bugzilla version="3.2.3"
urlbase="https://bugs.webkit.org/"
maintainer="admin@webkit.org"
exporter="eric@webkit.org"
>
%s
</bugzilla>
""" % _bug_xml
_expected_example_bug_parsing = {
"id" : 32585,
"title" : u"bug to test webkit-patch's and commit-queue's failures",
"cc_emails" : ["foo@bar.com", "example@example.com"],
"reporter_email" : "eric@webkit.org",
"assigned_to_email" : "webkit-unassigned@lists.webkit.org",
"bug_status": "NEW",
"attachments" : [{
"attach_date": datetime.datetime(2009, 12, 27, 23, 51),
'name': u'Patch',
'url' : "https://bugs.webkit.org/attachment.cgi?id=45548",
'is_obsolete': False,
'review': '?',
'is_patch': True,
'attacher_email': 'mjs@apple.com',
'bug_id': 32585,
'type': 'text/plain',
'id': 45548
}],
"comments" : [{
'comment_date': datetime.datetime(2009, 12, 15, 15, 17, 28),
'comment_email': 'eric@webkit.org',
'text': """bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.""",
}]
}
    # FIXME: This should move to a central location and be shared by more unit tests.
    def _assert_dictionaries_equal(self, actual, expected):
        """Assert two dicts have exactly the same keys and values,
        naming the offending key in the failure message."""
        # Make sure we aren't parsing more or less than we expect
        self.assertItemsEqual(actual.keys(), expected.keys())
        for key, expected_value in expected.items():
            self.assertEqual(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
    def test_parse_bug_dictionary_from_xml(self):
        """A single-bug XML export parses into the expected dictionary."""
        bug = Bugzilla()._parse_bug_dictionary_from_xml(self._single_bug_xml)
        self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing)
_sample_multi_bug_xml = """
<bugzilla version="3.2.3" urlbase="https://bugs.webkit.org/" maintainer="admin@webkit.org" exporter="eric@webkit.org">
%s
%s
</bugzilla>
""" % (_bug_xml, _bug_xml)
    def test_parse_bugs_from_xml(self):
        """A two-bug document yields two bugs; empty input yields none."""
        bugzilla = Bugzilla()
        bugs = bugzilla._parse_bugs_from_xml(self._sample_multi_bug_xml)
        self.assertEqual(len(bugs), 2)
        self.assertEqual(bugs[0].id(), self._expected_example_bug_parsing['id'])
        bugs = bugzilla._parse_bugs_from_xml("")
        self.assertEqual(len(bugs), 0)
    # This could be combined into test_bug_parsing later if desired.
    def test_attachment_parsing(self):
        """The sample <attachment> element parses into the expected dict."""
        bugzilla = Bugzilla()
        soup = BeautifulSoup(self._example_attachment)
        attachment_element = soup.find("attachment")
        attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
        self.assertTrue(attachment)
        self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing)
_sample_attachment_detail_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>
Attachment 41073 Details for Bug 27314</title>
<link rel="Top" href="https://bugs.webkit.org/">
<link rel="Up" href="show_bug.cgi?id=27314">
"""
    def test_attachment_detail_bug_parsing(self):
        """The owning bug id is scraped from an attachment detail page."""
        bugzilla = Bugzilla()
        self.assertEqual(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
    def test_add_cc_to_bug(self):
        """Adding a CC drives the mocked browser and logs the change."""
        bugzilla = Bugzilla()
        bugzilla.browser = MockBrowser()
        bugzilla.authenticate = lambda: None  # skip real login
        expected_logs = "Adding ['adam@example.com'] to the CC list for bug 42\n"
        OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["adam@example.com"]], expected_logs=expected_logs)
    def _mock_control_item(self, name):
        """Return a Mock form-control item carrying the given name."""
        mock_item = Mock()
        mock_item.name = name
        return mock_item
    def _mock_find_control(self, item_names=[], selected_index=0):
        """Return a fake browser.find_control callable whose control lists
        *item_names* with the item at *selected_index* selected.

        NOTE(review): the mutable [] default is safe here — it is only
        iterated, never mutated.
        """
        mock_control = Mock()
        mock_control.items = [self._mock_control_item(name) for name in item_names]
        mock_control.value = [item_names[selected_index]] if item_names else None
        return lambda name, type: mock_control
    def _assert_reopen(self, item_names=None, selected_index=None, extra_logs=None):
        """Drive Bugzilla.reopen_bug against a mocked browser whose status
        control shows *item_names*/*selected_index*, and verify the logs.
        """
        bugzilla = Bugzilla()
        bugzilla.browser = MockBrowser()
        bugzilla.authenticate = lambda: None  # skip real login
        mock_find_control = self._mock_find_control(item_names, selected_index)
        bugzilla.browser.find_control = mock_find_control
        expected_logs = "Re-opening bug 42\n['comment']\n"
        if extra_logs:
            expected_logs += extra_logs
        OutputCapture().assert_outputs(self, bugzilla.reopen_bug, [42, ["comment"]], expected_logs=expected_logs)
    def test_reopen_bug(self):
        # Closed statuses (RESOLVED selected) should be reopened.
        self._assert_reopen(item_names=["REOPENED", "RESOLVED", "CLOSED"], selected_index=1)
        self._assert_reopen(item_names=["UNCONFIRMED", "RESOLVED", "CLOSED"], selected_index=1)
        # An already-open bug is left alone and an extra message is logged.
        extra_logs = "Did not reopen bug 42, it appears to already be open with status ['NEW'].\n"
        self._assert_reopen(item_names=["NEW", "RESOLVED"], selected_index=0, extra_logs=extra_logs)
    def test_file_object_for_upload(self):
        """_file_object_for_upload passes file objects through unchanged
        and wraps str/unicode data as UTF-8 file-like objects."""
        bugzilla = Bugzilla()
        file_object = StringIO.StringIO()
        unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
        utf8_tor = unicode_tor.encode("utf-8")
        self.assertEqual(bugzilla._file_object_for_upload(file_object), file_object)
        self.assertEqual(bugzilla._file_object_for_upload(utf8_tor).read(), utf8_tor)
        self.assertEqual(bugzilla._file_object_for_upload(unicode_tor).read(), utf8_tor)
def test_filename_for_upload(self):
    """_filename_for_upload prefers the file's own name, else synthesizes one."""
    bugzilla = Bugzilla()
    mock_file = Mock()
    mock_file.name = "foo"
    # A named file keeps its name.
    self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo')
    # Anonymous files get a bug-id/timestamp/extension based name.
    mock_timestamp = lambda: "now"
    filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp)
    self.assertEqual(filename, "bug-1234-now.patch")
def test_commit_queue_flag(self):
    """Exhaustively check the commit-queue flag chosen for each committer role.

    Expected values: 'X' = no flag, '?' = request commit-queue, '+' = approve.
    Only committers and reviewers may set '+' directly; contributors and
    unknown users always fall back to '?' when any queueing is requested.
    """
    bugzilla = Bugzilla()
    bugzilla.committers = CommitterList(reviewers=[Reviewer("WebKit Reviewer", "reviewer@webkit.org")],
                                        committers=[Committer("WebKit Committer", "committer@webkit.org")],
                                        contributors=[Contributor("WebKit Contributor", "contributor@webkit.org")])

    def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
        # Capture (and discard) any log output so warnings don't pollute the run.
        bugzilla.username = username
        capture = OutputCapture()
        capture.capture_output()
        try:
            self.assertEqual(bugzilla._commit_queue_flag(mark_for_landing=mark_for_landing, mark_for_commit_queue=mark_for_commit_queue), expected)
        finally:
            capture.restore_output()

    # Unknown users can never set '+'.
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='unknown@webkit.org')
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='unknown@webkit.org')
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='unknown@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='unknown@webkit.org')

    # Contributors may request the queue but not approve.
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='contributor@webkit.org')
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='contributor@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='?', username='contributor@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='contributor@webkit.org')

    # Committers and reviewers may set '+' when marking for landing.
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='committer@webkit.org')
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='committer@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='committer@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='committer@webkit.org')

    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='reviewer@webkit.org')
    assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='reviewer@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='reviewer@webkit.org')
    assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='reviewer@webkit.org')
def test__check_create_bug_response(self):
    """The bug id must be parsed from both old and new Bugzilla title formats."""
    bugzilla = Bugzilla()
    # Bugzilla 3.2.3 style: bare "Bug NNN Submitted" title.
    title_html_bugzilla_323 = "<title>Bug 101640 Submitted</title>"
    self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_323), '101640')
    # Bugzilla 4.2.5 style: title carries an en-dash and the bug summary.
    title_html_bugzilla_425 = "<title>Bug 101640 Submitted &ndash; Testing webkit-patch again</title>"
    self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_425), '101640')
class BugzillaQueriesTest(unittest.TestCase):
    """Tests for BugzillaQueries' HTML scraping of request-queue, quip and
    search-result pages."""

    # Captured request.cgi output listing three review requests.
    _sample_request_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Request Queue</title>
</head>
<body>
<h3>Flag: review</h3>
<table class="requests" cellspacing="0" cellpadding="4" border="1">
<tr>
<th>Requester</th>
<th>Requestee</th>
<th>Bug</th>
<th>Attachment</th>
<th>Created</th>
</tr>
<tr>
<td>Shinichiro Hamaji &lt;hamaji&#64;chromium.org&gt;</td>
<td></td>
<td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
<td><a href="attachment.cgi?id=40511&amp;action=review">
40511: Patch v0</a></td>
<td>2009-10-02 04:58 PST</td>
</tr>
<tr>
<td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40722&amp;action=review">
40722: Media controls, the simple approach</a></td>
<td>2009-10-06 09:13 PST</td>
</tr>
<tr>
<td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40723&amp;action=review">
40723: Adjust the media slider thumb size</a></td>
<td>2009-10-06 09:15 PST</td>
</tr>
</table>
</body>
</html>
"""
    # Captured quips page; the last entry exercises non-ASCII handling.
    _sample_quip_page = u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Bugzilla Quip System</title>
</head>
<body>
<h2>
Existing quips:
</h2>
<ul>
<li>Everything should be made as simple as possible, but not simpler. - Albert Einstein</li>
<li>Good artists copy. Great artists steal. - Pablo Picasso</li>
<li>\u00e7gua mole em pedra dura, tanto bate at\u008e que fura.</li>
</ul>
</body>
</html>
"""

    def _assert_result_count(self, queries, html, count):
        """Assert that the given search-result HTML parses to `count` hits."""
        self.assertEqual(queries._parse_result_count(html), count)

    def test_parse_result_count(self):
        """Result counts appear as text ('314 bugs', 'Zarro Boogs', 'One bug')."""
        queries = BugzillaQueries(None)
        # Pages with results, always list the count at least twice.
        self._assert_result_count(queries, '<span class="bz_result_count">314 bugs found.</span><span class="bz_result_count">314 bugs found.</span>', 314)
        self._assert_result_count(queries, '<span class="bz_result_count">Zarro Boogs found.</span>', 0)
        self._assert_result_count(queries, '<span class="bz_result_count">\n \nOne bug found.</span>', 1)
        self.assertRaises(Exception, queries._parse_result_count, ['Invalid'])

    def test_request_page_parsing(self):
        """Attachment ids are extracted from every row of the request queue."""
        queries = BugzillaQueries(None)
        self.assertEqual([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page))

    def test_quip_page_parsing(self):
        """Quips are extracted in order, preserving non-ASCII characters."""
        queries = BugzillaQueries(None)
        expected_quips = ["Everything should be made as simple as possible, but not simpler. - Albert Einstein", "Good artists copy. Great artists steal. - Pablo Picasso", u"\u00e7gua mole em pedra dura, tanto bate at\u008e que fura."]
        self.assertEqual(expected_quips, queries._parse_quips(self._sample_quip_page))

    def test_load_query(self):
        """_load_query must accept a relative CGI URL without raising."""
        queries = BugzillaQueries(Mock())
        queries._load_query("request.cgi?action=queue&type=review&group=type")
class EditUsersParserTest(unittest.TestCase):
    """Tests for EditUsersParser's scraping of Bugzilla admin (editusers.cgi)
    pages: the user search-results table and a single user's edit page."""

    # Search-results HTML containing exactly one matching user.
    _example_user_results = """
<div id="bugzilla-body">
<p>1 user found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr>
<td >
<a href="editusers.cgi?action=edit&amp;userid=1234&amp;matchvalue=login_name&amp;groupid=&amp;grouprestrict=&amp;matchtype=substr&amp;matchstr=abarth%40webkit.org">
abarth&#64;webkit.org
</a>
</td>
<td >
Adam Barth
</td>
<td >
<a href="editusers.cgi?action=activity&amp;userid=1234&amp;matchvalue=login_name&amp;groupid=&amp;grouprestrict=&amp;matchtype=substr&amp;matchstr=abarth%40webkit.org">
View
</a>
</td>
</tr>
</table>
"""
    # Search-results HTML for a query that matched nobody.
    _example_empty_user_results = """
<div id="bugzilla-body">
<p>0 users found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr><td colspan="3" align="center"><i>&lt;none&gt;</i></td></tr>
</table>
"""

    def _assert_login_userid_pairs(self, results_page, expected_logins):
        """Parse a results page and compare the (login, userid) pairs."""
        parser = EditUsersParser()
        logins = parser.login_userid_pairs_from_edit_user_results(results_page)
        self.assertEqual(logins, expected_logins)

    def test_logins_from_editusers_results(self):
        """One-user and zero-user result pages parse to the right pair lists."""
        self._assert_login_userid_pairs(self._example_user_results, [("abarth@webkit.org", 1234)])
        self._assert_login_userid_pairs(self._example_empty_user_results, [])

    # Single-user edit page: two group checkboxes, only "canconfirm" checked.
    _example_user_page = """<table class="main"><tr>
<th><label for="login">Login name:</label></th>
<td>eric&#64;webkit.org
</td>
</tr>
<tr>
<th><label for="name">Real name:</label></th>
<td>Eric Seidel
</td>
</tr>
<tr>
<th>Group access:</th>
<td>
<table class="groups">
<tr>
</tr>
<tr>
<th colspan="2">User is a member of these groups</th>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_7"
name="group_7"
value="1" checked="checked" /></td>
<td class="groupname">
<label for="group_7">
<strong>canconfirm:</strong>
Can confirm a bug.
</label>
</td>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_6"
name="group_6"
value="1" /></td>
<td class="groupname">
<label for="group_6">
<strong>editbugs:</strong>
Can edit all aspects of any bug.
/label>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Product responsibilities:</th>
<td>
<em>none</em>
</td>
</tr>
</table>"""

    def test_user_dict_from_edit_user_page(self):
        """The edit page yields login, real name, and only the *checked* groups."""
        parser = EditUsersParser()
        user_dict = parser.user_dict_from_edit_user_page(self._example_user_page)
        expected_user_dict = {u'login': u'eric@webkit.org', u'groups': set(['canconfirm']), u'name': u'Eric Seidel'}
        self.assertEqual(expected_user_dict, user_dict)
|
ZhangXinNan/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/where_op_test.py | 7 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
  """Tests for tf.where: the one-argument form (indices of true elements)
  and the three-argument select form."""

  def _testWhere(self, x, truth, expected_err_re=None):
    """Evaluate tf.where(x) and compare to the expected index matrix.

    Args:
      x: numpy input array.
      truth: expected [num_true, rank] int64 index matrix.
      expected_err_re: if set, evaluation must raise an op error matching it.
    """
    with self.test_session(use_gpu=True):
      ans = array_ops.where(x)
      # Static shape is [None, rank]: num_true is only known at run time.
      self.assertEqual([None, x.ndim], ans.get_shape().as_list())
      if expected_err_re is None:
        tf_ans = ans.eval()
        self.assertAllClose(tf_ans, truth, atol=1e-10)
      else:
        with self.assertRaisesOpError(expected_err_re):
          ans.eval()

  def testWrongNumbers(self):
    """where() with exactly one of x/y supplied must be rejected."""
    with self.test_session(use_gpu=True):
      with self.assertRaises(ValueError):
        array_ops.where([False, True], [1, 2], None)
      with self.assertRaises(ValueError):
        array_ops.where([False, True], None, [1, 2])

  def testBasicVec(self):
    """Small hand-checked 1-D cases."""
    x = np.asarray([True, False])
    truth = np.asarray([[0]], dtype=np.int64)
    self._testWhere(x, truth)

    x = np.asarray([False, True, False])
    truth = np.asarray([[1]], dtype=np.int64)
    self._testWhere(x, truth)

    x = np.asarray([False, False, True, False, True])
    truth = np.asarray([[2], [4]], dtype=np.int64)
    self._testWhere(x, truth)

  def testRandomVec(self):
    """A large random boolean vector must agree with np.where."""
    x = np.random.rand(1000000) > 0.5
    truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
    self._testWhere(x, truth)

  def testBasicMat(self):
    x = np.asarray([[True, False], [True, False]])

    # Ensure RowMajor mode
    truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)

    self._testWhere(x, truth)

  def testBasic3Tensor(self):
    x = np.asarray([[[True, False], [True, False]],
                    [[False, True], [False, True]],
                    [[False, False], [False, True]]])

    # Ensure RowMajor mode
    truth = np.asarray(
        [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)

    self._testWhere(x, truth)

  def _testRandom(self, dtype, expected_err_re=None):
    """Compare tf.where to np.where on a random 0/1 tensor of `dtype`.

    Bug fix: the original first built a complex-valued random tensor and
    immediately overwrote it on the next line; that dead statement (which
    only burned RNG draws) has been removed.
    """
    shape = [127, 33, 53]
    x = (np.random.randn(*shape) > 0).astype(dtype)
    truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
    truth = np.vstack(truth).T  # Convert to [num_true, indices].
    self._testWhere(x, truth, expected_err_re)

  def testRandomBool(self):
    self._testRandom(np.bool)

  def testRandomInt32(self):
    self._testRandom(np.int32)

  def testRandomInt64(self):
    self._testRandom(np.int64)

  def testRandomFloat(self):
    self._testRandom(np.float32)

  def testRandomDouble(self):
    self._testRandom(np.float64)

  def testRandomComplex64(self):
    self._testRandom(np.complex64)

  def testRandomComplex128(self):
    self._testRandom(np.complex128)

  def testRandomUint8(self):
    self._testRandom(np.uint8)

  def testRandomInt8(self):
    self._testRandom(np.int8)

  def testRandomInt16(self):
    self._testRandom(np.int16)

  def testThreeArgument(self):
    """Three-argument where (select) must match np.where element-wise."""
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.test_session(use_gpu=True):
      tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)

  def testBatchSelect(self):
    """A vector condition must broadcast across the rows of matrix x/y."""
    x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192)  # [16384, 192]
    c_mat = np.array([[False] * 192, [True] * 192] * 8192)  # [16384, 192]
    c_vec = np.array([False, True] * 8192)  # [16384]
    np_val = np.where(c_mat, x * x, -x)
    with self.test_session(use_gpu=True):
      tf_val = array_ops.where(c_vec, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
  """Throughput benchmarks for tf.where (index extraction and batch select)."""

  def benchmarkWhere(self):
    # Sweep input width n, true-density p, and device placement.
    for (m, n, p, use_gpu) in itertools.product(
        [10],
        [10, 100, 1000, 10000, 100000, 1000000],
        [0.01, 0.5, 0.99],
        [False, True]):
      name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          # A random boolean matrix whose entries are True with probability p.
          x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
          v = resource_variable_ops.ResourceVariable(x)
          op = array_ops.where(v)
        with session.Session() as sess:
          v.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          gb_processed_input = m * n / 1.0e9
          # approximate size of output: m*n*p int64s for each axis.
          gb_processed_output = 2 * 8 * m * n * p / 1.0e9
          gb_processed = gb_processed_input + gb_processed_output
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()

  def benchmarkBatchSelect(self):
    for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
                                             [10, 100, 1000], [False, True]):
      name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          # x/y are the select branches; c is a per-row boolean condition.
          x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
          x = resource_variable_ops.ResourceVariable(x_gen)
          y = resource_variable_ops.ResourceVariable(y_gen)
          c = resource_variable_ops.ResourceVariable(c_gen)
          op = array_ops.where(c, x, y)
        with session.Session() as sess:
          x.initializer.run()
          y.initializer.run()
          c.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # approximate size of output: m*n*2 floats for each axis.
          gb_processed = m * n * 8 / 1.0e9
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
guoxiaoyong/simple-useful | refs/heads/master | cxx_learn/cronx/spider/spider_daily_ftse100.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2;
import re;
import string;
import sys;
from BeautifulSoup import BeautifulSoup
# Map three-letter English month abbreviations to zero-padded month numbers.
# Fix: the original dict contained a stray '' literal after the 'Dec' entry
# (harmless only because adjacent string literals concatenate); removed.
month_num = {
    'Jan': '01',
    'Feb': '02',
    'Mar': '03',
    'Apr': '04',
    'May': '05',
    'Jun': '06',
    'Jul': '07',
    'Aug': '08',
    'Sep': '09',
    'Oct': '10',
    'Nov': '11',
    'Dec': '12',
}


def process_date(raw_date):
    """Convert a Yahoo-style date such as 'Oct 2, 2009' to ISO 'YYYY-MM-DD'.

    The manual one-digit padding of the original is replaced by str.zfill,
    which behaves identically for 1- and 2-digit days.
    """
    month_name, day_part, year_str = raw_date.split(' ')
    month_str = month_num[month_name]
    day_str = day_part.rstrip(',').zfill(2)
    return year_str + '-' + month_str + '-' + day_str
def process_num(raw_num):
    """Strip thousands separators from a number string: '6,140.5' -> '6140.5'.

    Replaces the original hand-rolled split/concatenate loop with the
    equivalent (and linear-time) str.replace.
    """
    return raw_num.replace(',', '')
# Fetch the FTSE 100 historical-prices page from Yahoo Finance (Python 2:
# urllib2 + BeautifulSoup 3).
str_url = "http://finance.yahoo.com/q/hp?s=%5EFTSE+Historical+Prices";
req=urllib2.Request(str_url);
resp=urllib2.urlopen(req);
respHtml=resp.read();
HtmlEncoding = "UTF-8";
soup = BeautifulSoup(respHtml, fromEncoding=HtmlEncoding);
# The first data row of the yfnc_datamodoutline1 table is the most recent day.
# NOTE(review): this relies on Yahoo's exact page nesting; verify if it breaks.
tag_top = soup.find('table', {"class":"yfnc_datamodoutline1"});
tag_body = tag_top.contents[0].contents[0].contents[0];
# Columns are: date, open, high, low, close, volume.
str_date = process_date(tag_body.contents[1].contents[0].contents[0]);
open_price = process_num(tag_body.contents[1].contents[1].contents[0]);
high_price = process_num(tag_body.contents[1].contents[2].contents[0]);
low_price = process_num(tag_body.contents[1].contents[3].contents[0]);
close_price = process_num(tag_body.contents[1].contents[4].contents[0]);
volume = process_num(tag_body.contents[1].contents[5].contents[0]);
# Zero volume means no trading happened (weekend/holiday); skip writing then.
if volume != "0":
    daily_file = sys.argv[1];    # latest-day CSV, rewritten each run
    history_file = sys.argv[2];  # cumulative CSV, appended each run
    daily_fp = open(daily_file, 'w');
    history_fp = open(history_file, 'a');
    title_str = "Date,Open Price,High Price,Low Price,Close Price,Volume(GBP)\n";
    daily_fp.write(title_str);
    day_market_data = str_date+","+open_price+","+high_price+","+low_price+","+close_price+","+volume+'\n';
    daily_fp.write(day_market_data);
    history_fp.write(day_market_data);
    daily_fp.close();
    history_fp.close();
|
sestrella/ansible | refs/heads/devel | test/units/modules/network/fortios/test_fortios_firewall_ttl_policy.py | 21 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_ttl_policy
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Autouse fixture: replace the module's Connection class with a mock so
    no test opens a real connection."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_ttl_policy.Connection')
    return connection_class_mock


# NOTE(review): this passes the fixture *function* object (not a per-test mock
# instance) to FortiOSHandler; the handler is only used as an opaque handle in
# these tests, so it works, but it is worth confirming.
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_ttl_policy_creation(mocker):
    """state=present with a successful set() must report changed and no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ttl_policy': {
            'action': 'accept',
            'id': '4',
            'schedule': 'test_value_5',
            'srcintf': 'test_value_6',
            'status': 'enable',
            'ttl': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ttl_policy.fortios_firewall(input_data, fos_instance)

    # The payload sent to the device must equal the module arguments verbatim.
    expected_data = {
        'action': 'accept',
        'id': '4',
        'schedule': 'test_value_5',
        'srcintf': 'test_value_6',
        'status': 'enable',
        'ttl': 'test_value_8'
    }

    set_method_mock.assert_called_with('firewall', 'ttl-policy', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_firewall_ttl_policy_creation_fails(mocker):
    """A device-side error (HTTP 500) during creation must surface is_error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ttl_policy': {
            'action': 'accept',
            'id': '4',
            'schedule': 'test_value_5',
            'srcintf': 'test_value_6',
            'status': 'enable',
            'ttl': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ttl_policy.fortios_firewall(input_data, fos_instance)

    expected_data = {
        'action': 'accept',
        'id': '4',
        'schedule': 'test_value_5',
        'srcintf': 'test_value_6',
        'status': 'enable',
        'ttl': 'test_value_8'
    }

    # The call is still attempted; the failure is reported, not swallowed.
    set_method_mock.assert_called_with('firewall', 'ttl-policy', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_firewall_ttl_policy_removal(mocker):
    """state=absent with a successful delete() must report changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'firewall_ttl_policy': {
            'action': 'accept',
            'id': '4',
            'schedule': 'test_value_5',
            'srcintf': 'test_value_6',
            'status': 'enable',
            'ttl': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ttl_policy.fortios_firewall(input_data, fos_instance)

    # The mkey (object id) is computed internally, so only its presence matters.
    delete_method_mock.assert_called_with('firewall', 'ttl-policy', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_firewall_ttl_policy_deletion_fails(mocker):
    """A device-side error (HTTP 500) during deletion must surface is_error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'firewall_ttl_policy': {
            'action': 'accept',
            'id': '4',
            'schedule': 'test_value_5',
            'srcintf': 'test_value_6',
            'status': 'enable',
            'ttl': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ttl_policy.fortios_firewall(input_data, fos_instance)

    delete_method_mock.assert_called_with('firewall', 'ttl-policy', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_firewall_ttl_policy_idempotent(mocker):
    """HTTP 404 from set() signals 'already in desired state': no change, no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ttl_policy': {
            'action': 'accept',
            'id': '4',
            'schedule': 'test_value_5',
            'srcintf': 'test_value_6',
            'status': 'enable',
            'ttl': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ttl_policy.fortios_firewall(input_data, fos_instance)

    expected_data = {
        'action': 'accept',
        'id': '4',
        'schedule': 'test_value_5',
        'srcintf': 'test_value_6',
        'status': 'enable',
        'ttl': 'test_value_8'
    }

    set_method_mock.assert_called_with('firewall', 'ttl-policy', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    # Idempotence: the 404 is treated as "nothing to do", not as a failure.
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_firewall_ttl_policy_filter_foreign_attributes(mocker):
    """Unknown module arguments must be stripped before the payload is sent."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'firewall_ttl_policy': {
            'random_attribute_not_valid': 'tag',  # must NOT reach the device
            'action': 'accept',
            'id': '4',
            'schedule': 'test_value_5',
            'srcintf': 'test_value_6',
            'status': 'enable',
            'ttl': 'test_value_8'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_firewall_ttl_policy.fortios_firewall(input_data, fos_instance)

    # Expected payload omits the foreign attribute.
    expected_data = {
        'action': 'accept',
        'id': '4',
        'schedule': 'test_value_5',
        'srcintf': 'test_value_6',
        'status': 'enable',
        'ttl': 'test_value_8'
    }

    set_method_mock.assert_called_with('firewall', 'ttl-policy', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
|
Bismarrck/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/topk_op_test.py | 6 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TopK op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class TopKTest(test.TestCase):
def _validateTopK(self,
inputs,
k,
expected_values,
expected_indices,
sorted=True): # pylint: disable=redefined-builtin
np_expected_values = np.array(expected_values)
np_expected_indices = np.array(expected_indices)
with self.cached_session(use_gpu=True) as sess:
values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
values, indices = self.evaluate([values_op, indices_op])
self.assertShapeEqual(np_expected_values, values_op)
self.assertShapeEqual(np_expected_indices, indices_op)
if sorted:
self.assertAllClose(np_expected_values, values)
# Do some special casing of equality of indices: if indices
# are not the same, but values are floating type, ensure that
# the values are within epsilon of each other.
if not np.issubdtype(np_expected_values.dtype, np.floating):
# Values are not floating point type; check indices exactly
self.assertAllEqual(np_expected_indices, indices)
else:
# Values are floating point; indices may be swapped for
# values near each other.
indices_not_equal = np_expected_indices != indices
if np.any(indices_not_equal):
values_unsure = values[indices_not_equal]
expected_values_unsure = expected_values[indices_not_equal]
self.assertAllClose(expected_values_unsure, values_unsure)
else:
np_inputs = np.array(inputs)
# Check that the indices are valid.
for result_index, src_index in np.ndenumerate(indices):
value = values[result_index]
expected_value = np_inputs[result_index[0], src_index]
np.testing.assert_almost_equal(value, expected_value)
# Check that if two elements are equal, the lower-index element appears
# first.
shape = values.shape
for batch_index in range(shape[0]):
for index in range(shape[1] - 1):
if np.isclose(values[batch_index, index],
values[batch_index, index + 1]):
self.assertLess(indices[batch_index, index],
indices[batch_index, index + 1])
# Now check the results, ignoring order.
self.assertAllEqual(np.sort(np_expected_indices), np.sort(indices))
self.assertAllClose(np.sort(np_expected_values), np.sort(values))
def testTop1(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]])
def testTop2(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]]
self._validateTopK(inputs, 2, [[0.4, 0.3], [0.4, 0.3]], [[3, 1], [2, 1]])
def testTop3(self):
k = 5
inputs = np.random.permutation(np.linspace(0, 100, 6140, dtype=np.float64))
indices = np.argsort(-inputs)[:k]
values = -np.sort(-inputs)[:k]
self._validateTopK(inputs, k, values, indices)
def _testLargeSort(self, dtype):
b = 10
n = 5000
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)
values = -np.sort(-inputs, axis=1)
self._validateTopK(inputs, n, values, indices)
def testLargeSort(self):
self._testLargeSort(np.float32)
self._testLargeSort(np.float16)
def _testLargeTopK(self, dtype):
b = 10
n = 5000
k = n - 1
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testLargeTopK(self):
self._testLargeTopK(np.float32)
self._testLargeTopK(np.float16)
def _testMediumTopK(self, dtype):
b = 5
n = 500
k = 50
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testMediumTopK(self):
self._testMediumTopK(np.float32)
self._testMediumTopK(np.float16)
def testStableSort(self):
b = 5
n = 500
for k in [1, 5, 50, 500]:
# Lots of repeated integers taking values in [0, 3]
inputs = np.random.permutation(
np.linspace(0, 3, b * n, dtype=np.int32)).reshape(b, n)
# Use mergesort, a stable sort, to get the indices.
indices = np.argsort(-inputs, axis=1, kind="mergesort")[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testTopAll(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 4, [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
[[3, 1, 2, 0], [1, 2, 3, 0]])
def testTop3Unsorted(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.4, 0.3, 0.2]]
self._validateTopK(
inputs,
3, [[0.2, 0.3, 0.4], [0.2, 0.4, 0.3]], [[2, 1, 3], [3, 1, 2]],
sorted=False)
def testTop3Vector(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
self._validateTopK(inputs, 3, [19, 18, 17], [11, 3, 7])
def testTensorK(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
k = constant_op.constant(3)
self._validateTopK(inputs, k, [19, 18, 17], [11, 3, 7])
@test_util.run_deprecated_v1
def testKNegative(self):
  """A negative k is rejected at run time with a descriptive op error."""
  inputs = [[0.1, 0.2], [0.3, 0.4]]
  with self.session(use_gpu=True):
    # k is fed through a placeholder, so validation can only happen when
    # the kernel executes -- not at graph-construction time.
    k = array_ops.placeholder(dtypes.int32)
    values, _ = nn_ops.top_k(inputs, k)
    with self.assertRaisesOpError("Need k >= 0, got -7"):
      values.eval(feed_dict={k: -7})
@test_util.run_deprecated_v1
def testKTooLarge(self):
  """k larger than the last dimension is rejected at graph-build time."""
  inputs = [[0.1, 0.2], [0.3, 0.4]]
  expected_message = r"must have last dimension >= k = 4"
  with self.assertRaisesRegexp(ValueError, expected_message):
    nn_ops.top_k(inputs, 4)
@test_util.run_deprecated_v1
def testTopKGradients(self):
  """Gradients flow only to the input positions selected by top_k."""
  with self.session(use_gpu=True) as sess:
    inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5])
    values, _ = nn_ops.top_k(inputs, 3)
    # Each upstream gradient lands on the input element that produced the
    # corresponding top-k value; every other position receives zero.
    grad = sess.run(
        gradients_impl.gradients(
            values, inputs, grad_ys=[[[1., 2., 3.], [4., 5., 6.]]]),
        feed_dict={inputs: [[2., -1., 1000., 3., 4.],
                            [1., 5., 2., 4., 3.]]})[0]
    self.assertEqual(
        grad.tolist(), [[0., 0., 1., 3., 2.], [0., 4., 0., 5., 6.]])
class TopKBenchmark(test.Benchmark):
  """Throughput benchmark for top_k across sizes, k ratios, and devices."""

  def benchmarkTopK(self):
    # m: batch, n: row width, p: fraction of n taken as k.
    for (m, n, p, use_gpu) in itertools.product(
        [128],
        [10, 100, 1000, 10000, 100000],
        [0.001, 0.01, 0.5, 0.99, 1.0],
        [False, True]):
      k = int(p * n)
      if k == 0:
        continue
      name = "m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x = random_ops.random_uniform((m, n))
          # Read from a variable so the input is materialized on-device.
          v = resource_variable_ops.ResourceVariable(x)
          op = nn_ops.top_k(v, k)
        with session.Session() as sess:
          v.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # Throughput counts input elements only (4-byte floats implied
          # by random_uniform's default dtype are not factored in).
          gb_processed_input = m * n / 1.0e9
          throughput = gb_processed_input / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
pedro2d10/SickRage-FR | refs/heads/develop | sickbeard/common.py | 1 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io/
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Common interface for Quality and Status
"""
# pylint: disable=line-too-long
import operator
from os import path
import platform
import re
import uuid
from hachoir_parser import createParser # pylint: disable=import-error
from hachoir_metadata import extractMetadata # pylint: disable=import-error
from hachoir_core.log import log # pylint: disable=import-error
from fake_useragent import settings as UA_SETTINGS, UserAgent
from sickbeard.numdict import NumDict
from sickrage.helper.encoding import ek
from sickrage.helper.common import try_int
from sickrage.tagger.episode import EpisodeTags, Episode_fr
from sickrage.recompiled import tags
# If some provider has an issue with functionality of SR, other than user agents, it's best to come talk to us rather than block.
# It is no different than us going to a provider if we have questions or issues. Be a team player here.
# This is disabled, was only added for testing, and has no config.ini or web ui setting. To enable, set SPOOF_USER_AGENT = True
SPOOF_USER_AGENT = False

# Unique id for this running process; embedded in the default user agent.
INSTANCE_ID = str(uuid.uuid1())
USER_AGENT = ('SickRage/(' + platform.system() + '; ' + platform.release() + '; ' + INSTANCE_ID + ')')

# Point fake_useragent at the bundled UA database instead of the network.
UA_SETTINGS.DB = ek(path.abspath, ek(path.join, ek(path.dirname, __file__), '../lib/fake_useragent/ua.json'))
UA_POOL = UserAgent()
if SPOOF_USER_AGENT:
    USER_AGENT = UA_POOL.random

# Throttle levels used by the scheduler (higher = more CPU allowed).
cpu_presets = {
    'HIGH': 5,
    'NORMAL': 2,
    'LOW': 1
}
# Other constants
# Sentinel search-result markers (not real episode numbers).
MULTI_EP_RESULT = -1
SEASON_RESULT = -2

# Notification Types
NOTIFY_SNATCH = 1
NOTIFY_DOWNLOAD = 2
NOTIFY_SUBTITLE_DOWNLOAD = 3
NOTIFY_GIT_UPDATE = 4
NOTIFY_GIT_UPDATE_TEXT = 5
NOTIFY_LOGIN = 6
NOTIFY_LOGIN_TEXT = 7

# Human-readable titles/templates for each notification type.
notifyStrings = NumDict({
    NOTIFY_SNATCH: "Started Download",
    NOTIFY_DOWNLOAD: "Download Finished",
    NOTIFY_SUBTITLE_DOWNLOAD: "Subtitle Download Finished",
    NOTIFY_GIT_UPDATE: "SickRage Updated",
    NOTIFY_GIT_UPDATE_TEXT: "SickRage Updated To Commit#: ",
    NOTIFY_LOGIN: "SickRage new login",
    NOTIFY_LOGIN_TEXT: "New login from IP: {0}. http://geomaplookup.net/?ip={0}"
})
# Episode statuses
UNKNOWN = -1  # should never happen
UNAIRED = 1  # episodes that haven't aired yet
SNATCHED = 2  # qualified with quality
WANTED = 3  # episodes we don't have but want to get
DOWNLOADED = 4  # qualified with quality
SKIPPED = 5  # episodes we don't want
ARCHIVED = 6  # episodes that you don't have locally (counts toward download completion stats)
IGNORED = 7  # episodes that you don't want included in your download stats
SNATCHED_PROPER = 9  # qualified with quality
SUBTITLED = 10  # qualified with quality
FAILED = 11  # episode downloaded or snatched we don't want
SNATCHED_BEST = 12  # episode re-downloaded using best quality
SNATCHED_FRENCH = 40  # episode re-download in french

# Bit flags for multi-episode file naming styles.
NAMING_REPEAT = 1
NAMING_EXTEND = 2
NAMING_DUPLICATE = 4
NAMING_LIMITED_EXTEND = 8
NAMING_SEPARATED_REPEAT = 16
NAMING_LIMITED_EXTEND_E_PREFIXED = 32

MULTI_EP_STRINGS = NumDict({
    NAMING_REPEAT: "Repeat",
    NAMING_SEPARATED_REPEAT: "Repeat (Separated)",
    NAMING_DUPLICATE: "Duplicate",
    NAMING_EXTEND: "Extend",
    NAMING_LIMITED_EXTEND: "Extend (Limited)",
    NAMING_LIMITED_EXTEND_E_PREFIXED: "Extend (Limited, E-prefixed)"
})
def scene_french(name, anime=False):
    """
    Detect the language of a scene release from its file name.

    Note: the original docstring claimed a Quality was returned; this
    function actually returns an ISO 639-2 language code.

    :param name: Episode filename to analyse
    :param anime: Boolean to indicate if the show we're resolving is Anime (currently unused)
    :return: 'fre' if the release carries French tags, 'eng' otherwise, '' for an empty name
    """
    if not name:
        return ""
    name = ek(path.basename, name)
    # Episode_fr parses French-language tags out of the basename.
    ep = Episode_fr(name)
    return 'fre' if ep.french else 'eng'
class Quality(object):
    """
    Determine quality and set status codes
    """

    # Each quality is a single bit so qualities can be OR-ed into bitmasks
    # (see combineQualities/splitQuality below).
    NONE = 0  # 0
    SDTV = 1  # 1
    SDDVD = 1 << 1  # 2
    HDTV = 1 << 2  # 4
    RAWHDTV = 1 << 3  # 8 -- 720p/1080i mpeg2 (trollhd releases)
    FULLHDTV = 1 << 4  # 16 -- 1080p HDTV (QCF releases)
    HDWEBDL = 1 << 5  # 32
    FULLHDWEBDL = 1 << 6  # 64 -- 1080p web-dl
    HDBLURAY = 1 << 7  # 128
    FULLHDBLURAY = 1 << 8  # 256
    UHD_4K_TV = 1 << 9  # 512 -- 2160p aka 4K UHD aka UHD-1
    UHD_4K_WEBDL = 1 << 10  # 1024
    UHD_4K_BLURAY = 1 << 11  # 2048
    UHD_8K_TV = 1 << 12  # 4096 -- 4320p aka 8K UHD aka UHD-2
    UHD_8K_WEBDL = 1 << 13  # 8192
    UHD_8K_BLURAY = 1 << 14  # 16384

    # Convenience masks combining the HD variants of each source type.
    ANYHDTV = HDTV | FULLHDTV  # 20
    ANYWEBDL = HDWEBDL | FULLHDWEBDL  # 96
    ANYBLURAY = HDBLURAY | FULLHDBLURAY  # 384

    # put these bits at the other end of the spectrum, far enough out that they shouldn't interfere
    UNKNOWN = 1 << 15  # 32768

    # Display names shown in the UI.
    qualityStrings = NumDict({
        None: "None",
        NONE: "N/A",
        UNKNOWN: "Unknown",
        SDTV: "SDTV",
        SDDVD: "SD DVD",
        HDTV: "720p HDTV",
        RAWHDTV: "RawHD",
        FULLHDTV: "1080p HDTV",
        HDWEBDL: "720p WEB-DL",
        FULLHDWEBDL: "1080p WEB-DL",
        HDBLURAY: "720p BluRay",
        FULLHDBLURAY: "1080p BluRay",
        UHD_4K_TV: "4K UHD TV",
        UHD_8K_TV: "8K UHD TV",
        UHD_4K_WEBDL: "4K UHD WEB-DL",
        UHD_8K_WEBDL: "8K UHD WEB-DL",
        UHD_4K_BLURAY: "4K UHD BluRay",
        UHD_8K_BLURAY: "8K UHD BluRay",
    })

    # Names as they appear in scene release tags (used for renaming).
    sceneQualityStrings = NumDict({
        None: "None",
        NONE: "N/A",
        UNKNOWN: "Unknown",
        SDTV: "HDTV",
        SDDVD: "",
        HDTV: "720p HDTV",
        RAWHDTV: "1080i HDTV",
        FULLHDTV: "1080p HDTV",
        HDWEBDL: "720p WEB-DL",
        FULLHDWEBDL: "1080p WEB-DL",
        HDBLURAY: "720p BluRay",
        FULLHDBLURAY: "1080p BluRay",
        UHD_4K_TV: "4K UHD TV",
        UHD_8K_TV: "8K UHD TV",
        UHD_4K_WEBDL: "4K UHD WEB-DL",
        UHD_8K_WEBDL: "8K UHD WEB-DL",
        UHD_4K_BLURAY: "4K UHD BluRay",
        UHD_8K_BLURAY: "8K UHD BluRay",
    })

    # Labels for the combined (ANY*) masks.
    combinedQualityStrings = NumDict({
        ANYHDTV: "HDTV",
        ANYWEBDL: "WEB-DL",
        ANYBLURAY: "BluRay"
    })

    # CSS class per quality, used by the web UI templates.
    cssClassStrings = NumDict({
        None: "None",
        NONE: "N/A",
        UNKNOWN: "Unknown",
        SDTV: "SDTV",
        SDDVD: "SDDVD",
        HDTV: "HD720p",
        RAWHDTV: "RawHD",
        FULLHDTV: "HD1080p",
        HDWEBDL: "HD720p",
        FULLHDWEBDL: "HD1080p",
        HDBLURAY: "HD720p",
        FULLHDBLURAY: "HD1080p",
        UHD_4K_TV: "UHD-4K",
        UHD_8K_TV: "UHD-8K",
        UHD_4K_WEBDL: "UHD-4K",
        UHD_8K_WEBDL: "UHD-8K",
        UHD_4K_BLURAY: "UHD-4K",
        UHD_8K_BLURAY: "UHD-8K",
        ANYHDTV: "any-hd",
        ANYWEBDL: "any-hd",
        ANYBLURAY: "any-hd"
    })

    # Readable prefixes for the episode statuses that carry a quality.
    statusPrefixes = NumDict({
        DOWNLOADED: "Downloaded",
        SNATCHED: "Snatched",
        SNATCHED_PROPER: "Snatched (Proper)",
        FAILED: "Failed",
        SNATCHED_BEST: "Snatched (Best)",
        ARCHIVED: "Archived"
    })
@staticmethod
def _getStatusStrings(status):
    """
    Build a mapping of composite status codes to readable strings.

    :param status: Status prefix to resolve
    :return: dict mapping composite code -> "Prefix (Quality)" string
    """
    prefix = Quality.statusPrefixes[status]
    strings = {}
    for quality in Quality.qualityStrings:
        # The None pseudo-quality has no composite representation.
        if quality is None:
            continue
        composite = Quality.compositeStatus(status, quality)
        strings[composite] = '%s (%s)' % (prefix, Quality.qualityStrings[quality])
    return strings
@staticmethod
def combineQualities(allowed_qualities, preferred_qualities):
    """Pack allowed (low 16 bits) and preferred (high 16 bits) quality masks
    into a single integer."""
    allowed_mask = reduce(operator.or_, allowed_qualities) if allowed_qualities else 0
    preferred_mask = reduce(operator.or_, preferred_qualities) if preferred_qualities else 0
    return allowed_mask | (preferred_mask << 16)
@staticmethod
def splitQuality(quality):
    """Unpack a combined quality mask into sorted (allowed, preferred) lists.

    The low 16 bits hold the allowed qualities and the high 16 bits the
    preferred ones (the inverse of combineQualities)."""
    if quality is None:
        quality = Quality.NONE
    allowed, preferred = [], []
    for candidate in Quality.qualityStrings:
        if candidate is None:
            candidate = Quality.NONE
        if candidate & quality:
            allowed.append(candidate)
        if (candidate << 16) & quality:
            preferred.append(candidate)
    return sorted(allowed), sorted(preferred)
@staticmethod
def nameQuality(name, anime=False):
    """
    Resolve the quality of an episode file from its name.

    Scene naming conventions are tried first; if they yield nothing the
    file-extension/metadata heuristics are used as a fallback.

    :param name: to parse
    :param anime: Boolean to indicate if the show we're resolving is Anime
    :return: Quality value (Quality.UNKNOWN when nothing matches)
    """
    quality = Quality.sceneQuality(name, anime)
    if quality == Quality.UNKNOWN:
        quality = Quality.assumeQuality(name)
    return quality
@staticmethod
# TODO: Remove this method and sceneQuality after the new scene_quality has been validated.
def old_scene_quality(name, anime=False):  # pylint: disable=too-many-branches
    """
    Return The quality from the scene episode File

    Legacy regex-based implementation, kept only so sceneQuality() can
    cross-check the new scene_quality() against it.

    :param name: Episode filename to analyse
    :param anime: Boolean to indicate if the show we're resolving is Anime
    :return: Quality prefix
    """
    ret = Quality.UNKNOWN
    if not name:
        return ret

    name = ek(path.basename, name)

    # check_name(regexes, any|all): match each regex against the basename,
    # then combine the hits with the given aggregator.
    check_name = lambda regex_list, func: func([re.search(regex, name, re.I) for regex in regex_list])

    if anime:
        dvd_options = check_name([r"dvd", r"dvdrip"], any)
        bluray_options = check_name([r"BD", r"blue?-?ray"], any)
        sd_options = check_name([r"360p", r"480p", r"848x480", r"XviD"], any)
        hd_options = check_name([r"720p", r"1280x720", r"960x720"], any)
        full_hd = check_name([r"1080p", r"1920x1080"], any)

        if sd_options and not bluray_options and not dvd_options:
            ret = Quality.SDTV
        elif dvd_options:
            ret = Quality.SDDVD
        elif hd_options and not bluray_options and not full_hd:
            ret = Quality.HDTV
        elif full_hd and not bluray_options and not hd_options:
            ret = Quality.FULLHDTV
        # NOTE(review): this condition is identical to the HDTV branch above,
        # so this HDWEBDL branch is unreachable. Left untouched because the
        # method only exists to validate the replacement implementation.
        elif hd_options and not bluray_options and not full_hd:
            ret = Quality.HDWEBDL
        elif bluray_options and hd_options and not full_hd:
            ret = Quality.HDBLURAY
        elif bluray_options and full_hd and not hd_options:
            ret = Quality.FULLHDBLURAY
        return ret

    # Non-anime: ordered from SD up to 1080p BluRay; first match wins.
    if check_name([r"480p|web.?dl|web(rip|mux|hd)|[sph]d.?tv|dsr|tv(rip|mux)|satrip", r"xvid|divx|[xh].?26[45]"], all) and not check_name([r"(720|1080)[pi]"], all) and not check_name([r"hr.ws.pdtv.[xh].?26[45]", r"dvd(rip|mux)|b[rd](rip|mux)|blue?-?ray"], any):
        ret = Quality.SDTV
    elif check_name([r"dvd(rip|mux)|b[rd](rip|mux)|blue?-?ray", r"xvid|divx|[xh].?26[45]"], all) and not check_name([r"(720|1080)[pi]"], all) and not check_name([r"hr.ws.pdtv.[xh].?26[45]"], any):
        ret = Quality.SDDVD
    elif check_name([r"720p", r"hd.?tv", r"[xh].?26[45]"], all) or check_name([r"hr.ws.pdtv.[xh].?26[45]"], any) and not check_name([r"1080[pi]"], all):
        ret = Quality.HDTV
    elif check_name([r"720p|1080i", r"hd.?tv", r"mpeg-?2"], all) or check_name([r"1080[pi].hdtv", r"h.?26[45]"], all):
        ret = Quality.RAWHDTV
    elif check_name([r"1080p", r"hd.?tv", r"[xh].?26[45]"], all):
        ret = Quality.FULLHDTV
    elif check_name([r"720p", r"web.?dl|web(rip|mux|hd)"], all) or check_name([r"720p", r"itunes", r"[xh].?26[45]"], all):
        ret = Quality.HDWEBDL
    elif check_name([r"1080p", r"web.?dl|web(rip|mux|hd)"], all) or check_name([r"1080p", r"itunes", r"[xh].?26[45]"], all):
        ret = Quality.FULLHDWEBDL
    elif check_name([r"720p", r"blue?-?ray|hddvd|b[rd](rip|mux)", r"[xh].?26[45]"], all):
        ret = Quality.HDBLURAY
    elif check_name([r"1080p", r"blue?-?ray|hddvd|b[rd](rip|mux)", r"[xh].?26[45]"], all):
        ret = Quality.FULLHDBLURAY
    return ret
@staticmethod
def scene_quality(name, anime=False):  # pylint: disable=too-many-branches
    """
    Return The quality from the scene episode File

    Tag-based implementation: EpisodeTags parses resolution, scan type,
    codec and source markers out of the basename.

    :param name: Episode filename to analyse
    :param anime: Boolean to indicate if the show we're resolving is Anime
    :return: Quality
    """
    # NOTE(review): nesting below was reconstructed from indentation-mangled
    # source -- verify against upstream before relying on branch structure.
    if not name:
        return Quality.UNKNOWN
    else:
        name = ek(path.basename, name)

    result = None
    ep = EpisodeTags(name)

    if anime:
        sd_options = tags.anime_sd.search(name)
        hd_options = tags.anime_hd.search(name)
        full_hd = tags.anime_fullhd.search(name)
        # Anime releases use a different BluRay tag convention.
        ep.rex[u'bluray'] = tags.anime_bluray

        # BluRay
        if ep.bluray and (full_hd or hd_options):
            result = Quality.FULLHDBLURAY if full_hd else Quality.HDBLURAY
        # HD TV
        elif not ep.bluray and (full_hd or hd_options):
            result = Quality.FULLHDTV if full_hd else Quality.HDTV
        # SD DVD
        elif ep.dvd:
            result = Quality.SDDVD
        # SD TV
        elif sd_options:
            result = Quality.SDTV

        return Quality.UNKNOWN if result is None else result

    # Is it UHD?
    if ep.vres in [2160, 4320] and ep.scan == u'p':
        # BluRay
        full_res = (ep.vres == 4320)
        if ep.avc and ep.bluray:
            result = Quality.UHD_4K_BLURAY if not full_res else Quality.UHD_8K_BLURAY
        # WEB-DL
        elif (ep.avc and ep.itunes) or ep.web:
            result = Quality.UHD_4K_WEBDL if not full_res else Quality.UHD_8K_WEBDL
        # HDTV
        elif ep.avc and ep.tv == u'hd':
            result = Quality.UHD_4K_TV if not full_res else Quality.UHD_8K_TV
    # Is it HD?
    elif ep.vres in [1080, 720]:
        if ep.scan == u'p':
            # BluRay
            full_res = (ep.vres == 1080)
            if ep.avc and (ep.bluray or ep.hddvd):
                result = Quality.FULLHDBLURAY if full_res else Quality.HDBLURAY
            # WEB-DL
            elif (ep.avc and ep.itunes) or ep.web:
                result = Quality.FULLHDWEBDL if full_res else Quality.HDWEBDL
            # HDTV
            elif ep.avc and ep.tv == u'hd':
                # Raw 1080p non-free-codec captures are classed as RawHD.
                if not all([ep.vres == 1080, ep.raw, ep.avc_non_free]):
                    result = Quality.FULLHDTV if full_res else Quality.HDTV
                else:
                    result = Quality.RAWHDTV
            elif all([ep.vres == 720, ep.tv == u'hd', ep.mpeg]):
                result = Quality.RAWHDTV
        elif (ep.res == u'1080i') and ep.tv == u'hd':
            if ep.mpeg or (ep.raw and ep.avc_non_free):
                result = Quality.RAWHDTV
            elif ep.hrws:
                result = Quality.HDTV
    # Is it SD?
    elif ep.xvid or ep.avc:
        # SD DVD
        if ep.dvd or ep.bluray:
            result = Quality.SDDVD
        # SDTV
        elif ep.res == u'480p' or any([ep.tv, ep.sat, ep.web]):
            result = Quality.SDTV

    return Quality.UNKNOWN if result is None else result
@staticmethod
def scene_french(name, anime=False):  # pylint: disable=too-many-branches
    """
    Parse French-language tags from a scene release name.

    Fixes: added the missing @staticmethod (without it, on Python 2 this
    is an unbound method and calling Quality.scene_french(name) raises a
    TypeError with the filename bound to self); removed the dead
    `result = None`; corrected the docstring, which claimed a Quality was
    returned -- this method returns the Episode_fr tag object (unlike the
    module-level scene_french(), which returns 'fre'/'eng').

    :param name: Episode filename to analyse
    :param anime: Boolean to indicate if the show we're resolving is Anime (currently unused)
    :return: Episode_fr object for the basename, or "" for an empty name
    """
    if not name:
        return ""
    name = ek(path.basename, name)
    return Episode_fr(name)
@staticmethod
# TODO: Remove this method and old_scene_quality after the new scene_quality has been validated.
def sceneQuality(name, anime=False):
    """
    Validation for new scene_quality.

    :param name: Episode filename to analyse
    :param anime: Boolean to indicate if the show we're resolving is Anime
    :return: Quality
    """
    # use the new scene_quality to determine quality
    result = Quality.scene_quality(name, anime)
    # if its a quality known by old_scene_quality assert they match
    # NOTE(review): this assert disappears when Python runs with -O, and it
    # raises AssertionError on any disagreement -- acceptable only because
    # this method is explicitly temporary (see TODO above).
    if result <= Quality.FULLHDBLURAY or result == Quality.UNKNOWN:
        old = Quality.old_scene_quality(name, anime)
        assert old == result, 'Old quality does not match new: %s != %s : %s' % (Quality.qualityStrings[old], Quality.qualityStrings[result], name)
    return result
@staticmethod
def assumeQuality(name):
    """
    Guess a quality from file metadata or the extension when name parsing failed.

    :param name: File name of episode to analyse
    :return: Quality value (Quality.UNKNOWN when nothing matches)
    """
    quality = Quality.qualityFromFileMeta(name)
    if quality != Quality.UNKNOWN:
        return quality
    # Raw .ts transport streams are treated as unprocessed HD captures.
    return Quality.RAWHDTV if name.lower().endswith(".ts") else Quality.UNKNOWN
@staticmethod
def qualityFromFileMeta(filename):  # pylint: disable=too-many-branches
    """
    Determine quality from the media file's embedded metadata (via hachoir).

    Fix: the `1620 < height <= 3240` bucket was a bare `if` in an
    otherwise `elif` chain; made it `elif` for consistency (behavior is
    unchanged because the height ranges are disjoint).

    :param filename: Filename to analyse
    :return: Quality value, or Quality.UNKNOWN when no video height is found
    """
    # hachoir logs to stdout by default; keep it quiet.
    log.use_print = False

    try:
        parser = createParser(filename)
    except Exception:  # pylint: disable=broad-except
        # Unreadable/unsupported file: fall through to UNKNOWN.
        parser = None

    if not parser:
        return Quality.UNKNOWN

    try:
        metadata = extractMetadata(parser)
    except Exception:  # pylint: disable=broad-except
        metadata = None

    try:
        # hachoir keeps the underlying stream open; close it explicitly.
        parser.stream._input.close()  # pylint: disable=protected-access
    except Exception:  # pylint: disable=broad-except
        pass

    if not metadata:
        return Quality.UNKNOWN

    height = 0
    if metadata.has('height'):
        height = int(metadata.get('height') or 0)
    else:
        # Some containers expose the video track in a metadata sub-group.
        test = getattr(metadata, "iterGroups", None)
        if callable(test):
            for metagroup in metadata.iterGroups():
                if metagroup.has('height'):
                    height = int(metagroup.get('height') or 0)

    if not height:
        return Quality.UNKNOWN

    base_filename = ek(path.basename, filename)
    bluray = re.search(r"blue?-?ray|hddvd|b[rd](rip|mux)", base_filename, re.I) is not None
    webdl = re.search(r"web.?dl|web(rip|mux|hd)", base_filename, re.I) is not None

    # Bucket by vertical resolution; source type (TV/BluRay/WEB-DL) from the
    # name. The tuple indexing picks WEB-DL over BluRay over TV.
    ret = Quality.UNKNOWN
    if 3240 < height:
        ret = ((Quality.UHD_8K_TV, Quality.UHD_8K_BLURAY)[bluray], Quality.UHD_8K_WEBDL)[webdl]
    elif 1620 < height <= 3240:
        ret = ((Quality.UHD_4K_TV, Quality.UHD_4K_BLURAY)[bluray], Quality.UHD_4K_WEBDL)[webdl]
    elif 800 < height <= 1620:
        ret = ((Quality.FULLHDTV, Quality.FULLHDBLURAY)[bluray], Quality.FULLHDWEBDL)[webdl]
    elif 680 < height <= 800:
        ret = ((Quality.HDTV, Quality.HDBLURAY)[bluray], Quality.HDWEBDL)[webdl]
    elif height <= 680:
        ret = (Quality.SDTV, Quality.SDDVD)[re.search(r'dvd|b[rd]rip|blue?-?ray', base_filename, re.I) is not None]
    return ret
@staticmethod
def compositeStatus(status, quality):
    """Encode a status and quality into one integer: status + 100 * quality."""
    return status + 100 * (Quality.NONE if quality is None else quality)
@staticmethod
def qualityDownloaded(status):
    """Extract the quality part from a DOWNLOADED composite status
    (Python 2 integer division)."""
    quality, _ = divmod(status - DOWNLOADED, 100)
    return quality
@staticmethod
def splitCompositeStatus(status):
    """
    Split a composite status code into a status and quality.

    :param status: to split
    :returns: a tuple containing (status, quality)
    """
    status = long(status)  # Python 2 long; matches how composites are stored
    if status == UNKNOWN:
        return UNKNOWN, Quality.UNKNOWN
    # Largest quality first so the biggest multiple of 100 is peeled off.
    for quality in sorted(Quality.qualityStrings.keys(), reverse=True):
        if status > quality * 100:
            return status - quality * 100, quality
    return status, Quality.NONE
@staticmethod
def sceneQualityFromName(name, quality):  # pylint: disable=too-many-branches
    """
    Get scene naming parameters from filename and quality

    :param name: Filename to check
    :param quality: int of quality to make sure we get the right rip type
    :return: encoder type for scene quality naming
    """
    codec_list = ['xvid', 'divx']
    x264_list = ['x264', 'x 264', 'x.264']
    h264_list = ['h264', 'h 264', 'h.264', 'avc']
    x265_list = ['x265', 'x 265', 'x.265']
    h265_list = ['h265', 'h 265', 'h.265', 'hevc']
    codec_list += x264_list + h264_list + x265_list + h265_list

    found_codecs = {}
    found_codec = None

    # Record the *last* occurrence position of each codec tag in the name.
    for codec in codec_list:
        if codec in name.lower():
            found_codecs[name.lower().rfind(codec)] = codec

    if found_codecs:
        # Keep the codec tag that appears latest in the filename.
        sorted_codecs = sorted(found_codecs, reverse=True)
        found_codec = found_codecs[list(sorted_codecs)[0]]

    # 2 corresponds to SDDVD quality
    # NOTE: rip_type is only bound inside this branch; the returns below
    # that use it are likewise guarded by quality == 2.
    if quality == 2:
        if re.search(r"b(r|d|rd)?(-| |\.)?(rip|mux)", name.lower()):
            rip_type = " BDRip"
        elif re.search(r"(dvd)(-| |\.)?(rip|mux)?", name.lower()):
            rip_type = " DVDRip"
        else:
            rip_type = ""

    if found_codec:
        # Normalize each codec family to its canonical spelling.
        if codec_list[0] in found_codec:
            found_codec = 'XviD'
        elif codec_list[1] in found_codec:
            found_codec = 'DivX'
        elif found_codec in x264_list:
            found_codec = x264_list[0]
        elif found_codec in h264_list:
            found_codec = h264_list[0]
        elif found_codec in x265_list:
            found_codec = x265_list[0]
        elif found_codec in h265_list:
            found_codec = h265_list[0]

        if quality == 2:
            return rip_type + " " + found_codec
        else:
            return " " + found_codec
    elif quality == 2:
        return rip_type
    else:
        return ""
@staticmethod
def statusFromName(name, assume=True, anime=False):
    """
    Build a DOWNLOADED composite status from a filename.

    :param name: Filename to check
    :param assume: boolean to assume quality by extension if we can't figure it out
    :param anime: boolean to enable anime parsing
    :return: Composite status/quality value
    """
    quality = Quality.nameQuality(name, anime)
    if quality == Quality.UNKNOWN and assume:
        quality = Quality.assumeQuality(name)
    return Quality.compositeStatus(DOWNLOADED, quality)
# Placeholders; populated right after the class body with lists of every
# composite status code for the corresponding prefix.
DOWNLOADED = None
SNATCHED = None
SNATCHED_PROPER = None
FAILED = None
SNATCHED_BEST = None
ARCHIVED = None
SNATCHED_FRENCH = None
# Precompute every composite status code per prefix (one entry per quality).
Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in Quality.qualityStrings]
Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in Quality.qualityStrings]
Quality.SNATCHED_PROPER = [Quality.compositeStatus(SNATCHED_PROPER, x) for x in Quality.qualityStrings]
Quality.FAILED = [Quality.compositeStatus(FAILED, x) for x in Quality.qualityStrings]
Quality.SNATCHED_BEST = [Quality.compositeStatus(SNATCHED_BEST, x) for x in Quality.qualityStrings]
Quality.ARCHIVED = [Quality.compositeStatus(ARCHIVED, x) for x in Quality.qualityStrings]
# .keys() here is equivalent to iterating the NumDict directly (as above).
Quality.SNATCHED_FRENCH = [Quality.compositeStatus(SNATCHED_FRENCH, x) for x in Quality.qualityStrings.keys()]
# Preset quality masks offered in the UI (allowed qualities only, no
# preferred bits).
HD720p = Quality.combineQualities([Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], [])
HD1080p = Quality.combineQualities([Quality.FULLHDTV, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY], [])
UHD_4K = Quality.combineQualities([Quality.UHD_4K_TV, Quality.UHD_4K_WEBDL, Quality.UHD_4K_BLURAY], [])
UHD_8K = Quality.combineQualities([Quality.UHD_8K_TV, Quality.UHD_8K_WEBDL, Quality.UHD_8K_BLURAY], [])
SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], [])
HD = Quality.combineQualities([HD720p, HD1080p], [])
UHD = Quality.combineQualities([UHD_4K, UHD_8K], [])
ANY = Quality.combineQualities([SD, HD, UHD], [])

# legacy template, cant remove due to reference in mainDB upgrade?
BEST = Quality.combineQualities([Quality.SDTV, Quality.HDTV, Quality.HDWEBDL], [Quality.HDTV])

qualityPresets = (
    SD,
    HD, HD720p, HD1080p,
    UHD, UHD_4K, UHD_8K,
    ANY,
)

qualityPresetStrings = NumDict({
    SD: "SD",
    HD: "HD",
    HD720p: "HD720p",
    HD1080p: "HD1080p",
    UHD: "UHD",
    UHD_4K: "UHD-4K",
    UHD_8K: "UHD-8K",
    ANY: "Any",
})
class StatusStrings(NumDict):
    """
    Dictionary containing strings for status codes
    """
    # todo: Make views return Qualities too
    # Every composite status derivable from a prefix + quality.
    # NOTE(review): Quality.SNATCHED_FRENCH composites are not included here,
    # so looking them up raises KeyError -- confirm whether that is intended.
    qualities = Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST + Quality.ARCHIVED + Quality.FAILED

    def __missing__(self, key):
        """
        If the key is not found try to determine a status from Quality

        :param key: A numeric key or None
        :raise KeyError: if the key is invalid and can't be determined from Quality
        """
        key = self.numeric(key)  # try to convert the key to a number which will raise KeyError if it can't
        if key in self.qualities:  # the key wasn't found locally so check in qualities
            status, quality = Quality.splitCompositeStatus(key)
            # Append the quality name unless the quality part is NONE (0).
            return self[status] if not quality else self[status] + " (" + Quality.qualityStrings[quality] + ")"
        else:  # the key wasn't found in qualities either
            raise KeyError(key)  # ... so the key is invalid

    def __contains__(self, key):
        # Membership covers plain statuses and composite quality statuses;
        # unparseable keys are simply "not in" rather than raising.
        try:
            key = self.numeric(key)
            return key in self.data or key in self.qualities
        except KeyError:
            return False
# Assign strings to statuses
statusStrings = StatusStrings({
    UNKNOWN: "Unknown",
    UNAIRED: "Unaired",
    SNATCHED: "Snatched",
    DOWNLOADED: "Downloaded",
    SKIPPED: "Skipped",
    SNATCHED_PROPER: "Snatched (Proper)",
    WANTED: "Wanted",
    ARCHIVED: "Archived",
    IGNORED: "Ignored",
    SUBTITLED: "Subtitled",
    FAILED: "Failed",
    SNATCHED_BEST: "Snatched (Best)",
    SNATCHED_FRENCH: "Snatched (French)"
})
class Overview(object):  # pylint: disable=too-few-public-methods
    """Episode overview categories used to classify rows in displayShow."""

    UNAIRED = UNAIRED  # 1
    SNATCHED = SNATCHED  # 2
    WANTED = WANTED  # 3
    GOOD = DOWNLOADED  # 4
    SKIPPED = SKIPPED  # 5
    SNATCHED_PROPER = SNATCHED_PROPER  # 9
    SNATCHED_BEST = SNATCHED_BEST  # 12 (original comment said 1)
    SNATCHED_FRENCH = SNATCHED_FRENCH  # 40 (original comment said 13)

    # Should suffice!
    QUAL = 50

    # CSS class names per overview category.
    overviewStrings = NumDict({
        SKIPPED: "skipped",
        WANTED: "wanted",
        QUAL: "qual",
        GOOD: "good",
        UNAIRED: "unaired",
        SNATCHED: "snatched",
        # we can give these a different class later, otherwise
        # breaks checkboxes in displayShow for showing different statuses
        SNATCHED_BEST: "snatched",
        SNATCHED_PROPER: "snatched",
        SNATCHED_FRENCH: "snatched"
    })
# ISO 639-2 language code -> display name ('' means undetected).
showLanguages = {'eng': 'english',
                 'fre': 'french',
                 '': 'unknown'}

# Country name -> two-letter country code.
countryList = {
    'Australia': 'AU',
    'Canada': 'CA',
    'USA': 'US'
}
|
timm/timmnix | refs/heads/master | pypy3-v5.5.0-linux64/lib_pypy/cffi/_pycparser/ply/yacc.py | 465 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2"  # Table version

#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------

yaccdebug = 1  # Debugging mode. If set, yacc generates a
               # a 'parser.out' file in the current directory

debug_file = 'parser.out'  # Default name of the debugging file
tab_module = 'parsetab'  # Default name of the table module
default_lr = 'LALR'  # Default LR table generation method
error_count = 3  # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0  # Set to True if developing yacc. This turns off optimized
               # implementations of certain functions.
resultlimit = 40  # Size limit of results when running in debug mode.
pickle_protocol = 0  # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
    def func_code(f):
        # Python 2: the code object lives on func_code.
        return f.func_code
else:
    def func_code(f):
        # Python 3: func_code was renamed to __code__.
        return f.__code__
# Compatibility
try:
    MAXINT = sys.maxint  # Python 2
except AttributeError:
    MAXINT = sys.maxsize  # Python 3: maxint was removed
# Python 2.x/3.0 compatibility.
def load_ply_lex():
    """Import and return the ply lex module under either Python 2 or 3.

    On Python 2 the module is imported by its bare name (old flat layout);
    on Python 3 it is imported from the ply package.
    """
    if sys.version_info[0] < 3:
        import lex
    else:
        import ply.lex as lex
    return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal logging-module stand-in that writes messages to a file object.

    Used by PLY to produce output such as parser.out when the caller does
    not supply a real logging object.
    """

    def __init__(self, f):
        self.f = f

    def debug(self, msg, *args, **kwargs):
        """Write the %-interpolated message followed by a newline."""
        self.f.write((msg % args) + "\n")

    info = debug

    def warning(self, msg, *args, **kwargs):
        """Write the message with a 'WARNING: ' prefix."""
        self.f.write("WARNING: " + (msg % args) + "\n")

    def error(self, msg, *args, **kwargs):
        """Write the message with an 'ERROR: ' prefix."""
        self.f.write("ERROR: " + (msg % args) + "\n")

    # critical deliberately aliases debug (unprefixed), matching upstream ply.
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger stand-in that swallows everything: any attribute access or
    call returns the logger itself, so arbitrary chained use is a no-op."""

    def __getattribute__(self, _name):
        return self

    def __call__(self, *_args, **_kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Exception raised for yacc-related errors."""
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Format a parser result value for debug output, truncating long reprs
    to the module-level `resultlimit`."""
    text = repr(r)
    # Multi-line reprs are re-repr'd so they stay on one line.
    if '\n' in text:
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Format a parser stack entry for debug output; entries with long reprs
    collapse to a type/id summary."""
    text = repr(r)
    # Multi-line reprs are re-repr'd so they stay on one line.
    if '\n' in text:
        text = repr(text)
    if len(text) < 16:
        return text
    return "<%s @ 0x%x>" % (type(r).__name__, id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Grammar symbol held on the parsing stacks.

    Attributes such as .type, .value, .lineno and .lexpos are assigned
    dynamically by the parser; str() renders the symbol's type name.
    """
    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.type
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols passed to each grammar rule function.

    Index lookup and assignment operate on the .value attribute of the
    underlying YaccSymbol objects; negative indices address the parser's
    symbol stack.  lineno()/linespan() and lexpos()/lexspan() expose the
    positional information recorded on each symbol.
    """
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        if n >= 0:
            return self.slice[n].value
        return self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        return [sym.value for sym in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        """Line number of item *n*, or 0 if not recorded."""
        return getattr(self.slice[n], "lineno", 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        """Return (startline, endline) for item *n*."""
        first = getattr(self.slice[n], "lineno", 0)
        last = getattr(self.slice[n], "endlineno", first)
        return first, last

    def lexpos(self, n):
        """Lexing position of item *n*, or 0 if not recorded."""
        return getattr(self.slice[n], "lexpos", 0)

    def lexspan(self, n):
        """Return (lexpos, endlexpos) for item *n*."""
        first = getattr(self.slice[n], "lexpos", 0)
        last = getattr(self.slice[n], "endlexpos", first)
        return first, last

    def error(self):
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The LR parsing engine.

    Drives a table-based LR parse using the action/goto tables supplied in
    *lrtab*.  Three copies of the parse loop exist: parsedebug() (with
    logging), parseopt() (optimized, with position tracking) and
    parseopt_notrack() (optimized, tracking removed); parse() dispatches
    between them.  Per the upstream comments, all engine changes are made
    in parsedebug() and manually copied to the other two.
    """
    def __init__(self,lrtab,errorf):
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
    def errok(self):
        """Signal that error recovery succeeded (callable from p_error)."""
        self.errorok = 1
    def restart(self):
        """Reset the parser stacks to the initial ($end, state 0) state."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Parse *input*, dispatching to the appropriate engine variant."""
        if debug or yaccdevel:
            if isinstance(debug,int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
        elif tracking:
            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
        else:
            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse(). All changes made to the
    # parsing engine should be made here. For the non-debugging version,
    # copy this code to a method parseopt() and delete all of the sections
    # enclosed in:
    #
    # #--! DEBUG
    # statements
    # #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
        """Reference parse loop with debug logging to the *debug* logger."""
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # --! DEBUG
        debug.info("PLY: PARSE DEBUG START")
        # --! DEBUG
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = "$end"
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            # --! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            # --! DEBUG
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = "$end"
            # --! DEBUG
            debug.debug('Stack : %s',
                ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # --! DEBUG
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    # --! DEBUG
                    debug.debug("Action : Shift and goto state %s", t)
                    # --! DEBUG
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    # --! DEBUG
                    if plen:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                    else:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                    # --! DEBUG
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n,"value",None)
                    # --! DEBUG
                    debug.info("Done : Returning %s", format_result(result))
                    debug.info("PLY: PARSE DEBUG END")
                    # --! DEBUG
                    return result
            if t == None:
                # --! DEBUG
                debug.error('Error : %s',
                    ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # --! DEBUG
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == "$end":
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != "$end":
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == "$end":
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1] # Potential bug fix
                continue
        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
    # Edit the debug version above, then copy any modifications to the method
    # below while removing #--! DEBUG sections.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized parse loop with position tracking (no debug logging)."""
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1] # Potential bug fix
                continue
        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
    # code in the #--! TRACKING sections
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized parse loop with line/position tracking removed."""
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table (to avoid lookup on self.)
        goto = self.goto # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1] # Potential bug fix
                continue
        # Call an error function here
        raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# Matches valid grammar symbol names: letters, digits, underscore, and hyphen.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single production (grammar rule).

    A rule such as  expr : expr PLUS term  has name 'expr' and right-hand
    side symbols ['expr','PLUS','term'].

    Attributes:
        name   -- name of the production (left-hand nonterminal)
        prod   -- tuple of right-hand-side symbols
        prec   -- production precedence, an (assoc, level) tuple
        number -- production number
        func   -- name of the function executed on reduce
        file   -- file where the rule function is defined
        line   -- line number where the rule function is defined
        len    -- number of symbols on the right-hand side
        usyms  -- unique symbols appearing in the rule
    """
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Internal settings used during table construction.
        self.len = len(self.prod)

        # Unique production symbols, in first-seen order.
        self.usyms = []
        for symbol in self.prod:
            if symbol not in self.usyms:
                self.usyms.append(symbol)

        # List of all LR items for the production (filled in later).
        self.lr_items = []
        self.lr_next = None

        # Human-readable form, e.g. "expr -> expr PLUS term".
        if self.prod:
            self.str = "%s -> %s" % (self.name, " ".join(self.prod))
        else:
            self.str = "%s -> <empty>" % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return "Production(" + str(self) + ")"

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LR item with the dot at position *n*, or None past the end."""
        if n > len(self.prod):
            return None
        p = LRItem(self, n)
        # Precompute the productions immediately following the dot.
        # NOTE: relies on the module-level Prodnames table; upstream marks
        # this as a hack to remove later.
        try:
            p.lr_after = Prodnames[p.prod[n + 1]]
        except (IndexError, KeyError):
            p.lr_after = []
        try:
            p.lr_before = p.prod[n - 1]
        except IndexError:
            p.lr_before = None
        return p

    def bind(self, pdict):
        """Resolve the stored rule-function name to an actual callable."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Lightweight stand-in for Production when tables are read from files.

    Holds only what the LR parsing engine actually uses, plus a little
    extra debugging information.
    """
    def __init__(self, str, name, len, func, file, line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.str = str

    def __str__(self):
        return self.str

    def __repr__(self):
        return "MiniProduction(%s)" % self.str

    def bind(self, pdict):
        """Resolve the stored rule-function name to an actual callable."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A production with a dot marking the current parse position.

    For  expr : expr . PLUS term  the "." shows how much of the rule has
    been matched.  lr_index is the dot's position within prod; lookaheads
    maps states to LALR lookahead symbols and is filled in during table
    construction.
    """
    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        symbols = list(p.prod)
        symbols.insert(n, ".")
        self.prod = tuple(symbols)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if not self.prod:
            return "%s -> <empty>" % self.name
        return "%s -> %s" % (self.name, " ".join(self.prod))

    def __repr__(self):
        return "LRItem(" + str(self) + ")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost terminal in *symbols*, or None if there is none.

    Used by add_production() to derive a rule's default precedence.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError): pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = { } # A dictionary of precomputed FIRST(x) symbols
self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = { } # Precedence rules that were actually used by the grammer.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self,prodname,syms,func=None,file='',line=0):
    """Add a grammar rule *prodname* -> *syms* with optional action *func*.

    *syms* is a list of symbol names (mutated in place: quoted literals
    are replaced by their character, and a trailing "%prec NAME" pair is
    stripped).  *file*/*line* identify where the rule was defined, for
    error messages.  Raises GrammarError on any validation failure.
    Returns 0 on success.
    """
    if prodname in self.Terminals:
        raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
    if prodname == 'error':
        raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
    if not _is_identifier.match(prodname):
        raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

    # Look for literal tokens such as 'c' or "c" on the right-hand side.
    # A valid single-character literal is registered as a terminal and
    # replaces the quoted form in syms.
    for n,s in enumerate(syms):
        if s[0] in "'\"":
            try:
                c = eval(s)
                if (len(c) > 1):
                    raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                if not c in self.Terminals:
                    self.Terminals[c] = []
                syms[n] = c
                continue
            except SyntaxError:
                # Not a well-formed literal; fall through to the
                # identifier check below.
                pass
        if not _is_identifier.match(s) and s != '%prec':
            raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

    # Determine the precedence level of the rule.
    if '%prec' in syms:
        # Explicit precedence: must be exactly "... %prec NAME" at the end.
        if syms[-1] == '%prec':
            raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
        if syms[-2] != '%prec':
            raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
        precname = syms[-1]
        prodprec = self.Precedence.get(precname,None)
        if not prodprec:
            raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
        else:
            # Track usage so unused_precedence() can warn about dead rules.
            self.UsedPrecedence[precname] = 1
        del syms[-2:]     # Drop %prec from the rule
    else:
        # If no %prec, precedence is determined by the rightmost terminal symbol
        precname = rightmost_terminal(syms,self.Terminals)
        prodprec = self.Precedence.get(precname,('right',0))

    # See if the rule is already in the rulemap
    map = "%s -> %s" % (prodname,syms)
    if map in self.Prodmap:
        m = self.Prodmap[map]
        raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                           "Previous definition at %s:%d" % (m.file, m.line))

    # From this point on, everything is valid.  Create a new Production instance
    pnumber = len(self.Productions)
    if not prodname in self.Nonterminals:
        self.Nonterminals[prodname] = [ ]

    # Add the production number to Terminals and Nonterminals so each
    # symbol records which productions reference it.
    for t in syms:
        if t in self.Terminals:
            self.Terminals[t].append(pnumber)
        else:
            if not t in self.Nonterminals:
                self.Nonterminals[t] = [ ]
            self.Nonterminals[t].append(pnumber)

    # Create a production and add it to the list of productions
    p = Production(pnumber,prodname,syms,prodprec,func,file,line)
    self.Productions.append(p)
    self.Prodmap[map] = p

    # Add to the per-name production list used elsewhere (e.g. FIRST/FOLLOW).
    try:
        self.Prodnames[prodname].append(p)
    except KeyError:
        self.Prodnames[prodname] = [ p ]
    return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self,start=None):
    """Set the grammar start symbol and install augmented rule 0 (S' -> start).

    Defaults to the left-hand side of the first real production.
    Raises GrammarError if *start* is not a known nonterminal.
    """
    start = start or self.Productions[1].name
    if start not in self.Nonterminals:
        raise GrammarError("start symbol %s undefined" % start)
    self.Productions[0] = Production(0,"S'",[start])
    self.Nonterminals[start].append(0)
    self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
    """Return the nonterminals that cannot be reached from the start symbol.

    Performs an iterative depth-first search over the productions,
    starting from the augmented rule's right-hand side.
    """
    reachable = {sym: 0 for sym in list(self.Terminals) + list(self.Nonterminals)}

    # Iterative DFS (the original used recursion) from the start symbol.
    pending = [self.Productions[0].prod[0]]
    while pending:
        sym = pending.pop()
        if reachable[sym]:
            continue
        reachable[sym] = 1
        for prod in self.Prodnames.get(sym, []):
            pending.extend(prod.prod)

    return [sym for sym in self.Nonterminals if not reachable[sym]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
    """Return nonterminals that can never derive a string of terminals.

    A symbol "terminates" if some production lets it expand to only
    terminating symbols; anything never proven to terminate is part of
    an infinite recursion cycle.
    """
    terminates = {}

    # Every terminal (and the end marker) trivially terminates.
    for term in self.Terminals:
        terminates[term] = 1
    terminates['$end'] = 1

    # Assume no nonterminal terminates until proven otherwise.
    for nt in self.Nonterminals:
        terminates[nt] = 0

    # Fixpoint: a nonterminal terminates once any one of its productions
    # consists solely of terminating symbols.
    while 1:
        changed = 0
        for name, prods in self.Prodnames.items():
            if terminates[name]:
                continue
            for p in prods:
                if all(terminates[s] for s in p.prod):
                    terminates[name] = 1
                    changed = 1
                    break
        if not changed:
            break

    # Collect the non-terminating symbols.  Used-but-undefined symbols
    # are skipped: they are already reported elsewhere.
    infinite = []
    for sym, ok in terminates.items():
        if not ok:
            if not sym in self.Prodnames and not sym in self.Terminals and sym != 'error':
                pass
            else:
                infinite.append(sym)
    return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
    """Return [(sym, prod), ...] for every symbol used in a production
    but never defined as a token or a grammar rule."""
    return [(s, p)
            for p in self.Productions if p
            for s in p.prod
            if s not in self.Prodnames and s not in self.Terminals and s != 'error']
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
    """Return the terminals that were declared but never referenced by
    any production (the special 'error' token is exempt)."""
    return [t for t, refs in self.Terminals.items()
            if t != 'error' and not refs]
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
    """Return one representative Production for each nonterminal that is
    defined but never referenced by any other rule."""
    return [self.Prodnames[sym][0]
            for sym, refs in self.Nonterminals.items() if not refs]
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
    """Return [(term, assoc), ...] for precedence declarations that were
    never used: the name is neither a known terminal nor referenced
    through %prec."""
    unused = []
    for term, (assoc, _level) in self.Precedence.items():
        if term not in self.Terminals and term not in self.UsedPrecedence:
            unused.append((term, assoc))
    return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
    """Compute and cache FIRST(X) for every grammar symbol.

    Terminals map to themselves; nonterminal FIRST sets are grown by
    fixpoint iteration over all productions until nothing changes.
    """
    if self.First:
        return self.First

    # FIRST(t) = {t} for every terminal, plus the end marker.
    for term in self.Terminals:
        self.First[term] = [term]
    self.First['$end'] = ['$end']

    # Nonterminals start out empty.
    for nt in self.Nonterminals:
        self.First[nt] = []

    # Propagate until a full pass adds nothing new.
    changed = True
    while changed:
        changed = False
        for nt in self.Nonterminals:
            target = self.First[nt]
            for prod in self.Prodnames[nt]:
                for sym in self._first(prod.prod):
                    if sym not in target:
                        target.append(sym)
                        changed = True
    return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
    """Compute FOLLOW(X) for every nonterminal X.

    FOLLOW(X) is the set of terminals that can appear immediately after
    X in some sentential form (Dragon book, 2nd Ed. p. 189).  Results
    are cached in self.Follow.  Requires FIRST sets (computed on demand).
    """
    # If already computed, return the result
    if self.Follow:
        return self.Follow

    # If first sets not computed yet, do that first.
    if not self.First:
        self.compute_first()

    # Start every nonterminal with an empty FOLLOW set.
    for k in self.Nonterminals:
        self.Follow[k] = [ ]

    # Add '$end' to the follow list of the start symbol
    if not start:
        start = self.Productions[1].name
    self.Follow[start] = [ '$end' ]

    # Fixpoint iteration until a full pass adds nothing.
    while 1:
        didadd = 0
        for p in self.Productions[1:]:
            # For each occurrence of a nonterminal B in p's right side...
            for i in range(len(p.prod)):
                B = p.prod[i]
                if B in self.Nonterminals:
                    # Everything in FIRST(rest-of-rhs) except <empty>
                    # belongs to FOLLOW(B).
                    fst = self._first(p.prod[i+1:])
                    hasempty = 0
                    for f in fst:
                        if f != '<empty>' and f not in self.Follow[B]:
                            self.Follow[B].append(f)
                            didadd = 1
                        if f == '<empty>':
                            hasempty = 1
                    # If the rest can vanish (or B is rightmost), FOLLOW
                    # of the rule's left side flows into FOLLOW(B).
                    if hasempty or i == (len(p.prod)-1):
                        for f in self.Follow[p.name]:
                            if f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
        if not didadd: break
    return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
    """Build every LR item for every production.

    For each production p, creates LRItem objects for all dot positions
    and stores them in p.lr_items.  Items are also chained via lr_next
    (p itself heads the chain; the final item's lr_next is None).  Each
    item precomputes lr_after (productions for the symbol right after
    the dot) and lr_before (the symbol just before the dot).
    """
    for p in self.Productions:
        lastlri = p          # tail of the lr_next chain; starts at p itself
        i = 0
        lr_items = []
        while 1:
            if i > len(p):
                # Past the last dot position: terminate chain with None.
                lri = None
            else:
                lri = LRItem(p,i)
                # Precompute the list of productions immediately following
                # the dot (empty for terminals or end-of-rule).
                try:
                    lri.lr_after = self.Prodnames[lri.prod[i+1]]
                except (IndexError,KeyError):
                    lri.lr_after = []
                try:
                    lri.lr_before = lri.prod[i-1]
                except IndexError:
                    lri.lr_before = None
            lastlri.lr_next = lri
            if not lri: break
            lr_items.append(lri)
            lastlri = lri
            i += 1
        p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
# Raised when a previously generated parse table was written by a
# different (incompatible) version of this module.
class VersionError(YaccError): pass
class LRTable(object):
    """Container for LR parsing tables (action/goto/productions).

    This base class only knows how to *load* previously generated
    tables, either from a Python module (read_table) or from a pickle
    file (read_pickle).  Table generation lives in the derived
    LRGeneratedTable class.
    """
    def __init__(self):
        self.lr_action = None        # Action table
        self.lr_goto = None          # Goto table
        self.lr_productions = None   # List of MiniProduction objects
        self.lr_method = None        # 'SLR' or 'LALR'

    def read_table(self,module):
        """Load tables from an imported module object or a module name.

        Returns the table signature string.  Raises VersionError when
        the table file was generated by an incompatible version.
        """
        if isinstance(module,types.ModuleType):
            parsetab = module
        else:
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                # In Python 3 exec() cannot rebind a local variable, so
                # import into an explicit namespace and pull it out.
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']

        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto
        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))
        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self,filename):
        """Load tables from a pickle file.

        Returns the table signature string.  Raises VersionError on a
        version mismatch.
        """
        try:
            import cPickle as pickle   # Python 2
        except ImportError:
            import pickle              # Python 3
        in_f = open(filename,"rb")
        try:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError("yacc table file version is out of date")
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)
        finally:
            # Close unconditionally -- the original leaked the file
            # handle when VersionError was raised.
            in_f.close()
        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self,pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
    """Solve F(x) = FP(x) U union{ F(y) | x R y } for all x in X.

    Uses the DeRemer/Pennello digraph algorithm: a depth-first
    traversal (see traverse()) that merges values over strongly
    connected components.  Returns the mapping F.
    """
    N = dict.fromkeys(X, 0)   # visitation numbers; 0 == unvisited
    stack = []
    F = {}
    for node in X:
        if N[node] == 0:
            traverse(node,N,stack,F,X,R,FP)
    return F
def traverse(x,N,stack,F,X,R,FP):
    """Depth-first traversal helper for digraph().

    Tarjan-style SCC walk: nodes in the same strongly connected
    component end up sharing one F value, and their N entries are set
    to MAXINT to mark them finished.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        # Track the lowest visitation number reachable from x.
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of an SCC: pop the whole component, giving every
        # member x's F value and marking it complete.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
# Raised for internal errors encountered while building SLR/LALR tables.
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self,grammar,method='LALR',log=None):
if method not in ['SLR','LALR']:
raise LALRError("Unsupported method %s" % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagonistic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self,I):
    """Return the LR(0) closure of the item set *I*.

    Repeatedly adds the initial item of every production that can
    appear after a dot.  The per-object lr0_added generation counter
    (bumped via self._add_count) avoids adding the same production
    twice within one closure computation.
    """
    self._add_count += 1

    closure = list(I)
    changed = True
    while changed:
        changed = False
        for item in closure:
            for prod in item.lr_after:
                if getattr(prod,"lr0_added",0) == self._add_count:
                    continue
                # Add B --> .gamma to the closure.
                closure.append(prod.lr_next)
                prod.lr0_added = self._add_count
                changed = True
    return closure
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
    """Compute goto(I,x) for LR(0) item set I and grammar symbol x.

    Written so that identical goto sets are always returned as the
    *same* Python object, allowing later comparisons by id() instead of
    element-wise equality.  Results are memoized two ways: directly by
    (id(I),x), and through a per-symbol trie keyed by the item objects
    that shift over x.
    """
    # First we look for a previously cached entry
    g = self.lr_goto_cache.get((id(I),x),None)
    if g: return g

    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result.  s walks a trie of nested dicts keyed by item id;
    # the '$end' key at the final node holds the computed set.
    s = self.lr_goto_cache.get(x,None)
    if not s:
        s = { }
        self.lr_goto_cache[x] = s

    gs = [ ]
    for p in I:
        n = p.lr_next
        if n and n.lr_before == x:
            s1 = s.get(id(n),None)
            if not s1:
                s1 = { }
                s[id(n)] = s1
            gs.append(n)
            s = s1
    g = s.get('$end',None)
    if not g:
        if gs:
            g = self.lr0_closure(gs)
            s['$end'] = g
        else:
            # No item shifts over x: cache the empty list itself.
            s['$end'] = gs
    self.lr_goto_cache[(id(I),x)] = g
    return g
# Compute the LR(0) sets of item function
def lr0_items(self):
    """Compute the canonical collection of LR(0) item sets.

    Starts from the closure of the augmented production's first item
    and repeatedly applies goto over every usable symbol, numbering
    each new set in self.lr0_cidhash (keyed by id).
    """
    C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
    self.lr0_cidhash[id(C[0])] = 0

    # Process states in order; new goto sets are appended to C.
    idx = 0
    while idx < len(C):
        current = C[idx]
        idx += 1

        # Collect every symbol that could possibly appear in goto(I,X).
        symbols = {}
        for item in current:
            for s in item.usyms:
                symbols[s] = None

        for s in symbols:
            g = self.lr0_goto(current, s)
            if not g or id(g) in self.lr0_cidhash:
                continue
            self.lr0_cidhash[id(g)] = len(C)
            C.append(g)
    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
    """Return a dict of nonterminals that can derive the empty string.

    Fixpoint iteration: a nonterminal is nullable if it has an empty
    production or a production made entirely of nullable symbols.
    """
    nullable = {}
    prev_count = 0
    while True:
        for p in self.grammar.Productions[1:]:
            if p.len == 0:
                nullable[p.name] = 1
                continue
            if all(sym in nullable for sym in p.prod):
                nullable[p.name] = 1
        if len(nullable) == prev_count:
            break
        prev_count = len(nullable)
    return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
    """Find all nonterminal transitions in the LR(0) item sets *C*.

    A transition is a pair (state, N) where some item in C[state] has
    its dot immediately before nonterminal N.  Returns them in
    first-seen order, without duplicates.

    Note: the original carried a dead ``state = state + 1`` statement
    whose value was immediately overwritten by the loop; it is removed.
    """
    trans = []
    for state, items in enumerate(C):
        for item in items:
            if item.lr_index < item.len - 1:
                candidate = (state, item.prod[item.lr_index+1])
                if candidate[1] in self.grammar.Nonterminals:
                    if candidate not in trans:
                        trans.append(candidate)
    return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
    """Compute DR(p,A) for one nonterminal transition.

    *trans* is a (state, N) pair.  Returns the list of terminals that
    can be shifted directly after taking the transition (plus '$end'
    for the start transition).  The unused local ``dr_set`` present in
    the original has been removed.
    """
    state,N = trans
    terms = []

    g = self.lr0_goto(C[state],N)
    for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index+1]
            if a in self.grammar.Terminals:
                if a not in terms: terms.append(a)

    # This extra bit is to handle the start state
    if state == 0 and N == self.grammar.Productions[0].prod[0]:
        terms.append('$end')

    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
    """Compute the READS() relation for one nonterminal transition.

    (p,A) READS (t,C) when, after taking transition *trans*, the dot
    sits before a symbol that can derive empty.  Returns a list of
    (goto-state-number, symbol) pairs.
    """
    state, N = trans
    g = self.lr0_goto(C[state],N)
    j = self.lr0_cidhash.get(id(g),-1)
    return [(j, item.prod[item.lr_index + 1])
            for item in g
            if item.lr_index < item.len - 1
            and item.prod[item.lr_index + 1] in empty]
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self,C,trans,nullable):
    """Determine the LOOKBACK and INCLUDES relations.

    LOOKBACK: runs the LR(0) machine forward from each item "N : . A B C"
    to its completed form "N : A B C ." and relates the final state back
    to the starting transition.

    INCLUDES: (p,A) INCLUDES (p',B) if B -> L A T where T derives empty
    and p' leads to p over the prefix L.

    Returns (lookdict, includedict).
    """
    lookdict = {}          # Dictionary of lookback relations
    includedict = {}       # Dictionary of include relations

    # Make a dictionary of non-terminal transitions for O(1) membership tests
    dtrans = {}
    for t in trans:
        dtrans[t] = 1

    # Loop over all transitions and compute lookbacks and includes
    for state,N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N: continue

            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]

                # Check to see if this symbol and state are a non-terminal transition
                if (j,t) in dtrans:
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals: break      # No forget it
                        if not p.prod[li] in nullable: break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j,t))

                # Advance the simulated machine over symbol t.
                g = self.lr0_goto(C[j],t)               # Go to next set
                j = self.lr0_cidhash.get(id(g),-1)      # Go to next state

            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name: continue
                if r.len != p.len: continue
                i = 0
                # This look is comparing a production ". A B C" with "A B C ."
                # (r's prefix against p's body shifted by one).
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]: break
                    i = i + 1
                else:
                    lookb.append((j,r))

        # Record the relations found for this transition.
        for i in includes:
            if not i in includedict: includedict[i] = []
            includedict[i].append((state,N))
        lookdict[(state,N)] = lookb

    return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
    """Compute Read() sets for all nonterminal transitions *ntrans*
    using the digraph algorithm over the READS relation, seeded with
    the DR relation."""
    fp = lambda x: self.dr_relation(C,x,nullable)
    rel = lambda x: self.reads_relation(C,x,nullable)
    return digraph(ntrans,rel,fp)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
    """Compute LALR FOLLOW sets for all nonterminal transitions:
    Follow(p,A) = Read(p,A) U union{ Follow(p',B) | (p,A) INCLUDES (p',B) },
    solved via the digraph algorithm."""
    fp = lambda x: readsets[x]
    rel = lambda x: inclsets.get(x,[])
    return digraph(ntrans,rel,fp)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
    """Attach lookahead symbols to productions.

    For every transition in *lookbacks*, merges the transition's
    follow set into p.lookaheads[state] for each related (state, p)
    pair, avoiding duplicates.
    """
    for trans, lb in lookbacks.items():
        follow = followset.get(trans,[])
        for state, prod in lb:
            la = prod.lookaheads.setdefault(state, [])
            for sym in follow:
                if sym not in la:
                    la.append(sym)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self,C):
    """Run the full LALR lookahead computation pipeline over the LR(0)
    item sets *C* and attach the results to the productions."""
    # Nullable nonterminals are needed by several of the relations below.
    null_nts = self.compute_nullable_nonterminals()

    # All (state, N) nonterminal transitions of the LR(0) machine.
    nt_trans = self.find_nonterminal_transitions(C)

    # Read sets, then the lookback/includes relations.
    read_sets = self.compute_read_sets(C,nt_trans,null_nts)
    lookback_rel, include_rel = self.compute_lookback_includes(C,nt_trans,null_nts)

    # LALR FOLLOW sets, finally attached as per-production lookaheads.
    follow_sets = self.compute_follow_sets(nt_trans,read_sets,include_rel)
    self.add_lookaheads(lookback_rel,follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Construct the SLR or LALR action and goto tables.

    Fills self.lr_action and self.lr_goto, one entry per LR(0) state.
    Action encoding: positive = shift to that state, negative = reduce
    by that rule number, 0 = accept, None = error (nonassoc).
    Shift/reduce conflicts are resolved by precedence (default: shift);
    reduce/reduce conflicts favor the earlier-defined rule.  Conflicts
    are recorded in self.sr_conflicts / self.rr_conflicts.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto         # Goto array
    action = self.lr_action     # Action array
    log = self.log              # Logger for output

    actionp = { }               # Action production array (temporary)

    log.info("Parsing method: %s", self.lr_method)

    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()

    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)

    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ]           # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        log.info("")
        log.info("state %d", st)
        log.info("")
        for p in I:
            log.info(" (%d) %s", p.number, str(p))
        log.info("")

        for p in I:
            if p.len == p.lr_index + 1:
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action["$end"] = 0
                    st_actionp["$end"] = p
                else:
                    # We are at the end of a production. Reduce!
                    # LALR uses per-state lookaheads; SLR the FOLLOW set.
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                sprec,slevel = Productions[st_actionp[a].number].prec
                                rprec,rlevel = Precedence.get(a,('right',0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # nonassoc at equal precedence: error entry.
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp,rejectp = pp,oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp,rejectp = oldp,pp
                                self.rr_conflicts.append((st,chosenp,rejectp))
                                log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1]       # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I,a)
                    j = self.lr0_cidhash.get(id(g),-1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a,p,"shift and go to state %d" % j))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError("Shift/shift conflict in state %d" % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                rprec,rlevel = Productions[st_actionp[a].number].prec
                                sprec,slevel = Precedence.get(a,('right',0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            st_action[a] = j
                            st_actionp[a] = p

        # Print the actions associated with each terminal
        _actprint = { }
        for a,p,m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(" %-15s %s",a,m)
                    _actprint[(a,m)] = 1
        log.info("")

        # Print the actions that were not used. (debugging)
        not_used = 0
        for a,p,m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a,m) in _actprint:
                        log.debug(" ! %-15s [ %s ]",a,m)
                        not_used = 1
                        _actprint[(a,m)] = 1
        if not_used:
            log.debug("")

        # Construct the goto table for this state
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I,n)
            j = self.lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                log.info(" %-30s shift and go to state %d",n,j)

        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, modulename, outputdir='', signature=""):
    """Write the generated LR parsing tables out as an importable Python
    module (method of LRGeneratedTable).

    modulename -- dotted name of the table module; only the last component
                  is used as the file name.
    outputdir  -- directory the .py file is written into.
    signature  -- grammar signature embedded so stale tables can be detected.

    On IOError the error is reported to stderr and the method returns
    without raising.
    """
    basemodulename = modulename.split(".")[-1]
    filename = os.path.join(outputdir, basemodulename) + ".py"
    try:
        f = open(filename, "w")
        f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))

        # Change smaller to 0 to go back to original (flat) tables
        smaller = 1

        # Factor out terminal names so repeated keys are stored only once,
        # which makes the emitted table file considerably smaller.
        if smaller:
            items = {}
            for s, nd in self.lr_action.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write("\n_lr_action_items = {")
            for k, v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")

            # Loader code executed when the generated module is imported;
            # it rebuilds the nested action dict from the factored items.
            f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")
        else:
            f.write("\n_lr_action = { ")
            for k, v in self.lr_action.items():
                f.write("(%r,%r):%r," % (k[0], k[1], v))
            f.write("}\n")

        if smaller:
            # Same factoring trick for the goto table.
            items = {}
            for s, nd in self.lr_goto.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)

            f.write("\n_lr_goto_items = {")
            for k, v in items.items():
                f.write("%r:([" % k)
                for i in v[0]:
                    f.write("%r," % i)
                f.write("],[")
                for i in v[1]:
                    f.write("%r," % i)
                f.write("]),")
            f.write("}\n")

            f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = { }
      _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
        else:
            f.write("\n_lr_goto = { ")
            for k, v in self.lr_goto.items():
                f.write("(%r,%r):%r," % (k[0], k[1], v))
            f.write("}\n")

        # Write production table: (str, name, len, func, file, line) per rule.
        f.write("_lr_productions = [\n")
        for p in self.lr_productions:
            if p.func:
                f.write("  (%r,%r,%d,%r,%r,%d),\n" % (p.str, p.name, p.len, p.func, p.file, p.line))
            else:
                f.write("  (%r,%r,%d,None,None,None),\n" % (str(p), p.name, p.len))
        f.write("]\n")
        f.close()

    except IOError:
        e = sys.exc_info()[1]
        sys.stderr.write("Unable to create '%s'\n" % filename)
        sys.stderr.write(str(e) + "\n")
        return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=""):
    """Pickle the LR parsing tables into *filename* (method of
    LRGeneratedTable).

    The stream layout is: table version, LR method, grammar signature,
    action table, goto table, then the production list.
    """
    try:
        import cPickle as pickle   # Python 2 fast path
    except ImportError:
        import pickle              # Python 3
    outf = open(filename, "wb")
    pickle.dump(__tabversion__, outf, pickle_protocol)
    pickle.dump(self.lr_method, outf, pickle_protocol)
    pickle.dump(signature, outf, pickle_protocol)
    pickle.dump(self.lr_action, outf, pickle_protocol)
    pickle.dump(self.lr_goto, outf, pickle_protocol)

    # Productions are flattened to plain tuples so they unpickle without
    # needing the Production class.
    outp = []
    for p in self.lr_productions:
        if p.func:
            outp.append((p.str, p.name, p.len, p.func, p.file, p.line))
        else:
            outp.append((str(p), p.name, p.len, None, None, None))
    pickle.dump(outp, outf, pickle_protocol)
    outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of a caller *levels* frames up the stack.

    Globals and locals of that frame are merged (locals win).  Used to pick
    up the environment of the yacc() call when no module is given.  The
    frame is reached by raising and catching an exception, which works on
    very old Pythons as well.
    """
    try:
        raise RuntimeError
    except RuntimeError:
        exc_type, exc_value, tb = sys.exc_info()
        frame = tb.tb_frame
        while levels > 0:
            frame = frame.f_back
            levels -= 1
        symbols = frame.f_globals.copy()
        if frame.f_globals != frame.f_locals:
            symbols.update(frame.f_locals)
        return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar-rule docstring into production data.

    Returns a list of (file, lineno, prodname, syms) tuples, one per
    alternative.  A leading '|' continues the previous rule.  Raises
    SyntaxError (tagged with file:line) on any malformed rule.
    """
    rules = []
    current = None   # name of the most recent production, for '|' lines

    for offset, raw in enumerate(doc.splitlines(), 1):
        lineno = line + offset
        parts = raw.split()
        if not parts:
            continue
        try:
            if parts[0] == '|':
                # Continuation of the previous rule
                if not current:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, lineno))
                prodname = current
                syms = parts[1:]
            else:
                prodname = parts[0]
                current = prodname
                syms = parts[2:]
                assign = parts[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, lineno))
            rules.append((file, lineno, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # e.g. IndexError on a one-token line
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file, lineno, raw.strip()))

    return rules
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collects and validates the parser specification found in a module
    dictionary: start symbol, p_error handler, token list, precedence
    table and the p_* grammar-rule functions.
    """

    def __init__(self, pdict, log=None):
        self.pdict = pdict       # symbol table holding the parser spec
        self.start = None
        self.error_func = None
        self.tokens = None
        self.files = {}          # source files that contributed rules
        self.grammar = []        # accumulated (funcname, rule) pairs
        self.error = 0

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    def get_all(self):
        """Extract all of the basic information from pdict."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    def validate_all(self):
        """Run every validation pass; return nonzero if any error occurred."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    def signature(self):
        """Compute an MD5 digest over the grammar so that cached parse
        tables can be invalidated when the specification changes."""
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5   # ancient Pythons
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError, ValueError):
            pass
        return sig.digest()

    def validate_files(self):
        """Warn about duplicated p_rulename() definitions — a common
        cut-and-paste mistake — by regex-scanning the source files."""
        # Match "def p_funcname("
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for filename in self.files.keys():
            base, ext = os.path.splitext(filename)
            if ext != '.py':
                return 1   # No idea what this is. Assume it's okay.

            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                continue

            counthash = {}
            for lineno, srcline in enumerate(lines, 1):
                m = fre.match(srcline)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = lineno
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d",
                                         filename, lineno, name, prev)

    def get_start(self):
        # Optional 'start' symbol declared in the module
        self.start = self.pdict.get('start')

    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, str):
                self.log.error("'start' must be a string")

    def get_error_func(self):
        # Optional p_error() handler
        self.error_func = self.pdict.get('p_error')

    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return

            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1

            # p_error takes exactly one argument (plus self for methods)
            if (func_code(self.error_func).co_argcount != 1 + ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument", efile, eline)
                self.error = 1

    def get_tokens(self):
        tokens = self.pdict.get("tokens", None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens, (list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens

    def validate_tokens(self):
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return

        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    def get_precedence(self):
        self.prec = self.pdict.get("precedence", None)

    def validate_precedence(self):
        """Parse the precedence declaration into (term, assoc, level) tuples."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return
                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)", p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc, str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term, str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    preclist.append((term, assoc, level + 1))
        self.preclist = preclist

    def get_pfunctions(self):
        """Collect every p_* rule function from pdict, ordered by line."""
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_':
                continue
            if name == 'p_error':
                continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line, file, name, item.__doc__))

        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    def validate_pfunctions(self):
        """Check argument counts of p_* functions and parse their docstrings
        into grammar rules; also warn about suspicious non-p_ names."""
        grammar = []
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return

        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument", file, line, func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",
                                 file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1

                # Looks like a valid grammar rule; mark the defining file.
                self.files[file] = 1

        # Secondary validation: p_ names that aren't functions, and
        # functions that look like grammar rules but lack the p_ prefix.
        for n, v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n[0:2] == 't_':
                continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v, types.FunctionType) and func_code(v).co_argcount == 1) or
                    (isinstance(v, types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno, n)
                except Exception:
                    pass

        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file, outputdir='',
         debuglog=None, errorlog=None, picklefile=None):
    """Build and return an LR parser from the grammar specification found
    in *module* (or the caller's namespace).

    Cached tables (tabmodule or picklefile) are reused when their signature
    matches the current grammar; otherwise the grammar is validated, the
    tables regenerated, and conflicts reported.  Raises YaccError when the
    specification is unusable.
    """
    global parse   # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Try to reuse previously generated tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any other failure just forces a full rebuild below
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile, "w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s", str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s", str(e))
            errors = 1

    # Set the grammar start symbol (explicit argument overrides module)
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule", prod.file, prod.line, sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n, p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable", u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar, method, debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)

        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected, state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state, id(rule), id(rejected)] = 1

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule, outputdir, signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile, signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)

    parse = parser.parse
    return parser
|
DroneMapp/powerlibs-aws-sqs-dequeue_to_api | refs/heads/master | tests/test_dequeuer.py | 1 | import pytest
def test_handle_message__patch(dequeuer, update_message):
    """An 'update' message must trigger exactly one PATCH request and be
    deleted from the queue afterwards."""
    dequeuer.handle_message(update_message)

    patch_method = dequeuer.mocked_requests_module.patch
    assert patch_method.call_count == 1
    assert update_message.delete.call_count == 1
def test_handle_message__post(dequeuer, create_message):
    """A 'create' message must trigger exactly one POST request and be
    deleted from the queue afterwards."""
    dequeuer.handle_message(create_message)

    post_method = dequeuer.mocked_requests_module.post
    assert post_method.call_count == 1
    assert create_message.delete.call_count == 1
def test_handle_message_without_necessary_key(dequeuer, born_message):
    """A message missing a required key must raise KeyError."""
    with pytest.raises(KeyError):
        dequeuer.handle_message(born_message)
|
yongshengwang/hue | refs/heads/master | desktop/core/ext-py/python-ldap-2.3.13/Lib/ldap/filter.py | 44 | """
filters.py - misc stuff for handling LDAP filter strings (see RFC2254)
See http://www.python-ldap.org/ for details.
\$Id: filter.py,v 1.8 2010/04/19 17:34:36 stroeder Exp $
Compability:
- Tested with Python 2.0+
"""
from ldap import __version__
def escape_filter_chars(assertion_value, escape_mode=0):
    """Quote special characters in an LDAP filter assertion value.

    escape_mode
      0 -- escape only the special characters listed in RFC 2254
           (backslash, '*', '(', ')' and NUL);
      1 -- additionally escape every character outside the ['0'..'z']
           range;
      2 -- escape every character.

    Raises ValueError for any other escape_mode.
    """
    if not escape_mode:
        # Backslash must be handled first so the escapes introduced
        # below are not themselves re-escaped.
        escaped = assertion_value.replace('\\', r'\5c')
        for raw, quoted in ((r'*', r'\2a'),
                            (r'(', r'\28'),
                            (r')', r'\29'),
                            ('\x00', r'\00')):
            escaped = escaped.replace(raw, quoted)
        return escaped

    if escape_mode == 1:
        chunks = []
        for c in assertion_value:
            if c < '0' or c > 'z' or c in "\\*()":
                chunks.append("\\%02x" % ord(c))
            else:
                chunks.append(c)
        return ''.join(chunks)

    if escape_mode == 2:
        return ''.join("\\%02x" % ord(c) for c in assertion_value)

    raise ValueError('escape_mode must be 0, 1 or 2.')
def filter_format(filter_template, assertion_values):
    """Fill an LDAP filter template with escaped assertion values.

    filter_template
      String containing one %s placeholder per assertion value.
    assertion_values
      List or tuple of raw values; each is passed through
      escape_filter_chars() before substitution.  Length must match the
      number of %s placeholders.
    """
    return filter_template % tuple(escape_filter_chars(value)
                                   for value in assertion_values)
|
0x7067/imagevision-bot | refs/heads/master | bot.py | 1 | #!/usr/bin/env python3
import time
import logging
import telebot
from mscomputervision import _mskey
from mscomputervision import msProcessRequest
from translator import *
# Telegram Bot API token obtained from @BotFather.
TOKEN = "" # YOUR BotFather token here
# Re-use the library's module-level logger for this bot.
logger = telebot.logger
telebot.logger.setLevel(logging.DEBUG) # Outputs debug messages to console.
# Single bot instance shared by all handlers below.
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start'])
def send_welcome(message):
bot.reply_to(message, '''🇺🇸 Welcome!\n Send me an image\nRate the bot: https://telegram.me/storebot?start=imagevisionbot\n\n
🇧🇷 Bem-vindo!\nEnvie-me uma imagem.\nAvalie o bot: https://telegram.me/storebot?start=imagevisionbot\n\n\n
Desenvolvido com pyTelegramBotAPI, ComputerVision API e Bing Translator API''')
@bot.message_handler(commands=['info'])
def send_welcome(message):
info = ('Bot ainda em desenvolvimento!\n'
'Qualquer problema ou sugestão, por favor, fale comigo!\n'
'Telegram: @moisespedro\n'
'Avalie o bot: https://telegram.me/storebot?start=imagevisionbot \n')
bot.reply_to(message, info)
@bot.message_handler(content_types=["photo"])
def answer_photo(message):
photo = bot.get_file(message.photo[-1].file_id)
# URL direction to image
photo_url = "https://api.telegram.org/file/bot{0}/{1}".format(
TOKEN, photo.file_path)
# Computer Vision parameters
params = {'visualFeatures': 'Description'}
headers = dict()
headers['Ocp-Apim-Subscription-Key'] = _mskey
headers['Content-Type'] = 'application/json'
json = {'url': photo_url}
data = None
result = msProcessRequest(json, data, headers, params)
msg_en = result['description']['captions'][0]['text']
msg_pt = translate_en_pt(msg_en)
msg_persian = translate_en_persian(msg_en)
bot.send_chat_action(message.chat.id, 'typing')
time.sleep(1)
bot.reply_to(message, "🇺🇸 " + msg_en + "\n🇧🇷 " +
msg_pt + "\n🇮🇷 " + msg_persian)
@bot.message_handler(func=lambda m: True)
def reply_all(message):
if message.chat.type == "private":
bot.reply_to(message, '''🇺🇸 Please send me an image so I can describe it!
🇧🇷 Por favor envie uma imagem para que eu possa descrevê-la!
🇮🇷 لطفا یک عکس ارسال کن تا بتونم توصیفش کنم!''')
# Start long polling; telebot blocks here servicing updates and keeps
# going through errors thanks to none_stop=True.
bot.polling(none_stop=True)

# Keep the process alive should polling ever return.
while True:
    time.sleep(5)
|
csgrad/ns-3-9-ngwmn | refs/heads/master | waf-tools/cflags.py | 17 | import Logs
import Options
import Utils
class CompilerTraits(object):
    """Abstract interface mapping generic warning/optimization/debug
    levels to the concrete flags of a particular compiler family."""

    def get_warnings_flags(self, level):
        """get_warnings_flags(level) -> list of cflags"""
        raise NotImplementedError

    def get_optimization_flags(self, level):
        """get_optimization_flags(level) -> list of cflags"""
        raise NotImplementedError

    def get_debug_flags(self, level):
        """get_debug_flags(level) -> (list of cflags, list of cppdefines)"""
        raise NotImplementedError
class GccTraits(CompilerTraits):
    """Flag mapping for GCC-compatible compilers (gcc/g++)."""

    def __init__(self):
        super(GccTraits, self).__init__()
        # cumulative list of warnings per level
        self.warnings_flags = [['-Wall'], ['-Werror'], ['-Wextra']]

    def get_warnings_flags(self, level):
        """Return the concatenation of the first *level* warning groups."""
        warnings = []
        for group in self.warnings_flags[:level]:
            warnings.extend(group)
        return warnings

    def get_optimization_flags(self, level):
        """Map optimization level to -O flags.

        Fix: levels above 3 previously fell through every branch and
        returned None (breaking append_value in detect()); out-of-range
        levels are now clamped to the nearest valid flag.
        """
        if level <= 0:
            return ['-O0']
        elif level == 1:
            return ['-O']
        elif level == 2:
            return ['-O2']
        else:
            return ['-O3']

    def get_debug_flags(self, level):
        """Map debug level to (cflags, cppdefines); negative levels are
        clamped to 0 (previously they returned None)."""
        if level <= 0:
            return (['-g0'], ['NDEBUG'])
        elif level == 1:
            return (['-g'], [])
        else:
            return (['-ggdb', '-g3'], ['_DEBUG'])
class IccTraits(CompilerTraits):
    """Flag mapping for the Intel compilers (icc/icpc)."""

    def __init__(self):
        super(IccTraits, self).__init__()
        # cumulative list of warnings per level
        # icc is _very_ verbose with -Wall, -Werror is barely achievable
        self.warnings_flags = [[], [], ['-Wall']]

    def get_warnings_flags(self, level):
        """Return the concatenation of the first *level* warning groups."""
        warnings = []
        for group in self.warnings_flags[:level]:
            warnings.extend(group)
        return warnings

    def get_optimization_flags(self, level):
        """Map optimization level to -O flags.

        Fix: levels above 3 previously fell through every branch and
        returned None; out-of-range levels are now clamped.
        """
        if level <= 0:
            return ['-O0']
        elif level == 1:
            return ['-O']
        elif level == 2:
            return ['-O2']
        else:
            return ['-O3']

    def get_debug_flags(self, level):
        """Map debug level to (cflags, cppdefines); negative levels are
        clamped to 0 (previously they returned None)."""
        if level <= 0:
            return (['-g0'], ['NDEBUG'])
        elif level == 1:
            return (['-g'], [])
        else:
            return (['-ggdb', '-g3'], ['_DEBUG'])
class MsvcTraits(CompilerTraits):
    """Flag mapping for Microsoft Visual C++."""

    def __init__(self):
        super(MsvcTraits, self).__init__()
        # cumulative list of warnings per level
        self.warnings_flags = [['/W2'], ['/WX'], ['/Wall']]

    def get_warnings_flags(self, level):
        """Return the concatenation of the first *level* warning groups."""
        warnings = []
        for group in self.warnings_flags[:level]:
            warnings.extend(group)
        return warnings

    def get_optimization_flags(self, level):
        """Map optimization level to /O flags.

        Fix: levels above 3 previously fell through every branch and
        returned None; out-of-range levels are now clamped.
        """
        if level <= 0:
            return ['/Od']
        elif level == 1:
            return []          # MSVC default optimization
        elif level == 2:
            return ['/O2']
        else:
            return ['/Ox']

    def get_debug_flags(self, level):
        """Map debug level to (cflags, cppdefines); negative levels are
        clamped to 0 (previously they returned None)."""
        if level <= 0:
            return ([], ['NDEBUG'])
        elif level == 1:
            return (['/ZI', '/RTC1'], [])
        else:
            return (['/ZI', '/RTC1'], ['_DEBUG'])
# Singleton traits objects, one per supported compiler family.
gcc = GccTraits()
icc = IccTraits()
msvc = MsvcTraits()
# how to map env['COMPILER_CC'] or env['COMPILER_CXX'] into a traits object
compiler_mapping = {
'gcc': gcc,
'g++': gcc,
'msvc': msvc,
'icc': icc,
'icpc': icc,
}
# Named build profiles selectable via --build-profile.
profiles = {
# profile name: [optimization_level, warnings_level, debug_level]
'default': [2, 1, 1],
'debug': [0, 2, 3],
'release': [3, 1, 0],
}
# Profile used when the user does not pass --build-profile.
default_profile = 'default'
def set_options(opt):
    """waf hook: register the -d/--build-profile command-line option.

    Fix: the help text previously read "not set set in the environment"
    (duplicated word).
    """
    assert default_profile in profiles

    opt.add_option('-d', '--build-profile',
                   action='store',
                   default=default_profile,
                   help=("Specify the build profile. "
                         "Build profiles control the default compilation flags"
                         " used for C/C++ programs, if CCFLAGS/CXXFLAGS are not"
                         " set in the environment. [Allowed Values: %s]"
                         % ", ".join([repr(p) for p in profiles.keys()])),
                   choices=profiles.keys(),
                   dest='build_profile')
def detect(conf):
    """waf hook: derive default CC/CXX flags from the selected build
    profile, unless the user already set CCFLAGS/CXXFLAGS."""
    cc = conf.env['COMPILER_CC'] or None
    cxx = conf.env['COMPILER_CXX'] or None
    if not (cc or cxx):
        raise Utils.WafError("neither COMPILER_CC nor COMPILER_CXX are defined; "
                             "maybe the compiler_cc or compiler_cxx tool has not been configured yet?")

    # Resolve a traits object from either compiler name.
    try:
        compiler = compiler_mapping[cc]
    except KeyError:
        try:
            compiler = compiler_mapping[cxx]
        except KeyError:
            Logs.warn("No compiler flags support for compiler %r or %r"
                      % (cc, cxx))
            return

    opt_level, warn_level, dbg_level = profiles[Options.options.build_profile]

    opt_flags = compiler.get_optimization_flags(opt_level)
    dbg_flags, dbg_defines = compiler.get_debug_flags(dbg_level)
    warn_flags = compiler.get_warnings_flags(warn_level)

    # Only apply defaults when the user has not provided flags explicitly.
    if cc and not conf.env['CCFLAGS']:
        conf.env.append_value('CCFLAGS', opt_flags)
        conf.env.append_value('CCFLAGS', dbg_flags)
        conf.env.append_value('CCFLAGS', warn_flags)
        conf.env.append_value('CCDEFINES', dbg_defines)
    if cxx and not conf.env['CXXFLAGS']:
        conf.env.append_value('CXXFLAGS', opt_flags)
        conf.env.append_value('CXXFLAGS', dbg_flags)
        conf.env.append_value('CXXFLAGS', warn_flags)
        conf.env.append_value('CXXDEFINES', dbg_defines)
|
t-yanaka/zabbix-report | refs/heads/master | customer_api/sessions/apps.py | 591 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SessionsConfig(AppConfig):
    """App configuration for django.contrib.sessions."""

    name = 'django.contrib.sessions'
    verbose_name = _("Sessions")
|
shams169/pythonProject | refs/heads/master | env/lib/python3.6/site-packages/pip/exceptions.py | 344 | """Exceptions used throughout package"""
from __future__ import absolute_import
from itertools import chain, groupby, repeat
from pip._vendor.six import iteritems
class PipError(Exception):
    """Root of the pip exception hierarchy."""


class InstallationError(PipError):
    """General error raised while installing a package."""


class UninstallationError(PipError):
    """General error raised while uninstalling a package."""


class DistributionNotFound(InstallationError):
    """No distribution could be found to satisfy a requirement."""


class RequirementsFileParseError(InstallationError):
    """A line of a requirements file could not be parsed."""


class BestVersionAlreadyInstalled(PipError):
    """The most up-to-date version of a package is already installed."""


class BadCommand(PipError):
    """virtualenv or an external command could not be found."""


class CommandError(PipError):
    """The command-line arguments are invalid."""


class PreviousBuildDirError(PipError):
    """A previous, conflicting build directory already exists."""


class InvalidWheelFilename(InstallationError):
    """A wheel file has an invalid filename."""


class UnsupportedWheel(InstallationError):
    """A wheel is not supported on this platform/interpreter."""
class HashErrors(InstallationError):
    """Multiple HashError instances rolled into one for reporting.

    Fix: ``__str__`` previously returned None (instead of a string) when
    no errors had been appended, which made ``str()`` raise TypeError;
    it now returns the empty string in that case.
    """

    def __init__(self):
        self.errors = []

    def append(self, error):
        """Add one HashError to the collection."""
        self.errors.append(error)

    def __str__(self):
        lines = []
        # Group errors of the same class under a single section heading,
        # ordered by each class's recovery difficulty (``order``).
        self.errors.sort(key=lambda e: e.order)
        for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
            lines.append(cls.head)
            lines.extend(e.body() for e in errors_of_cls)
        return '\n'.join(lines)

    def __nonzero__(self):
        # Truthy iff at least one error was collected (Python 2 protocol).
        return bool(self.errors)

    def __bool__(self):
        return self.__nonzero__()
class HashError(InstallationError):
    """
    A failure to verify a package against known-good hashes

    :cvar order: An int sorting hash exception classes by difficulty of
        recovery (lower being harder), so the user doesn't bother fretting
        about unpinned packages when he has deeper issues, like VCS
        dependencies, to deal with. Also keeps error reports in a
        deterministic order.
    :cvar head: A section heading for display above potentially many
        exceptions of this kind
    :ivar req: The InstallRequirement that triggered this error. This is
        pasted on after the exception is instantiated, because it's not
        typically available earlier.
    """

    req = None
    head = ''

    def body(self):
        """Return a summary of me for display under the heading.

        This default implementation simply prints a description of the
        triggering requirement.
        """
        return ' %s' % self._requirement_name()

    def __str__(self):
        return '%s\n%s' % (self.head, self.body())

    def _requirement_name(self):
        """Describe the requirement that triggered this error, falling back
        to a placeholder when none was attached."""
        return str(self.req) if self.req else 'unknown package'
class VcsHashUnsupported(HashError):
    """A hash was provided for a version-control-system-based requirement,
    but there is no way to hash a VCS checkout."""

    order = 0
    head = ("Can't verify hashes for these requirements because we don't "
            "have a way to hash version control repositories:")
class DirectoryUrlHashUnsupported(HashError):
    """A hash was provided for a file:// requirement that points to a
    directory, and directories cannot be hashed.

    (Fix: the original docstring was copy-pasted from VcsHashUnsupported
    and wrongly described a VCS-based requirement; the ``head`` text below
    shows the intended meaning.)
    """

    order = 1
    head = ("Can't verify hashes for these file:// requirements because they "
            "point to directories:")
class HashMissing(HashError):
    """A hash was needed for a requirement but is absent."""

    order = 2
    head = ('Hashes are required in --require-hashes mode, but they are '
            'missing from some requirements. Here is a list of those '
            'requirements along with the hashes their downloaded archives '
            'actually had. Add lines like these to your requirements files to '
            'prevent tampering. (If you did not enable --require-hashes '
            'manually, note that it turns on automatically when any package '
            'has a hash.)')

    def __init__(self, gotten_hash):
        """
        :param gotten_hash: The hash of the (possibly malicious) archive we
            just downloaded
        """
        self.gotten_hash = gotten_hash

    def body(self):
        """Render a ``<package> --hash=<alg>:<digest>`` line for this req."""
        from pip.utils.hashes import FAVORITE_HASH  # Dodge circular import.

        # Prefer the original URL seen in the requirements file over the
        # package name, so the output can be copied straight back into a
        # requirements file.
        package = None
        if self.req:
            if self.req.original_link:
                package = self.req.original_link
            else:
                # In case someone feeds something downright stupid
                # to InstallRequirement's constructor.
                package = getattr(self.req, 'req', None)
        return ' %s --hash=%s:%s' % (package or 'unknown package',
                                     FAVORITE_HASH,
                                     self.gotten_hash)
class HashUnpinned(HashError):
    """A requirement had a hash specified but was not pinned to a specific
    version."""

    # Easier to recover from than missing hashes: the user just adds
    # an ``==version`` pin.
    order = 3
    head = ('In --require-hashes mode, all requirements must have their '
            'versions pinned with ==. These do not:')
class HashMismatch(HashError):
    """
    Distribution file hash values don't match.

    :ivar package_name: The name of the package that triggered the hash
        mismatch. Feel free to write to this after the exception is raise to
        improve its error message.
    """
    order = 4
    head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
            'FILE. If you have updated the package versions, please update '
            'the hashes. Otherwise, examine the package contents carefully; '
            'someone may have tampered with them.')

    def __init__(self, allowed, gots):
        """
        :param allowed: A dict of algorithm names pointing to lists of allowed
            hex digests
        :param gots: A dict of algorithm names pointing to hashes we
            actually got from the files under suspicion
        """
        self.allowed = allowed
        self.gots = gots

    def body(self):
        """Return the requirement name plus the expected/got hash table."""
        return ' %s:\n%s' % (self._requirement_name(),
                             self._hash_comparison())

    def _hash_comparison(self):
        """
        Return a comparison of actual and expected hash values.

        Example::

            Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
            or 123451234512345123451234512345123451234512345
            Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
        """
        def hash_then_or(hash_name):
            # For now, all the decent hashes have 6-char names, so we can get
            # away with hard-coding space literals.
            return chain([hash_name], repeat(' or'))

        lines = []
        for hash_name, expecteds in iteritems(self.allowed):
            prefix = hash_then_or(hash_name)
            lines.extend((' Expected %s %s' % (next(prefix), e))
                         for e in expecteds)
            lines.append(' Got %s\n' %
                         self.gots[hash_name].hexdigest())
            # Fix: removed a dead ``prefix = ' or'`` assignment that used to
            # sit here; ``prefix`` is rebound by hash_then_or() on every
            # iteration and never read after the loop, so the store had no
            # effect.
        return '\n'.join(lines)
# Raised when the running interpreter does not satisfy a distribution's
# Requires-Python metadata constraint.
class UnsupportedPythonVersion(InstallationError):
    """Unsupported python version according to Requires-Python package
    metadata."""
|
tritoanst/ccxt | refs/heads/master | python/ccxt/huobi.py | 1 | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class huobi (Exchange):
    """ccxt adapter for the legacy Huobi HTTP API (v3).

    Public market data goes through the ``staticmarket``/``usdmarket``
    endpoint groups; authenticated trading goes through ``trade``.
    """

    def describe(self):
        # Static exchange metadata merged over the base Exchange defaults:
        # identifiers, rate limit, endpoint URLs, API method tables,
        # kline period codes and the (hard-coded) market list.
        return self.deep_extend(super(huobi, self).describe(), {
            'id': 'huobi',
            'name': 'Huobi',
            'countries': 'CN',
            'rateLimit': 2000,
            'version': 'v3',
            'hasCORS': False,
            'hasFetchOHLCV': True,
            # Huobi encodes kline periods as zero-padded numeric strings.
            'timeframes': {
                '1m': '001',
                '5m': '005',
                '15m': '015',
                '30m': '030',
                '1h': '060',
                '1d': '100',
                '1w': '200',
                '1M': '300',
                '1y': '400',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766569-15aa7b9a-5edd-11e7-9e7f-44791f4ee49c.jpg',
                'api': 'http://api.huobi.com',
                'www': 'https://www.huobi.com',
                'doc': 'https://github.com/huobiapi/API_Docs_en/wiki',
            },
            'api': {
                # Public endpoints for CNY-quoted markets.
                'staticmarket': {
                    'get': [
                        '{id}_kline_{period}',
                        'ticker_{id}',
                        'depth_{id}',
                        'depth_{id}_{length}',
                        'detail_{id}',
                    ],
                },
                # Public endpoints for USD-quoted markets (same shapes).
                'usdmarket': {
                    'get': [
                        '{id}_kline_{period}',
                        'ticker_{id}',
                        'depth_{id}',
                        'depth_{id}_{length}',
                        'detail_{id}',
                    ],
                },
                # Authenticated (signed) endpoints.
                'trade': {
                    'post': [
                        'get_account_info',
                        'get_orders',
                        'order_info',
                        'buy',
                        'sell',
                        'buy_market',
                        'sell_market',
                        'cancel_order',
                        'get_new_deal_orders',
                        'get_order_id_by_trade_id',
                        'withdraw_coin',
                        'cancel_withdraw_coin',
                        'get_withdraw_coin_result',
                        'transfer',
                        'loan',
                        'repayment',
                        'get_loan_available',
                        'get_loans',
                    ],
                },
            },
            'markets': {
                'BTC/CNY': {'id': 'btc', 'symbol': 'BTC/CNY', 'base': 'BTC', 'quote': 'CNY', 'type': 'staticmarket', 'coinType': 1},
                'LTC/CNY': {'id': 'ltc', 'symbol': 'LTC/CNY', 'base': 'LTC', 'quote': 'CNY', 'type': 'staticmarket', 'coinType': 2},
                # 'BTC/USD': {'id': 'btc', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'type': 'usdmarket', 'coinType': 1},
            },
        })

    def fetch_balance(self, params={}):
        """Fetch account balances and map them into ccxt free/used/total."""
        balances = self.tradePostGetAccountInfo()
        result = {'info': balances}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            lowercase = currency.lower()
            account = self.account()
            # Huobi reports per-currency display fields; loans count as
            # "used" funds on top of the frozen amount.
            available = 'available_' + lowercase + '_display'
            frozen = 'frozen_' + lowercase + '_display'
            loan = 'loan_' + lowercase + '_display'
            if available in balances:
                account['free'] = float(balances[available])
            if frozen in balances:
                account['used'] = float(balances[frozen])
            if loan in balances:
                account['used'] = self.sum(account['used'], float(balances[loan]))
            account['total'] = self.sum(account['free'], account['used'])
            result[currency] = account
        return self.parse_balance(result)

    def fetch_order_book(self, symbol, params={}):
        """Fetch the order book; endpoint group depends on the market type."""
        market = self.market(symbol)
        # e.g. 'staticmarketGetDepthId' — method name built per market type.
        method = market['type'] + 'GetDepthId'
        orderbook = getattr(self, method)(self.extend({'id': market['id']}, params))
        return self.parse_order_book(orderbook)

    def fetch_ticker(self, symbol, params={}):
        """Fetch and normalize the 24h ticker for *symbol*."""
        market = self.market(symbol)
        method = market['type'] + 'GetTickerId'
        response = getattr(self, method)(self.extend({
            'id': market['id'],
        }, params))
        ticker = response['ticker']
        # Server time is in seconds; ccxt uses milliseconds.
        timestamp = int(response['time']) * 1000
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'buy'),
            'ask': self.safe_float(ticker, 'sell'),
            'vwap': None,
            'open': self.safe_float(ticker, 'open'),
            'close': None,
            'first': None,
            'last': self.safe_float(ticker, 'last'),
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': self.safe_float(ticker, 'vol'),
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Convert one raw public trade into the unified ccxt trade dict."""
        # NOTE(review): 'ts' is used as a millisecond timestamp here —
        # confirm the unit against the Huobi detail endpoint.
        timestamp = trade['ts']
        return {
            'info': trade,
            'id': str(trade['id']),
            'order': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': trade['direction'],
            'price': trade['price'],
            'amount': trade['amount'],
        }

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades from the market 'detail' endpoint."""
        market = self.market(symbol)
        method = market['type'] + 'GetDetailId'
        response = getattr(self, method)(self.extend({
            'id': market['id'],
        }, params))
        return self.parse_trades(response['trades'], market, since, limit)

    def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
        # not implemented yet
        # NOTE(review): element [5] is skipped and [6] is returned as the
        # last field — verify against Huobi's kline row layout.
        return [
            ohlcv[0],
            ohlcv[1],
            ohlcv[2],
            ohlcv[3],
            ohlcv[4],
            ohlcv[6],
        ]

    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch raw kline data (returned unparsed — see commented line)."""
        market = self.market(symbol)
        method = market['type'] + 'GetIdKlinePeriod'
        ohlcvs = getattr(self, method)(self.extend({
            'id': market['id'],
            'period': self.timeframes[timeframe],
        }, params))
        return ohlcvs
        # return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit or market order; market orders use Buy/SellMarket."""
        market = self.market(symbol)
        method = 'tradePost' + self.capitalize(side)
        order = {
            'coin_type': market['coinType'],
            'amount': amount,
            'market': market['quote'].lower(),
        }
        if type == 'limit':
            order['price'] = price
        else:
            # e.g. 'tradePostBuyMarket' for market orders.
            method += self.capitalize(type)
        response = getattr(self, method)(self.extend(order, params))
        return {
            'info': response,
            'id': response['id'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        # The symbol argument is accepted for interface parity but unused.
        return self.tradePostCancelOrder({'id': id})

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL/body/headers for a request; signs 'trade' calls."""
        url = self.urls['api']
        if api == 'trade':
            self.check_required_credentials()
            url += '/api' + self.version
            query = self.keysort(self.extend({
                'method': path,
                'access_key': self.apiKey,
                'created': self.nonce(),
            }, params))
            queryString = self.urlencode(self.omit(query, 'market'))
            # secret key must be appended to the query before signing
            queryString += '&secret_key=' + self.secret
            query['sign'] = self.hash(self.encode(queryString))
            body = self.urlencode(query)
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        else:
            # Public endpoints are plain static JSON documents.
            url += '/' + api + '/' + self.implode_params(path, params) + '_json.js'
            query = self.omit(params, self.extract_params(path))
            if query:
                url += '?' + self.urlencode(query)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def request(self, path, api='trade', method='GET', params={}, headers=None, body=None):
        """Issue the request and raise ExchangeError on API-level failures."""
        response = self.fetch2(path, api, method, params, headers, body)
        if 'status' in response:
            if response['status'] == 'error':
                raise ExchangeError(self.id + ' ' + self.json(response))
        # A 'code' field in the response also signals an error condition.
        if 'code' in response:
            raise ExchangeError(self.id + ' ' + self.json(response))
        return response
|
jaromil/faircoin2 | refs/heads/faircoin2 | qa/rpc-tests/merkle_blocks.py | 13 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
    """Exercise gettxoutproof/verifytxoutproof (merkle block) RPCs."""

    def setup_chain(self):
        # Start every run from a clean 4-node datadir.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
        # Node 3 keeps a full transaction index so proofs can be produced
        # even for fully-spent transactions.
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)

        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        print "Mining blocks..."
        self.nodes[0].generate(105)
        self.sync_all()

        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Create two transactions spending node 0's coinbase outputs.
        node0utxos = self.nodes[0].listunspent(1)
        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 50})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 50})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
        # No proof is available while the transactions are still unconfirmed.
        assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])

        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()

        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])

        # Proofs for one or both confirmed txs verify back to the txids.
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)

        # Spend one of the two outputs so it disappears from the UTXO set.
        txin_spent = self.nodes[1].listunspent(1).pop()
        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 50})
        self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
        self.nodes[0].generate(1)
        self.sync_all()

        txid_spent = txin_spent["txid"]
        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2

        # We cant find the block from a fully-spent tx
        assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
        # ...but we can if we specify the block
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # ...or if the first tx is not fully-spent
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
        try:
            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        except JSONRPCException:
            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
        # ...or if we have a -txindex
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
# Script entry point: run the test directly when invoked from the shell.
if __name__ == '__main__':
    MerkleBlockTest().main()
|
jbhuang0604/CF2 | refs/heads/master | external/matconvnet/utils/import-caffe.py | 4 | #! /usr/bin/python
# file: import-caffe.py
# brief: Caffe importer for DagNN and SimpleNN
# author: Karel Lenc and Andrea Vedaldi
# Requires Google Protobuf for Python and SciPy
import sys
import os
import argparse
import code
import re
import numpy as np
from math import floor, ceil
import numpy
from numpy import array
import scipy
import scipy.io
import scipy.misc
import google.protobuf.text_format
from ast import literal_eval as make_tuple
from layers import *
# --------------------------------------------------------------------
# Check NumPy version
# --------------------------------------------------------------------
def versiontuple(version):
    """Parse a dotted version string such as "1.7.0" into a tuple of ints,
    suitable for lexicographic comparison."""
    return tuple(int(component) for component in version.split("."))
# Abort early on numpy versions too old for the importer.
min_numpy_version = "1.7.0"
if versiontuple(numpy.version.version) < versiontuple(min_numpy_version):
    print 'Unsupported numpy version ({}), must be >= {}'.format(numpy.version.version,
                                                                 min_numpy_version)
    # NOTE(review): exits with status 0 even though this is a failure —
    # consider a nonzero exit code.
    sys.exit(0)
# --------------------------------------------------------------------
# Helper functions
# --------------------------------------------------------------------
def find(seq, name):
    """Return the first element of *seq* whose ``name`` attribute equals
    *name*, or None when no element matches."""
    for candidate in seq:
        if candidate.name == name:
            return candidate
    return None
def blobproto_to_array(blob):
    """Convert a Caffe Blob to a numpy array.

    It also reverses the order of all dimensions to [width, height,
    channels, instance].
    """
    # Newer protos carry an explicit shape message; older ones only have
    # the legacy num/channels/height/width fields, used as a fallback when
    # the shape is absent or empty.
    dims = tolist(blob.shape.dim) if hasattr(blob, 'shape') else []
    if not dims:
        dims = [blob.num, blob.channels, blob.height, blob.width]
    data = np.array(blob.data, dtype='float32')
    return data.reshape(dims).transpose()
def dict_to_struct_array(d):
    """Convert dict *d* into a length-1 numpy structured array whose fields
    (dtype=object) are the dict keys; empty dicts map to an empty array."""
    if not d:
        return np.zeros((0,))
    field_spec = [(key, object) for key in d.keys()]
    struct = np.empty((1,), dtype=field_spec)
    for key, value in d.items():
        struct[key][0] = value
    return struct
def tolist(x):
    "Convert x to a Python list. x can be a Protobuf container, a list or tuple, or scalar"
    # Protobuf repeated-scalar containers are checked first because they are
    # neither lists nor tuples; scalars are wrapped in a singleton list.
    if isinstance(x, google.protobuf.internal.containers.RepeatedScalarFieldContainer):
        return list(x)
    if isinstance(x, (list, tuple)):
        return list(x)
    return [x]
def escape(name):
    """Replace every '-' in *name* with '_' (dashes are not legal in
    MATLAB identifiers)."""
    return '_'.join(name.split('-'))
# --------------------------------------------------------------------
# Parse options
# --------------------------------------------------------------------

# Command-line interface: positional proto/output files plus flags that
# control preprocessing, simplification and the Caffe proto dialect.
parser = argparse.ArgumentParser(description='Convert a Caffe CNN into a MATLAB structure.')
parser.add_argument('caffe_proto',
                    type=argparse.FileType('rb'),
                    help='The Caffe CNN parameter file (ASCII .proto)')
parser.add_argument('--caffe-data',
                    type=argparse.FileType('rb'),
                    help='The Caffe CNN data file (binary .proto)')
parser.add_argument('output',
                    type=argparse.FileType('w'),
                    help='Output MATLAB file')
parser.add_argument('--full-image-size',
                    type=str,
                    nargs='?',
                    default=None,
                    help='Size of the full image')
parser.add_argument('--average-image',
                    type=argparse.FileType('rb'),
                    nargs='?',
                    help='Average image')
parser.add_argument('--average-value',
                    type=str,
                    nargs='?',
                    default=None,
                    help='Average image value')
parser.add_argument('--synsets',
                    type=argparse.FileType('r'),
                    nargs='?',
                    help='Synset file (ASCII)')
parser.add_argument('--class-names',
                    type=str,
                    nargs='?',
                    help='Class names')
parser.add_argument('--caffe-variant',
                    type=str,
                    nargs='?',
                    default='caffe',
                    help='Variant of Caffe software (use ? to get a list)')
parser.add_argument('--transpose',
                    dest='transpose',
                    action='store_true',
                    help='Transpose CNN in a sane MATLAB format')
parser.add_argument('--no-transpose',
                    dest='transpose',
                    action='store_false',
                    help='Do not transpose CNN')
# NOTE(review): the doubled quotes in the help string below ('' '') do NOT
# render literal quotes in Python source — they are adjacent-string
# concatenation; the user sees "rgb or bgr". Kept byte-identical here.
parser.add_argument('--color-format',
                    dest='color_format',
                    default='bgr',
                    action='store',
                    help='Set the color format used by the network: ''rgb'' or ''bgr'' (default)')
parser.add_argument('--preproc',
                    type=str,
                    nargs='?',
                    default='caffe',
                    help='Variant of image preprocessing to use (use ? to get a list)')
parser.add_argument('--simplify',
                    dest='simplify',
                    action='store_true',
                    help='Apply simplifications')
parser.add_argument('--no-simplify',
                    dest='simplify',
                    action='store_false',
                    help='Do not apply simplifications')
parser.add_argument('--remove-dropout',
                    dest='remove_dropout',
                    action='store_true',
                    help='Remove dropout layers')
parser.add_argument('--no-remove-dropout',
                    dest='remove_dropout',
                    action='store_false',
                    help='Do not remove dropout layers')
parser.add_argument('--remove-loss',
                    dest='remove_loss',
                    action='store_true',
                    help='Remove loss layers')
parser.add_argument('--no-remove-loss',
                    dest='remove_loss',
                    action='store_false',
                    help='Do not remove loss layers')
parser.add_argument('--append-softmax',
                    dest='append_softmax',
                    action='append',
                    default=[],
                    help='Add a softmax layer after the specified layer')
parser.add_argument('--output-format',
                    dest='output_format',
                    default='dagnn',
                    help='Either ''dagnn'' or ''simplenn''')
parser.set_defaults(transpose=True)
parser.set_defaults(remove_dropout=False)
parser.set_defaults(remove_loss=False)
parser.set_defaults(simplify=True)
args = parser.parse_args()

# Select the generated protobuf bindings matching the chosen Caffe dialect.
# NOTE(review): "varaint" typo in the user-facing message below — left
# byte-identical here; fix separately if desired.
print 'Caffe varaint set to', args.caffe_variant
if args.caffe_variant == 'vgg-caffe':
    import proto.vgg_caffe_pb2 as caffe_pb2
elif args.caffe_variant == 'caffe-old':
    import proto.caffe_old_pb2 as caffe_pb2
elif args.caffe_variant == 'caffe':
    import proto.caffe_pb2 as caffe_pb2
elif args.caffe_variant == 'caffe_0115':
    import proto.caffe_0115_pb2 as caffe_pb2
elif args.caffe_variant == 'caffe_6e3916':
    import proto.caffe_6e3916_pb2 as caffe_pb2
elif args.caffe_variant == 'caffe_b590f1d':
    import proto.caffe_b590f1d_pb2 as caffe_pb2
elif args.caffe_variant == 'caffe_fastrcnn':
    import proto.caffe_fastrcnn_pb2 as caffe_pb2
elif args.caffe_variant == '?':
    print 'Supported variants: caffe, vgg-caffe, caffe-old, caffe_0115, caffe_6e3916, caffe_b590f1d, caffe_fastrcnn'
    sys.exit(0)
else:
    print 'Unknown Caffe variant', args.caffe_variant
    sys.exit(1)

# Validate the preprocessing variant.
if args.preproc == '?':
    print 'Preprocessing variants: caffe, vgg, fcn'
    sys.exit(0)
if args.preproc not in ['caffe', 'vgg-caffe', 'fcn']:
    print 'Unknown preprocessing variant', args.preproc
    sys.exit(1)
# --------------------------------------------------------------------
# Helper functions
# --------------------------------------------------------------------
def keyboard(banner=None):
    ''' Function that mimics the matlab keyboard command '''
    # use exception trick to pick up the current frame:
    # `raise None` raises a TypeError; the bare except grabs the traceback
    # whose frame's parent (f_back) is our caller's frame.
    try:
        raise None
    except:
        frame = sys.exc_info()[2].tb_frame.f_back
    print "# Use quit() to exit :) Happy debugging!"
    # evaluate commands in current namespace: merge the caller's globals
    # and locals so the interactive console sees everything in scope there.
    namespace = frame.f_globals.copy()
    namespace.update(frame.f_locals)
    try:
        code.interact(banner=banner, local=namespace)
    except SystemExit:
        # Swallow quit()/exit() from the console so the program continues.
        return
def bilinear_interpolate(im, x, y):
    """Sample image *im* at fractional coordinates with bilinear weights.

    im is an (H, W, C) array; x and y are 2-D arrays of column and row
    coordinates.  Returns an array of shape x.shape + (C,).  Coordinates
    outside the image are clamped to the border.
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Integer corner indices surrounding each sample, clamped to the image.
    x0 = np.clip(np.floor(x).astype(int), 0, im.shape[1] - 1)
    x1 = np.clip(np.floor(x).astype(int) + 1, 0, im.shape[1] - 1)
    y0 = np.clip(np.floor(y).astype(int), 0, im.shape[0] - 1)
    y1 = np.clip(np.floor(y).astype(int) + 1, 0, im.shape[0] - 1)

    # Bilinear weights (computed from the clamped corners, matching the
    # border behaviour of the original implementation), broadcast over the
    # channel axis.
    shape3 = (x.shape[0], x.shape[1], 1)
    wa = ((1 - x + x0) * (1 - y + y0)).reshape(shape3)
    wb = ((1 - x + x0) * (y - y0)).reshape(shape3)
    wc = ((x - x0) * (1 - y + y0)).reshape(shape3)
    wd = ((x - x0) * (y - y0)).reshape(shape3)

    return (wa * im[y0, x0] + wb * im[y1, x0] +
            wc * im[y0, x1] + wd * im[y1, x1])
# Get the parameters for a layer from Caffe's proto entries
def getopts(layer, name):
    """Return the layer-specific parameter message called *name*.

    Older Caffe proto formats did not have sub-structures for layer
    specific parameters but mixed everything up; when the attribute is
    missing we fall back to the layer message itself.
    """
    if not hasattr(layer, name):
        return layer
    return getattr(layer, name)
# --------------------------------------------------------------------
# Load average image
# --------------------------------------------------------------------

# The mean image can come from a .binaryproto blob, a .mat file, or a
# constant RGB triple (--average-value); the constant case never needs
# resizing since it broadcasts.
average_image = None
resize_average_image = False
if args.average_image:
    print 'Loading average image from {}'.format(args.average_image.name)
    resize_average_image = True  # in case different from data size
    avgim_nm, avgim_ext = os.path.splitext(args.average_image.name)
    if avgim_ext == '.binaryproto':
        blob=caffe_pb2.BlobProto()
        blob.MergeFromString(args.average_image.read())
        average_image = blobproto_to_array(blob).astype('float32')
        # Drop the singleton instance dimension.
        average_image = np.squeeze(average_image,3)
        if args.transpose and average_image is not None:
            average_image = average_image.transpose([1,0,2])
            average_image = average_image[:,:,: : -1] # to RGB
    elif avgim_ext == '.mat':
        avgim_data = scipy.io.loadmat(args.average_image)
        average_image = avgim_data['mean_img']
    else:
        print 'Unsupported average image format {}'.format(avgim_ext)

if args.average_value:
    rgb = make_tuple(args.average_value)
    print 'Using average image value', rgb
    # this will be resized later to a constant image
    average_image = np.array(rgb,dtype=float).reshape(1,1,3,order='F')
    resize_average_image = False
# --------------------------------------------------------------------
# Load ImageNet synseths (if any)
# --------------------------------------------------------------------

# Parse "nXXXXXXXX description" lines into parallel wnid/name lists;
# --class-names overrides both with an explicit tuple of names.
synsets_wnid=None
synsets_name=None

if args.synsets:
    print 'Loading synsets from {}'.format(args.synsets.name)
    r=re.compile('(?P<wnid>n[0-9]{8}?) (?P<name>.*)')
    synsets_wnid=[]
    synsets_name=[]
    for line in args.synsets:
        match = r.match(line)
        synsets_wnid.append(match.group('wnid'))
        synsets_name.append(match.group('name'))

if args.class_names:
    synsets_wnid=list(make_tuple(args.class_names))
    synsets_name=synsets_wnid
# --------------------------------------------------------------------
# Load layers
# --------------------------------------------------------------------

# Caffe stores the network structure and data into two different files
# We load them both and merge them into a single MATLAB structure

net=caffe_pb2.NetParameter()
data=caffe_pb2.NetParameter()
# The structure proto is ASCII; the parameter proto is binary.
print 'Loading Caffe CNN structure from {}'.format(args.caffe_proto.name)
google.protobuf.text_format.Merge(args.caffe_proto.read(), net)
if args.caffe_data:
    print 'Loading Caffe CNN parameters from {}'.format(args.caffe_data.name)
    data.MergeFromString(args.caffe_data.read())
# --------------------------------------------------------------------
# Read layers in a CaffeModel object
# --------------------------------------------------------------------

# Newer Caffe dialects renamed NetParameter.layers to NetParameter.layer.
if args.caffe_variant in ['caffe_b590f1d', 'caffe_fastrcnn']:
    layers_list = net.layer
    data_layers_list = data.layer
else:
    layers_list = net.layers
    data_layers_list = data.layers

print 'Converting {} layers'.format(len(layers_list))

# Translate every proto layer into the matching Caffe* wrapper class and
# attach its parameter blobs from the data proto.
cmodel = CaffeModel()
for layer in layers_list:
    # Depending on how old the proto-buf, the top and bottom parameters
    # are found at a different level than the others
    top = layer.top
    bottom = layer.bottom
    if args.caffe_variant in ['vgg-caffe', 'caffe-old']:
        layer = layer.layer

    # get the type of layer
    # depending on the Caffe variant, this is a string or a numeric
    # ID, which we convert back to a string
    ltype = layer.type
    if not isinstance(ltype, basestring): ltype = layers_type[ltype]
    print 'Added layer \'{}\' ({})'.format(ltype, layer.name)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if ltype in ['conv', 'deconvolution', 'Convolution', 'Deconvolution']:
        opts = getopts(layer, 'convolution_param')
        # Field names changed across proto revisions; probe for both.
        if hasattr(opts, 'kernelsize'):
            kernel_size = opts.kernelsize
        else:
            kernel_size = opts.kernel_size
        if hasattr(opts, 'bias_term'):
            bias_term = opts.bias_term
        else:
            bias_term = True
        if hasattr(opts, 'dilation'):
            dilation = opts.dilation
        else:
            dilation = 1
        if ltype in ['conv', 'Convolution']:
            clayer = CaffeConv(layer.name, bottom, top,
                               kernel_size = tolist(kernel_size),
                               bias_term = bias_term,
                               num_output = opts.num_output,
                               group = opts.group,
                               dilation = dilation,
                               stride = tolist(opts.stride),
                               pad = tolist(opts.pad))
        else:
            clayer = CaffeDeconvolution(layer.name, bottom, top,
                                        kernel_size = tolist(kernel_size),
                                        bias_term = bias_term,
                                        num_output = opts.num_output,
                                        group = opts.group,
                                        dilation = dilation,
                                        stride = tolist(opts.stride),
                                        pad = tolist(opts.pad))
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['innerproduct', 'inner_product', 'InnerProduct']:
        opts = getopts(layer, 'inner_product_param')
        if hasattr(opts, 'bias_term'):
            bias_term = opts.bias_term
        else:
            bias_term = True
        if hasattr(opts, 'axis'):
            axis = opts.axis
        else:
            axis = 1
        clayer = CaffeInnerProduct(layer.name, bottom, top,
                                   num_output = opts.num_output,
                                   bias_term = bias_term,
                                   axis = axis)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['relu', 'ReLU']:
        clayer = CaffeReLU(layer.name, bottom, top)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['crop', 'Crop']:
        clayer = CaffeCrop(layer.name, bottom, top)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['lrn', 'LRN']:
        opts = getopts(layer, 'lrn_param')
        local_size = float(opts.local_size)
        alpha = float(opts.alpha)
        beta = float(opts.beta)
        kappa = opts.k if hasattr(opts,'k') else 1.
        regions = ['across_channels', 'within_channel']
        if hasattr(opts, 'norm_region'):
            norm_region = opts.norm_region
        else:
            norm_region = 0
        clayer = CaffeLRN(layer.name, bottom, top,
                          local_size = local_size,
                          alpha = alpha,
                          beta = beta,
                          norm_region = regions[norm_region],
                          kappa = kappa)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['pool', 'Pooling']:
        opts = getopts(layer, 'pooling_param')
        if hasattr(layer, 'kernelsize'):
            kernel_size = opts.kernelsize
        else:
            kernel_size = opts.kernel_size
        clayer = CaffePooling(layer.name, bottom, top,
                              method = ['max', 'avg'][opts.pool],
                              pad = tolist(opts.pad),
                              kernel_size = tolist(kernel_size),
                              stride = tolist(opts.stride))
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['dropout', 'Dropout']:
        opts = getopts(layer, 'dropout_param')
        clayer = CaffeDropout(layer.name, bottom, top,
                              opts.dropout_ratio)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['softmax', 'Softmax']:
        clayer = CaffeSoftMax(layer.name, bottom, top)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['softmax_loss', 'SoftmaxLoss']:
        clayer = CaffeSoftMaxLoss(layer.name, bottom, top)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['concat', 'Concat']:
        opts = getopts(layer, 'concat_param')
        clayer = CaffeConcat(layer.name, bottom, top,
                             3 - opts.concat_dim)  # todo: depreceted in recent Caffes
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['Scale']:
        opts = getopts(layer, 'scale_param')
        clayer = CaffeScale(layer.name, bottom, top,
                            axis = opts.axis,
                            num_axes = opts.num_axes,
                            bias_term = opts.bias_term)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['BatchNorm']:
        opts = getopts(layer, 'batch_norm_param')
        clayer = CaffeBatchNorm(layer.name, bottom, top,
                                use_global_stats = opts.use_global_stats,
                                moving_average_fraction = opts.moving_average_fraction,
                                eps = opts.eps)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['eltwise', 'Eltwise']:
        opts = getopts(layer, 'eltwise_param')
        operations = ['prod', 'sum', 'max']
        clayer = CaffeEltWise(layer.name, bottom, top,
                              operation = operations[opts.operation],
                              coeff = opts.coeff,
                              stable_prod_grad = opts.stable_prod_grad)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['data', 'Data']:
        # NOTE(review): this branch reads 'eltwise_param' and forwards
        # eltwise-style arguments to CaffeData — it looks copy-pasted from
        # the Eltwise branch above; verify against the CaffeData signature.
        opts = getopts(layer, 'eltwise_param')
        operations = ['prod', 'sum', 'max']
        clayer = CaffeData(layer.name, bottom, top,
                           operation = operations[opts.operation],
                           coeff = opts.coeff,
                           stable_prod_grad = opts.stable_prod_grad)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['roipooling', 'ROIPooling']:
        opts = getopts(layer, 'roi_pooling_param')
        clayer = CaffeROIPooling(layer.name, bottom, top,
                                 pooled_w = opts.pooled_w,
                                 pooled_h = opts.pooled_h,
                                 spatial_scale = opts.spatial_scale)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif ltype in ['accuracy', 'Accuracy']:
        continue
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    else:
        print 'Warning: unknown layer type', ltype
        continue

    if clayer is not None:
        clayer.model = cmodel
        cmodel.addLayer(clayer)
        # Fill parameters: locate the matching layer in the data proto and
        # copy its blobs into the wrapper.
        for dlayer in data_layers_list:
            if args.caffe_variant in ['vgg-caffe', 'caffe-old']:
                dlayer = dlayer.layer
            if dlayer.name == layer.name:
                for i, blob in enumerate(dlayer.blobs):
                    blob = blobproto_to_array(blob).astype('float32')
                    print ' + parameter \'%s\' <-- blob%s' % (clayer.params[i], blob.shape)
                    clayer.setBlob(cmodel, i, blob)
# --------------------------------------------------------------------
# Get the size of the network variables
# --------------------------------------------------------------------

# Get the sizes of the network inputs

for i, inputVarName in enumerate(net.input):
    if hasattr(net, 'input_shape') and net.input_shape:
        shape = net.input_shape[i].dim._values
        # ensure that shape is a list of dimensions
        if isinstance(shape, caffe_pb2.BlobShape):
            # shape.tolist() may not preserve the order of dimensions
            shape = shape.dim._values
        # Reverse to MATLAB-style [W H C N] ordering.
        shape.reverse()
    else:
        # Legacy flat input_dim list: 4 consecutive entries per input,
        # read back-to-front for the same reversed ordering.
        shape = [net.input_dim[k + 4*i] for k in [3,2,1,0]]
    cmodel.vars[inputVarName].shape = shape
    print ' c- Input \'{}\' is {}'.format(inputVarName, shape)
# --------------------------------------------------------------------
# Sanitize
# --------------------------------------------------------------------

# Rename layers, parametrs, and variables if they contain symbols that
# are incompatible with MatConvNet. Appending 'x' guarantees uniqueness
# after escaping.

layerNames = cmodel.layers.keys()
for name in layerNames:
    ename = escape(name)
    if ename == name: continue
    # ensure unique
    while cmodel.layers.has_key(ename): ename = ename + 'x'
    print "Renaming layer {} to {}".format(name, ename)
    cmodel.renameLayer(name, ename)

varNames = cmodel.vars.keys()
for name in varNames:
    ename = escape(name)
    if ename == name: continue
    while cmodel.vars.has_key(ename): ename = ename + 'x'
    print "Renaming variable {} to {}".format(name, ename)
    cmodel.renameVar(name, ename)

parNames = cmodel.params.keys()
for name in parNames:
    ename = escape(name)
    if ename == name: continue
    while cmodel.params.has_key(ename): ename = ename + 'x'
    print "Renaming parameter {} to {}".format(name, ename)
    cmodel.renameParam(name, ename)

# Split in-place layers. MatConvNet handles such optimizations
# differently: a layer whose output variable equals its input variable is
# given a fresh output name from this layer onward.
# NOTE(review): `len(layer.inputs[0]) >= 1` measures the length of the
# first input NAME (a string), not the number of inputs — possibly meant
# `len(layer.inputs) >= 1`; behavior coincides for non-empty names.
for layer in cmodel.layers.itervalues():
    if len(layer.inputs[0]) >= 1 and \
       len(layer.outputs[0]) >= 1 and \
       layer.inputs[0] == layer.outputs[0]:
        name = layer.inputs[0]
        ename = layer.inputs[0]
        while cmodel.vars.has_key(ename): ename = ename + 'x'
        print "Splitting in-place layer: renaming variable {} to {}".format(name, ename)
        cmodel.addVar(ename)
        cmodel.renameVar(name, ename, afterLayer=layer.name)
        layer.inputs[0] = name
        layer.outputs[0] = ename
# --------------------------------------------------------------------
# Get variable sizes
# --------------------------------------------------------------------

# Get the size of all other variables. This information is required
# for some special layer conversions:
#
# * For Pooling layers, fix incompatibility between padding in
#   MatConvNet and Caffe.
#
# * For Crop layers (in FCNs), determine the amount of crop (in Caffe
#   this is done at run time).

# Unflatten ROIPooling. ROIPooling will produce a H x W array instead
# of a stacked version of the same. The reshape operation below will
# convert the following InnerProduct layers in corresponding
# convolitions. This works well with transposition later.

layerNames = cmodel.layers.keys()
for name in layerNames:
    layer = cmodel.layers[name]
    if type(layer) is CaffeROIPooling:
        # Only safe when every consumer of the ROIPooling output is an
        # InnerProduct (which reshape() can turn into a convolution).
        childrenNames = cmodel.getLayersWithInput(layer.outputs[0])
        for childName in childrenNames:
            child = cmodel.layers[childName]
            if type(child) is not CaffeInnerProduct:
                print "Error: cannot unflatten ROIPooling if this is not followed only InnerProduct layers"
                sys.exit(1)
        layer.flatten = False

# Propagate shapes through the network.
cmodel.reshape()
# --------------------------------------------------------------------
# Edit
# --------------------------------------------------------------------
# Remove dropout
if args.remove_dropout:
layerNames = cmodel.layers.keys()
for name in layerNames:
layer = cmodel.layers[name]
if type(layer) is CaffeDropout:
print "Removing dropout layer ", name
cmodel.renameVar(layer.outputs[0], layer.inputs[0])
cmodel.removeLayer(name)
# Remove loss
if args.remove_loss:
layerNames = cmodel.layers.keys()
for name in layerNames:
layer = cmodel.layers[name]
if type(layer) is CaffeSoftMaxLoss:
print "Removing loss layer ", name
cmodel.renameVar(layer.outputs[0], layer.inputs[0])
cmodel.removeLayer(name)
# Append softmax
for i, name in enumerate(args.append_softmax):
# search for the layer to append SoftMax to
if not cmodel.layers.has_key(name):
print 'Cannot append softmax to layer {} as no such layer could be found'.format(name)
sys.exit(1)
if len(args.append_softmax) > 1:
layerName = 'softmax' + (l + 1)
outputs= ['prob' + (l + 1)]
else:
layerName = 'softmax'
outputs = ['prob']
cmodel.addLayer(CaffeSoftMax(layerName,
cmodel.layers[name].outputs[0:1],
outputs))
# Simplifications
if args.simplify:
# Merge BatchNorm followed by Scale
layerNames = cmodel.layers.keys()
for name in layerNames:
layer = cmodel.layers[name]
if type(layer) is CaffeScale:
if len(layer.inputs) > 1:
continue # the scaling factor is an input, not a parameter
if len(cmodel.getLayersWithInput(layer.inputs[0])) > 1:
continue # other layers use the same input
parentNames = cmodel.getLayersWithOutput(layer.inputs[0])
if len(parentNames) != 1: continue
parent = cmodel.layers[parentNames[0]]
if type(parent) is not CaffeBatchNorm: continue
smult = cmodel.params[layer.params[0]]
sbias = cmodel.params[layer.params[1]]
mult = cmodel.params[parent.params[0]]
bias = cmodel.params[parent.params[1]]
# simplification can only occur if scale layer is 1x1xC
if smult.shape[0] != 1 or smult.shape[1] != 1: continue
C = smult.shape[2]
mult.value = np.reshape(smult.value, (C,)) * mult.value
bias.value = np.reshape(smult.value, (C,)) * bias.value + \
np.reshape(sbias.value, (C,))
print "Simplifying scale layer \'{}\'".format(name)
cmodel.renameVar(layer.outputs[0], layer.inputs[0])
cmodel.removeLayer(name)
# --------------------------------------------------------------------
# Transposition
# --------------------------------------------------------------------
#
# There are a few different conventions in MATLAB and Caffe:
#
# * In MATLAB, the frist spatial dimension is Y (vertical) followed by
# X (horizontal), whereas in Caffe the opposite is true.
#
# * In MATLAB, images are stored in RGB format, whereas Caffe uses
# BGR.
#
# * In MatConvNet, the first spatial coordinate is Y, whereas in Caffe
# it is X. This affects layers such as ROI pooling.
#
# These conventions means that, if the network is directly saved in
# MCN format, then images and spatial coordinates are transposed as
# just described. While this is not a deal breaker, it is
# inconvenient.
#
# Thus we transpose all X,Y spatial dimensions in the network. For now,
# this is partially heuristic. In the future, we should add adapter layer to
# convert from MCN inputs and outputs to Caffe input and outputs and then
# simplity those away using graph transformations.
# Mark variables:
# - requiring BGR -> RGB conversion
# - requiring XY transposition
for i, inputVarName in enumerate(net.input):
if inputVarName == 'data' or i == 0:
if cmodel.vars[inputVarName].shape[2] == 3:
cmodel.vars[inputVarName].bgrInput = (args.color_format == 'bgr')
if not inputVarName == 'rois':
cmodel.vars[inputVarName].transposable = True
else:
cmodel.vars[inputVarName].transposable = False
# Apply transformations
if args.transpose: cmodel.transpose()
cmodel.display()
# --------------------------------------------------------------------
# Normalization
# --------------------------------------------------------------------
minputs = np.empty(shape=[0,], dtype=minputdt)
# Determine the size of the inputs and input image (dataShape)
for i, inputVarName in enumerate(net.input):
shape = cmodel.vars[inputVarName].shape
# add metadata
minput = np.empty(shape=[1,], dtype=minputdt)
minput['name'][0] = inputVarName
minput['size'][0] = row(shape)
minputs = np.append(minputs, minput, axis=0)
# heuristic: the first input or 'data' is the input image
if i == 0 or inputVarName == 'data':
dataShape = shape
print "Input image data tensor shape:", dataShape
fullImageSize = [256, 256]
if args.full_image_size:
fullImageSize = list(make_tuple(args.full_image_size))
print "Full input image size:", fullImageSize
if average_image is not None:
if resize_average_image:
x = numpy.linspace(0, average_image.shape[1]-1, dataShape[0])
y = numpy.linspace(0, average_image.shape[0]-1, dataShape[1])
x, y = np.meshgrid(x, y, sparse=False, indexing='xy')
average_image = bilinear_interpolate(average_image, x, y)
else:
average_image = np.zeros((0,),dtype='float')
mnormalization = {
'imageSize': row(dataShape),
'averageImage': average_image,
'interpolation': 'bilinear',
'keepAspect': True,
'border': row([0,0]),
'cropSize': 1.0}
if len(fullImageSize) == 1:
fw = max(fullImageSize[0],dataShape[1])
fh = max(fullImageSize[0],dataShape[0])
mnormalization['border'] = max([float(fw - dataShape[1]),
float(fh - dataShape[0])])
mnormalization['cropSize'] = min([float(dataShape[1]) / fw,
float(dataShape[0]) / fh])
else:
fw = max(fullImageSize[0],dataShape[1])
fh = max(fullImageSize[1],dataShape[0])
mnormalization['border'] = row([float(fw - dataShape[1]),
float(fh - dataShape[0])])
mnormalization['cropSize'] = row([float(dataShape[1]) / fw,
float(dataShape[0]) / fh])
if args.caffe_variant == 'caffe_fastrcnn':
mnormalization['interpolation'] = 'bilinear'
if args.preproc == 'caffe':
mnormalization['interpolation'] = 'bicubic'
mnormalization['keepAspect'] = False
print 'Input image border: ', mnormalization['border']
print 'Full input image relative crop size: ', mnormalization['cropSize']
# --------------------------------------------------------------------
# Classes
# --------------------------------------------------------------------
mclassnames = np.empty((0,), dtype=np.object)
mclassdescriptions = np.array((0,), dtype=np.object)
if synsets_wnid:
mclassnames = np.array(synsets_wnid, dtype=np.object).reshape(1,-1)
if synsets_name:
mclassdescriptions = np.array(synsets_name, dtype=np.object).reshape(1,-1)
mclasses = dictToMatlabStruct({'name': mclassnames,
'description': mclassdescriptions})
# --------------------------------------------------------------------
# Convert to MATLAB
# --------------------------------------------------------------------
# net.meta
mmeta = dictToMatlabStruct({'inputs': minputs.reshape(1,-1),
'normalization': mnormalization,
'classes': mclasses})
if args.output_format == 'dagnn':
# This object should stay a dictionary and not a NumPy array due to
# how NumPy saves to MATLAB
mnet = {'layers': np.empty(shape=[0,], dtype=mlayerdt),
'params': np.empty(shape=[0,], dtype=mparamdt),
'meta': mmeta}
for layer in cmodel.layers.itervalues():
mnet['layers'] = np.append(mnet['layers'], layer.toMatlab(), axis=0)
for param in cmodel.params.itervalues():
mnet['params'] = np.append(mnet['params'], param.toMatlab(), axis=0)
# to row
mnet['layers'] = mnet['layers'].reshape(1,-1)
mnet['params'] = mnet['params'].reshape(1,-1)
elif args.output_format == 'simplenn':
# This object should stay a dictionary and not a NumPy array due to
# how NumPy saves to MATLAB
mnet = {'layers': np.empty(shape=[0,], dtype=np.object),
'meta': mmeta}
for layer in cmodel.layers.itervalues():
mnet['layers'] = np.append(mnet['layers'], np.object)
mnet['layers'][-1] = dictToMatlabStruct(layer.toMatlabSimpleNN())
# to row
mnet['layers'] = mnet['layers'].reshape(1,-1)
# --------------------------------------------------------------------
# Save output
# --------------------------------------------------------------------
print 'Saving network to {}'.format(args.output.name)
scipy.io.savemat(args.output, mnet, oned_as='column')
|
synconics/odoo | refs/heads/8.0 | addons/payment_authorize/tests/__init__.py | 224 | # -*- coding: utf-8 -*-
from openerp.addons.payment_authorize.tests import test_authorize
|
RealImpactAnalytics/airflow | refs/heads/master | airflow/hooks/base_hook.py | 3 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
    """
    Abstract base class for hooks.

    Hooks are interfaces to external systems: concrete subclasses such as
    MySqlHook, HiveHook or PigHook own connection handling for one system
    and expose a consistent set of methods on top of it.
    """
    def __init__(self, source):
        pass

    @classmethod
    @provide_session
    def _get_connections_from_db(cls, conn_id, session=None):
        # Fetch every Connection row registered under this conn_id.
        query = session.query(Connection).filter(Connection.conn_id == conn_id)
        db = query.all()
        session.expunge_all()
        if not db:
            raise AirflowException(
                "The conn_id `{0}` isn't defined".format(conn_id))
        return db

    @classmethod
    def _get_connection_from_env(cls, conn_id):
        # A connection may also be supplied as an AIRFLOW_CONN_<ID> URI
        # environment variable; return None when it is absent or empty.
        environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
        if not environment_uri:
            return None
        return Connection(conn_id=conn_id, uri=environment_uri)

    @classmethod
    def get_connections(cls, conn_id):
        # The environment variable takes precedence over the metadata DB.
        env_conn = cls._get_connection_from_env(conn_id)
        if env_conn:
            return [env_conn]
        return cls._get_connections_from_db(conn_id)

    @classmethod
    def get_connection(cls, conn_id):
        # Pick one at random when several rows share the same conn_id.
        conn = random.choice(cls.get_connections(conn_id))
        if conn.host:
            LoggingMixin().log.info("Using connection to: %s", conn.host)
        return conn

    @classmethod
    def get_hook(cls, conn_id):
        return cls.get_connection(conn_id).get_hook()

    def get_conn(self):
        raise NotImplementedError()

    def get_records(self, sql):
        raise NotImplementedError()

    def get_pandas_df(self, sql):
        raise NotImplementedError()

    def run(self, sql):
        raise NotImplementedError()
|
diana-hep/femtocode | refs/heads/master | lang/femtocode/thirdparty/meta/decompiler/__init__.py | 1 | '''
Decompiler module.
This module can decompile arbitrary code objects into a python ast.
'''
from ..decompiler.instructions import make_module, make_function
import _ast
import struct
import time
import sys
import marshal
def decompile_func(func):
    """Decompile a function into an ast.FunctionDef node.

    :param func: a pure-Python function (built-ins have no code object)
    :return: ast.FunctionDef instance
    """
    # The code object lives at ``func_code`` on Python 2 and ``__code__``
    # on Python 3.
    try:
        code = func.func_code
    except AttributeError:
        code = func.__code__
    # Default-argument reconstruction is not supported here; an empty
    # defaults list is passed through to the instruction decompiler.
    return make_function(code, defaults=[], lineno=code.co_firstlineno)
def compile_func(ast_node, filename, globals, **defaults):
    '''
    Compile a function from an ast.FunctionDef instance.

    :param ast_node: ast.FunctionDef instance
    :param filename: path where function source can be found.
    :param globals: will be used as func_globals
    :param defaults: keyword arguments exposed to the compiled body as
        ``<name>_default`` names in the evaluation namespace.
    :return: A python function object
    '''
    function_name = ast_node.name
    module = _ast.Module(body=[ast_node])
    # Bug fix: Python 3.8+ refuses to compile a Module node whose required
    # ``type_ignores`` field is unset (TypeError: required field
    # "type_ignores" missing).  Setting the attribute is harmless on older
    # interpreters, which simply ignore it.
    if not hasattr(module, 'type_ignores'):
        module.type_ignores = []
    ctx = {'%s_default' % key: arg for key, arg in defaults.items()}
    code = compile(module, filename, 'exec')
    # Executing the module binds the function into the local ctx dict while
    # ``globals`` becomes the function's __globals__.
    eval(code, globals, ctx)
    return ctx[function_name]
#from imp import get_magic
#
#def extract(binary):
#
# if len(binary) <= 8:
# raise Exception("Binary pyc must be greater than 8 bytes (got %i)" % len(binary))
#
# magic = binary[:4]
# MAGIC = get_magic()
#
# if magic != MAGIC:
# raise Exception("Python version mismatch (%r != %r) Is this a pyc file?" % (magic, MAGIC))
#
# modtime = time.asctime(time.localtime(struct.unpack('i', binary[4:8])[0]))
#
# code = marshal.loads(binary[8:])
#
# return modtime, code
def decompile_pyc(bin_pyc, output=sys.stdout):
    '''
    Decompile a python pyc or pyo binary file.

    :param bin_pyc: readable binary file object containing the pyc data
    :param output: writable file object the decompiled source is printed to
    '''
    from meta.asttools import python_source

    raw = bin_pyc.read()
    # Skip the 8-byte header (magic number + timestamp) and unmarshal the
    # top-level code object.
    code_obj = marshal.loads(raw[8:])
    python_source(make_module(code_obj), file=output)
|
oleksa-pavlenko/gae-django-project-template | refs/heads/master | django/contrib/staticfiles/urls.py | 78 | from django.conf import settings
from django.conf.urls.static import static
urlpatterns = []
def staticfiles_urlpatterns(prefix=None):
    """
    Return a URL pattern for serving static files.

    ``prefix`` defaults to ``settings.STATIC_URL`` when not supplied.
    """
    effective_prefix = settings.STATIC_URL if prefix is None else prefix
    return static(effective_prefix,
                  view='django.contrib.staticfiles.views.serve')
# Register the static-serving patterns only in DEBUG mode, and only while
# ``urlpatterns`` is still empty, so they are never appended twice.
if settings.DEBUG and not urlpatterns:
    urlpatterns += staticfiles_urlpatterns()
|
caseyfw/slack-sounds | refs/heads/master | slackclient/_util.py | 27 | class SearchList(list):
def find(self, name):
items = []
for child in self:
if child.__class__ == self.__class__:
items += child.find(name)
else:
if child == name:
items.append(child)
if len(items) == 1:
return items[0]
elif items != []:
return items
|
mlperf/training_results_v0.7 | refs/heads/master | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/tensor_intrin.py | 2 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor intrinsics"""
from __future__ import absolute_import as _abs
from . import _api_internal
from . import api as _api
from . import expr as _expr
from . import stmt as _stmt
from . import make as _make
from . import tensor as _tensor
from . import schedule as _schedule
from .build_module import current_build_config
from ._ffi.node import NodeBase, register_node
def _get_region(tslice):
    """Convert a TensorSlice's indices into a list of Range objects.

    Slice indices become [start, stop) ranges; scalar indices (including
    IterVars, whose underlying var is used) become unit-extent ranges.
    """
    region = []
    for index in tslice.indices:
        if isinstance(index, slice):
            # Only contiguous (step-less) slices are supported.
            assert index.step is None
            region.append(_api.Range(index.start, index.stop))
            continue
        # A scalar index selects a single element: range of extent 1.
        start = index.var if isinstance(index, _schedule.IterVar) else index
        region.append(_make.range_by_min_extent(start, 1))
    return region
@register_node
class TensorIntrin(NodeBase):
    """Tensor intrinsic functions for certain computation.

    See Also
    --------
    decl_tensor_intrin: Construct a TensorIntrin
    """
    def __call__(self, *args, **kwargs):
        # Separate tensor-slice arguments from plain scalar arguments.
        slice_args = [a for a in args if isinstance(a, _tensor.TensorSlice)]
        tensors = [a.tensor for a in slice_args]
        regions = [_get_region(a) for a in slice_args]
        scalar_inputs = [a for a in args if not isinstance(a, _tensor.TensorSlice)]
        # Optional reduction axes, normalized to a converted array.
        reduce_axis = kwargs.get("reduce_axis", [])
        if not isinstance(reduce_axis, (list, tuple)):
            reduce_axis = [reduce_axis]
        reduce_axis = _api.convert(reduce_axis)
        if scalar_inputs:
            scalar_inputs = _api.convert(scalar_inputs)
        return _api_internal._TensorIntrinCall(self, tensors, regions,
                                               reduce_axis, scalar_inputs)
def decl_tensor_intrin(op,
                       fcompute,
                       name="tensor_intrin",
                       binds=None, scalar_params=None):
    """Declare a tensor intrinsic function.
    Parameters
    ----------
    op: Operation
        The symbolic description of the intrinsic operation
    fcompute: lambda function of inputs, outputs-> stmt
        Specifies the IR statement to do the computation.
        See the following note for function signature of fcompute
    .. note::
        **Parameters**
        - **ins** (list of :any:`Buffer`) - Placeholder for each inputs
        - **outs** (list of :any:`Buffer`) - Placeholder for each outputs
        **Returns**
        - **stmt** (:any:`Stmt`, or tuple of three stmts)
        - If a single stmt is returned, it represents the body
        - If tuple of three stmts are returned they corresponds to body,
        reduce_init, reduce_update
    name: str, optional
        The name of the intrinsic.
    binds: dict of :any:`Tensor` to :any:`Buffer`, optional
        Dictionary that maps the Tensor to Buffer which specified the data layout
        requirement of the function. By default, a new compact buffer is created
        for each tensor in the argument.
    scalar_params: a list of variables used by op, whose values will be passed
                   as scalar_inputs when the tensor intrinsic is called.
    Returns
    -------
    intrin: TensorIntrin
        A TensorIntrin that can be used in tensorize schedule.
    """
    if not isinstance(op, _tensor.Operation):
        raise TypeError("expect Operation")
    inputs = op.input_tensors
    binds = binds if binds else {}
    # Inputs first, then every output of the op.  This ordering must match
    # the [:len(inputs)] / [len(inputs):] split used to call fcompute below.
    tensors = [x for x in inputs]
    for i in range(op.num_outputs):
        tensors.append(op.output(i))
    binds_list = []
    # Only placeholder inputs are supported (no fused/composed producers).
    for t in inputs:
        if not isinstance(t.op, _tensor.PlaceholderOp):
            raise ValueError("Do not yet support composition op")
    # Buffer layout defaults (alignment, offset factor) come from the
    # currently active build configuration.
    cfg = current_build_config()
    for t in tensors:
        buf = (binds[t] if t in binds else
               _api.decl_buffer(t.shape, t.dtype, t.op.name,
                                data_alignment=cfg.data_alignment,
                                offset_factor=cfg.offset_factor))
        binds_list.append(buf)
    # fcompute receives input buffers, output buffers and, when declared,
    # the scalar parameters as a third argument.
    if scalar_params:
        body = fcompute(binds_list[:len(inputs)], binds_list[len(inputs):], scalar_params)
    else:
        body = fcompute(binds_list[:len(inputs)], binds_list[len(inputs):])
        scalar_params = []
    if isinstance(body, (_expr.Expr, _stmt.Stmt)):
        body = [body]
    # Wrap bare expressions as statements.
    body = [_make.Evaluate(x) if isinstance(x, _expr.Expr) else x for x in body]
    # Normalize to exactly [body, reduce_init, reduce_update], padding the
    # missing entries with None -- _TensorIntrin expects three positional
    # statement arguments.
    if len(body) < 3:
        body += [None] * (3 - len(body))
    return _api_internal._TensorIntrin(
        name, op, inputs, binds_list, scalar_params, *body)
|
kxliugang/edx-platform | refs/heads/master | lms/djangoapps/bulk_email/migrations/0009_force_unique_course_ids.py | 114 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: enforce uniqueness of CourseAuthorization.course_id."""
    def forwards(self, orm):
        """Apply: add the unique constraint on course_id."""
        # Adding unique constraint on 'CourseAuthorization', fields ['course_id']
        db.create_unique('bulk_email_courseauthorization', ['course_id'])
    def backwards(self, orm):
        """Revert: drop the unique constraint on course_id again."""
        # Removing unique constraint on 'CourseAuthorization', fields ['course_id']
        db.delete_unique('bulk_email_courseauthorization', ['course_id'])
    # Frozen ORM snapshot used by South when running this migration.
    # Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'bulk_email.courseauthorization': {
            'Meta': {'object_name': 'CourseAuthorization'},
            'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'bulk_email.courseemail': {
            'Meta': {'object_name': 'CourseEmail'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
        },
        'bulk_email.courseemailtemplate': {
            'Meta': {'object_name': 'CourseEmailTemplate'},
            'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'bulk_email.optout': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['bulk_email']
|
mlibrary/image-conversion-and-validation | refs/heads/master | falcom/test/hamcrest/composed_matcher.py | 2 | # Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from hamcrest.core.base_matcher import BaseMatcher
class ComposedMatcher (BaseMatcher):
    """Hamcrest matcher built from a sequence of sub-assertions.

    Subclasses implement ``assertion(item)``, yielding either a matcher,
    an ``(item, matcher)`` pair, or an ``(item, matcher, message)``
    triple.  The first failing sub-matcher is remembered so the
    describe_* hooks can report it.
    """

    def _matches (self, item):
        self.failed_matcher = None
        self.mismatch_item = None
        self.extra_message = None
        for entry in self.assertion(item):
            target, matcher, message = self.__normalize(entry, item)
            if matcher.matches(target):
                continue
            # Remember the failure for describe_to/describe_mismatch.
            self.failed_matcher = matcher
            self.mismatch_item = target
            self.extra_message = message
            return False
        return True

    def describe_to (self, description):
        if self.extra_message is not None:
            description.append_text("{} ".format(self.extra_message))
        self.failed_matcher.describe_to(description)

    def describe_mismatch (self, item, description):
        self.failed_matcher.describe_mismatch(self.mismatch_item,
                                              description)

    @staticmethod
    def __normalize (entry, default_item):
        # Expand the three accepted shapes into (item, matcher, message).
        if not isinstance(entry, tuple):
            return default_item, entry, None
        if len(entry) == 3:
            return entry
        assert len(entry) == 2
        return entry + (None,)
|
our-city-app/oca-backend | refs/heads/master | src/rogerthat/restapi/build_api/to.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from mcfw.properties import unicode_property, typed_property, bool_property
from rogerthat.to import TO
class AppDeepLinksTO(TO):
    """Transfer object describing one deep-link entry for an app."""
    # Declared via the project's property framework; all three are strings.
    host = unicode_property('host')
    path_prefix = unicode_property('path_prefix')
    scheme = unicode_property('scheme')
    def __init__(self, host=None, path_prefix=None, scheme=None):
        # All components are optional and default to None.
        self.host = host
        self.path_prefix = path_prefix
        self.scheme = scheme
class AppBuildInfoTO(TO):
    """Transfer object with build-related app info: deep links, iOS app id,
    and whether the home screen can be chosen."""
    deep_links = typed_property('deep_links', AppDeepLinksTO, True)  # type: List[AppDeepLinksTO]
    ios_app_id = unicode_property('ios_app_id', default=None)
    can_choose_home_screen = bool_property('can_choose_home_screen', default=False)
|
andrewfu0325/gem5-aladdin | refs/heads/ruby | ext/ply/example/BASIC/basinterp.py | 166 | # This file provides the runtime support for running a basic program
# Assumes the program has been parsed using basparse.py
import sys
import math
import random
class BasicInterpreter:
# Initialize the interpreter. prog is a dictionary
# containing (line,statement) mappings
def __init__(self,prog):
self.prog = prog
self.functions = { # Built-in function table
'SIN' : lambda z: math.sin(self.eval(z)),
'COS' : lambda z: math.cos(self.eval(z)),
'TAN' : lambda z: math.tan(self.eval(z)),
'ATN' : lambda z: math.atan(self.eval(z)),
'EXP' : lambda z: math.exp(self.eval(z)),
'ABS' : lambda z: abs(self.eval(z)),
'LOG' : lambda z: math.log(self.eval(z)),
'SQR' : lambda z: math.sqrt(self.eval(z)),
'INT' : lambda z: int(self.eval(z)),
'RND' : lambda z: random.random()
}
# Collect all data statements
def collect_data(self):
self.data = []
for lineno in self.stat:
if self.prog[lineno][0] == 'DATA':
self.data = self.data + self.prog[lineno][1]
self.dc = 0 # Initialize the data counter
# Check for end statements
def check_end(self):
has_end = 0
for lineno in self.stat:
if self.prog[lineno][0] == 'END' and not has_end:
has_end = lineno
if not has_end:
print("NO END INSTRUCTION")
self.error = 1
return
if has_end != lineno:
print("END IS NOT LAST")
self.error = 1
# Check loops
def check_loops(self):
for pc in range(len(self.stat)):
lineno = self.stat[pc]
if self.prog[lineno][0] == 'FOR':
forinst = self.prog[lineno]
loopvar = forinst[1]
for i in range(pc+1,len(self.stat)):
if self.prog[self.stat[i]][0] == 'NEXT':
nextvar = self.prog[self.stat[i]][1]
if nextvar != loopvar: continue
self.loopend[pc] = i
break
else:
print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
self.error = 1
# Evaluate an expression
def eval(self,expr):
etype = expr[0]
if etype == 'NUM': return expr[1]
elif etype == 'GROUP': return self.eval(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return -self.eval(expr[2])
elif etype == 'BINOP':
if expr[1] == '+': return self.eval(expr[2])+self.eval(expr[3])
elif expr[1] == '-': return self.eval(expr[2])-self.eval(expr[3])
elif expr[1] == '*': return self.eval(expr[2])*self.eval(expr[3])
elif expr[1] == '/': return float(self.eval(expr[2]))/self.eval(expr[3])
elif expr[1] == '^': return abs(self.eval(expr[2]))**self.eval(expr[3])
elif etype == 'VAR':
var,dim1,dim2 = expr[1]
if not dim1 and not dim2:
if var in self.vars:
return self.vars[var]
else:
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# May be a list lookup or a function evaluation
if dim1 and not dim2:
if var in self.functions:
# A function
return self.functions[var](dim1)
else:
# A list evaluation
if var in self.lists:
dim1val = self.eval(dim1)
if dim1val < 1 or dim1val > len(self.lists[var]):
print("LIST INDEX OUT OF BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.lists[var][dim1val-1]
if dim1 and dim2:
if var in self.tables:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.tables[var][dim1val-1][dim2val-1]
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# Evaluate a relational expression
def releval(self,expr):
etype = expr[1]
lhs = self.eval(expr[2])
rhs = self.eval(expr[3])
if etype == '<':
if lhs < rhs: return 1
else: return 0
elif etype == '<=':
if lhs <= rhs: return 1
else: return 0
elif etype == '>':
if lhs > rhs: return 1
else: return 0
elif etype == '>=':
if lhs >= rhs: return 1
else: return 0
elif etype == '=':
if lhs == rhs: return 1
else: return 0
elif etype == '<>':
if lhs != rhs: return 1
else: return 0
# Assignment
    # Assignment
    def assign(self,target,value):
        """Assign the value of expression *value* to *target*.

        target is a (name, dim1, dim2) tuple: a scalar when both dims are
        None, a list element when only dim1 is given, and a table cell when
        both are given.  Lists and tables are created on demand with a
        default size of 10 (or 10x10); indices are 1-based.
        """
        var, dim1, dim2 = target
        if not dim1 and not dim2:
            self.vars[var] = self.eval(value)
        elif dim1 and not dim2:
            # List assignment
            dim1val = self.eval(dim1)
            if not var in self.lists:
                # First reference creates a default 10-element list of zeros.
                self.lists[var] = [0]*10

            if dim1val > len(self.lists[var]):
                print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
                raise RuntimeError
            self.lists[var][dim1val-1] = self.eval(value)
        elif dim1 and dim2:
            dim1val = self.eval(dim1)
            dim2val = self.eval(dim2)
            if not var in self.tables:
                # First reference creates a default 10x10 table of zeros
                # (temp[:] copies so rows do not alias each other).
                temp = [0]*10
                v = []
                for i in range(10): v.append(temp[:])
                self.tables[var] = v
            # Variable already exists
            if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
                print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
                raise RuntimeError
            self.tables[var][dim1val-1][dim2val-1] = self.eval(value)
# Change the current line number
def goto(self,linenum):
if not linenum in self.prog:
print("UNDEFINED LINE NUMBER %d AT LINE %d" % (linenum, self.stat[self.pc]))
raise RuntimeError
self.pc = self.stat.index(linenum)
# Run it
def run(self):
self.vars = { } # All variables
self.lists = { } # List variables
self.tables = { } # Tables
self.loops = [ ] # Currently active loops
self.loopend= { } # Mapping saying where loops end
self.gosub = None # Gosub return point (if any)
self.error = 0 # Indicates program error
self.stat = list(self.prog) # Ordered list of all line numbers
self.stat.sort()
self.pc = 0 # Current program counter
# Processing prior to running
self.collect_data() # Collect all of the data statements
self.check_end()
self.check_loops()
if self.error: raise RuntimeError
while 1:
line = self.stat[self.pc]
instr = self.prog[line]
op = instr[0]
# END and STOP statements
if op == 'END' or op == 'STOP':
break # We're done
# GOTO statement
elif op == 'GOTO':
newline = instr[1]
self.goto(newline)
continue
# PRINT statement
elif op == 'PRINT':
plist = instr[1]
out = ""
for label,val in plist:
if out:
out += ' '*(15 - (len(out) % 15))
out += label
if val:
if label: out += " "
eval = self.eval(val)
out += str(eval)
sys.stdout.write(out)
end = instr[2]
if not (end == ',' or end == ';'):
sys.stdout.write("\n")
if end == ',': sys.stdout.write(" "*(15-(len(out) % 15)))
if end == ';': sys.stdout.write(" "*(3-(len(out) % 3)))
# LET statement
elif op == 'LET':
target = instr[1]
value = instr[2]
self.assign(target,value)
# READ statement
elif op == 'READ':
for target in instr[1]:
if self.dc < len(self.data):
value = ('NUM',self.data[self.dc])
self.assign(target,value)
self.dc += 1
else:
# No more data. Program ends
return
elif op == 'IF':
relop = instr[1]
newline = instr[2]
if (self.releval(relop)):
self.goto(newline)
continue
elif op == 'FOR':
loopvar = instr[1]
initval = instr[2]
finval = instr[3]
stepval = instr[4]
# Check to see if this is a new loop
if not self.loops or self.loops[-1][0] != self.pc:
# Looks like a new loop. Make the initial assignment
newvalue = initval
self.assign((loopvar,None,None),initval)
if not stepval: stepval = ('NUM',1)
stepval = self.eval(stepval) # Evaluate step here
self.loops.append((self.pc,stepval))
else:
# It's a repeat of the previous loop
# Update the value of the loop variable according to the step
stepval = ('NUM',self.loops[-1][1])
newvalue = ('BINOP','+',('VAR',(loopvar,None,None)),stepval)
if self.loops[-1][1] < 0: relop = '>='
else: relop = '<='
if not self.releval(('RELOP',relop,newvalue,finval)):
# Loop is done. Jump to the NEXT
self.pc = self.loopend[self.pc]
self.loops.pop()
else:
self.assign((loopvar,None,None),newvalue)
elif op == 'NEXT':
if not self.loops:
print("NEXT WITHOUT FOR AT LINE %s" % line)
return
nextvar = instr[1]
self.pc = self.loops[-1][0]
loopinst = self.prog[self.stat[self.pc]]
forvar = loopinst[1]
if nextvar != forvar:
print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
return
continue
elif op == 'GOSUB':
newline = instr[1]
if self.gosub:
print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
return
self.gosub = self.stat[self.pc]
self.goto(newline)
continue
elif op == 'RETURN':
if not self.gosub:
print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
return
self.goto(self.gosub)
self.gosub = None
elif op == 'FUNC':
fname = instr[1]
pname = instr[2]
expr = instr[3]
def eval_func(pvalue,name=pname,self=self,expr=expr):
self.assign((pname,None,None),pvalue)
return self.eval(expr)
self.functions[fname] = eval_func
elif op == 'DIM':
for vname,x,y in instr[1]:
if y == 0:
# Single dimension variable
self.lists[vname] = [0]*x
else:
# Double dimension variable
temp = [0]*y
v = []
for i in range(x):
v.append(temp[:])
self.tables[vname] = v
self.pc += 1
# Utility functions for program listing
def expr_str(self,expr):
etype = expr[0]
if etype == 'NUM': return str(expr[1])
elif etype == 'GROUP': return "(%s)" % self.expr_str(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return "-"+str(expr[2])
elif etype == 'BINOP':
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
elif etype == 'VAR':
return self.var_str(expr[1])
def relexpr_str(self,expr):
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
def var_str(self,var):
varname,dim1,dim2 = var
if not dim1 and not dim2: return varname
if dim1 and not dim2: return "%s(%s)" % (varname, self.expr_str(dim1))
return "%s(%s,%s)" % (varname, self.expr_str(dim1),self.expr_str(dim2))
# Create a program listing
    # Create a program listing
    def list(self):
        """Print the stored program as formatted BASIC source, in line order.

        Output is written with print(); the exact formatting mirrors what
        the parser accepts so a listing can be re-entered verbatim.
        """
        stat = list(self.prog)      # Ordered list of all line numbers
        stat.sort()
        for line in stat:
            instr = self.prog[line]
            op = instr[0]
            # Statements with no operands.
            if op in ['END','STOP','RETURN']:
                print("%s %s" % (line, op))
                continue
            elif op == 'REM':
                print("%s %s" % (line, instr[1]))
            elif op == 'PRINT':
                # instr[1] is a list of (label, expr) pairs; instr[2] is the
                # optional trailing ',' or ';'.
                _out = "%s %s " % (line, op)
                first = 1
                for p in instr[1]:
                    if not first: _out += ", "
                    if p[0] and p[1]: _out += '"%s"%s' % (p[0],self.expr_str(p[1]))
                    elif p[1]: _out += self.expr_str(p[1])
                    else: _out += '"%s"' % (p[0],)
                    first = 0
                if instr[2]: _out += instr[2]
                print(_out)
            elif op == 'LET':
                print("%s LET %s = %s" % (line,self.var_str(instr[1]),self.expr_str(instr[2])))
            elif op == 'READ':
                _out = "%s READ " % line
                first = 1
                for r in instr[1]:
                    if not first: _out += ","
                    _out += self.var_str(r)
                    first = 0
                print(_out)
            elif op == 'IF':
                print("%s IF %s THEN %d" % (line,self.relexpr_str(instr[1]),instr[2]))
            elif op == 'GOTO' or op == 'GOSUB':
                print("%s %s %s" % (line, op, instr[1]))
            elif op == 'FOR':
                # instr: (op, loopvar, initval, finval, stepval-or-None)
                _out = "%s FOR %s = %s TO %s" % (line,instr[1],self.expr_str(instr[2]),self.expr_str(instr[3]))
                if instr[4]: _out += " STEP %s" % (self.expr_str(instr[4]))
                print(_out)
            elif op == 'NEXT':
                print("%s NEXT %s" % (line, instr[1]))
            elif op == 'FUNC':
                print("%s DEF %s(%s) = %s" % (line,instr[1],instr[2],self.expr_str(instr[3])))
            elif op == 'DIM':
                # instr[1] is a list of (name, x, y); y == 0 means 1-D.
                _out = "%s DIM " % line
                first = 1
                for vname,x,y in instr[1]:
                    if not first: _out += ","
                    first = 0
                    if y == 0:
                        _out += "%s(%d)" % (vname,x)
                    else:
                        _out += "%s(%d,%d)" % (vname,x,y)

                print(_out)
            elif op == 'DATA':
                _out = "%s DATA " % line
                first = 1
                for v in instr[1]:
                    if not first: _out += ","
                    first = 0
                    _out += v
                print(_out)
# Erase the current program
def new(self):
self.prog = {}
# Insert statements
def add_statements(self,prog):
for line,stat in prog.items():
self.prog[line] = stat
# Delete a statement
def del_line(self,lineno):
try:
del self.prog[lineno]
except KeyError:
pass
|
bperreault-va/eloworld | refs/heads/master | src/lib/markupsafe/_compat.py | 864 | # -*- coding: utf-8 -*-
"""
markupsafe._compat
~~~~~~~~~~~~~~~~~~
Compatibility module for different Python versions.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
text_type = str
string_types = (str,)
unichr = chr
int_types = (int,)
iteritems = lambda x: iter(x.items())
else:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
int_types = (int, long)
iteritems = lambda x: x.iteritems()
|
mattrobenolt/django | refs/heads/master | tests/save_delete_hooks/models.py | 409 | """
Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    # Demonstrates overriding save()/delete(): every hook appends a marker
    # string to self.data so tests can assert the call order.
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    def __init__(self, *args, **kwargs):
        super(Person, self).__init__(*args, **kwargs)
        # Ordered record of the hook events observed on this instance.
        self.data = []

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)

    def save(self, *args, **kwargs):
        """Record markers before and after the inherited save()."""
        self.data.append("Before save")
        # Call the "real" save() method
        super(Person, self).save(*args, **kwargs)
        self.data.append("After save")

    def delete(self):
        """Record markers before and after the inherited delete()."""
        self.data.append("Before deletion")
        # Call the "real" delete() method
        super(Person, self).delete()
        self.data.append("After deletion")
|
bassio/omicexperiment | refs/heads/master | omicexperiment/util.py | 1 | import hashlib
import pandas as pd
def parse_fasta_labels(fasta_filepath):
    """Yield the description line (without the '>') of every FASTA record."""
    with open(fasta_filepath) as handle:
        for raw_line in handle:
            line = raw_line.strip()
            if not line.startswith(">"):
                continue
            yield line[1:]
def parse_fasta(fasta_filepath):
    """Yield (description, sequence) pairs for each record in a FASTA file.

    Lines are stripped of surrounding whitespace; multi-line sequences are
    concatenated.  Content before the first '>' header is ignored.
    """
    with open(fasta_filepath) as handle:
        desc = None
        seq_parts = []
        for raw_line in handle:
            line = raw_line.strip()
            if line.startswith(">"):
                if desc is not None:
                    yield desc, "".join(seq_parts)
                desc = line[1:]
                seq_parts = []
            else:
                seq_parts.append(line)
        if desc is not None:
            yield desc, "".join(seq_parts)
def parse_fasta_relabel(fasta_filepath, relabel_fn=lambda x:x):
    """Yield (relabel_fn(description), sequence) pairs from a FASTA file."""
    return ((relabel_fn(label), seq)
            for label, seq in parse_fasta(fasta_filepath))
def parse_fastq(fastq_filepath):
    """Yield (description, sequence, quality) tuples from a FASTQ file.

    Records are assumed to span exactly four lines ('@desc', sequence,
    '+', quality).  A record line that does not start with '@' raises
    RuntimeError.  (The original code used a bare ``raise`` with no active
    exception, which surfaced as an uninformative
    ``RuntimeError: No active exception to re-raise``; the exception type
    is preserved but now carries a message.)
    """
    with open(fastq_filepath) as f:
        for l in f:
            if l.startswith("@"):
                desc = l[1:].strip()
                seq = f.readline().strip()
                f.readline()  # '+' separator line, discarded
                qual = f.readline().strip()
                yield desc, seq, qual
            else:
                raise RuntimeError(
                    "malformed FASTQ record: expected a line starting "
                    "with '@', got %r" % l)
def find_sequence_by_label(fasta_filepaths, label):
    """Return the first sequence whose description equals *label*, or None.

    *fasta_filepaths* may be a single path or an iterable of paths; files
    are searched in order.
    """
    if isinstance(fasta_filepaths, str):
        fasta_filepaths = [fasta_filepaths]
    for filepath in list(fasta_filepaths):
        for desc, seq in parse_fasta(filepath):
            if desc == label:
                return seq
    return None
def find_sequences_for_labels(fasta_filepaths, labels):
    """Yield (label, sequence) pairs for each requested label found.

    *fasta_filepaths* may be a single path or an iterable of paths.  Each
    label is reported at most once, in the order it is encountered while
    scanning the files.
    """
    if isinstance(fasta_filepaths, str):
        fasta_filepaths = [fasta_filepaths]
    remaining = list(labels)  # copy so the caller's list is untouched
    for filepath in list(fasta_filepaths):
        for desc, seq in parse_fasta(filepath):
            if desc in remaining:
                remaining.remove(desc)  # avoid duplicate reports
                yield (desc, seq)
def counts_df_to_repset_fasta(fasta_counts_df, output_fasta, sizes_out=False):
    """Write a representative-set FASTA from a counts DataFrame.

    *fasta_counts_df* is indexed by (sha1, sequence) pairs; rows are summed
    across the sample columns and written in descending order of total
    count.  When *sizes_out* is true, headers carry a usearch-style
    ``;size=N;`` annotation.

    Fix: ``Series.iteritems()`` was removed in pandas 2.0; ``items()`` is
    the long-standing equivalent.
    """
    sums_df = fasta_counts_df.sum(axis=1).sort_values(ascending=False)
    with open(output_fasta, 'w') as f:
        if sizes_out:
            for (sha1, seq), size in sums_df.items():
                f.write(">{0};size={1};\n".format(sha1,int(size)))
                f.write(seq + "\n")
        else:
            for sha1, seq in sums_df.index:
                f.write(">" + sha1 + "\n")
                f.write(seq + "\n")
def sha1_to_sequences(sequence_array):
    """Return a DataFrame mapping each sequence's SHA-1 hex digest to it.

    The result has a 'sequence' column and is indexed by 'sha1'.
    """
    frame = pd.DataFrame({'sequence': pd.Series(sequence_array)})
    frame['sha1'] = frame['sequence'].apply(
        lambda s: hashlib.sha1(s.encode('utf-8')).hexdigest())
    return frame.set_index('sha1')
def desc_seq_tuples_to_fasta(desc_seq_tuples, filename):
    """Write (description, sequence) pairs to *filename* in FASTA format."""
    with open(filename, 'w') as out:
        for desc, seq in desc_seq_tuples:
            out.write('>' + desc + '\n')
            out.write(str(seq) + '\n')
def dataframe_to_fasta(sequence_df, filename):
    """Write a DataFrame as FASTA: index entries are identifiers and the
    'sequence' column holds the sequences.

    Fix: the sequence was previously read with positional ``row[1][0]``;
    integer-positional access through ``Series[...]`` is deprecated in
    pandas, so the column is now read by label.
    """
    with open(filename, 'w') as f:
        for identifier, row in sequence_df[['sequence']].iterrows():
            print('>' + identifier, file=f)  # identifier in index
            print(row['sequence'], file=f)   # sequence itself
def dict_to_fasta(sequence_dict, filename):
    """Write an {identifier: sequence} mapping to *filename* as FASTA."""
    with open(filename, 'w') as out:
        for identifier, sequence in sequence_dict.items():
            out.write('>' + identifier + '\n')
            out.write(str(sequence) + '\n')
def iterable_tuples_to_fasta(sequence_iter, filename):
    """Write an iterable of (identifier, sequence) tuples as FASTA records."""
    with open(filename, 'w') as out:
        for identifier, sequence in sequence_iter:
            out.write('>' + identifier + '\n')
            out.write(str(sequence) + '\n')
#adapted from sqlalchemy's hybrid extension module:
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
class hybridmethod(object):
    """Descriptor letting one function act as instance- and class-level method.

    Accessed through an instance, the wrapped function is bound to that
    instance; accessed through the class, it is bound to the class itself.
    """

    def __init__(self, func):
        # Plain function, bound lazily at attribute-access time.
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access: the class becomes the implicit first arg.
            return self.func.__get__(owner, owner.__class__)
        # Instance-level access: an ordinary bound method.
        return self.func.__get__(instance, owner)
|
GrandmasterK/XScheduler | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py | 894 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request."""

    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects.

        Both are pulled out of the keyword arguments before the remainder
        is forwarded to IOError; when only a response is supplied, the
        request is recovered from ``response.request`` if available.
        """
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        if (self.response is not None and not self.request
                and hasattr(self.response, 'request')):
            self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
    """An HTTP error occurred."""


class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out.

    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """


class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.

    Requests that produced this error are safe to retry.
    """


class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""


class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""


class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""


class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""


class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""


class RetryError(RequestException):
    """Custom retries logic failed"""
|
AWNystrom/BloomML | refs/heads/master | bloom_freqmap.py | 1 | """
the squashed counts are stored. So the lookup should look like
c = 1
while c in bf:
c+= 1
return self.decode(c-1)
the increment should look like
def increment(item, by):
#Get the current quantized count
cur = count(item, decode=True)
to = self.encode(cur+by)
for i in xrange(cur+1, to+1):
        bf.add(item+'_'+str(i)) #adding quantized counts
"""
#TODO: quantized and binsearch thresh
#Needs this: https://github.com/jaybaird/python-bloomfilter
from pybloom import BloomFilter
from random import random
from time import time
from math import log, floor
import logging
#Needs https://github.com/axiak/pybloomfiltermmap
#10X faster!
#from pybloomfilter import BloomFilter
from lru_cacher import LruCacher
from numpy import median, mean
from code import interact
from scipy.stats.mstats import mquantiles
class BloomFreqMapSet(object):
    """An ensemble of independent BloomFreqMap counters.

    Each item's count is maintained in *num* separate bloom-backed maps;
    lookups take a low quantile of the ensemble's answers, damping the
    one-sided overestimation error of any single bloom-filter counter.
    """
    def __init__(self, num, b, bloom_size=500000, bloom_error=0.001,
                 cache_size=500, bin_search_lookback=3, quantum_leap=True):
        # Parameters are recorded and forwarded unchanged to each
        # underlying BloomFreqMap (see that class for their meaning).
        self.num = num
        self.b = b
        self.bloom_size = bloom_size
        self.bloom_error = bloom_error
        self.cache_size = cache_size
        self.bin_search_lookback = bin_search_lookback
        self.quantum_leap = quantum_leap
        self.bfms = [BloomFreqMap(b, bloom_size, bloom_error,
                                  cache_size, bin_search_lookback,
                                  quantum_leap) for i in xrange(num)]

    def __getitem__(self, item):
        # Aggregate the ensemble's estimates.  0.375 is a low quantile,
        # which biases toward the smaller (less overcounted) estimates;
        # NOTE(review): the specific choice of 0.375 is not documented
        # here -- confirm against the accompanying analysis.
        counts = [bfm[item] for bfm in self.bfms]
        return mquantiles(counts, prob=[0.375])[0]

    def increase_count(self, item, by):
        # Increment in unit steps so each map's probabilistic rounding
        # ("quantum leap") logic fires once per unit of count.
        for bfm in self.bfms:
            for i in xrange(int(by)):
                bfm.increase_count(item, 1.0)

    def __setitem__(self, item, val):
        self.bfms
        for bfm in self.bfms:
            bfm.__setitem__(item, val)
class BloomFreqMap(object):
    """Approximate frequency map backed by a bloom filter.

    A count c for *item* is represented by the membership of the strings
    ``item_1 .. item_q`` in the bloom filter, where q is the (optionally
    log-quantized) count.  Lookups scan for the largest q present, either
    linearly or by binary search once counts grow past a measured cutoff.
    """
    def __init__(self, b, bloom_size=500000, bloom_error=0.001,
                 cache_size=500, bin_search_lookback=3, quantum_leap=True):
        """
        b: base of the logarithm used to quantize counts; lower bases
            count more precisely but compress less.  None disables
            quantization entirely.
        bloom_size: the number of elements that can be stored in the
            internal bloom filter while keeping the specified error rate.
        bloom_error: the max error rate of the bloom filter so long as at
            most bloom_size elements are stored.
        cache_size: the max size of the internal LRU cache.
        bin_search_lookback: when a binary search is performed to find the
            frequency, don't just ensure that mid is in the bloom filter
            and mid+1 isn't, but also check this many counts to the left
            of mid (guards against false positives).
        quantum_leap: probabilistically round the quantized count upward
            so increments are unbiased on average.
        """
        self.bloom_size = bloom_size
        self.bloom_error = bloom_error
        self.bf = BloomFilter(capacity=bloom_size, error_rate=bloom_error)
        # Cache of quantized counts; misses fall back to plan_b_count.
        self.cache = LruCacher(cache_size, self.plan_b_count)
        self.base = b
        self.adjust = quantum_leap
        if b is None:
            # No quantization: stored count equals the true count.
            self.encode = lambda n: n
            self.decode = lambda n: n
        else:
            # encode maps a raw count to its quantized level; decode maps a
            # level back to the midpoint of its bucket [b**(q-1), b**q - 1].
            self.encode = lambda c: 1.0 + floor(log(c, b))
            self.decode = lambda q: (b**(q-1) + b**q - 1) / 2
        # Holds tokens whose count has reached the threshold at which a
        # binary-search lookup is faster than a linear scan.
        self.binsearch_bf = BloomFilter(capacity=bloom_size, error_rate=bloom_error)
        self.bin_search_lookback = bin_search_lookback
        self.bin_search_cutoff = self.determine_lookup_speed_threshold()

    def linear_scan_count(self, item):
        """Find the quantized count by probing item_1, item_2, ... in order."""
        bf = self.bf
        c = 1
        while item+'_'+str(c) in bf:
            c += 1
        return c-1

    def binsearch_count(self, item):
        """Find the quantized count by doubling then binary search."""
        bf = self.bf
        if item+'_'+str(1) not in bf:
            return 0
        # Find upper and lower bounds by doubling.
        c = 1
        while item+'_'+str(c) in bf:
            c *= 2
        upper = c
        lower = c/2
        while True:
            mid = lower + (upper-lower)/2
            # Accept mid when mid+1 is absent AND the lookback window of
            # counts at/below mid is all present (false-positive guard).
            if item+'_'+str(mid+1) not in bf and \
               all(item+'_'+str(i) in bf for i in \
                   xrange(mid, max(mid-self.bin_search_lookback-1, 0), -1)):
                return mid
            # Which side to follow?
            if item+'_'+str(mid) in bf:
                # Go up
                lower = mid + 1
            else:
                upper = mid - 1
        # We should never be here (loop always returns).
        assert 2+2==5

    def plan_b_count(self, item):
        """Cache-miss fallback: pick the faster scan strategy and run it."""
        # Use binary search once this item is known to be past the measured
        # speed threshold; otherwise a linear scan is cheaper.
        search = self.binsearch_count if item in self.binsearch_bf else self.linear_scan_count
        result = search(item)
        if result == 0:
            return 0
        return result

    def increase_count(self, item, by):
        # Unit steps so the probabilistic rounding in increment() fires
        # once per unit of count.
        for i in xrange(int(by)):
            self.increment(item, 1.0)

    def count(self, item):
        """Return the cached (quantized) count for *item*."""
        result, found_in_cache = self.cache.lookup(item)
        return result

    def __getitem__(self, item):
        # Decode the quantized level back into an approximate raw count.
        return self.decode(self.count(item))

    def __setitem__(self, item, val):
        # Counts are monotone: a bloom filter cannot forget, so a decrease
        # is silently ignored.
        cur_count = self.__getitem__(item)
        if val < cur_count:
            # logging.warning("Cannot decrease count of " + item + " from " + \
            #                 str(cur_count) + " to " + str(val))
            return
        self.increment(item, val-cur_count)

    def increment(self, item, by=1):
        """
        Increment the frequency of item by the amount "by" (default 1).
        """
        cur_q = self.count(item)
        cur_count = self.decode(cur_q)
        new_count = cur_count + by
        new_q = self.encode(new_count)
        quant_inc = new_q-cur_q
        # Record every newly reached quantized level in the filter.
        for i in xrange(int(cur_q)+1, int(new_q)+1):
            self.bf.add(item + '_'+ str(i))
        self.cache.update(item, new_q) #Not necessary. Just manually calculate it as (b**new_q + b**(new_q-1) - 1) /2
        if self.adjust:
            # "Quantum leap": with probability proportional to how far the
            # true count exceeds the bucket midpoint, bump one extra level
            # so quantization is unbiased in expectation.
            actual_new_count = self.__getitem__(item) #Can you get this without calling this function?
            p = 1.*(new_count-actual_new_count) / (self.decode(new_q+1) - self.decode(new_q))
            if random() <= p:
                new_q += 1
                self.bf.add(item + '_'+ str(int(new_q)))
                new_count = self.__getitem__(item) #Can you get this without calling this function?
        if new_q >= self.bin_search_cutoff:
            # From now on lookups for this item use binary search.
            self.binsearch_bf.add(item)
        # NOTE(review): cache.update was already called above; this second
        # call refreshes the entry after the possible quantum leap.
        self.cache.update(item, new_q)

    def determine_lookup_speed_threshold(self):
        """Empirically measure the count at which binary search beats a
        linear scan, using a throwaway bloom filter and a dummy token."""
        from time import time
        # Time each strategy `repetitions` times per candidate count.
        bf = BloomFilter(capacity=self.bloom_size, error_rate=self.bloom_error)
        count = 1
        repetitions = 5
        # Temporarily swap in the throwaway filter so the scan methods
        # operate on it; restored before returning.
        self_bf_holder = self.bf
        self.bf = bf
        while True:
            bf.add('andrew_' + str(count))
            bin_faster_count = 0
            for j in xrange(repetitions):
                # Linear scan
                t1 = time()
                self.linear_scan_count('andrew')
                t2 = time()
                linear_time = t2-t1

                t1 = time()
                self.binsearch_count('andrew')
                t2 = time()
                bin_time = t2-t1

                bin_faster_count += int(bin_time < linear_time)
            # Require binary search to win at least 75% of trials.
            if 1.*bin_faster_count / repetitions >= 0.75:
                del bf
                self.bf = self_bf_holder
                return count
            count += 1
|
NejcZupec/ggrc-core | refs/heads/develop | src/ggrc_basic_permissions/converters/__init__.py | 256 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
|
batermj/algorithm-challenger | refs/heads/master | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/unittest/test/test_case.py | 2 | import contextlib
import difflib
import pprint
import pickle
import re
import sys
import logging
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from unittest.test.support import (
TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
ResultWithNoStartTestRunStopTestRun
)
from test.support import captured_stderr
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
class Test(object):
    "Keep these TestCase classes out of the main namespace"

    class Foo(unittest.TestCase):
        # Minimal TestCase providing both the default runTest and a named
        # test method; used by the equality/hashing fixtures below.
        def runTest(self): pass
        def test1(self): pass

    class Bar(Foo):
        # Subclass with an extra test method, for inequality comparisons.
        def test2(self): pass

    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""

        def __init__(self, events):
            # Always runs the 'test' method; call markers are appended to
            # the shared *events* list so callers can assert call order.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events

        def setUp(self):
            self.events.append('setUp')

        def test(self):
            self.events.append('test')

        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
# Order is the following:
# i=1 => subtest failure
# i=2, j=2 => subtest success
# i=2, j=3 => subtest error
# i=3, j=2 => subtest error
# i=3, j=3 => subtest success
# toplevel => error
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
# With a legacy result object (without an addSubTest method),
# text execution stops after the first subtest failure.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
expected = (['startTest', 'setUp', 'test', 'tearDown']
+ 6 * ['addSubTestSuccess']
+ ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
# With a legacy result, only the whole test success is recorded.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSuccess', 'stopTest']
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
# Ensure proper test flow with subtests and failfast (issue #22894)
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable madDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**5
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**4)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**6)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertEqual_shorten(self):
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 0
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogenous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testEqualityBytesWarning(self):
if sys.flags.bytes_warning:
def bytes_warning():
return self.assertWarnsRegex(BytesWarning,
'Comparison between bytes and string')
else:
def bytes_warning():
return contextlib.ExitStack()
with bytes_warning(), self.assertRaises(self.failureException):
self.assertEqual('a', b'a')
with bytes_warning():
self.assertNotEqual('a', b'a')
a = [0, 'a']
b = [0, b'a']
with bytes_warning(), self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with bytes_warning(), self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual('a', b'a')
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
with self.assertRaises(self.failureException):
self.assertListEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), b)
a = [0, b'a']
b = [0]
with self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
a = [0]
b = [0, b'a']
with self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertDictEqual({'a': 0}, {b'a': 0})
with self.assertRaises(self.failureException):
self.assertDictEqual({}, {b'a': 0})
with self.assertRaises(self.failureException):
self.assertDictEqual({b'a': 0}, {})
with self.assertRaises(self.failureException):
self.assertCountEqual([b'a', b'a'], [b'a', b'a', b'a'])
with bytes_warning():
self.assertCountEqual(['a', b'a'], ['a', b'a'])
with bytes_warning(), self.assertRaises(self.failureException):
self.assertCountEqual(['a', 'a'], [b'a', b'a'])
with bytes_warning(), self.assertRaises(self.failureException):
self.assertCountEqual(['a', 'a', []], [b'a', b'a', []])
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesCallable(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaises(ExceptionMock, Stub)
# A tuple of exception classes is accepted
self.assertRaises((ValueError, ExceptionMock), Stub)
# *args and **kwargs also work
self.assertRaises(ValueError, int, '19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
self.assertRaises(ExceptionMock, lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertRaises(ExceptionMock, None)
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesContext(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
with self.assertRaises(ExceptionMock):
Stub()
# A tuple of exception classes is accepted
with self.assertRaises((ValueError, ExceptionMock)) as cm:
Stub()
# The context manager exposes caught exception
self.assertIsInstance(cm.exception, ExceptionMock)
self.assertEqual(cm.exception.args[0], 'We expect')
# *args and **kwargs also work
with self.assertRaises(ValueError):
int('19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
with self.assertRaises(ExceptionMock):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaises(ExceptionMock, msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertRaises(ExceptionMock, foobar=42):
pass
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaises()
with self.assertRaises(TypeError):
self.assertRaises(1)
with self.assertRaises(TypeError):
self.assertRaises(object)
with self.assertRaises(TypeError):
self.assertRaises((ValueError, 1))
with self.assertRaises(TypeError):
self.assertRaises((ValueError, object))
def testAssertRaisesRefcount(self):
# bpo-23890: assertRaises() must not keep objects alive longer
# than expected
def func() :
try:
raise ValueError
except ValueError:
raise ValueError
refcount = sys.getrefcount(func)
self.assertRaises(ValueError, func)
self.assertEqual(refcount, sys.getrefcount(func))
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
with self.assertWarns(DeprecationWarning):
self.assertRaisesRegex(ExceptionMock, 'expect$', None)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaisesRegex(Exception, 'expect', msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertRaisesRegex(Exception, 'expect', foobar=42):
pass
def testAssertRaisesRegexInvalidRegex(self):
# Issue 20145.
class MyExc(Exception):
pass
self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertWarnsRegexInvalidRegex(self):
# Issue 20145.
class MyWarn(Warning):
pass
self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertRaisesRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaisesRegex()
with self.assertRaises(TypeError):
self.assertRaisesRegex(ValueError)
with self.assertRaises(TypeError):
self.assertRaisesRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, 1), 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, object), 'expect')
    def testAssertWarnsCallable(self):
        """assertWarns in callable form: succeeds when the expected warning
        class (or a tuple containing it) is emitted, fails otherwise, and
        leaves unrelated warning filters untouched."""
        def _runtime_warn():
            warnings.warn("foo", RuntimeWarning)
        # Success when the right warning is triggered, even several times
        self.assertWarns(RuntimeWarning, _runtime_warn)
        self.assertWarns(RuntimeWarning, _runtime_warn)
        # A tuple of warning classes is accepted
        self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
        # *args and **kwargs also work
        self.assertWarns(RuntimeWarning,
                         warnings.warn, "foo", category=RuntimeWarning)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            self.assertWarns(RuntimeWarning, lambda: 0)
        # Failure when the function is None
        with self.assertWarns(DeprecationWarning):
            self.assertWarns(RuntimeWarning, None)
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                self.assertWarns(DeprecationWarning, _runtime_warn)
        # Filters for other warnings are not modified
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                self.assertWarns(DeprecationWarning, _runtime_warn)
    def testAssertWarnsContext(self):
        """assertWarns used as a context manager, mirroring the callable-form
        tests above, plus checks of the cm.warning / filename / lineno
        attributes exposed after the block exits."""
        # Believe it or not, it is preferable to duplicate all tests above,
        # to make sure the __warningregistry__ $@ is circumvented correctly.
        def _runtime_warn():
            warnings.warn("foo", RuntimeWarning)
        _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
        with self.assertWarns(RuntimeWarning) as cm:
            _runtime_warn()
        # A tuple of warning classes is accepted
        with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
            _runtime_warn()
        # The context manager exposes various useful attributes
        self.assertIsInstance(cm.warning, RuntimeWarning)
        self.assertEqual(cm.warning.args[0], "foo")
        self.assertIn("test_case.py", cm.filename)
        # warnings.warn() sits one line below the def of _runtime_warn
        self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
        # Same with several warnings
        with self.assertWarns(RuntimeWarning):
            _runtime_warn()
            _runtime_warn()
        with self.assertWarns(RuntimeWarning):
            warnings.warn("foo", category=RuntimeWarning)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            with self.assertWarns(RuntimeWarning):
                pass
        # Custom message
        with self.assertRaisesRegex(self.failureException, 'foobar'):
            with self.assertWarns(RuntimeWarning, msg='foobar'):
                pass
        # Invalid keyword argument
        with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
             self.assertRaises(AssertionError):
            with self.assertWarns(RuntimeWarning, foobar=42):
                pass
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                with self.assertWarns(DeprecationWarning):
                    _runtime_warn()
        # Filters for other warnings are not modified
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                with self.assertWarns(DeprecationWarning):
                    _runtime_warn()
def testAssertWarnsNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarns()
with self.assertRaises(TypeError):
self.assertWarns(1)
with self.assertRaises(TypeError):
self.assertWarns(object)
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, 1))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, object))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, Exception))
    def testAssertWarnsRegexCallable(self):
        """assertWarnsRegex in callable form: the warning class must match
        AND the warning message must match the given regex."""
        def _runtime_warn(msg):
            warnings.warn(msg, RuntimeWarning)
        self.assertWarnsRegex(RuntimeWarning, "o+",
                              _runtime_warn, "foox")
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  lambda: 0)
        # Failure when the function is None
        with self.assertWarns(DeprecationWarning):
            self.assertWarnsRegex(RuntimeWarning, "o+", None)
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                self.assertWarnsRegex(DeprecationWarning, "o+",
                                      _runtime_warn, "foox")
        # Failure when message doesn't match
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  _runtime_warn, "barz")
        # A little trickier: we ask RuntimeWarnings to be raised, and then
        # check for some of them. It is implementation-defined whether
        # non-matching RuntimeWarnings are simply re-raised, or produce a
        # failureException.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises((RuntimeWarning, self.failureException)):
                self.assertWarnsRegex(RuntimeWarning, "o+",
                                      _runtime_warn, "barz")
    def testAssertWarnsRegexContext(self):
        """assertWarnsRegex used as a context manager, mirroring the
        callable-form tests above, plus checks of the cm attributes."""
        # Same as above, but with assertWarnsRegex as a context manager
        def _runtime_warn(msg):
            warnings.warn(msg, RuntimeWarning)
        _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
        with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
            _runtime_warn("foox")
        self.assertIsInstance(cm.warning, RuntimeWarning)
        self.assertEqual(cm.warning.args[0], "foox")
        self.assertIn("test_case.py", cm.filename)
        # warnings.warn() sits one line below the def of _runtime_warn
        self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                pass
        # Custom message
        with self.assertRaisesRegex(self.failureException, 'foobar'):
            with self.assertWarnsRegex(RuntimeWarning, 'o+', msg='foobar'):
                pass
        # Invalid keyword argument
        with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
             self.assertRaises(AssertionError):
            with self.assertWarnsRegex(RuntimeWarning, 'o+', foobar=42):
                pass
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                with self.assertWarnsRegex(DeprecationWarning, "o+"):
                    _runtime_warn("foox")
        # Failure when message doesn't match
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                _runtime_warn("barz")
        # A little trickier: we ask RuntimeWarnings to be raised, and then
        # check for some of them. It is implementation-defined whether
        # non-matching RuntimeWarnings are simply re-raised, or produce a
        # failureException.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises((RuntimeWarning, self.failureException)):
                with self.assertWarnsRegex(RuntimeWarning, "o+"):
                    _runtime_warn("barz")
def testAssertWarnsRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarnsRegex()
with self.assertRaises(TypeError):
self.assertWarnsRegex(UserWarning)
with self.assertRaises(TypeError):
self.assertWarnsRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, 1), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, object), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, Exception), 'expect')
    @contextlib.contextmanager
    def assertNoStderr(self):
        """Context manager asserting that its body writes nothing to stderr."""
        with captured_stderr() as buf:
            yield
        self.assertEqual(buf.getvalue(), "")
def assertLogRecords(self, records, matches):
self.assertEqual(len(records), len(matches))
for rec, match in zip(records, matches):
self.assertIsInstance(rec, logging.LogRecord)
for k, v in match.items():
self.assertEqual(getattr(rec, k), v)
    def testAssertLogsDefaults(self):
        """With no arguments, assertLogs captures the root logger at INFO."""
        # defaults: root logger, level INFO
        with self.assertNoStderr():
            with self.assertLogs() as cm:
                log_foo.info("1")
                log_foobar.debug("2")  # DEBUG < INFO: filtered out
            self.assertEqual(cm.output, ["INFO:foo:1"])
            self.assertLogRecords(cm.records, [{'name': 'foo'}])
    def testAssertLogsTwoMatchingMessages(self):
        """Multiple matching records are all collected, in emission order."""
        # Same, but with two matching log messages
        with self.assertNoStderr():
            with self.assertLogs() as cm:
                log_foo.info("1")
                log_foobar.debug("2")  # DEBUG < INFO: filtered out
                log_quux.warning("3")
            self.assertEqual(cm.output, ["INFO:foo:1", "WARNING:quux:3"])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo'}, {'name': 'quux'}])
    def checkAssertLogsPerLevel(self, level):
        """Helper: *level* (numeric constant or level name) must filter out
        records below that level."""
        # Check level filtering
        with self.assertNoStderr():
            with self.assertLogs(level=level) as cm:
                log_foo.warning("1")  # WARNING < ERROR: filtered out
                log_foobar.error("2")
                log_quux.critical("3")
            self.assertEqual(cm.output, ["ERROR:foo.bar:2", "CRITICAL:quux:3"])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
    def checkAssertLogsPerLogger(self, logger):
        """Helper: the inner assertLogs captures only records from *logger*
        (and its children); unrelated records propagate to the outer one."""
        # Check per-logger filtering
        with self.assertNoStderr():
            with self.assertLogs(level='DEBUG') as outer_cm:
                with self.assertLogs(logger, level='DEBUG') as cm:
                    log_foo.info("1")
                    log_foobar.debug("2")
                    log_quux.warning("3")
                self.assertEqual(cm.output, ["INFO:foo:1", "DEBUG:foo.bar:2"])
                self.assertLogRecords(cm.records,
                                      [{'name': 'foo'}, {'name': 'foo.bar'}])
            # The outer catchall caught the quux log
            self.assertEqual(outer_cm.output, ["WARNING:quux:3"])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
    def testAssertLogsFailureNoLogs(self):
        """assertLogs must fail if the body emits no log records at all."""
        # Failure due to no logs
        with self.assertNoStderr():
            with self.assertRaises(self.failureException):
                with self.assertLogs():
                    pass
    def testAssertLogsFailureLevelTooHigh(self):
        """assertLogs must fail if all emitted records are below the
        requested level."""
        # Failure due to level too high
        with self.assertNoStderr():
            with self.assertRaises(self.failureException):
                with self.assertLogs(level='WARNING'):
                    log_foo.info("1")  # INFO < WARNING: does not count
    def testAssertLogsFailureMismatchingLogger(self):
        """assertLogs must fail when only a different logger fires; the
        record still propagates (here, to the outer assertLogs)."""
        # Failure due to mismatching logger (and the logged message is
        # passed through)
        with self.assertLogs('quux', level='ERROR'):
            with self.assertRaises(self.failureException):
                with self.assertLogs('foo'):
                    log_quux.error("1")
    def testDeprecatedMethodNames(self):
        """
        Test that the deprecated methods raise a DeprecationWarning. See #9424.
        """
        # Each entry pairs a deprecated bound method with arguments that
        # make the underlying assertion succeed, so only the warning fires.
        old = (
            (self.failIfEqual, (3, 5)),
            (self.assertNotEquals, (3, 5)),
            (self.failUnlessEqual, (3, 3)),
            (self.assertEquals, (3, 3)),
            (self.failUnlessAlmostEqual, (2.0, 2.0)),
            (self.assertAlmostEquals, (2.0, 2.0)),
            (self.failIfAlmostEqual, (3.0, 5.0)),
            (self.assertNotAlmostEquals, (3.0, 5.0)),
            (self.failUnless, (True,)),
            (self.assert_, (True,)),
            (self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
            (self.failIf, (False,)),
            (self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
            (self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
            (self.assertRegexpMatches, ('bar', 'bar')),
        )
        for meth, args in old:
            with self.assertWarns(DeprecationWarning):
                meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
    def test_no_exception_leak(self):
        # Issue #19880: TestCase.run() should not keep a reference
        # to the exception
        class MyException(Exception):
            # Count of live instances; the test asserts they were all
            # released after run() returns.
            ninstance = 0
            def __init__(self):
                MyException.ninstance += 1
                Exception.__init__(self)
            def __del__(self):
                MyException.ninstance -= 1
        class TestCase(unittest.TestCase):
            def test1(self):
                raise MyException()
            @unittest.expectedFailure
            def test2(self):
                raise MyException()
        # Both a plain failure and an expected failure must release the
        # raised exception once run() completes.
        for method_name in ('test1', 'test2'):
            testcase = TestCase(method_name)
            testcase.run()
            self.assertEqual(MyException.ninstance, 0)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.