| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
|---|---|---|---|---|---|---|
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.job import State
from kolibri.core.tasks.storage import Storage
DEFAULT_QUEUE = "ICEQUBE_DEFAULT_QUEUE"
class Queue(object):
def __init__(self, queue=DEFAULT_QUEUE, connection=None):
if connection is None:
raise ValueError("Connection must be defined")
self.name = queue
self.storage = Storage(connection)
def __len__(self):
return self.storage.count_all_jobs(self.name)
@property
def job_ids(self):
return [job.job_id for job in self.storage.get_all_jobs(self.name)]
@property
def jobs(self):
"""
Return all jobs, whether scheduled, queued, running, failed, or completed.
Returns: A list of all jobs.
"""
return self.storage.get_all_jobs(self.name)
def enqueue(self, func, *args, **kwargs):
"""
Enqueues a function func for execution.
One special parameter is track_progress. If passed in and not None, the func will be passed a
keyword parameter called update_progress:
def update_progress(progress, total_progress, stage=""):
The running function can call the update_progress function to notify interested parties of the function's
current progress.
Another special parameter is the "cancellable" keyword parameter. When passed in and not None, a special
"check_for_cancel" parameter is passed to the func. When called, it raises an error if the user has
requested that the job be cancelled.
The caller can also pass in any pickleable object into the "extra_metadata" parameter. This data is stored
within the job and can be retrieved when the job status is queried.
All other parameters are directly passed to the function when it starts running.
:type func: callable or str
:param func: A callable object that will be scheduled for running.
:return: a string representing the job_id.
"""
# if the func is already a job object, just schedule that directly.
if isinstance(func, Job):
job = func
# else, turn it into a job first.
else:
job = Job(func, *args, **kwargs)
job.state = State.QUEUED
job_id = self.storage.enqueue_job(job, self.name)
return job_id
def cancel(self, job_id):
"""
Mark a job as canceling, and let the worker pick this up to initiate
the cancel of the job.
:param job_id: the job_id of the Job to cancel.
"""
self.storage.mark_job_as_canceling(job_id)
def fetch_job(self, job_id):
"""
Returns a Job object corresponding to the job_id. From there, you can query for the following attributes:
- function string to run
- its current state (see Job.State for the list of states)
- progress (returning an int), total_progress (returning an int), and percentage_progress
(derived from running job.progress/total_progress)
- the job.exception and job.traceback, if the job's function returned an error
:param job_id: the job_id to get the Job object for
:return: the Job object corresponding to the job_id
"""
return self.storage.get_job(job_id)
def empty(self):
"""
Clear all jobs.
"""
self.storage.clear(force=True, queue=self.name)
def clear(self):
"""
Clear all succeeded, failed, or cancelled jobs.
"""
self.storage.clear(force=False, queue=self.name)
def clear_job(self, job_id):
"""
Clear a job if it has succeeded, failed, or been cancelled.
:type job_id: str
:param job_id: id of job to clear.
"""
self.storage.clear(job_id=job_id, force=False)
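# --- Usage sketch (editor's illustration, not part of the original module).
# Assumes the storage connection is a SQLAlchemy engine and that
# ``report_progress`` is a hypothetical task function:
#
#     from sqlalchemy import create_engine
#
#     def report_progress(update_progress=None):
#         for i in range(10):
#             if update_progress is not None:
#                 update_progress(i + 1, 10)
#
#     queue = Queue(connection=create_engine("sqlite://"))
#     job_id = queue.enqueue(report_progress, track_progress=True)
#     job = queue.fetch_job(job_id)  # query state/progress later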
| mrpau/kolibri | kolibri/core/tasks/queue.py | Python | mit | 3,851 | 0.002337 |
__author__ = 'PaleNeutron'
import os
from urllib.parse import urlparse, unquote
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
class MyMainWindow(QtWidgets.QMainWindow):
file_loaded = QtCore.pyqtSignal(str)
image_loaded = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self):
super(MyMainWindow, self).__init__()
self.windowList = []
self.text_path = ''
self.epub_path = ''
self.win_file_mime = "application/x-qt-windows-mime;value=\"FileNameW\""
self.text_uri_mime = "text/uri-list"
self.create_content_browser()
def create_content_browser(self):
self.content_browser = QtWidgets.QTextBrowser()
self.content_browser.setFontPointSize(12)
self.content_browser.setGeometry(QtCore.QRect(300, 150, 600, 400))
self.windowList.append(self.content_browser)
def dragEnterEvent(self, ev):
ev.accept()
def load_file(self, file_path):
self.file_loaded.emit(file_path)
# def image_loaded(self, file_path):
# with open(file_path, "b") as f:
# r = f.read()
# with open("images/cover.jpg", "wb") as f:
# f.write(r)
# def epub_loaded(self, file_path):
# self.epub_path = file_path
# self.file_loaded.emit(False, )
def uri_to_path(self, uri):
if sys.platform == "win32":
path = unquote(urlparse(uri).path)[1:]
elif sys.platform == "linux":
path = unquote(urlparse(uri).path)
else:
path = None
return path
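# Illustrative examples: on linux, "file:///home/user/a%20book.txt"
# maps to "/home/user/a book.txt"; on win32 the leading slash is
# stripped, so "file:///C:/books/a%20book.txt" maps to "C:/books/a book.txt".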
def dropEvent(self, ev):
# formats = ev.mimeData().formats()
# for i in formats:
# print(i)
# if ev.mimeData().hasFormat(self.win_file_mime):
# ev.accept()
# file_path = bytes(ev.mimeData().data(self.win_file_mime).data())[:-2].decode('utf16')
# if file_path.endswith(".txt"):
# self.text_loaded(file_path)
# elif file_path.endswith(".jpg") or file_path.endswith(".jpeg") or file_path.endswith(".png"):
# self.image_loaded(file_path)
# elif file_path.endswith(".epub"):
# self.epub_loaded(file_path)
# print(file_path)
if ev.mimeData().hasImage():
self.image_loaded.emit(ev.mimeData().imageData())
if ev.mimeData().hasFormat(self.text_uri_mime):
uri = ev.mimeData().data(self.text_uri_mime).data().decode("utf8").strip()
file_path = self.uri_to_path(uri)
if uri.lower().endswith(".txt") or uri.lower().endswith(".epub"):
self.load_file(file_path)
elif uri.lower().endswith(".zip"):
# Open the zip archive and grab the txt file inside it
import zipfile
zf = zipfile.ZipFile(file_path)
for filename in zf.namelist():
# If a txt file inside the archive is larger than 10 KB, extract it to the current folder
if filename.lower().endswith(".txt") and zf.getinfo(filename).file_size > 10 * 1024:
zf.extract(filename)
# Emit the file-location signal
self.load_file(os.curdir + os.sep + filename)
break
elif uri.lower().endswith(".rar"):
import rarfile
rf = rarfile.RarFile(file_path)
for filename in rf.namelist():
# If a txt file inside the archive is larger than 10 KB, extract it to the current folder
if filename.lower().endswith(".txt") and rf.getinfo(filename).file_size > 10 * 1024:
rf.extract(filename)
# Emit the file-location signal
self.load_file(os.curdir + os.sep + filename)
break
else:
ev.ignore()
| PaleNeutron/EpubBuilder | my_mainwindow.py | Python | apache-2.0 | 3,933 | 0.002628 |
#!/usr/bin/python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure
required: true
default: null
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot
required: false
default: no
choices: [ "yes", "no" ]
state:
description:
- Desired boolean value
required: true
default: null
choices: [ 'yes', 'no' ]
notes:
- Not tested on any Debian-based system
requirements: [ libselinux-python, libsemanage-python ]
author: "Stephen Fromm (@sfromm)"
'''
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import semanage
HAVE_SEMANAGE=True
except ImportError:
HAVE_SEMANAGE=False
def has_boolean_value(module, name):
bools = []
try:
rc, bools = selinux.security_get_boolean_names()
except OSError:
module.fail_json(msg="Failed to get list of boolean names")
if to_bytes(name) in bools:
return True
else:
return False
def get_boolean_value(module, name):
state = 0
try:
state = selinux.security_get_boolean_active(name)
except OSError:
module.fail_json(msg="Failed to determine current state for boolean %s" % name)
if state == 1:
return True
else:
return False
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot.
def semanage_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
handle = semanage.semanage_handle_create()
if handle is None:
module.fail_json(msg="Failed to create semanage library handle")
try:
managed = semanage.semanage_is_managed(handle)
if managed < 0:
module.fail_json(msg="Failed to determine whether policy is manage")
if managed == 0:
if os.getuid() == 0:
module.fail_json(msg="Cannot set persistent booleans without managed policy")
else:
module.fail_json(msg="Cannot set persistent booleans; please try as root")
if semanage.semanage_connect(handle) < 0:
module.fail_json(msg="Failed to connect to semanage")
if semanage.semanage_begin_transaction(handle) < 0:
module.fail_json(msg="Failed to begin semanage transaction")
rc, sebool = semanage.semanage_bool_create(handle)
if rc < 0:
module.fail_json(msg="Failed to create seboolean with semanage")
if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
module.fail_json(msg="Failed to set seboolean name with semanage")
semanage.semanage_bool_set_value(sebool, value)
rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
if rc < 0:
module.fail_json(msg="Failed to extract boolean key with semanage")
if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to modify boolean key with semanage")
if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to set boolean key active with semanage")
semanage.semanage_bool_key_free(boolkey)
semanage.semanage_bool_free(sebool)
semanage.semanage_set_reload(handle, 0)
if semanage.semanage_commit(handle) < 0:
module.fail_json(msg="Failed to commit changes to semanage")
semanage.semanage_disconnect(handle)
semanage.semanage_handle_destroy(handle)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
return True
def set_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
try:
rc = selinux.security_set_boolean(name, value)
except OSError:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
if rc == 0:
return True
else:
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True),
persistent=dict(default='no', type='bool'),
state=dict(required=True, type='bool')
),
supports_check_mode=True
)
if not HAVE_SELINUX:
module.fail_json(msg="This module requires libselinux-python support")
if not HAVE_SEMANAGE:
module.fail_json(msg="This module requires libsemanage-python support")
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
name = module.params['name']
persistent = module.params['persistent']
state = module.params['state']
result = {}
result['name'] = name
if hasattr(selinux, 'selinux_boolean_sub'):
# selinux_boolean_sub allows sites to rename a boolean and alias the old name
# Feature only available in selinux library since 2012.
name = selinux.selinux_boolean_sub(name)
if not has_boolean_value(module, name):
module.fail_json(msg="SELinux boolean %s does not exist." % name)
cur_value = get_boolean_value(module, name)
if cur_value == state:
result['state'] = cur_value
result['changed'] = False
module.exit_json(**result)
if module.check_mode:
module.exit_json(changed=True)
if persistent:
r = semanage_boolean_value(module, name, state)
else:
r = set_boolean_value(module, name, state)
result['changed'] = r
if not r:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
try:
selinux.security_commit_booleans()
except:
module.fail_json(msg="Failed to commit pending boolean %s value" % name)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils._text import to_bytes
if __name__ == '__main__':
main()
| andreaso/ansible | lib/ansible/modules/system/seboolean.py | Python | gpl-3.0 | 7,200 | 0.003194 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ####################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to main() is at the end of the file.
def _cli_parse(args):
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [options] package.module:app")
opt = parser.add_option
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opts, args = parser.parse_args(args[1:])
return opts, args, parser
def _cli_patch(args):
opts, _, _ = _cli_parse(args)
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ###########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
from inspect import getargspec
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser, Error as ConfigParserError
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser, \
Error as ConfigParserError
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self):
pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
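# Minimal usage sketch (editor's illustration): cache a computed value in a
# dict-like attribute; ``Holder`` is hypothetical.
#
#     class Holder(object):
#         def __init__(self):
#             self.storage = {}
#         @DictProperty('storage', 'answer', read_only=True)
#         def answer(self):
#             return 42  # computed once, then served from self.storage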
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
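# Usage sketch (editor's illustration): the first access runs the function
# and stores the result in the instance __dict__ under the same name, so
# later lookups bypass the descriptor entirely. ``Report`` is hypothetical.
#
#     class Report(object):
#         @cached_property
#         def total(self):
#             return sum(range(10**6))  # computed once per instance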
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
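# For example (illustrative):
#   _re_flatten(r'/(?P<page>[^/]+)/(edit)')  ->  r'/(?:[^/]+)/(?:edit)'
# Both the named and the plain group become non-capturing.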
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
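# Sketch of a custom filter (editor's illustration): a lowercase slug with
# no to_python/to_url conversion, then usable in rules as /posts/<title:slug>.
#
#     router.add_filter('slug', lambda conf: (r'[a-z0-9-]+', None, None))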
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" %
(rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
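# For example (illustrative): with a rule '/wiki/<page>' added under the
# name 'wiki', router.build('wiki', page='Start') returns '/wiki/Start';
# surplus keyword arguments end up in the query string.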
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
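# Usage sketch (editor's illustration): attach a callback with the decorator;
# ``request`` is bottle's thread-local request object.
#
#     @app.hook('before_request')
#     def log_request():
#         print(request.path)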
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
def _inner_handle():
# Maybe pass variables as locals for better performance?
try:
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return _inner_handle()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
try:
out = None
environ['bottle.app'] = self
request.bind(environ)
response.bind()
self.trigger_hook('before_request')
out = _inner_handle()
return out
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
self.trigger_hook('after_request')
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" :attr:`query_string`่งฃๆๆ:class:`FormsDict`.
่ฟไบๅผๆๆถ็งฐไธบโURLๅๆฐโๆโGETๅๆฐโ๏ผ
ไฝไธ่ฝไธโURL้้
็ฌฆโๆททๆท๏ผๅ ไธบๅฎไปฌ็ฑ:class:`Router`ๆไพใ"""
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. Invalid JSON raises a 400 error response. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'],
encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
        del self.environ[key]
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
    def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(s):
return s.title().replace('_', '-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', )),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
        :param httponly: prevents client-side javascript from reading this
               cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
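    # A minimal cookie sketch (names and the secret are illustrative). A signed
    # cookie created here can be read back with the same secret via
    # request.get_cookie(name, secret=...):
    #
    #     response.set_cookie('visits', '42', max_age=3600, path='/')
    #     response.set_cookie('account', ('alice', 1001), secret='s3cr3t')
    #     response.delete_cookie('visits', path='/')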
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure.
                json_response = dumps(rv)
                # Set the content type only if serialization succeeded.
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
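# A sketch of how TemplatePlugin picks up route configuration (route paths and
# template names are illustrative). Extra keywords passed to route() end up in
# route.config, so `template=...` is enough to trigger the view() decorator:
#
#     @route('/hello/<name>', template='hello_page')
#     def hello(name):
#         return dict(name=name)   # rendered with the 'hello_page' template
#
#     @route('/about', template=('about_page', dict(author='me')))
#     def about():
#         return dict()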
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
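# A short sketch of the MultiDict semantics described above (values are
# illustrative): item access returns the newest value, while the full list of
# values stays reachable through getall():
#
#     >>> d = MultiDict(key='first')
#     >>> d['key'] = 'second'          # appends instead of overwriting
#     >>> d['key']
#     'second'
#     >>> d.getall('key')
#     ['first', 'second']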
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
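# FormsDict in practice (key names are illustrative): attribute access decodes
# values and never raises for missing keys, which keeps form handling terse:
#
#     >>> form = FormsDict(name='value')
#     >>> form.name                    # attribute access -> decoded value
#     'value'
#     >>> form.missing                 # missing attributes -> empty string
#     ''
#     >>> form.getunicode('name', default=None)
#     'value'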
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
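# Reading headers through this wrapper (environ values are illustrative): keys
# are case-insensitive and are translated to the CGI/WSGI naming convention:
#
#     >>> h = WSGIHeaderDict({'HTTP_USER_AGENT': 'curl',
#     ...                     'CONTENT_TYPE': 'text/plain'})
#     >>> h['User-Agent']
#     'curl'
#     >>> h['content-type']            # no HTTP_ prefix for CGI keys
#     'text/plain'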
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_module(self, path, squash):
""" Load values from a Python module.
:param squash: Squash nested dicts into namespaces by using
load_dict(), otherwise use update()
            Example: load_module('my.app.settings', True)
            Example: load_module('my.app.settings', False)
"""
config_obj = __import__(path)
obj = dict([(key, getattr(config_obj, key))
for key in dir(config_obj) if key.isupper()])
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
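# ConfigDict meta fields at work (key and filter are illustrative): a 'filter'
# callable attached to a key is applied on every assignment by __setitem__:
#
#     >>> c = ConfigDict()
#     >>> c.meta_set('workers', 'filter', int)
#     >>> c['workers'] = '4'           # the filter casts the value
#     >>> c['workers']
#     4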
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
            :param index: Position within the list of search paths. Defaults
                to last index (appends to the list).
            :param create: If True, create the directory if it does not exist
                yet. (default: False)
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
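# A minimal ResourceManager sketch (paths and file names are illustrative):
# register search paths relative to the current module, then look up or open
# bundled files:
#
#     rm = ResourceManager()
#     rm.add_path('./resources/', base=__file__)
#     found = rm.lookup('defaults.ini')        # absolute path or None
#     if found:
#         with rm.open('defaults.ini') as fp:
#             data = fp.read()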
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
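# Handling an upload with FileUpload.save() (route, field name and target
# directory are illustrative; request.files holds FileUpload instances):
#
#     @route('/upload', method='POST')
#     def do_upload():
#         upload = request.files.get('data')
#         if upload is None:
#             abort(400, 'No file uploaded.')
#         upload.save('/tmp/uploads')   # directory: filename is appended
#         return 'Saved as %s' % upload.filename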
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
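# Both helpers raise, so they end the request handler immediately (routes are
# illustrative):
#
#     @route('/old')
#     def old_page():
#         redirect('/new')              # 303 on HTTP/1.1, 302 otherwise
#
#     @route('/admin')
#     def admin_page():
#         abort(401, 'Access denied.')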
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype='auto',
download=False,
charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
if download and download != True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
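# Typical static_file() usage (the root directories are illustrative). The
# ``:path`` filter allows slashes in the matched URL fragment:
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='/var/www/static')
#
#     @route('/download/<name>')
#     def force_download(name):
#         return static_file(name, root='/var/www/files', download=True)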
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end:  # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
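# How the three range forms are interpreted (values are illustrative; the
# yielded end index is exclusive):
#
#     >>> list(parse_range_header('bytes=0-99,500-,-100', 1000))
#     [(0, 100), (500, 1000), (900, 1000)]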
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
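# The signing round-trip used for signed cookies (the secret is illustrative).
# A wrong key makes cookie_decode() return None:
#
#     >>> token = cookie_encode(('user', 42), 's3cr3t')
#     >>> cookie_decode(token, 's3cr3t')
#     ('user', 42)
#     >>> cookie_decode(token, 'wrong-key') is None
#     True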
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&').replace('<', '<').replace('>', '>')\
.replace('"', '"').replace("'", ''')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
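# path_shift() in action (paths are illustrative): one fragment moves from
# PATH_INFO to SCRIPT_NAME with shift=1, and back again with shift=-1:
#
#     >>> path_shift('/a', '/b/c', shift=1)
#     ('/a/b', '/c')
#     >>> path_shift('/a/b', '/c', shift=-1)
#     ('/a', '/b/c')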
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
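# Protecting a route with auth_basic() (credentials and realm are
# illustrative; the check callback receives the decoded Authorization values):
#
#     def check_credentials(user, password):
#         return user == 'admin' and password == 's3cr3t'
#
#     @route('/secret')
#     @auth_basic(check_credentials, realm='admin area')
#     def secret_page():
#         return 'Hello, admin!'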
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port  # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self, handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AiohttpServer(ServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO': GeventSocketIOServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
        default_app.remove(tmp)  # Remove the temporarily added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
               all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
        :param kargs: Additional options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
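# Illustrative sketch (not part of the original module): a minimal dev-server
# invocation; 'myapp:application' is a hypothetical load_app() target.
#
#     run('myapp:application', host='0.0.0.0', port=8080,
#         server='wsgiref', reloader=True, debug=True)
#
# With reloader=True, run() re-executes itself in a 'BOTTLE_CHILD' subprocess
# and restarts it whenever the child exits with status 3 (set by
# FileCheckerThread below when a module file changes).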
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.',
True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are not supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
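# Illustrative sketch (not part of the original module): SimpleTemplate can
# be used directly, bypassing the template() cache defined further below.
def _simpletemplate_usage_sketch():
    tpl = SimpleTemplate('Hello {{name.title()}}!')
    return tpl.render(name='world')  # -> 'Hello World!'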
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into nine different
    # tokens. We use the verbose (?x) regex mode to make this more manageable
    _re_tok = _re_inl = r'''((?mx)        # verbose and multi-line regex mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
        if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
            if self.paren_depth > 0 and (_blk1 or _blk2):  # inline 'a if b else c', not a block
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
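# Illustrative sketch (not part of the original module): the first argument
# may be the template source itself or a template name searched in
# TEMPLATE_PATH ('mytemplate' below is a hypothetical ./views/mytemplate.tpl).
def _template_usage_sketch():
    inline = template('Hello {{name}}!', name='World')  # source detected via '{'
    page = template('mytemplate', items=[1, 2, 3])      # by name; needs the file to exist
    return inline, page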
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
        The handler can control its behavior like this:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
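# Illustrative sketch (not part of the original module): a handler returning
# a dict is rendered through the named template; other return values pass
# through untouched. 'user_page' is a hypothetical template name.
#
#     @route('/user/<name>')
#     @view('user_page', title='User page')
#     def user(name):
#         return dict(name=name)  # fills {{name}} and {{title}}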
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cli_parse(sys.argv)
def _cli_error(msg):
parser.print_help()
_stderr('\nError: %s\n' % msg)
sys.exit(1)
if opt.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in opt.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except ConfigParserError:
_cli_error(str(_e()))
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError):
_cli_error("Unable to parse config file %r: %s" % (cfile, _e()))
for cval in opt.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args[0],
host=host,
port=int(port),
server=opt.server,
reloader=opt.reload,
plugins=opt.plugin,
debug=opt.debug,
config=config)
# THE END
|
hackersql/sq1map
|
thirdparty/bottle/bottle.py
|
Python
|
gpl-3.0
| 152,507
| 0.001489
|
import numpy as np
def zero_mean_normalize_image_data(data, axis=(0, 1, 2)):
return np.divide(data - data.mean(axis=axis), data.std(axis=axis))
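# Illustrative sketch (not part of the original module): with axis=(0, 1, 2)
# each channel of a 4D (x, y, z, channels) volume is standardized
# independently, ending up with zero mean and unit variance per channel.
def _zero_mean_example():
    volume = np.random.rand(8, 8, 8, 2) * 100  # hypothetical 2-channel volume
    normalized = zero_mean_normalize_image_data(volume)
    return normalized.mean(axis=(0, 1, 2)), normalized.std(axis=(0, 1, 2))  # ~0 and ~1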
def foreground_zero_mean_normalize_image_data(data, channel_dim=4, background_value=0, tolerance=1e-5):
data = np.copy(data)
if data.ndim == channel_dim or data.shape[channel_dim] == 1:
        # only 1 channel, so the std and mean calculations are straightforward
foreground_mask = np.abs(data) > (background_value + tolerance)
foreground = data[foreground_mask]
mean = foreground.mean()
std = foreground.std()
data[foreground_mask] = np.divide(foreground - mean, std)
return data
else:
# std and mean need to be calculated for each channel in the 4th dimension
for channel in range(data.shape[channel_dim]):
channel_data = data[..., channel]
channel_mask = np.abs(channel_data) > (background_value + tolerance)
channel_foreground = channel_data[channel_mask]
channel_mean = channel_foreground.mean()
channel_std = channel_foreground.std()
channel_data[channel_mask] = np.divide(channel_foreground - channel_mean, channel_std)
data[..., channel] = channel_data
return data
def zero_floor_normalize_image_data(data, axis=(0, 1, 2), floor_percentile=1, floor=0):
floor_threshold = np.percentile(data, floor_percentile, axis=axis)
    if data.ndim != len(axis):
        # Tuple repetition spreads the per-channel extent across every dim;
        # the reduced axes are then set back to 1 so the thresholds broadcast.
        floor_threshold_shape = np.asarray(floor_threshold.shape * data.ndim)
        floor_threshold_shape[np.asarray(axis)] = 1
        floor_threshold = floor_threshold.reshape(floor_threshold_shape)
background = data <= floor_threshold
data = np.ma.masked_array(data - floor_threshold, mask=background)
std = data.std(axis=axis)
if data.ndim != len(axis):
std = std.reshape(floor_threshold_shape)
return np.divide(data, std).filled(floor)
def zero_one_window(data, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1, floor=0, ceiling=1,
channels_axis=None):
"""
:param data: Numpy ndarray.
:param axis:
:param ceiling_percentile: Percentile value of the foreground to set to the ceiling.
:param floor_percentile: Percentile value of the image to set to the floor.
:param floor: New minimum value.
:param ceiling: New maximum value.
:param channels_axis:
:return:
"""
data = np.copy(data)
if len(axis) != data.ndim:
floor_threshold = np.percentile(data, floor_percentile, axis=axis)
if channels_axis is None:
channels_axis = find_channel_axis(data.ndim, axis=axis)
data = np.moveaxis(data, channels_axis, 0)
for channel in range(data.shape[0]):
channel_data = data[channel]
# find the background
bg_mask = channel_data <= floor_threshold[channel]
# use background to find foreground
            fg = channel_data[~bg_mask]
# find threshold based on foreground percentile
ceiling_threshold = np.percentile(fg, ceiling_percentile)
# normalize the data for this channel
data[channel] = window_data(channel_data, floor_threshold=floor_threshold[channel],
ceiling_threshold=ceiling_threshold, floor=floor, ceiling=ceiling)
data = np.moveaxis(data, 0, channels_axis)
else:
floor_threshold = np.percentile(data, floor_percentile)
fg_mask = data > floor_threshold
fg = data[fg_mask]
ceiling_threshold = np.percentile(fg, ceiling_percentile)
data = window_data(data, floor_threshold=floor_threshold, ceiling_threshold=ceiling_threshold, floor=floor,
ceiling=ceiling)
return data
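# Illustrative sketch (not part of the original module): clip a single-channel
# volume to its 1st/99th percentiles and rescale the result into [0, 1].
def _zero_one_window_example():
    volume = np.random.rand(16, 16, 16) * 1000  # hypothetical volume
    return zero_one_window(volume, axis=(0, 1, 2))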
def find_channel_axis(ndim, axis):
for i in range(ndim):
if i not in axis and (i - ndim) not in axis:
            # The second check handles negative axis indices: axis 3 of a
            # 4D array may also be given as -1, so both forms are excluded.
channels_axis = i
return channels_axis
def static_windows(data, windows, floor=0, ceiling=1):
"""
    Normalizes the data according to a set of predefined windows. This is helpful for CT normalization, where the
    units are static and radiologists often have a set of windowing parameters that they use to look at
    different features in the image.
:param data: 3D numpy array.
:param windows:
:param floor: defaults to 0.
:param ceiling: defaults to 1.
:return: Array with data windows listed in the final dimension
"""
data = np.squeeze(data)
normalized_data = np.ones(data.shape + (len(windows),)) * floor
for i, (l, w) in enumerate(windows):
normalized_data[..., i] = radiology_style_windowing(data, l, w, floor=floor, ceiling=ceiling)
return normalized_data
def radiology_style_windowing(data, l, w, floor=0, ceiling=1):
upper = l + w/2
lower = l - w/2
return window_data(data, floor_threshold=lower, ceiling_threshold=upper, floor=floor, ceiling=ceiling)
def window_data(data, floor_threshold, ceiling_threshold, floor, ceiling):
data = (data - floor_threshold) / (ceiling_threshold - floor_threshold)
# set the data below the floor to equal the floor
data[data < floor] = floor
# set the data above the ceiling to equal the ceiling
data[data > ceiling] = ceiling
return data
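# Illustrative sketch (not part of the original module): a common abdominal
# soft-tissue CT window of level l=40 HU and width w=400 HU maps the
# [-160, 240] HU range onto [0, 1]; values outside it are clipped.
def _ct_window_example(ct_volume):
    return radiology_style_windowing(ct_volume, l=40, w=400)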
def hist_match(source, template):
"""
Source: https://stackoverflow.com/a/33047048
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
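# Illustrative sketch (not part of the original module): match one scan's
# intensity distribution to a reference scan, e.g. before pooled training.
def _hist_match_example():
    source = np.random.rand(32, 32) * 255     # hypothetical image
    reference = np.random.rand(64, 64) * 100  # reference with a different range
    return hist_match(source, reference)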
|
ellisdg/3DUnetCNN
|
unet3d/utils/normalize.py
|
Python
|
mit
| 7,176
| 0.002508
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import os
import struct
import logging
import stat
from .exceptions import ParseError
from . import core
from six import byte2int, indexbytes
# get logging object
log = logging.getLogger(__name__)
# #------------------------------------------------------------------------
# # START_CODE
# #
# # Start Codes, with 'slice' occupying 0x01..0xAF
# #------------------------------------------------------------------------
START_CODE = {
0x00: 'picture_start_code',
0xB0: 'reserved',
0xB1: 'reserved',
0xB2: 'user_data_start_code',
0xB3: 'sequence_header_code',
0xB4: 'sequence_error_code',
0xB5: 'extension_start_code',
0xB6: 'reserved',
0xB7: 'sequence end',
0xB8: 'group of pictures',
}
for i in range(0x01, 0xAF):
START_CODE[i] = 'slice_start_code'
# #------------------------------------------------------------------------
# # START CODES
# #------------------------------------------------------------------------
PICTURE = 0x00
USERDATA = 0xB2
SEQ_HEAD = 0xB3
SEQ_ERR = 0xB4
EXT_START = 0xB5
SEQ_END = 0xB7
GOP = 0xB8
SEQ_START_CODE = 0xB3
PACK_PKT = 0xBA
SYS_PKT = 0xBB
PADDING_PKT = 0xBE
AUDIO_PKT = 0xC0
VIDEO_PKT = 0xE0
PRIVATE_STREAM1 = 0xBD
PRIVATE_STREAM2 = 0xBf
TS_PACKET_LENGTH = 188
TS_SYNC = 0x47
# #------------------------------------------------------------------------
# # FRAME_RATE
# #
# # A lookup table of all the standard frame rates. Some rates adhere to
# # a particular profile that ensures compatibility with VLSI capabilities
# # of the early to mid 1990s.
# #
# # CPB
# # Constrained Parameters Bitstreams, an MPEG-1 set of sampling and
# # bitstream parameters designed to normalize decoder computational
# # complexity, buffer size, and memory bandwidth while still addressing
# # the widest possible range of applications.
# #
# # Main Level
# # MPEG-2 Video Main Profile and Main Level is analogous to MPEG-1's
# # CPB, with sampling limits at CCIR 601 parameters (720x480x30 Hz or
# # 720x576x24 Hz).
# #
# #------------------------------------------------------------------------
FRAME_RATE = [
0,
24000.0 / 1001, # # 3-2 pulldown NTSC (CPB/Main Level)
24, # # Film (CPB/Main Level)
25, # # PAL/SECAM or 625/60 video
30000.0 / 1001, # # NTSC (CPB/Main Level)
30, # # drop-frame NTSC or component 525/60 (CPB/Main Level)
50, # # double-rate PAL
60000.0 / 1001, # # double-rate NTSC
60, # # double-rate, drop-frame NTSC/component 525/60 video
]
# #------------------------------------------------------------------------
# # ASPECT_RATIO -- INCOMPLETE?
# #
# # This lookup table maps the header aspect ratio index to a float value.
# # These are just the defined ratios for CPB I believe. As I understand
# # it, a stream that doesn't adhere to one of these aspect ratios is
# # technically considered non-compliant.
# #------------------------------------------------------------------------
ASPECT_RATIO = (None, # Forbidden
1.0, # 1/1 (VGA)
4.0 / 3, # 4/3 (TV)
16.0 / 9, # 16/9 (Widescreen)
2.21 # (Cinema)
)
class MPEG(core.AVContainer):
"""
Parser for various MPEG files. This includes MPEG-1 and MPEG-2
program streams, elementary streams and transport streams. The
reported length differs from the length reported by most video
players but the provides length here is correct. An MPEG file has
no additional metadata like title, etc; only codecs, length and
resolution is reported back.
"""
def __init__(self, file):
core.AVContainer.__init__(self)
self.sequence_header_offset = 0
self.mpeg_version = 2
self.get_time = None
self.audio = []
self.video = []
self.start = None
self.__seek_size__ = None
self.__sample_size__ = None
self.__search__ = None
self.filename = None
self.length = None
self.audio_ok = None
# detect TS (fast scan)
if not self.isTS(file):
# detect system mpeg (many infos)
if not self.isMPEG(file):
# detect PES
if not self.isPES(file):
# Maybe it's MPEG-ES
if self.isES(file):
# If isES() succeeds, we needn't do anything further.
return
if file.name.lower().endswith('mpeg') or \
file.name.lower().endswith('mpg'):
# This has to be an mpeg file. It could be a bad
# recording from an ivtv based hardware encoder with
                        # some bytes missing at the beginning.
# Do some more digging...
if not self.isMPEG(file, force=True) or \
not self.video or not self.audio:
# does not look like an mpeg at all
raise ParseError()
else:
# no mpeg at all
raise ParseError()
self.mime = 'video/mpeg'
if not self.video:
self.video.append(core.VideoStream())
if self.sequence_header_offset <= 0:
return
self.progressive(file)
for vi in self.video:
vi.width, vi.height = self.dxy(file)
vi.fps, vi.aspect = self.framerate_aspect(file)
vi.bitrate = self.bitrate(file)
if self.length:
vi.length = self.length
if not self.type:
self.type = 'MPEG Video'
# set fourcc codec for video and audio
vc, ac = 'MP2V', 'MP2A'
if self.mpeg_version == 1:
vc, ac = 'MPEG', 0x0050
for v in self.video:
v.codec = vc
for a in self.audio:
if not a.codec:
a.codec = ac
def dxy(self, file):
"""
get width and height of the video
"""
file.seek(self.sequence_header_offset + 4, 0)
v = file.read(4)
x = struct.unpack('>H', v[:2])[0] >> 4
y = struct.unpack('>H', v[1:3])[0] & 0x0FFF
return x, y
def framerate_aspect(self, file):
"""
read framerate and aspect ratio
"""
file.seek(self.sequence_header_offset + 7, 0)
v = struct.unpack('>B', file.read(1))[0]
try:
fps = FRAME_RATE[v & 0xf]
except IndexError:
fps = None
if v >> 4 < len(ASPECT_RATIO):
aspect = ASPECT_RATIO[v >> 4]
else:
aspect = None
return fps, aspect
def progressive(self, file):
"""
Try to find out with brute force if the mpeg is interlaced or not.
Search for the Sequence_Extension in the extension header (01B5)
"""
file.seek(0)
buffer = ''
count = 0
while 1:
if len(buffer) < 1000:
count += 1
if count > 1000:
break
buffer += file.read(1024)
if len(buffer) < 1000:
break
pos = buffer.find('\x00\x00\x01\xb5')
if pos == -1 or len(buffer) - pos < 5:
buffer = buffer[-10:]
continue
ext = (indexbytes(buffer, pos + 4) >> 4)
if ext == 8:
pass
elif ext == 1:
if (indexbytes(buffer, pos + 5) >> 3) & 1:
self._set('progressive', True)
else:
self._set('interlaced', True)
return True
else:
log.debug(u'ext: %r' % ext)
buffer = buffer[pos + 4:]
return False
# #------------------------------------------------------------------------
# # bitrate()
# #
# # From the MPEG-2.2 spec:
# #
# # bit_rate -- This is a 30-bit integer. The lower 18 bits of the
# # integer are in bit_rate_value and the upper 12 bits are in
# # bit_rate_extension. The 30-bit integer specifies the bitrate of the
# # bitstream measured in units of 400 bits/second, rounded upwards.
# # The value zero is forbidden.
# #
# # So ignoring all the variable bitrate stuff for now, this 30 bit integer
# # multiplied times 400 bits/sec should give the rate in bits/sec.
# #
# # TODO: Variable bitrates? I need one that implements this.
# #
# # Continued from the MPEG-2.2 spec:
# #
# # If the bitstream is a constant bitrate stream, the bitrate specified
# # is the actual rate of operation of the VBV specified in annex C. If
# # the bitstream is a variable bitrate stream, the STD specifications in
# # ISO/IEC 13818-1 supersede the VBV, and the bitrate specified here is
# # used to dimension the transport stream STD (2.4.2 in ITU-T Rec. xxx |
# # ISO/IEC 13818-1), or the program stream STD (2.4.5 in ITU-T Rec. xxx |
# # ISO/IEC 13818-1).
# #
# # If the bitstream is not a constant rate bitstream the vbv_delay
# # field shall have the value FFFF in hexadecimal.
# #
# # Given the value encoded in the bitrate field, the bitstream shall be
# # generated so that the video encoding and the worst case multiplex
# # jitter do not cause STD buffer overflow or underflow.
# #
# #
# # ------------------------------------------------------------------------
# # Some parts in the code are based on mpgtx (mpgtx.sf.net)
def bitrate(self, file):
"""
read the bitrate (most of the time broken)
"""
file.seek(self.sequence_header_offset + 8, 0)
t, b = struct.unpack('>HB', file.read(3))
vrate = t << 2 | b >> 6
return vrate * 400
@staticmethod
def ReadSCRMpeg2(buffer):
"""
read SCR (timestamp) for MPEG2 at the buffer beginning (6 Bytes)
"""
if len(buffer) < 6:
return None
highbit = (byte2int(buffer) & 0x20) >> 5
low4Bytes = ((int(byte2int(buffer)) & 0x18) >> 3) << 30
low4Bytes |= (byte2int(buffer) & 0x03) << 28
low4Bytes |= indexbytes(buffer, 1) << 20
low4Bytes |= (indexbytes(buffer, 2) & 0xF8) << 12
low4Bytes |= (indexbytes(buffer, 2) & 0x03) << 13
low4Bytes |= indexbytes(buffer, 3) << 5
low4Bytes |= (indexbytes(buffer, 4)) >> 3
sys_clock_ref = (indexbytes(buffer, 4) & 0x3) << 7
sys_clock_ref |= (indexbytes(buffer, 5) >> 1)
return (int(highbit * (1 << 16) * (1 << 16)) + low4Bytes) / 90000
@staticmethod
def ReadSCRMpeg1(buffer):
"""
read SCR (timestamp) for MPEG1 at the buffer beginning (5 Bytes)
"""
if len(buffer) < 5:
return None
highbit = (byte2int(buffer) >> 3) & 0x01
low4Bytes = ((int(byte2int(buffer)) >> 1) & 0x03) << 30
low4Bytes |= indexbytes(buffer, 1) << 22
low4Bytes |= (indexbytes(buffer, 2) >> 1) << 15
low4Bytes |= indexbytes(buffer, 3) << 7
low4Bytes |= indexbytes(buffer, 4) >> 1
return (int(highbit) * (1 << 16) * (1 << 16) + low4Bytes) / 90000
@staticmethod
def ReadPTS(buffer):
"""
read PTS (PES timestamp) at the buffer beginning (5 Bytes)
"""
high = ((byte2int(buffer) & 0xF) >> 1)
med = (indexbytes(buffer, 1) << 7) + (indexbytes(buffer, 2) >> 1)
low = (indexbytes(buffer, 3) << 7) + (indexbytes(buffer, 4) >> 1)
return ((int(high) << 30) + (med << 15) + low) / 90000
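    # Illustrative note (not part of the original module): ReadPTS reassembles
    # a 33-bit timestamp from five bytes laid out as 3 + 15 + 15 value bits,
    # each group followed by a marker bit, then divides by the 90 kHz MPEG
    # system clock. E.g. a raw PTS of 2,700,000 ticks is 30.0 seconds.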
def ReadHeader(self, buffer, offset):
"""
        Handle an MPEG header in buffer at position offset.
        Return None on error, the new offset, or 0 if the new offset can't be scanned.
"""
if buffer[offset:offset + 3] != '\x00\x00\x01':
return None
_id = indexbytes(buffer, offset + 3)
if _id == PADDING_PKT:
return offset + (indexbytes(buffer, offset + 4) << 8) + \
indexbytes(buffer, offset + 5) + 6
if _id == PACK_PKT:
if indexbytes(buffer, offset + 4) & 0xF0 == 0x20:
self.type = 'MPEG-1 Video'
self.get_time = self.ReadSCRMpeg1
self.mpeg_version = 1
return offset + 12
elif (indexbytes(buffer, offset + 4) & 0xC0) == 0x40:
self.type = 'MPEG-2 Video'
self.get_time = self.ReadSCRMpeg2
return offset + (indexbytes(buffer, offset + 13) & 0x07) + 14
else:
# I have no idea what just happened, but for some DVB
# recordings done with mencoder this points to a
# PACK_PKT describing something odd. Returning 0 here
# (let's hope there are no extensions in the header)
# fixes it.
return 0
if 0xC0 <= _id <= 0xDF:
# code for audio stream
for a in self.audio:
if a.id == _id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', _id)
return 0
if 0xE0 <= _id <= 0xEF:
# code for video stream
for v in self.video:
if v.id == _id:
break
else:
self.video.append(core.VideoStream())
self.video[-1]._set('id', _id)
return 0
if _id == SEQ_HEAD:
# sequence header, remember that position for later use
self.sequence_header_offset = offset
return 0
if _id in [PRIVATE_STREAM1, PRIVATE_STREAM2]:
# private stream. we don't know, but maybe we can guess later
add = indexbytes(buffer, offset + 8)
# if (indexbytes(buffer, offset+6) & 4) or 1:
# id = indexbytes(buffer, offset+10+add)
if buffer[offset + 11 + add:offset + 15 + add].find('\x0b\x77') != -1:
# AC3 stream
for a in self.audio:
if a.id == _id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', _id)
self.audio[-1].codec = 0x2000 # AC3
return 0
if _id == SYS_PKT:
return 0
if _id == EXT_START:
return 0
return 0
# Normal MPEG (VCD, SVCD) ========================================
def isMPEG(self, file, force=False):
"""
This MPEG starts with a sequence of 0x00 followed by a PACK Header
http://dvd.sourceforge.net/dvdinfo/packhdr.html
"""
file.seek(0, 0)
buffer = file.read(10000)
offset = 0
        # skip leading zero bytes until they stop
while offset < len(buffer) - 100 and buffer[offset] == '\0':
offset += 1
offset -= 2
# test for mpeg header 0x00 0x00 0x01
header = '\x00\x00\x01%s' % chr(PACK_PKT)
if offset < 0 or not buffer[offset:offset + 4] == header:
if not force:
return 0
# brute force and try to find the pack header in the first
# 10000 bytes somehow
offset = buffer.find(header)
if offset < 0:
return 0
# scan the 100000 bytes of data
buffer += file.read(100000)
# scan first header, to get basic info about
# how to read a timestamp
self.ReadHeader(buffer, offset)
# store first timestamp
self.start = self.get_time(buffer[offset + 4:])
while len(buffer) > offset + 1000 and \
buffer[offset:offset + 3] == '\x00\x00\x01':
# read the mpeg header
new_offset = self.ReadHeader(buffer, offset)
# header scanning detected error, this is no mpeg
if new_offset is None:
return 0
if new_offset:
# we have a new offset
offset = new_offset
# skip padding 0 before a new header
while len(buffer) > offset + 10 and \
not indexbytes(buffer, offset + 2):
offset += 1
else:
# seek to new header by brute force
offset += buffer[offset + 4:].find('\x00\x00\x01') + 4
# fill in values for support functions:
self.__seek_size__ = 1000000
self.__sample_size__ = 10000
self.__search__ = self._find_timer_
self.filename = file.name
# get length of the file
self.length = self.get_length()
return 1
@staticmethod
def _find_timer_(buffer):
"""
Return position of timer in buffer or None if not found.
This function is valid for 'normal' mpeg files
"""
pos = buffer.find('\x00\x00\x01%s' % chr(PACK_PKT))
if pos == -1:
return None
return pos + 4
# PES ============================================================
def ReadPESHeader(self, offset, buffer, id=0):
"""
Parse a PES header.
Since it starts with 0x00 0x00 0x01 like 'normal' mpegs, this
        function will return (0, None) when it is not a PES header or
(packet length, timestamp position (maybe None))
http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
"""
if not buffer[0:3] == '\x00\x00\x01':
return 0, None
packet_length = (indexbytes(buffer, 4) << 8) + indexbytes(buffer, 5) + 6
align = indexbytes(buffer, 6) & 4
header_length = indexbytes(buffer, 8)
# PES ID (starting with 001)
if indexbytes(buffer, 3) & 0xE0 == 0xC0:
id = id or indexbytes(buffer, 3) & 0x1F
for a in self.audio:
if a.id == id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', id)
elif indexbytes(buffer, 3) & 0xF0 == 0xE0:
id = id or indexbytes(buffer, 3) & 0xF
for v in self.video:
if v.id == id:
break
else:
self.video.append(core.VideoStream())
self.video[-1]._set('id', id)
# new mpeg starting
if buffer[header_length + 9:header_length + 13] == \
'\x00\x00\x01\xB3' and not self.sequence_header_offset:
# yes, remember offset for later use
self.sequence_header_offset = offset + header_length + 9
elif indexbytes(buffer, 3) == 189 or indexbytes(buffer, 3) == 191:
# private stream. we don't know, but maybe we can guess later
id = id or indexbytes(buffer, 3) & 0xF
if align and \
buffer[header_length + 9:header_length + 11] == '\x0b\x77':
# AC3 stream
for a in self.audio:
if a.id == id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', id)
self.audio[-1].codec = 0x2000 # AC3
else:
# unknown content
pass
ptsdts = indexbytes(buffer, 7) >> 6
if ptsdts and ptsdts == indexbytes(buffer, 9) >> 4:
            # note: unreachable, the outer condition already guarantees equality
            if indexbytes(buffer, 9) >> 4 != ptsdts:
log.warning(u'WARNING: bad PTS/DTS, please contact us')
return packet_length, None
# timestamp = self.ReadPTS(buffer[9:14])
high = ((indexbytes(buffer, 9) & 0xF) >> 1)
med = (indexbytes(buffer, 10) << 7) + (indexbytes(buffer, 11) >> 1)
low = (indexbytes(buffer, 12) << 7) + (indexbytes(buffer, 13) >> 1)
return packet_length, 9
return packet_length, None
def isPES(self, file):
log.info(u'trying mpeg-pes scan')
file.seek(0, 0)
buffer = file.read(3)
# header (also valid for all mpegs)
if not buffer == '\x00\x00\x01':
return 0
self.sequence_header_offset = 0
buffer += file.read(10000)
offset = 0
while offset + 1000 < len(buffer):
pos, timestamp = self.ReadPESHeader(offset, buffer[offset:])
if not pos:
return 0
if timestamp is not None and not hasattr(self, 'start'):
self.get_time = self.ReadPTS
bpos = buffer[offset + timestamp:offset + timestamp + 5]
self.start = self.get_time(bpos)
if self.sequence_header_offset and hasattr(self, 'start'):
                # we have all the information we need
break
offset += pos
            if offset + 1000 < len(buffer) < 1000000 or 1:
                # looks like a pes, read more (note: the trailing 'or 1' makes
                # this condition always true)
buffer += file.read(10000)
if not self.video and not self.audio:
# no video and no audio?
return 0
self.type = 'MPEG-PES'
# fill in values for support functions:
self.__seek_size__ = 10000000 # 10 MB
self.__sample_size__ = 500000 # 500 k scanning
self.__search__ = self._find_timer_PES_
self.filename = file.name
# get length of the file
self.length = self.get_length()
return 1
def _find_timer_PES_(self, buffer):
"""
        Return position of timer in buffer or None if not found.
This function is valid for PES files
"""
pos = buffer.find('\x00\x00\x01')
offset = 0
if pos == -1 or offset + 1000 >= len(buffer):
return None
retpos = -1
ackcount = 0
while offset + 1000 < len(buffer):
pos, timestamp = self.ReadPESHeader(offset, buffer[offset:])
if timestamp is not None and retpos == -1:
retpos = offset + timestamp
if pos == 0:
# Oops, that was a mpeg header, no PES header
offset += buffer[offset:].find('\x00\x00\x01')
retpos = -1
ackcount = 0
else:
offset += pos
if retpos != -1:
ackcount += 1
if ackcount > 10:
# looks ok to me
return retpos
return None
# Elementary Stream ===============================================
def isES(self, file):
file.seek(0, 0)
try:
header = struct.unpack('>LL', file.read(8))
except (struct.error, IOError):
return False
if header[0] != 0x1B3:
return False
# Is an mpeg video elementary stream
self.mime = 'video/mpeg'
video = core.VideoStream()
video.width = header[1] >> 20
video.height = (header[1] >> 8) & 0xfff
if header[1] & 0xf < len(FRAME_RATE):
video.fps = FRAME_RATE[header[1] & 0xf]
if (header[1] >> 4) & 0xf < len(ASPECT_RATIO):
# FIXME: Empirically the aspect looks like PAR rather than DAR
video.aspect = ASPECT_RATIO[(header[1] >> 4) & 0xf]
self.video.append(video)
return True
# Transport Stream ===============================================
def isTS(self, file):
file.seek(0, 0)
buffer = file.read(TS_PACKET_LENGTH * 2)
c = 0
while c + TS_PACKET_LENGTH < len(buffer):
if indexbytes(buffer, c) == indexbytes(buffer, c + TS_PACKET_LENGTH) == TS_SYNC:
break
c += 1
else:
return 0
buffer += file.read(10000)
self.type = 'MPEG-TS'
while c + TS_PACKET_LENGTH < len(buffer):
start = indexbytes(buffer, c + 1) & 0x40
# maybe load more into the buffer
if c + 2 * TS_PACKET_LENGTH > len(buffer) and c < 500000:
buffer += file.read(10000)
# wait until the ts payload contains a payload header
if not start:
c += TS_PACKET_LENGTH
continue
tsid = ((indexbytes(buffer, c + 1) & 0x3F) << 8) + indexbytes(buffer, c + 2)
adapt = (indexbytes(buffer, c + 3) & 0x30) >> 4
offset = 4
if adapt & 0x02:
# meta info present, skip it for now
adapt_len = indexbytes(buffer, c + offset)
offset += adapt_len + 1
if not indexbytes(buffer, c + 1) & 0x40:
# no new pes or psi in stream payload starting
pass
elif adapt & 0x01:
# PES
timestamp = self.ReadPESHeader(c + offset, buffer[c + offset:],
tsid)[1]
if timestamp is not None:
if not hasattr(self, 'start'):
self.get_time = self.ReadPTS
timestamp = c + offset + timestamp
self.start = self.get_time(buffer[timestamp:timestamp + 5])
elif not hasattr(self, 'audio_ok'):
timestamp = c + offset + timestamp
start = self.get_time(buffer[timestamp:timestamp + 5])
if start is not None and self.start is not None and \
abs(start - self.start) < 10:
# looks ok
self.audio_ok = True
else:
# timestamp broken
del self.start
log.warning(u'Timestamp error, correcting')
if hasattr(self, 'start') and self.start and \
self.sequence_header_offset and self.video and self.audio:
break
c += TS_PACKET_LENGTH
if not self.sequence_header_offset:
return 0
# fill in values for support functions:
self.__seek_size__ = 10000000 # 10 MB
self.__sample_size__ = 100000 # 100 k scanning
self.__search__ = self._find_timer_TS_
self.filename = file.name
# get length of the file
self.length = self.get_length()
return 1
def _find_timer_TS_(self, buffer):
c = 0
while c + TS_PACKET_LENGTH < len(buffer):
if indexbytes(buffer, c) == indexbytes(buffer, c + TS_PACKET_LENGTH) == TS_SYNC:
break
c += 1
else:
return None
while c + TS_PACKET_LENGTH < len(buffer):
start = indexbytes(buffer, c + 1) & 0x40
if not start:
c += TS_PACKET_LENGTH
continue
tsid = ((indexbytes(buffer, c + 1) & 0x3F) << 8) + indexbytes(buffer, c + 2)
adapt = (indexbytes(buffer, c + 3) & 0x30) >> 4
offset = 4
if adapt & 0x02:
# meta info present, skip it for now
offset += indexbytes(buffer, c + offset) + 1
if adapt & 0x01:
timestamp = self.ReadPESHeader(c + offset, buffer[c + offset:], tsid)[1]
if timestamp is None:
# this should not happen
log.error(u'bad TS')
return None
return c + offset + timestamp
c += TS_PACKET_LENGTH
return None
# Support functions ==============================================
def get_endpos(self):
"""
        get the last timestamp of the mpeg, return None if this is not possible
"""
if not hasattr(self, 'filename') or not hasattr(self, 'start'):
return None
length = os.stat(self.filename)[stat.ST_SIZE]
if length < self.__sample_size__:
return
file = open(self.filename)
file.seek(length - self.__sample_size__)
buffer = file.read(self.__sample_size__)
end = None
while 1:
pos = self.__search__(buffer)
if pos is None:
break
end = self.get_time(buffer[pos:]) or end
buffer = buffer[pos + 100:]
file.close()
return end
def get_length(self):
"""
        get the length in seconds, return None if this is not possible
"""
end = self.get_endpos()
if end is None or self.start is None:
return None
if self.start > end:
return int(((int(1) << 33) - 1) / 90000) - self.start + end
return end - self.start
def seek(self, end_time):
"""
Return the byte position in the file where the time position
is 'pos' seconds. Return 0 if this is not possible
"""
if not hasattr(self, 'filename') or not hasattr(self, 'start'):
return 0
file = open(self.filename)
seek_to = 0
while 1:
file.seek(self.__seek_size__, 1)
buffer = file.read(self.__sample_size__)
if len(buffer) < 10000:
break
pos = self.__search__(buffer)
if pos is not None:
# found something
nt = self.get_time(buffer[pos:])
if nt is not None and nt >= end_time:
# too much, break
break
# that wasn't enough
seek_to = file.tell()
file.close()
return seek_to
def __scan__(self):
"""
scan file for timestamps (may take a long time)
"""
if not hasattr(self, 'filename') or not hasattr(self, 'start'):
return 0
file = open(self.filename)
log.debug(u'scanning file...')
while 1:
file.seek(self.__seek_size__ * 10, 1)
buffer = file.read(self.__sample_size__)
if len(buffer) < 10000:
break
pos = self.__search__(buffer)
if pos is None:
continue
log.debug(u'buffer position: %r' % self.get_time(buffer[pos:]))
file.close()
log.debug(u'done scanning file')
Parser = MPEG
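# Illustrative sketch (not part of the original library): Parser is the name
# the surrounding library is expected to look up in this module. Note the
# code above is Python-2-era and mixes str patterns with byte buffers, so
# direct use under Python 3 may need care. A hypothetical invocation:
#
#     with open('movie.mpg', 'rb') as f:      # hypothetical file
#         info = Parser(f)                    # raises ParseError if not MPEG
#         print(info.length, info.video[0].width, info.video[0].height)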
|
SickGear/SickGear
|
lib/enzyme/mpeg.py
|
Python
|
gpl-3.0
| 31,404
| 0.00035
|
# Configuration settings for Enso. Eventually this will take
# localization into account too (or we can make a separate module for
# such strings).
# The keys to start, exit, and cancel the quasimode.
# Their values are strings referring to the names of constants defined
# in the os-specific input module in use.
QUASIMODE_START_KEY = "KEYCODE_RCONTROL"
QUASIMODE_END_KEY = "KEYCODE_RETURN"
QUASIMODE_CANCEL_KEY1 = "KEYCODE_ESCAPE"
QUASIMODE_CANCEL_KEY2 = "KEYCODE_RCONTROL"
# Whether the Quasimode is actually modal ("sticky").
IS_QUASIMODE_MODAL = True
# Amount of time, in seconds (float), to wait from the time
# that the quasimode begins drawing to the time that the
# suggestion list begins to be displayed. Setting this to a
# value greater than 0 will effectively create a
# "spring-loaded suggestion list" behavior.
QUASIMODE_SUGGESTION_DELAY = 0.2
# The maximum number of suggestions to display in the quasimode.
QUASIMODE_MAX_SUGGESTIONS = 6
# The minimum number of characters the user must type before the
# auto-completion mechanism engages.
QUASIMODE_MIN_AUTOCOMPLETE_CHARS = 2
# The message displayed when the user types some text that is not a command.
BAD_COMMAND_MSG = "<p><command>%s</command> is not a command.</p>"\
"%s"
# Minimum number of characters that should have been typed into the
# quasimode for a bad command message to be shown.
BAD_COMMAND_MSG_MIN_CHARS = 2
# The captions for the above message, indicating commands that are related
# to the command the user typed.
ONE_SUGG_CAPTION = "<caption>Did you mean <command>%s</command>?</caption>"
# The string that is displayed in the quasimode window when the user
# first enters the quasimode.
QUASIMODE_DEFAULT_HELP = u"Welcome to Enso! Enter a command, " \
u"or type \u201chelp\u201d for assistance."
# The string displayed when the user has typed some characters but there
# is no matching command.
QUASIMODE_NO_COMMAND_HELP = "There is no matching command. "\
"Use backspace to delete characters."
# Message XML for the Splash message shown when Enso first loads.
OPENING_MSG_XML = "<p>Welcome to <command>Enso</command>!</p>" + \
"<caption>Copyright © 2008 Humanized, Inc.</caption>"
# Message XML displayed when the mouse hovers over a mini message.
MINI_MSG_HELP_XML = "<p>The <command>hide mini messages</command>" \
" and <command>put</command> commands control" \
" these mini-messages.</p>"
ABOUT_BOX_XML = u"<p><command>Enso</command> Community Edition</p>" \
"<caption> </caption>" \
"<p>Copyright © 2008 <command>Humanized, Inc.</command></p>" \
"<p>Copyright © 2008-2009 <command>Enso Community</command></p>" \
"<p>Version 1.0</p>"
# List of default platforms supported by Enso; platforms are specific
# types of providers that provide a suite of platform-specific
# functionality.
DEFAULT_PLATFORMS = ["enso.platform.win32"]
# List of modules/packages that support the provider interface to
# provide required platform-specific functionality to Enso.
PROVIDERS = []
PROVIDERS.extend(DEFAULT_PLATFORMS)
# List of modules/packages that support the plugin interface to
# extend Enso. The plugins are loaded in the order that they
# are specified in this list.
PLUGINS = ["enso.contrib.scriptotron",
"enso.contrib.help",
"enso.contrib.google",
"enso.contrib.evaluate"]
FONT_NAME = {"normal" : "Gentium (Humanized)", "italic" : "Gentium Italic"}
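# Illustrative sketch (not part of the original module): a launcher script
# would typically import this module and override individual settings before
# starting Enso. The attribute names are the ones defined above; the new
# values below are hypothetical.
#
#     import enso.config
#     enso.config.QUASIMODE_START_KEY = "KEYCODE_CAPITAL"   # hypothetical remap
#     enso.config.PLUGINS.append("enso.contrib.open")       # hypothetical plugin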
|
tartakynov/enso
|
enso/config.py
|
Python
|
bsd-3-clause
| 3,462
| 0.000578
|
import math
import json
import os
import pytest
import rti_python.ADCP.AdcpCommands
def calculate_predicted_range(**kwargs):
"""
:param SystemFrequency=: System frequency for this configuration.
:param CWPON=: Flag if Water Profile is turned on.
:param CWPBL=: WP Blank in meters.
:param CWPBS=: WP bin size in meters.
:param CWPBN=: Number of bins.
:param CWPBB_LagLength=: WP lag length in meters.
:param CWPBB=: WP broadband or narrowband.
:param CWPP=: Number of pings to average.
:param CWPTBP=: Time between each ping in the average.
:param CBTON=: Is Bottom Track turned on.
:param CBTBB=: BT broadband or narrowband.
:param BeamAngle=: Beam angle in degrees. Default 20 degrees.
:param BeamDiameter=: The beam diameter in meters.
:param CyclesPerElement=: Cycles per element.
:param Salinity=: Salinity in ppt.
:param Temperature=: Temperature in C.
    :param XdcrDepth=: Transducer depth in meters.
:return: BT Range, WP Range, Range First Bin, Configured Ranges
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
        with open(json_file_path) as f:
            config = json.loads(f.read())
except Exception as e:
print("Error opening JSON file Range", e)
return (0.0, 0.0, 0.0, 0.0)
return _calculate_predicted_range(kwargs.pop('CWPON', config['DEFAULT']['CWPON']),
kwargs.pop('CWPBB', config['DEFAULT']['CWPBB']),
kwargs.pop('CWPBS', config['DEFAULT']['CWPBS']),
kwargs.pop('CWPBN', config['DEFAULT']['CWPBN']),
kwargs.pop('CWPBL', config['DEFAULT']['CWPBL']),
kwargs.pop('CBTON', config['DEFAULT']['CBTON']),
kwargs.pop('CBTBB', config['DEFAULT']['CBTBB']),
kwargs.pop('SystemFrequency', config['DEFAULT']['SystemFrequency']),
kwargs.pop('BeamDiameter', config["BeamDiameter"]),
kwargs.pop('CyclesPerElement', config["CyclesPerElement"]),
kwargs.pop('BeamAngle', config["BeamAngle"]),
kwargs.pop('SpeedOfSound', config["SpeedOfSound"]),
kwargs.pop('CWPBB_LagLength', config["DEFAULT"]["CWPBB_LagLength"]),
kwargs.pop('BroadbandPower', config["BroadbandPower"]),
kwargs.pop('Salinity', config["Salinity"]),
kwargs.pop('Temperature', config["Temperature"]),
kwargs.pop('XdcrDepth', config["XdcrDepth"]))
def _calculate_predicted_range(_CWPON_, _CWPBB_TransmitPulseType_, _CWPBS_, _CWPBN_, _CWPBL_,
_CBTON_, _CBTBB_TransmitPulseType_,
_SystemFrequency_, _BeamDiameter_, _CyclesPerElement_,
_BeamAngle_, _SpeedOfSound_, _CWPBB_LagLength_, _BroadbandPower_,
_Salinity_, _Temperature_, _XdcrDepth_):
"""
Get the predicted ranges for the given setup. This will use the parameter given to calculate
the bottom track predicted range, the water profile predicted range, range to the first bin and
the configured range. All results are in meters.
All values with underscores before and after the name are given variables by the user. All caps
variables are given by the JSON configuration. All other variables are calculated.
:param _CWPON_: Flag if Water Profile is turned on.
:param _CWPBB_TransmitPulseType_: WP broadband or narrowband.
:param _CWPBB_LagLength_: WP lag length in meters.
:param _CWPBS_: Bin size in meters.
:param _CWPBN_: Number of bins.
:param _CWPBL_: Blank distance in meters.
:param _CBTON_: Flag if Bottom Track is turned on.
:param _CBTBB_TransmitPulseType_: BT broadband or narrowband.
:param _SystemFrequency_: System frequency in hz.
:param _BeamDiameter_: Beam diameter in meters.
:param _CyclesPerElement_: Cycles per element.
:param _BeamAngle_: Beam angle in degrees.
:param _SpeedOfSound_: Speed of sound in m/s.
:param _BroadbandPower_: Broadband power.
:param _Salinity_: Salinity in ppt.
:param _Temperature_: Temperature in C.
:param _XdcrDepth_: Transducer Depth in meter.
:return: BT Range, WP Range, Range First Bin, Configured Range
"""
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
    try:
        # Get the configuration from the json file
        with open(json_file_path) as json_file:
            config = json.load(json_file)
    except Exception as e:
        print("Error getting the configuration file. Range", e)
        return (0.0, 0.0, 0.0, 0.0)
# Speed of sound must be a value
if _SpeedOfSound_ == 0:
_SpeedOfSound_ = 1490
# Wave length
waveLength = _SpeedOfSound_ / _SystemFrequency_
# DI
dI = 0.0
if waveLength == 0:
dI = 0.0
else:
dI = 20.0 * math.log10(math.pi * _BeamDiameter_ / waveLength)
# Absorption
absorption = calc_absorption(_SystemFrequency_, _SpeedOfSound_, _Salinity_, _Temperature_, _XdcrDepth_)
# 1200khz
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
refBin_1200000 = 0.0
xmtW_1200000 = 0.0
    rScale_1200000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["1200000"]["BEAM_ANGLE"] / 180.0 * math.pi)
    dI_1200000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["1200000"]["DIAM"] / waveLength)
    dB_1200000 = 0.0
if (config["DEFAULT"]["1200000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_1200000 = 0.0
else:
dB_1200000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["1200000"]["BIN"]) + dI - dI_1200000 - 10.0 * math.log10(config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_)
absorption_range_1200000 = config["DEFAULT"]["1200000"]["RANGE"] + ((config["DEFAULT"]["1200000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["1200000"]["RANGE"])
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]:
# Ref in and xmt watt
refBin_1200000 = config["DEFAULT"]["1200000"]["BIN"]
xmtW_1200000 = config["DEFAULT"]["1200000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + 15.0 * config["DEFAULT"]["1200000"]["BIN"])
else:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
btRange_1200000 = 0.0
if _CWPON_:
# Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["1200000"]["BIN"])
else:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
wpRange_1200000 = 0.0
else:
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
# 600khz
btRange_600000 = 0.0
wpRange_600000 = 0.0
refBin_600000 = 0.0
xmtW_600000 = 0.0
rScale_600000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["600000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_600000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["600000"]["DIAM"] / waveLength)
    dB_600000 = 0.0
    if config["DEFAULT"]["600000"]["BIN"] == 0 or _CyclesPerElement_ == 0:
        dB_600000 = 0.0
else:
dB_600000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["600000"]["BIN"]) + dI - dI_600000 - 10.0 * math.log10(config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_)
absorption_range_600000 = config["DEFAULT"]["600000"]["RANGE"] + ((config["DEFAULT"]["600000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["600000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]):
# Ref Bin and xmt watt
        refBin_600000 = config["DEFAULT"]["600000"]["BIN"]
        xmtW_600000 = config["DEFAULT"]["600000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_600000 = 2.0 * rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000 + 15.0 * config["DEFAULT"]["600000"]["BIN"] )
else:
btRange_600000 = 2.0 * rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000)
else:
btRange_600000 = 0.0
if _CWPON_:
            # Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_600000 = rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["600000"]["BIN"] )
else:
wpRange_600000 = rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000)
else:
wpRange_600000 = 0.0
else:
btRange_600000 = 0.0
wpRange_600000 = 0.0
# 300khz
btRange_300000 = 0.0
wpRange_300000 = 0.0
refBin_300000 = 0.0
xmtW_300000 = 0.0
rScale_300000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["300000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_300000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["300000"]["DIAM"] / waveLength)
dB_300000 = 0.0
if (config["DEFAULT"]["300000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_300000 = 0.0
else:
dB_300000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["300000"]["BIN"]) + dI - dI_300000 - 10.0 * math.log10(config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_)
absorption_range_300000 = config["DEFAULT"]["300000"]["RANGE"] + ((config["DEFAULT"]["300000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["300000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_300000 = config["DEFAULT"]["300000"]["BIN"]
xmtW_300000 = config["DEFAULT"]["300000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_300000 = 2.0 * rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000 + 15.0 * config["DEFAULT"]["300000"]["BIN"])
else:
btRange_300000 = 2.0 * rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000)
else:
btRange_300000 = 0.0
if _CWPON_:
            # Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_300000 = rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["300000"]["BIN"])
else:
wpRange_300000 = rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000)
else:
wpRange_300000 = 0.0
else:
# Return 0 if not selected
btRange_300000 = 0.0
wpRange_300000 = 0.0
# 150khz
btRange_150000 = 0.0
wpRange_150000 = 0.0
refBin_150000 = 0.0
xmtW_150000 = 0.0
rScale_150000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["150000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_150000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["150000"]["DIAM"] / waveLength)
    dB_150000 = 0.0
if (config["DEFAULT"]["150000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_150000 = 0.0
else:
dB_150000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["150000"]["BIN"]) + dI - dI_150000 - 10.0 * math.log10(config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_)
absorption_range_150000 = config["DEFAULT"]["150000"]["RANGE"] + ((config["DEFAULT"]["150000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["150000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_150000 = config["DEFAULT"]["150000"]["BIN"]
xmtW_150000 = config["DEFAULT"]["150000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_150000 = 2.0 * rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000 + 15.0 * config["DEFAULT"]["150000"]["BIN"])
else:
btRange_150000 = 2.0 * rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000)
else:
btRange_150000 = 0.0
if _CWPON_:
            # Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_150000 = rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["150000"]["BIN"])
else:
wpRange_150000 = rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000)
else:
wpRange_150000 = 0.0
else:
# Return 0 if not selected
btRange_150000 = 0.0
wpRange_150000 = 0.0
# 75khz
btRange_75000 = 0.0
wpRange_75000 = 0.0
refBin_75000 = 0.0
xmtW_75000 = 0.0
rScale_75000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["75000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_75000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["75000"]["DIAM"] / waveLength)
    dB_75000 = 0.0
if (config["DEFAULT"]["75000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_75000 = 0.0
else:
dB_75000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["75000"]["BIN"]) + dI - dI_75000 - 10.0 * math.log10(config["DEFAULT"]["75000"]["CPE"] / _CyclesPerElement_)
absorption_range_75000 = config["DEFAULT"]["75000"]["RANGE"] + ((config["DEFAULT"]["75000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["75000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_75000 = config["DEFAULT"]["75000"]["BIN"]
xmtW_75000 = config["DEFAULT"]["75000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_75000 = 2.0 * rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000 + 15.0 * config["DEFAULT"]["75000"]["BIN"])
else:
btRange_75000 = 2.0 * rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000)
else:
btRange_75000 = 0.0
if _CWPON_:
            # Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_75000 = rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["75000"]["BIN"])
else:
wpRange_75000 = rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000)
else:
            wpRange_75000 = 0.0
else:
# Return 0 if not selected
btRange_75000 = 0.0
wpRange_75000 = 0.0
# 38khz
btRange_38000 = 0.0
wpRange_38000 = 0.0
refBin_38000 = 0.0
xmtW_38000 = 0.0
rScale_38000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["38000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_38000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["38000"]["DIAM"] / waveLength)
    dB_38000 = 0.0
if (config["DEFAULT"]["38000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_38000 = 0.0
else:
dB_38000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["38000"]["BIN"]) + dI - dI_38000 - 10.0 * math.log10(config["DEFAULT"]["38000"]["CPE"] / _CyclesPerElement_)
absorption_range_38000 = config["DEFAULT"]["38000"]["RANGE"] + ((config["DEFAULT"]["38000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["38000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_38000 = config["DEFAULT"]["38000"]["BIN"]
xmtW_38000 = config["DEFAULT"]["38000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
                btRange_38000 = 2.0 * rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000 + 15.0 * config["DEFAULT"]["38000"]["BIN"])
else:
btRange_38000 = 2.0 * rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000)
else:
btRange_38000 = 0.0
if _CWPON_:
            # Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_38000 = rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["38000"]["BIN"])
else:
wpRange_38000 = rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000)
else:
wpRange_38000 = 0.0
else:
# Return 0 if not selected
btRange_38000 = 0.0
wpRange_38000 = 0.0
# Sample Rate
    sumSampling = 0.0
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]: # 1200 khz
sumSampling += config["DEFAULT"]["1200000"]["SAMPLING"] * config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]): # 600 khz
sumSampling += config["DEFAULT"]["600000"]["SAMPLING"] * config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]): # 300 khz
sumSampling += config["DEFAULT"]["300000"]["SAMPLING"] * config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]): # 150 khz
sumSampling += config["DEFAULT"]["150000"]["SAMPLING"] * config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]): # 75 khz
sumSampling += config["DEFAULT"]["75000"]["SAMPLING"] * config["DEFAULT"]["75000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]): #38 khz
sumSampling += config["DEFAULT"]["38000"]["SAMPLING"] * config["DEFAULT"]["38000"]["CPE"] / _CyclesPerElement_
sampleRate = _SystemFrequency_ * (sumSampling)
# Meters Per Sample
metersPerSample = 0
if sampleRate == 0:
metersPerSample = 0.0
else:
metersPerSample = math.cos(_BeamAngle_ / 180.0 * math.pi) * _SpeedOfSound_ / 2.0 / sampleRate
# Lag Samples
lagSamples = 0
if metersPerSample == 0:
lagSamples = 0
else:
lagSamples = 2 * math.trunc((math.trunc(_CWPBB_LagLength_ / metersPerSample) + 1.0) / 2.0)
# Xmt Scale
    xmtScale = 1.0
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value: # Check if NB
xmtScale = 1.0
else:
# Check for bad value
if lagSamples == 0:
xmtScale = 1.0
# Check which Broadband power is used
elif _BroadbandPower_:
xmtScale = (lagSamples - 1.0) / lagSamples
else:
xmtScale = 1.0 / lagSamples
# Range Reduction
    rangeReduction = 0.0
# Get the sum of all the selected WP XmtW and RefBin
sumXmtW = xmtW_1200000 + xmtW_600000 + xmtW_300000 + xmtW_150000 + xmtW_75000 + xmtW_38000
sumRefBin = refBin_1200000 + refBin_600000 + refBin_300000 + refBin_150000 + refBin_75000 + refBin_38000
beamXmtPowerProfile = xmtScale * sumXmtW
# Check for bad values
if sumXmtW == 0:
rangeReduction = 0.0
else:
rangeReduction = 10.0 * math.log10(beamXmtPowerProfile / sumXmtW) * sumRefBin + 1.0
# Bin Samples
    binSamples = 0
if metersPerSample == 0:
binSamples = 0
else:
binSamples = math.trunc(_CWPBS_ / metersPerSample)
# Code Repeats
    codeRepeats = 0
if lagSamples == 0:
codeRepeats = 0
else:
        # Ported from C#, where Truncate only accepted doubles; in Python,
        # math.trunc handles the division result directly and returns an int.
if (math.trunc(binSamples / lagSamples)) + 1.0 < 2.0:
codeRepeats = 2
else:
codeRepeats = (math.trunc(binSamples / lagSamples)) + 1
# First Bin Position
    pos = 0.0
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
pos = (2.0 * _CWPBS_ + 0.05) / 2.0
else:
if _CWPBB_TransmitPulseType_ > 1:
pos = _CWPBS_
else:
pos = (lagSamples * (codeRepeats - 1.0) * metersPerSample + _CWPBS_ + _CWPBB_LagLength_) / 2.0
    firstBinPosition = _CWPBL_ + pos
    # Profile Range based off Settings
    profileRangeSettings = _CWPBL_ + (_CWPBS_ * _CWPBN_)
    # Set the predicted ranges PredictedRanges
    wp = 0.0
    bt = 0.0
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]: # 1200 khz
bt = btRange_1200000
wp = wpRange_1200000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]): # 600 khz
bt = btRange_600000
wp = wpRange_600000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]): # 300 khz
bt = btRange_300000
wp = wpRange_300000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]): # 150 khz
bt = btRange_150000
wp = wpRange_150000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]): # 75 khz
bt = btRange_75000
wp = wpRange_75000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]): #38 khz
        bt = btRange_38000
        wp = wpRange_38000 + rangeReduction
return (bt, wp, firstBinPosition, profileRangeSettings)
def calc_absorption(_SystemFrequency_, _SpeedOfSound_, _Salinity_, _Temperature_, _XdcrDepth_):
"""
Calculate the water absorption.
:param _SystemFrequency_: System frequency
:param _SpeedOfSound_: Speed of Sound m/s
:param _Salinity_: Salinity in ppt.
:param _Temperature_: Water Temperature in C
:param _XdcrDepth_: Transducer Depth in m.
:return: Water Absorption.
"""
if _SpeedOfSound_ == 0 or _Salinity_ == 0 or _SystemFrequency_ == 0:
return 0
pH = 8.0
P1 = 1.0
# Frequency
freq = _SystemFrequency_ / 1000.0
# A1
# dB Km^-1 KHz^-1
A1 = 8.68 / _SpeedOfSound_ * 10.0 ** (0.78 * pH - 5.0)
# f1
f1 = 2.8 * ((_Salinity_ / 35.0) ** 0.5) * (10.0 ** (4.0 - 1245.0 / (273.0 + _Temperature_)))
# A2
# dB km^-1 kHz^-1
A2 = 21.44 * _Salinity_ / _SpeedOfSound_ * (1.0 + 0.025 * _Temperature_)
# P2
P2 = 1.0 - 1.37 * (10.0 ** (-4.0)) * _XdcrDepth_ + 6.2 * (10.0 ** (-9.0)) * (_XdcrDepth_ ** 2)
# f2
# kHz
f2 = 8.17 * (10.0 ** (8.0 - 1990.0 / (273.0 + _Temperature_))) / (1.0 + 0.0018 * (_Salinity_ - 35.0))
# A3
A3 = 4.93 * (10.0 ** (-4.0)) - 2.59 * (10.0 ** (-5.0)) * _Temperature_ + 9.11 * (10.0 ** (-7.0)) * (_Temperature_ ** 2.0)
# P3
P3 = 1.0 - 3.83 * (10.0 ** (-5.0)) * _XdcrDepth_ + 4.9 * (10.0 ** (-10.0)) * (_XdcrDepth_ ** 2.0)
# Boric Acid Relaxation
bar = A1 * P1 * f1 * (freq ** 2.0) / ((freq ** 2.0) + (f1 ** 2.0)) / 1000.0
# MgSO3 Magnesium Sulphate Relaxation
msr = A2 * P2 * f2 * (freq ** 2.0) / ((freq ** 2.0) + (f2 ** 2.0)) / 1000.0
# Freshwater Attenuation
fa = A3 * P3 * (freq ** 2.0) / 1000.0
# Absorption
return bar + msr + fa
def test_calc_range():
(bt_range, wp_range, first_bin, cfg_range) = calculate_predicted_range(CWPON=True,
CWPBB=1,
CWPBS=4.0,
CWPBN=30,
CWPBL=1.0,
CBTON=True,
CBTBB=1,
SystemFrequency=288000.0,
BeamDiameter=0.075,
CyclesPerElement=12,
BeamAngle=20,
SpeedOfSound=1490,
CWPBB_LagLength=1.0,
BroadbandPower=True,
Temperature=10.0,
Salinity=35.0,
XdcrDepth=0.0)
user_cfg_range = 1.0 + (4.0 * 30)
assert pytest.approx(wp_range, 0.01) == 100.05
assert pytest.approx(bt_range, 0.01) == 199.14
assert pytest.approx(first_bin, 0.01) == 5.484
assert pytest.approx(cfg_range, 0.01) == user_cfg_range
def test_calc_range_nb():
(bt_range, wp_range, first_bin, cfg_range) = calculate_predicted_range(CWPON=True,
CWPBB=0,
CWPBS=4.0,
CWPBN=30,
CWPBL=1.0,
CBTON=True,
CBTBB=0,
SystemFrequency=288000.0,
BeamDiameter=0.075,
CyclesPerElement=12,
BeamAngle=20,
SpeedOfSound=1490,
CWPBB_LagLength=1.0,
BroadbandPower=True,
Temperature=10.0,
Salinity=35.0,
XdcrDepth=0.0)
user_cfg_range = 1.0 + (4.0 * 30)
assert pytest.approx(wp_range, 0.01) == 152.57
assert pytest.approx(bt_range, 0.01) == 319.14
assert pytest.approx(first_bin, 0.01) == 5.025
assert pytest.approx(cfg_range, 0.01) == user_cfg_range
|
ricorx7/rti_python
|
ADCP/Predictor/Range.py
|
Python
|
bsd-3-clause
| 30,092
| 0.007344
|
from toee import *
from utilities import *
from Co8 import *
from py00439script_daemon import npc_set, npc_get
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
if (npc_get(attachee, 1) == 0):
triggerer.begin_dialog( attachee, 1 )
elif (npc_get(attachee, 1) == 1):
triggerer.begin_dialog( attachee, 100 )
return SKIP_DEFAULT
def san_start_combat( attachee, triggerer ):
leader = game.party[0]
StopCombat(attachee, 0)
leader.begin_dialog( attachee, 4000 )
return RUN_DEFAULT
def give_default_starting_equipment(x = 0):
	for pc in game.party:
		if pc.stat_level_get(stat_level_barbarian) > 0:
			for aaa in [4074, 6059, 6011, 6216, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_bard) > 0:
			for aaa in [4009, 6147, 6011, 4096, 5005, 5005, 6012, 6238, 12564, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_druid) > 0:
			for aaa in [6216, 6217, 4116, 4115, 5007, 5007, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_cleric) > 0 or pc.divine_spell_level_can_cast() > 0:
			for aaa in [6013, 6011, 6012, 6059, 4071, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_fighter) > 0:
			for aaa in [6013, 6010, 6011, 6012, 6059, 4062, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_monk) > 0:
			if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
				for aaa in [6205, 6202, 4060, 8014]: # dagger (4060) instead of quarterstaff
					create_item_in_inventory( aaa, pc )
			else:
				for aaa in [6205, 6202, 4110, 8014]:
					create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_paladin) > 0:
			for aaa in [6013, 6012, 6011, 6032, 6059, 4036, 6124, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_ranger) > 0:
			for aaa in [6013, 6012, 6011, 6059, 4049, 4201, 5004, 5004, 8014, 6269]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_rogue) > 0:
			for aaa in [6042, 6045, 6046, 4049, 4060, 6233, 8014, 4096, 5005, 5005, 8014, 12012]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_swashbuckler) > 0:
			for aaa in [6013, 6045, 6046, 4009, 4060, 6238, 8014]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_sorcerer) > 0:
			if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
				for aaa in [6211, 6045, 6046, 6124, 4060, 4115, 5007, 5007, 8014]: # dagger (4060) instead of spear
					create_item_in_inventory( aaa, pc )
			else:
				for aaa in [6211, 6045, 6046, 6124, 4117, 4115, 5007, 5007, 8014]:
					create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_warmage) > 0:
			if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
				for aaa in [6013, 6045, 6046, 6059, 4071, 4115, 5007, 5007, 8014]: # mace (4071) instead of spear
					create_item_in_inventory( aaa, pc )
			else:
				for aaa in [6013, 6045, 6046, 6059, 4117, 4115, 5007, 5007, 8014]:
					create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_beguiler) > 0:
			for aaa in [6042, 6045, 6046, 4049, 4060, 6233, 8014, 4096, 5005, 5005, 8014, 12012]:
				create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_wizard) > 0 or pc.arcane_spell_level_can_cast() > 0:
			if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
				for aaa in [4060, 4096, 5005, 5005, 6081, 6143, 6038, 6011, 8014]:
					create_item_in_inventory( aaa, pc )
			else:
				for aaa in [4110, 4096, 5005, 5005, 6081, 6143, 6038, 6011, 8014]:
					create_item_in_inventory( aaa, pc )
		elif pc.stat_level_get(stat_level_scout) > 0:
			for aaa in [6013, 6012, 6011, 4049, 4201, 5004, 5004, 8014, 6269, 12012]:
				create_item_in_inventory( aaa, pc )
		else: # default to rogue outfit
			for aaa in [6042, 6045, 6046, 4049, 4060, 6233, 8014, 4096, 5005, 5005, 8014, 12012]:
				create_item_in_inventory( aaa, pc )
	return
def defalt_equipment_autoequip():
for pc in game.party:
pc.item_wield_best_all()
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/co8infra/scr/py00416standard_equipment_chest.py
|
Python
|
mit
| 4,171
| 0.101415
|
from functions.science import rms, mae, average, nan, inf
from collections import OrderedDict
from rawdata.table import table
from numpy import array, log10
import cma
from time import time, strftime
__all__ = ['fmin', 'optimbox', 'box', 'array', 'log10', 'rms', 'mae', 'average', 'nan', 'inf']
def box(x, y, xmin=-inf, xmax=inf, ymin=-inf, ymax=inf):
xs, ys = [], []
for xi, yi in zip(x, y):
if xmin<=xi<=xmax and ymin<=yi<=ymax:
xs.append(xi)
ys.append(yi)
return array(xs), array(ys)
class optimbox(object):
"""optimbox is a class used for fitting curves and linked with the fmin decorator.
as input, it must contains a dictionary with the keys 'objective', 'goal'.
it can contain optionally the keys 'xlim', 'ylim', 'weight', 'yscale'.
if yscale is set to 'lin' (default), the error calculation is done by weight*(objective-goal)
if yscale is set to 'log', the fit is done by weight*(objective-goal)/goal.
if weight is not defined, weight is calculated when yscale='lin' as mae(goal)
if weight is not defined, weight is set when yscale='log' as 1.0.
the optimbox's error is returned using the class function self.error().
self.error() is used in fmin.
"""
def mean(self, x):
return mae(x)
def __init__(self, kwargs):
self._error = 0.0
if 'objective' in kwargs and 'goal' in kwargs:
x1, y1 = kwargs['objective']
x2, y2 = kwargs['goal']
else:
            raise Exception("optimbox input must contain the keys 'objective' and 'goal'")
yscale = kwargs.get('yscale', 'lin')
xmin, xmax = kwargs.get('xlim', (-inf, inf))
ymin, ymax = kwargs.get('ylim', (-inf, inf))
x1, y1 = box(x1, y1, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
x2, y2 = box(x2, y2, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
if yscale == 'lin':
weight = kwargs.get('weight', self.mean(y2))
if hasattr(weight, '__iter__'):
raise Exception('weight cannot be a list of values')
error = weight*(y1-y2)
if hasattr(error, '__iter__'):
self._error = self.mean(error)
else:
self._error = abs(error)
elif yscale == 'log':
weight = kwargs.get('weight', 1.0)
if hasattr(weight, '__iter__'):
raise Exception('weight cannot be a list of values')
try:
error = weight*(y1-y2)/y2
except ZeroDivisionError:
                raise ZeroDivisionError('at least one point of the scatter data is zero')
if hasattr(error, '__iter__'):
self._error = self.mean(error)
else:
self._error = abs(error)
def error(self):
return self._error
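
# Illustrative sketch (not part of the original module): the input dictionary
# shape that optimbox expects. The curves and limits are made-up placeholders.
#
#     x_model, y_model = run_model()        # hypothetical simulated curve
#     x_meas, y_meas = load_measurement()   # hypothetical measured curve
#     err = optimbox({'objective': (x_model, y_model),
#                     'goal': (x_meas, y_meas),
#                     'xlim': (0.0, 10.0),
#                     'yscale': 'log'}).error()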
class fmin(object):
x = OrderedDict() # ordered dictionary
bounds = OrderedDict() # ordered dictionary
def __init__(self, method='cma-es', **options):
"""fmin is a function decorator used for minimization of function.
options:
for method = 'cma-es'
variables = 'all'
sigma0 = 0.1
tolx = 1e-3
tolfun = 1e-5
seed = 1234
maxiter = '100 + 50*(N+3)**2 // popsize**0.5'
maxfevals = inf
popsize = '4 + int(3*log(N))'
verbose = -1
fmin.x <- dict
fmin.bounds <- dict
"""
self.method = method
self.options = options
def __call__(self, func):
if self.method == 'cma-es':
results = self._fmin_cma_es(func=func, **dict(self.options))
return results
def _fmin_cma_es(self, func, variables='all', sigma0=0.1, tolx=1e-3, seed=1234,
maxiter='100+50*(N+3)**2//popsize**0.5', verbose=-1,
maxfevals=float('inf'), popsize='4+int(3*log(N))', tolfun=1e-5 ):
now = time()
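        # tf maps each variable linearly from its [xmin, xmax] bounds onto
        # [0, 1]; tfinv is the inverse. CMA-ES then searches the unit box
        # (see the 'bounds' option below) regardless of the physical ranges.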
def tf(X, bounds):
Y = []
for x, (xmin, xmax) in zip(X, bounds):
slope = 1./(xmax-xmin)
intercept = 1.0-slope*xmax
y = slope*x + intercept
Y.append(y)
return Y
def tfinv(Y, bounds):
X = []
for y, (xmin, xmax) in zip(Y, bounds):
slope = xmax-xmin
intercept = xmax-slope
x = slope*y + intercept
X.append(x)
return X
def eval_error(output):
if isinstance(output, dict):
return optimbox(output).error()
elif isinstance(output, (float, int)):
return float(abs(output))
elif isinstance(output, tuple):
return average([ eval_error(elt) for elt in output ])
elif hasattr(output, '__iter__'):
return mae(output)
else:
raise Exception('output must be based on optimbox, float, tuple or list/array')
# init
if variables == 'all':
variables = fmin.x.keys()
x0 = [fmin.x[key] for key in variables]
bounds = [fmin.bounds[key] for key in variables]
options = { 'boundary_handling' : 'BoundTransform ',
'bounds' : [[0]*len(x0), [1]*len(x0)],
'seed' : seed,
'verb_time' : False,
'scaling_of_variables' : None,
'verb_disp' : 1,
'maxiter' : maxiter,
'maxfevals' : maxfevals,
'signals_filename' : 'cmaes_signals.par',
'tolx' : tolx,
'popsize' : popsize,
'verbose' : verbose,
'ftarget': 1e-12,
                    'tolfun' : tolfun,
}
es = cma.CMAEvolutionStrategy(tf(x0, bounds), sigma0, options)
# initial error with the original set of variables values
error = eval_error( func(**fmin.x) )
best_objective = error
print 'Start CMA-ES Optimizer...'
print
print '{step:>6}{residual:>11}{x}'.format(step='step', x='{:>11}'*len(variables), residual='residual').format(*variables)
print '-'*(6+11+11*len(variables))
print '{step:>6}{residual:>11.3e}{x}'.format(step=0, x='{:>11.3e}'*len(x0), residual=error).format(*x0)
while not es.stop():
solutions = es.ask() # provide a set of variables values
objectives = [] # init
for i, x in enumerate(solutions):
xt = { k:v for k, v in zip(variables, tfinv(x, bounds)) }
# add other keyword arguments
for key in fmin.x.keys():
if not(key in variables):
xt[key] = fmin.x[key]
error = eval_error( func(**xt) )
objectives.append( error )
# if error is better then update fmin.x
if error < best_objective:
fmin.x.update(xt)
best_objective = error
es.tell(solutions, objectives)
#es.disp(1)
if es.countiter%10==0:
print
print '{step:>6}{residual:>11}{x}'.format(step='step', x='{:>11}'*len(variables), residual='residual').format(*variables)
print '-'*(6+11+11*len(variables))
indx = objectives.index(min(objectives))
x = tfinv(solutions[indx], bounds)
isbest = ''
if objectives[indx] == best_objective:
isbest = '*'
print '{step:>6}{residual:>11.3e}{x} {isbest}'.format(step=es.countiter, x='{:>11.3e}'*len(x), residual=objectives[indx], isbest=isbest).format(*x)
#es.result_pretty()
xbest, f_xbest, evaluations_xbest, evaluations, iterations, pheno_xmean, effective_stds = es.result()
stop = es.stop()
print '-----------------'
print 'termination on %s=%.2e'%(stop.keys()[0], stop.values()[0])
print 'bestever f-value: %r'%(f_xbest)
print 'incumbent solution: %r'%(list(tfinv(xbest, bounds)))
print 'std deviation: %r'%(list(effective_stds))
print 'evaluation func: %r'%(evaluations)
print 'total time:',
minutes = int((time()-now)/60)
if minutes>1:
print "%d minutes"%(minutes),
elif minutes>0:
print "%d minute"%(minutes),
print "%d seconds"%((time()-now)-minutes*60)
|
raphaelvalentin/Utils
|
optimize/optlib2.py
|
Python
|
gpl-2.0
| 8,671
| 0.011302
|
from __future__ import print_function
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import sqlalchemy
import sys
# This value must be incremented after schema changes on replicated tables!
SCHEMA_VERSION = 1
engine = None
def init_db_engine(connect_str):
global engine
engine = create_engine(connect_str, poolclass=NullPool)
def run_sql_script(sql_file_path):
with open(sql_file_path) as sql:
connection = engine.connect()
connection.execute(sql.read())
connection.close()
def run_sql_script_without_transaction(sql_file_path):
with open(sql_file_path) as sql:
connection = engine.connect()
connection.connection.set_isolation_level(0)
lines = sql.read().splitlines()
try:
for line in lines:
# TODO: Not a great way of removing comments. The alternative is to catch
# the exception sqlalchemy.exc.ProgrammingError "can't execute an empty query"
if line and not line.startswith("--"):
connection.execute(line)
except sqlalchemy.exc.ProgrammingError as e:
print("Error: {}".format(e))
return False
finally:
connection.connection.set_isolation_level(1)
connection.close()
return True
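
# Sketch of the alternative the TODO above mentions (illustration only, not
# part of the original module): execute every line and swallow only the
# empty-query error that comment/blank lines produce.
#
#     try:
#         connection.execute(line)
#     except sqlalchemy.exc.ProgrammingError as e:
#         if "can't execute an empty query" not in str(e):
#             raise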
|
Freso/listenbrainz-server
|
messybrainz/db/__init__.py
|
Python
|
gpl-2.0
| 1,338
| 0.003737
|
import random
import musictheory
import filezart
import math
from pydub import AudioSegment
from pydub.playback import play
class Part:
def __init__(self, typ=None, intensity=0, size=0, gen=0, cho=0):
self._type = typ #"n1", "n2", "bg", "ch", "ge"
if intensity<0 or gen<0 or cho<0 or size<0 or intensity>1 or size>1 or gen>1 or cho>1:
raise ValueError ("Invalid Values for Structure Part")
self._intensity = intensity # [0-1]
self._size = size # [0-1]
self._genover = gen # [0-1] overlay of general type lines
self._chover = cho # [0-1] overlay of chorus type lines
def __repr__(self):
return "[" + self._type + "-" + str(self._intensity) + "-" + str(self._size) + "-" + str(self._genover) + "-" + str(self._chover) + "]"
@classmethod
def fromString(cls, string): # [n1-0.123-1-0.321-0.2] type, intensity, size, genoverlay, chooverlay
while string[0] == " ":
string = string[1:]
while string[0] == "\n":
string = string[1:]
while string[-1] == " ":
string = string[:-1]
while string[-1] == "\0":
string = string[:-1]
while string[-1] == "\n":
string = string[:-1]
if len(string)<8:
raise ValueError("Invalid Part string: "+string)
if string[0] == "[" and string[-1] == "]":
string = string[1:-1]
else:
raise ValueError("Invalid Part string: "+string)
typ = string[:2]
string = string[3:]
if not typ in ("n1", "n2", "bg", "ch", "ge"):
raise ValueError("Invalid Part Type string: "+typ)
valstrings = str.split(string, "-")
inten = eval(valstrings[0])
size = eval(valstrings[1])
gen = eval(valstrings[2])
cho = eval(valstrings[3])
return cls(typ, inten, size, gen, cho)
def getTheme(self, pal):
if self._type == "n1":
return pal._n1
if self._type == "n2":
return pal._n2
if self._type == "bg":
return pal._bg
if self._type == "ch":
return pal._ch
if self._type == "ge":
return pal._ge
def getAudio(self, pal, bpm):
base = self.baseDur(pal, bpm)
total = base + 3000 #extra time for last note to play
nvoic = math.ceil(self._intensity * self.getTheme(pal).countVoices())
try:
ngeno = math.ceil(self._genover * pal._ge.countVoices())
except:
ngeno = 0
try:
nchoo = math.ceil(self._chover * pal._ch.countVoices())
except:
nchoo = 0
sound = AudioSegment.silent(total)
them = self.getTheme(pal)
for i in range(nvoic):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
them = pal._ge
for i in range(ngeno):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
them = pal._ch
for i in range(nchoo):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
return sound
def baseDur(self, pal, bpm): #get the base duration of this part of the song
return self.getTheme(pal).baseDurForStruct(self._size, bpm)
class Structure:
def __init__(self):
self._parts = ()
def add(self, part):
self._parts = self._parts+(part,)
def __repr__(self):
return "@STRUCTURE:" + str(self._parts)
def baseDur(self, pal, bpm=None):
if bpm == None:
bpm = pal._bpm
curTime = 0
for p in self._parts:
curTime = curTime + p.baseDur(pal, bpm)
return curTime
def songAudio(self, pal, bpm=None):
if bpm == None:
bpm = pal._bpm
total = self.baseDur(pal, bpm) + 3000 # 3 seconds for last note to play
sound = AudioSegment.silent(total)
curTime = 0
for p in self._parts:
paudio = p.getAudio(pal, bpm)
sound = sound.overlay(paudio, curTime)
curTime = curTime + p.baseDur(pal, bpm)
print("curTime:",curTime)
return sound
# wselect WeightedSelect returns element of dictionary based on dict weights {element:weight}
def wselect(dicti):
total=0
for i in list(dicti):
total = total + dicti[i]
indice = total*random.random()
for i in list(dicti):
if dicti[i]>=indice:
return i
indice = indice - dicti[i]
raise ValueError ("something went wrong")
# rselect RandomSelect returns random element of list
def rselect(lista):
return random.choice(lista)
def lenweights():
return {3:1, 4:1, 5:2, 6:3, 7:4, 8:3, 9:2, 10:1, 11:1}
def stweights():
return {"n1":5, "n2":4, "ch":2, "bg":1}
def n1weights():
return {"n1":4, "n2":2, "ch":3, "bg":1}
def n2weights():
return {"n1":2, "n2":3, "ch":4, "bg":2}
def chweights():
return {"n1":2, "n2":1, "ch":4, "bg":1}
def bgweights():
return {"n1":1, "n2":1, "ch":20, "bg":8}
def typeSequence(size):
last = wselect(stweights())
sequence=(last,)
while len(sequence)<size:
if last == "n1":
last = wselect(n1weights())
elif last == "n2":
last = wselect(n2weights())
elif last == "ch":
last = wselect(chweights())
elif last == "bg":
last = wselect(bgweights())
sequence = sequence + (last,)
return sequence
def siweights():
return {0.1:1, 0.2:2, 0.3:4, 0.4:5, 0.5:5, 0.6:4, 0.7:3, 0.8:2, 0.9:1}
def deltaweights():
return {-0.3:1, -0.2:1, -0.1:1, 0:5, 0.1:3, 0.2:2, 0.3:2}
def intensitySequence(size):
val = wselect(siweights())
sequence = (val,)
while len(sequence)<size:
val = val + wselect(deltaweights())
if val<0.1:
val = 0.1
if val>1:
val = 1
sequence = sequence + (val,)
return sequence
def soweights():
return {0:6, 0.1:2, 0.2:1}
def deltoweights():
return {-0.2:1, -0.1:1, 0:8, 0.1:2, 0.2:2}
def overlaySequence(size):
val = wselect(soweights())
sequence = (val,)
while len(sequence)<size:
val = val + wselect(deltoweights())
if val<0.1:
val = 0.1
if val>1:
val = 1
sequence = sequence + (val,)
return sequence
def ssweights():
return {0.2:1, 0.4:1, 0.6:1, 0.8:1, 1:16}
def sizeSequence(size):
sequence = ()
while len(sequence)<size:
sequence = sequence + (wselect(ssweights()),)
return sequence
def makeStruct(size = None):
if size == None:
size = wselect(lenweights())
types = typeSequence(size)
inten = intensitySequence(size)
sizes = sizeSequence(size)
overl = overlaySequence(size)
return joinSeqs(types, inten, sizes, overl)
def joinSeqs(types, inten, sizes, overl):
struct = Structure()
for i in range(len(types)):
if types[i]=="bg":
string = "["+types[i]+"-"+str(inten[i])+"-"+str(sizes[i])+"-"+"0"+"-"+str(overl[i])+"]" # If its a bridge it has chord overlay
pt = Part.fromString(string)
struct.add(pt)
else:
string = "["+types[i]+"-"+str(inten[i])+"-"+str(sizes[i])+"-"+str(overl[i])+"-"+"0"+"]" # Else it has gen overlay
pt = Part.fromString(string)
struct.add(pt)
return struct
def pooptest():
for i in range(30):
print(makeStruct())
|
joaoperfig/mikezart
|
source/markovzart2.py
|
Python
|
mit
| 8,058
| 0.018367
|
# -*- coding: utf-8 -*-
import sqlite3
from flask import g, current_app
def connect_db():
db = sqlite3.connect(current_app.config['DATABASE_URI'])
db.row_factory = sqlite3.Row
return db
# http://flask.pocoo.org/docs/0.10/appcontext/
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_db()
return db
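
# A common companion to get_db() (a sketch following the Flask app-context
# pattern linked above; not part of this module): close the connection when
# the application context is torn down.
#
#     @app.teardown_appcontext
#     def teardown_db(exception):
#         db = getattr(g, '_database', None)
#         if db is not None:
#             db.close()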
|
gaowhen/summer
|
summer/db/connect.py
|
Python
|
mit
| 485
| 0
|
import threading
import time
class Status:
lock = None
    statusno = 0
def __init__(self):
self.lock = threading.Lock()
def update(self, add):
self.lock.acquire()
self.statusno = self.statusno + add
self.lock.release()
def get(self):
self.lock.acquire()
n = self.statusno
self.lock.release()
return n
def md5calc(status, args):
for i in args:
time.sleep (1)
#print i
status.update(1)
def show_status(status):
while threading.active_count() > 2:
time.sleep(1)
print status.get()
status = Status()
slaves = []
for i in range(5):
t = threading.Thread(target=md5calc, args=(status, [1,2,5]))
t.start()
slaves.append(t)
m = threading.Thread(target=show_status, args=(status,))
m.start()
m.join()
for t in slaves:
t.join()
|
RedFoxPi/Playground
|
threadtest.py
|
Python
|
gpl-2.0
| 929
| 0.01507
|
import os.path
import platform
from nose2.compat import unittest
from nose2.tests._common import FunctionalTestCase
class TestCoverage(FunctionalTestCase):
@unittest.skipIf(
platform.python_version_tuple()[:2] == ('3', '2'),
'coverage package does not support python 3.2')
def test_run(self):
proc = self.runIn(
'scenario/test_with_module',
'-v',
'--with-coverage',
'--coverage=lib/'
)
        STATS = r'\s+8\s+5\s+38%'
expected = os.path.join('lib', 'mod1(.py)?')
expected = expected.replace('\\', r'\\')
expected = expected + STATS
stdout, stderr = proc.communicate()
self.assertTestRunOutputMatches(
proc,
stderr=expected)
self.assertTestRunOutputMatches(
proc,
stderr='TOTAL\s+' + STATS)
|
usc-isi-i2/WEDC
|
spark_dependencies/python_lib/nose2/tests/functional/test_coverage.py
|
Python
|
apache-2.0
| 878
| 0.004556
|
__author__ = 'Alex Breshears'
__license__ = '''
Copyright (C) 2012 Alex Breshears
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.contrib.admin import site
from django.contrib import admin
from shorturls.models import *
class LinkClickInline(admin.TabularInline):
model = LinkClick
    extra = 0
class LinkAdmin(admin.ModelAdmin):
inlines = [LinkClickInline]
def save_model(self, request, obj, form, change):
obj.save()
site.register(Link, LinkAdmin)
|
t3hi3x/p-k.co
|
shorturls/admin.py
|
Python
|
mit
| 1,450
| 0.008276
|
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from collections import defaultdict
from ..anagonda.context import guru
from commands.base import Command
class PackageSymbols(Command):
"""Run guru to get a detailed list of the package symbols
"""
def __init__(self, callback, uid, vid, scope, code, path, buf, go_env):
self.vid = vid
self.scope = scope
self.code = code
self.path = path
self.buf = buf
self.go_env = go_env
super(PackageSymbols, self).__init__(callback, uid)
def run(self):
"""Run the command
"""
try:
offset = getattr(self, 'offset', None)
if offset is None:
offset = self.code.find('package ') + len('package ') + 1
with guru.Guru(
self.scope, 'describe', self.path,
offset, self.buf, self.go_env) as desc:
symbols = []
for symbol in self._sort(desc):
path, line, col = symbol['pos'].split(':')
symbols.append({
'filename': path,
'line': int(line),
'col': int(col),
'ident': symbol['name'],
'full': symbol['type'],
'keyword': symbol['kind'],
'show_filename': True
})
self.callback({
'success': True,
'result': symbols,
'uid': self.uid,
'vid': self.vid
})
except Exception as error:
logging.error(error)
logging.debug(traceback.format_exc())
self.callback({
'success': False,
'error': str(error),
'uid': self.uid,
'vid': self.vid
})
def _sort(self, desc):
"""Sort the output by File -> Vars -> Type -> Funcs
"""
symbols = []
aggregated_data = defaultdict(lambda: [])
for elem in desc.get('package', {}).get('members', []):
filename = elem['pos'].split(':')[0]
aggregated_data[filename].append(elem)
for filename, elems in aggregated_data.items():
symbols += sorted(
[e for e in elems if e['kind'] in ['var', 'const']],
key=lambda x: x['pos']
)
symbols += sorted(
[e for e in elems if e['kind'] == 'type'],
key=lambda x: x['pos']
)
symbols += sorted(
[e for e in elems if e['kind'] == 'func'],
key=lambda x: x['pos']
)
for e in elems:
if e['kind'] == 'type':
methods = []
for method in e.get('methods', []):
new_elem = method
new_elem['kind'] = 'func'
new_elem['type'] = method['name']
methods.append(new_elem)
symbols += sorted(methods, key=lambda x: x['pos'])
return symbols
class PackageSymbolsCursor(PackageSymbols):
"""Run guru to get detailed information about the symbol under cursor
"""
def __init__(self, cb, uid, vid, scope, code, path, buf, off, go_env):
self.offset = off
super(PackageSymbolsCursor, self).__init__(
cb, uid, vid, scope, code, path, buf, go_env
)
def _sort(self, desc):
"""Sort the output by File -> Vars -> Type -> Funcs
"""
if desc.get('package') is not None:
return super(PackageSymbolsCursor, self)._sort(desc)
symbols = []
aggregated_data = defaultdict(lambda: [])
detail_field = desc.get('detail')
if detail_field is None:
return symbols
details = desc.get(detail_field)
if details is None:
return symbols
if detail_field == 'type':
filename = details.get('namepos', desc['pos']).split(':')[0]
details['pos'] = details.get('namepos', desc['pos'])
details['name'] = desc['desc']
details['kind'] = details['type']
aggregated_data[filename].append(details)
for elem in details.get('methods', []):
filename = elem['pos'].split(':')[0]
elem['type'] = elem['name']
elem['kind'] = elem['type']
aggregated_data[filename].append(elem)
else:
filename = details['objpos'].split(':')[0]
details['pos'] = details['objpos']
details['name'] = details['type']
details['kind'] = details['type']
aggregated_data[filename].append(details)
for filename, elems in aggregated_data.items():
symbols += sorted(elems, key=lambda x: x['pos'])
return symbols
|
danalec/dotfiles
|
sublime/.config/sublime-text-3/Packages/anaconda_go/plugin/handlers_go/commands/package_symbols.py
|
Python
|
mit
| 5,136
| 0
|
import time
import numpy as np
import keras
import tensorflow as tf
import keras.backend as K
from keras import optimizers
from keras.models import load_model
from keras.callbacks import Callback
from functions import calculate_top_k_new_only
"""
PeriodicValidation - Keras callback - checks val_loss periodically instead of using Model.fit() every epoch
"""
class PeriodicValidation(Callback):
def __init__(self, val_data, batch_size, filepath):
super(PeriodicValidation, self).__init__()
self.val_data = val_data
self.batch_size = batch_size
self.filepath = filepath
self.min_val_loss = np.Inf
def on_epoch_end(self, epoch, logs={}):
if epoch % 5 == 4 or epoch % 5 == 2:
if self.filepath:
self.model.save(self.filepath+".ep_"+str(epoch)+".h5", overwrite=True)
if self.val_data is None:
return
h = self.model.evaluate(self.val_data[0], self.val_data[1], batch_size=self.batch_size, verbose=0)
print("validating on " + str(self.val_data[1].shape[0]) + " samples on epoch " + str(epoch) + ": ", h)
y_top_k_new_only = calculate_top_k_new_only(self.model,
self.val_data[0][0], self.val_data[0][1], self.val_data[1], self.batch_size,
(not self.val_data[0][1].shape[2] == self.val_data[1].shape[1]))
print("testing MAP@K for NEW products: ", y_top_k_new_only)
if h[0] < self.min_val_loss:
if self.filepath:
self.model.save(self.filepath, overwrite=True)
print("val_loss improved from "+str(self.min_val_loss)+" to "+str(h[0])+", saving model to "+self.filepath)
else:
print("val_loss improved from "+str(self.min_val_loss)+" to "+str(h[0]))
self.min_val_loss = h[0]
def on_train_end(self, logs=None): # also log training metrics with higher decimal precision
print("epoch", [m for m in self.model.history.params['metrics']])
for epoch in self.model.history.epoch:
print(epoch, [self.model.history.history[m][epoch] for m in self.model.history.params['metrics']])
#
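# Illustrative usage sketch (an assumption, not taken from the original file):
# val_data is (inputs, targets), where inputs is the two-array list that the
# callback indexes as val_data[0][0] and val_data[0][1].
#
#     cb = PeriodicValidation(val_data=([X1_val, X2_val], y_val),
#                             batch_size=256, filepath="best_model.h5")
#     model.fit([X1_train, X2_train], y_train, epochs=50, callbacks=[cb])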
|
DimiterM/santander
|
PeriodicValidation.py
|
Python
|
mit
| 2,287
| 0.00962
|
# -*- coding: utf8 -*-
from .task import TaskID
from .core import Handler
from .queue import EventQueue
__all__ = [
'TaskID',
'Handler',
'EventQueue',
]
|
nosix/PyCraft
|
src/pycraft/service/whole/handler/__init__.py
|
Python
|
lgpl-3.0
| 171
| 0.005848
|
from .PBXResolver import *
from .PBX_Constants import *
class PBX_Base(object):
    def __init__(self, lookup_func, dictionary, project, identifier):
        # default 'name' property of a PBX object is the type
        self.name = self.__class__.__name__
        # this is the identifier for this object
        self.identifier = str(identifier)
        # set of any referenced identifiers on this object
        self.referencedIdentifiers = set()
    def __attrs(self):
        return (self.identifier)
    def __repr__(self):
        return '(%s : %s : %s)' % (type(self), self.name, self.identifier)
    def __eq__(self, other):
        return isinstance(other, type(self)) and self.identifier == other.identifier
    def __hash__(self):
        return hash(self.__attrs())
    def resolve(self, type, item_list):
        return filter(lambda item: isinstance(item, type), item_list)
    def fetchObjectFromProject(self, lookup_func, identifier, project):
        find_object = project.objectForIdentifier(identifier)
        if find_object == None:
            result = lookup_func(project.contents[kPBX_objects][identifier])
            if result[0] == True:
                find_object = result[1](lookup_func, project.contents[kPBX_objects][identifier], project, identifier)
                project.objects.add(find_object)
        return find_object
    def parseProperty(self, prop_name, lookup_func, dictionary, project, is_array):
        dict_item = dictionary[prop_name]
        if is_array == True:
            property_list = []
            for item in dict_item:
                self.referencedIdentifiers.add(item)
                find_object = self.fetchObjectFromProject(lookup_func, item, project)
                property_list.append(find_object)
            return property_list
        else:
            self.referencedIdentifiers.add(dict_item)
            return self.fetchObjectFromProject(lookup_func, dict_item, project)
|
samdmarshall/xcparse
|
xcparse/Xcode/PBX/PBX_Base.py
|
Python
|
bsd-3-clause
| 2,017
| 0.020327
|
##########################################################################
#
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class PrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
s = GafferScene.Sphere()
p = GafferScene.PrimitiveVariables()
p["in"].setInput( s["out"] )
self.assertScenesEqual( s["out"], p["out"] )
self.assertSceneHashesEqual( s["out"], p["out"] )
p["primitiveVariables"].addMember( "a", IECore.IntData( 10 ) )
self.assertScenesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
self.assertSceneHashesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
self.assertNotEqual( s["out"].objectHash( "/sphere" ), p["out"].objectHash( "/sphere" ) )
self.assertNotEqual( s["out"].object( "/sphere" ), p["out"].object( "/sphere" ) )
o1 = s["out"].object( "/sphere" )
o2 = p["out"].object( "/sphere" )
self.assertEqual( set( o1.keys() + [ "a" ] ), set( o2.keys() ) )
self.assertEqual( o2["a"], IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.IntData( 10 ) ) )
del o2["a"]
self.assertEqual( o1, o2 )
if __name__ == "__main__":
unittest.main()
|
chippey/gaffer
|
python/GafferSceneTest/PrimitiveVariablesTest.py
|
Python
|
bsd-3-clause
| 2,924
| 0.027018
|
import logging
logger = logging.getLogger(__name__)
def get(isdsAppliance, check_mode=False, force=False):
"""
Retrieve available updates
"""
return isdsAppliance.invoke_get("Retrieving available updates",
"/updates/available.json")
def discover(isdsAppliance, check_mode=False, force=False):
"""
Discover available updates
"""
return isdsAppliance.invoke_get("Discover available updates",
"/updates/available/discover")
def upload(isdsAppliance, file, check_mode=False, force=False):
"""
Upload Available Update
"""
if force is True or _check_file(isdsAppliance, file) is False:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post_files(
"Upload Available Update",
"/updates/available",
[{
'file_formfield': 'uploadedfile',
'filename': file,
'mimetype': 'application/octet-stream'
}],
{}, json_response=False)
return isdsAppliance.create_return_object()
def _check_file(isdsAppliance, file):
"""
Parse the file name to see if it is already uploaded - use version and release date from pkg file name
Also check to see if the firmware level is already uploaded
Note: Lot depends on the name of the file.
:param isdsAppliance:
:param file:
:return:
"""
import os.path
# If there is an exception then simply return False
# Sample filename - 8.0.1.9-ISS-ISDS_20181207-0045.pkg
logger.debug("Checking provided file is ready to upload: {0}".format(file))
try:
# Extract file name from path
f = os.path.basename(file)
fn = os.path.splitext(f)
logger.debug("File name without path: {0}".format(fn[0]))
# Split of file by '-' hyphen and '_' under score
import re
fp = re.split('-|_', fn[0])
firm_file_version = fp[0]
firm_file_product = fp[2]
firm_file_date = fp[3]
logger.debug("PKG file details: {0}: version: {1} date: {2}".format(firm_file_product, firm_file_version, firm_file_date))
# Check if firmware level already contains the update to be uploaded or greater, check Active partition
# firmware "name" of format - 8.0.1.9-ISS-ISDS_20181207-0045
import ibmsecurity.isds.firmware
ret_obj = ibmsecurity.isds.firmware.get(isdsAppliance)
for firm in ret_obj['data']:
            # Split the firmware name on '-' (hyphen) and '_' (underscore)
fp = re.split('-|_', firm['name'])
firm_appl_version = fp[0]
firm_appl_product = fp[2]
firm_appl_date = fp[3]
logger.debug("Partition details ({0}): {1}: version: {2} date: {3}".format(firm['partition'], firm_appl_product, firm_appl_version, firm_appl_date))
if firm['active'] is True:
from ibmsecurity.utilities import tools
if tools.version_compare(firm_appl_version, firm_file_version) >= 0:
                    logger.info(
                        "Active partition has version {0}, which is greater than or equal to the install package version {1}.".format(
                            firm_appl_version, firm_file_version))
return True
else:
                    logger.info(
                        "Active partition has version {0}, which is lower than the install package version {1}.".format(
                            firm_appl_version, firm_file_version))
# Check if update uploaded - will not show up if installed though
ret_obj = get(isdsAppliance)
for upd in ret_obj['data']:
rd = upd['release_date']
rd = rd.replace('-', '') # turn release date into 20161102 format from 2016-11-02
if upd['version'] == fp[0] and rd == fp[3]: # Version of format 8.0.1.9
return True
except Exception as e:
logger.debug("Exception occured: {0}".format(e))
pass
return False
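# Illustrative sketch of the file-name parsing performed above (not part of
# the module's API; the sample name is the one quoted in _check_file):
#
#   import os.path
#   import re
#   fn = os.path.splitext(os.path.basename("8.0.1.9-ISS-ISDS_20181207-0045.pkg"))
#   fp = re.split('-|_', fn[0])
#   # fp == ['8.0.1.9', 'ISS', 'ISDS', '20181207', '0045']
#   # -> version fp[0], product fp[2], release date fp[3]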
def install(isdsAppliance, type, version, release_date, name, check_mode=False, force=False):
"""
Install Available Update
"""
if force is True or _check(isdsAppliance, type, version, release_date, name) is True:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
ret_obj = isdsAppliance.invoke_post("Install Available Update",
"/updates/available/install",
{"updates": [
{
"type": type,
"version": version,
"release_date": release_date,
"name": name
}
]
})
isdsAppliance.facts['version'] = version
return ret_obj
return isdsAppliance.create_return_object()
def _check(isdsAppliance, type, version, release_date, name):
ret_obj = get(isdsAppliance)
for upd in ret_obj['data']:
# If there is an installation in progress then abort
if upd['state'] == 'Installing':
logger.debug("Detecting a state of installing...")
return False
if upd['type'] == type and upd['version'] == version and upd['release_date'] == release_date and upd[
'name'] == name:
logger.debug("Requested firmware ready for install...")
return True
logger.debug("Requested firmware not available for install...")
return False
|
IBM-Security/ibmsecurity
|
ibmsecurity/isds/available_updates.py
|
Python
|
apache-2.0
| 6,037
| 0.002319
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import task_classes
from task_classes import QsubAnalysisTask
class DemoQsubAnalysisTask(QsubAnalysisTask):
"""
Demo task that will submit a single qsub job for the analysis
"""
def __init__(self, analysis, taskname = 'DemoQsubAnalysisTask', config_file = 'DemoQsubAnalysisTask.yml', extra_handlers = None):
"""
Parameters
----------
analysis: SnsWESAnalysisOutput
the `sns` pipeline output object to run the task on. If ``None`` is passed, ``self.analysis`` is retrieved instead.
extra_handlers: list
a list of extra Filehandlers to use for logging
"""
QsubAnalysisTask.__init__(self, taskname = taskname, config_file = config_file, analysis = analysis, extra_handlers = extra_handlers)
def main(self, analysis):
"""
Main function for performing the analysis task on the entire analysis
Put your code for performing the analysis task on the entire analysis here
Parameters
----------
analysis: SnsWESAnalysisOutput
the `sns` pipeline output object to run the task on. If ``None`` is passed, ``self.analysis`` is retrieved instead.
Returns
-------
qsub.Job
a single qsub job object
"""
self.logger.debug('Put your code for doing the analysis task in this function')
self.logger.debug('The global configs for all tasks will be in this dict: {0}'.format(self.main_configs))
self.logger.debug('The configs loaded from the task YAML file will be in this dict: {0}'.format(self.task_configs))
self.logger.debug('Analysis is: {0}'.format(analysis.id))
# output file
output_foo = self.get_analysis_file_outpath(file_basename = 'foo.txt')
output_bar = self.get_analysis_file_outpath(file_basename = 'bar.txt')
self.logger.debug('output_foo is: {0}'.format(output_foo))
self.logger.debug('output_bar is: {0}'.format(output_bar))
# get the dir for the qsub logs
qsub_log_dir = analysis.list_none(analysis.get_dirs('logs-qsub'))
self.logger.debug('qsub_log_dir is {0}:'.format(qsub_log_dir))
# make the shell command to run
command = 'touch "{0}"; touch "{1}"; sleep 10'.format(output_foo, output_bar)
self.logger.debug('command will be:\n{0}'.format(command))
# submit the command as a qsub job on the HPC
job = self.qsub.submit(command = command, name = self.taskname + '.' + analysis.id, stdout_log_dir = qsub_log_dir, stderr_log_dir = qsub_log_dir, verbose = True, sleeps = 1)
return(job)
|
NYU-Molecular-Pathology/snsxt
|
snsxt/sns_tasks/DemoQsubAnalysisTask.py
|
Python
|
gpl-3.0
| 2,720
| 0.015074
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------#
# Security - Linux Authentication Tester with /etc/shadow #
# ============================================================================ #
# Note: To be used for test purpose only #
# Developer: Chavaillaz Johan #
# Filename: LinuxAuthenticationTesterShadow.py #
# Version: 1.0 #
# #
# Licensed to the Apache Software Foundation (ASF) under one #
# or more contributor license agreements. See the NOTICE file #
# distributed with this work for additional information #
# regarding copyright ownership. The ASF licenses this file #
# to you under the Apache License, Version 2.0 (the #
# "License"); you may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, #
# software distributed under the License is distributed on an #
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, either express or implied. See the License for the #
# specific language governing permissions and limitations #
# under the License. #
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# #
# LIBRARIES IMPORT #
# #
#------------------------------------------------------------------------------#
import sys
import crypt
import spwd
import argparse
#------------------------------------------------------------------------------#
# #
# UTILITIES FUNCTIONS #
# #
#------------------------------------------------------------------------------#
def checkAuthentication(shadowPwdDb, password):
"""
Test authentication in linux
    :param shadowPwdDb: Encrypted password field (sp_pwdp) of the user's shadow password database entry
    :type shadowPwdDb: str
:param password: Account password to test
:type password: str
"""
    # crypt.crypt() reuses the salt embedded in the stored hash, so equal
    # output means the candidate password matches.
    return crypt.crypt(password, shadowPwdDb) == shadowPwdDb
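# Illustrative sketch (assumed values, not from the original script):
#
#   stored = crypt.crypt("secret", "$6$somesalt$")   # SHA-512 hash, as stored in /etc/shadow
#   checkAuthentication(stored, "secret")            # -> True
#   checkAuthentication(stored, "wrong")             # -> False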
def bruteForce(username, dictionary):
"""
Authentication test for each password in the dictionary
with the given user name on the current computer
:param username: Username used to test each password in given dictionary
:type username: str
    :param dictionary: Dictionary file path that contains all passwords to test
:type dictionary: str
"""
# Return the shadow password database entry for the given user name
shadowPwdDb = spwd.getspnam(username)[1]
# Open dictionary file
with open(dictionary) as file:
# Read each line : One line = One password
for line in file:
# Delete new line character
password = line.rstrip('\n')
# Check authentication
if checkAuthentication(shadowPwdDb, password):
return password
return False
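# Illustrative sketch (assumed file name; the dictionary is expected to hold
# one candidate password per line, as the rstrip('\n') above implies):
#
#   found = bruteForce("alice", "wordlist.txt")
#   # -> the matching password string, or False if none matched
#
# Note: spwd.getspnam() reads /etc/shadow, so root privileges are required.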
#------------------------------------------------------------------------------#
# #
# "MAIN" FUNCTION #
# #
#------------------------------------------------------------------------------#
# If this is the main module, run this
if __name__ == '__main__':
argsCount = len(sys.argv)
# Create argument parser to help user
parser = argparse.ArgumentParser(
description='Test user authentication with a given dictionary.'
)
parser.add_argument(
'username',
type=str,
help='Username used to test each password in given dictionary file.'
)
parser.add_argument(
'dictionary',
type=str,
        help='Dictionary file path that contains all passwords to test.'
)
# Show help if one of the arguments is missing
if argsCount != 3:
parser.print_help()
sys.exit()
# User and dictionary file in scripts arguments
username = sys.argv[1]
dictionary = sys.argv[2]
# Launch script
try:
password = bruteForce(username, dictionary)
if not password:
print("Password not found in dictionary")
else:
print("Password found : " + password)
except (OSError, IOError) as e:
print("Dictionary not found")
except KeyError:
print("User '%s' not found" % username)
|
Chavjoh/LinuxAuthenticationTester
|
LinuxAuthenticationTesterShadow.py
|
Python
|
apache-2.0
| 5,520
| 0.025915
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the registry flows."""
import os
from absl import app
from grr_response_client.client_actions import file_fingerprint
from grr_response_client.client_actions import searching
from grr_response_client.client_actions import standard
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import artifact
from grr_response_server import data_store
from grr_response_server.flows.general import registry
from grr_response_server.flows.general import transfer
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class RegistryFlowTest(flow_test_lib.FlowTestsBaseclass):
def setUp(self):
super().setUp()
vfs_overrider = vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.REGISTRY,
vfs_test_lib.FakeRegistryVFSHandler)
vfs_overrider.Start()
self.addCleanup(vfs_overrider.Stop)
class TestFakeRegistryFinderFlow(RegistryFlowTest):
"""Tests for the RegistryFinder flow."""
runkey = "HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"
def RunFlow(self, client_id, keys_paths=None, conditions=None):
if keys_paths is None:
keys_paths = [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
]
if conditions is None:
conditions = []
client_mock = action_mocks.ActionMock(
searching.Find,
searching.Grep,
)
session_id = flow_test_lib.TestFlowHelper(
registry.RegistryFinder.__name__,
client_mock,
client_id=client_id,
keys_paths=keys_paths,
conditions=conditions,
creator=self.test_username)
return session_id
def testFindsNothingIfNothingMatchesTheGlob(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/NonMatch*"
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeysWithSingleGlobWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeysWithTwoGlobsWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Side*",
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Mct*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeyWithInterpolatedGlobWithoutConditions(self):
user = rdf_client.User(sid="S-1-5-20")
client_id = self.SetupClient(0, users=[user])
session_id = self.RunFlow(client_id, [
"HKEY_USERS/%%users.sid%%/Software/Microsoft/Windows/"
"CurrentVersion/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
key = ("/HKEY_USERS/S-1-5-20/"
"Software/Microsoft/Windows/CurrentVersion/Run")
self.assertEqual(results[0].stat_entry.pathspec.CollapsePath(), key)
self.assertEqual(results[0].stat_entry.pathspec.path, key)
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfNothingMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10, bytes_after=10, literal=b"CanNotFindMe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeyIfItMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10,
bytes_after=10,
literal=b"Windows Sidebar\\Sidebar.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfRegexMatchesNothing(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b".*CanNotFindMe.*")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeyIfItMatchesRegexMatchCondition(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b"Windows.+\\.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/"
"CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
  def testFindsNothingIfModificationTimeConditionMatchesNothing(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeysIfModificationTimeConditionMatches(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 - 1),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 + 1))
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeyWithLiteralAndModificationTimeConditions(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 - 1),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
1247546054 + 1))
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10,
bytes_after=10,
literal=b"Windows Sidebar\\Sidebar.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODIFICATION_TIME,
modification_time=modification_time),
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Sidebar")
def testSizeCondition(self):
client_id = self.SetupClient(0)
# There are two values, one is 20 bytes, the other 53.
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type.SIZE,
size=rdf_file_finder.FileFinderSizeCondition(min_file_size=50))
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertGreater(results[0].stat_entry.st_size, 50)
class TestRegistryFlows(RegistryFlowTest):
"""Test the Run Key registry flows."""
@parser_test_lib.WithAllParsers
def testCollectRunKeyBinaries(self):
"""Read Run key from the client_fixtures to test parsing and storage."""
client_id = self.SetupClient(0, system="Windows", os_version="6.2")
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(
file_fingerprint.FingerprintFile,
searching.Find,
standard.GetFileStat,
)
# Get KB initialized
session_id = flow_test_lib.TestFlowHelper(
artifact.KnowledgeBaseInitializationFlow.__name__,
client_mock,
client_id=client_id,
creator=self.test_username)
kb = flow_test_lib.GetFlowResults(client_id, session_id)[0]
client = data_store.REL_DB.ReadClientSnapshot(client_id)
client.knowledge_base = kb
data_store.REL_DB.WriteClientSnapshot(client)
with test_lib.Instrument(transfer.MultiGetFile,
"Start") as getfile_instrument:
# Run the flow in the emulated way.
flow_test_lib.TestFlowHelper(
registry.CollectRunKeyBinaries.__name__,
client_mock,
client_id=client_id,
creator=self.test_username)
# Check MultiGetFile got called for our runkey file
download_requested = False
for pathspec in getfile_instrument.args[0][0].args.pathspecs:
if pathspec.path == u"C:\\Windows\\TEMP\\A.exe":
download_requested = True
self.assertTrue(download_requested)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
google/grr
|
grr/server/grr_response_server/flows/general/registry_test.py
|
Python
|
apache-2.0
| 12,992
| 0.002617
|
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from .encoding_output import EncodingOutput
class Sprite(AbstractNameDescriptionResource, AbstractModel, Serializable):
def __init__(self, height, width, sprite_name, vtt_name, outputs, distance=None, id_=None, custom_data=None,
name=None, description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._outputs = None
self.height = height
self.width = width
self.distance = distance
self.spriteName = sprite_name
self.vttName = vtt_name
if outputs is not None and not isinstance(outputs, list):
raise InvalidTypeError('outputs must be a list')
self.outputs = outputs
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
custom_data = json_object.get('customData')
width = json_object.get('width')
height = json_object.get('height')
distance = json_object.get('distance')
sprite_name = json_object.get('spriteName')
vtt_name = json_object.get('vttName')
outputs = json_object.get('outputs')
name = json_object.get('name')
description = json_object.get('description')
sprite = Sprite(id_=id_, custom_data=custom_data, outputs=outputs, name=name, description=description,
height=height, width=width, sprite_name=sprite_name, vtt_name=vtt_name, distance=distance)
return sprite
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, new_outputs):
if new_outputs is None:
return
if not isinstance(new_outputs, list):
raise InvalidTypeError('new_outputs has to be a list of EncodingOutput objects')
if all(isinstance(output, EncodingOutput) for output in new_outputs):
self._outputs = new_outputs
else:
outputs = []
for json_object in new_outputs:
output = EncodingOutput.parse_from_json_object(json_object)
outputs.append(output)
self._outputs = outputs
def serialize(self):
serialized = super().serialize()
serialized['outputs'] = self.outputs
return serialized
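# Illustrative usage sketch (assumed values; not part of the original module):
#
#   sprite = Sprite(height=90, width=160, sprite_name='sprite.jpg',
#                   vtt_name='sprite.vtt', outputs=[], distance=10,
#                   name='thumbnail sprite')
#   payload = sprite.serialize()  # dict suitable for posting to the API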
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/encodings/sprite.py
|
Python
|
unlicense
| 2,500
| 0.002
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import unittest
import logging
import functools
from nose.tools import * # flake8: noqa (PEP8 asserts)
import mock
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate, migrate_collected_metadata
from osf.models import (
Retraction,
NodeLicense,
Tag,
QuickFilesNode,
CollectedGuidMetadata,
)
from addons.osfstorage.models import OsfStorageFile
from scripts.populate_institutions import main as populate_institutions
from osf_tests import factories
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.utils import mock_archive, run_celery_tasks
TEST_INDEX = 'test'
def query(term, raw=False):
results = search.search(build_query(term), index=elastic_search.INDEX, raw=raw)
return results
def query_collections(name):
term = 'category:collectionSubmission AND "{}"'.format(name)
return query(term, raw=True)
def query_user(name):
term = 'category:user AND "{}"'.format(name)
return query(term)
def query_file(name):
term = 'category:file AND "{}"'.format(name)
return query(term)
def query_tag_file(name):
term = 'category:file AND (tags:u"{}")'.format(name)
return query(term)
def retry_assertion(interval=0.3, retries=3):
def test_wrapper(func):
t_interval = interval
t_retries = retries
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError as e:
if retries:
time.sleep(t_interval)
retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs)
else:
raise e
return wrapped
return test_wrapper
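# Illustrative sketch (hypothetical test method): the decorator retries a
# flaky assertion up to `retries` times, sleeping `interval` seconds between
# attempts, before letting the AssertionError propagate.
#
#   @retry_assertion(interval=0.5, retries=5)
#   def test_eventually_consistent(self):
#       assert_equal(len(query('foo')['results']), 1)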
class TestCollectionsSearch(OsfTestCase):
def setUp(self):
super(TestCollectionsSearch, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='Salif Keita')
self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False)
self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True)
self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True)
self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True)
self.provider = factories.CollectionProviderFactory()
self.collection_one = factories.CollectionFactory(title='Life of Salif Keita', creator=self.user, is_public=True, provider=self.provider)
self.collection_public = factories.CollectionFactory(title='Best of Salif Keita', creator=self.user, is_public=True, provider=self.provider)
        self.collection_private = factories.CollectionFactory(title='Commentary: Best of Salif Keita', creator=self.user, is_public=False, provider=self.provider)
def test_only_public_collections_submissions_are_searchable(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
assert_false(self.node_one.is_collected)
assert_false(self.node_public.is_collected)
self.collection_one.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_public.is_collected)
self.collection_one.save()
self.collection_public.save()
assert_true(self.node_one.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
self.collection_private.collect_object(self.node_two, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
def test_index_on_submission_privacy_changes(self):
# test_submissions_turned_private_are_deleted_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_one.collect_object(self.node_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
with run_celery_tasks():
self.node_one.is_public = False
self.node_one.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_turned_public_are_added_to_index
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.node_private.is_public = True
self.node_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
def test_index_on_collection_privacy_changes(self):
# test_submissions_of_collection_turned_private_are_removed_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
with run_celery_tasks():
self.collection_public.is_public = False
self.collection_public.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_of_collection_turned_public_are_added_to_index
self.collection_private.collect_object(self.node_one, self.user)
self.collection_private.collect_object(self.node_two, self.user)
self.collection_private.collect_object(self.node_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_two.is_collected)
assert_true(self.node_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.collection_private.is_public = True
self.collection_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
def test_collection_submissions_are_removed_from_index_on_delete(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
self.collection_public.delete()
assert_true(self.collection_public.deleted)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_removed_submission_are_removed_from_index(self):
self.collection_public.collect_object(self.node_one, self.user)
assert_true(self.node_one.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
self.collection_public.remove_object(self.node_one)
assert_false(self.node_one.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_collection_submission_doc_structure(self):
self.collection_public.collect_object(self.node_one, self.user)
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
with run_celery_tasks():
self.node_one.title = 'Keita Royal Family of Mali'
self.node_one.save()
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
assert_equal(docs[0]['_source']['abstract'], self.node_one.description)
assert_equal(docs[0]['_source']['contributors'][0]['url'], self.user.url)
assert_equal(docs[0]['_source']['contributors'][0]['fullname'], self.user.fullname)
assert_equal(docs[0]['_source']['url'], self.node_one.url)
assert_equal(docs[0]['_source']['id'], '{}-{}'.format(self.node_one._id,
self.node_one.collecting_metadata_list[0].collection._id))
assert_equal(docs[0]['_source']['category'], 'collectionSubmission')
class TestUserUpdate(OsfTestCase):
def setUp(self):
super(TestUserUpdate, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
def test_new_user(self):
# Verify that user has been added to Elastic Search
docs = query_user(self.user.fullname)['results']
assert_equal(len(docs), 1)
def test_new_user_unconfirmed(self):
user = factories.UnconfirmedUserFactory()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 0)
token = user.get_confirmation_token(user.username)
user.confirm_email(token)
user.save()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 1)
def test_change_name(self):
# Add a user, change her name, and verify that only the new name is
# found in search.
user = factories.UserFactory(fullname='Barry Mitchell')
fullname_original = user.fullname
user.fullname = user.fullname[::-1]
user.save()
docs_original = query_user(fullname_original)['results']
assert_equal(len(docs_original), 0)
docs_current = query_user(user.fullname)['results']
assert_equal(len(docs_current), 1)
def test_disabled_user(self):
# Test that disabled users are not in search index
user = factories.UserFactory(fullname='Bettie Page')
user.save()
# Ensure user is in search index
assert_equal(len(query_user(user.fullname)['results']), 1)
# Disable the user
user.is_disabled = True
user.save()
# Ensure user is not in search index
assert_equal(len(query_user(user.fullname)['results']), 0)
def test_merged_user(self):
user = factories.UserFactory(fullname='Annie Lennox')
merged_user = factories.UserFactory(fullname='Lisa Stansfield')
user.save()
merged_user.save()
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 1)
user.merge_user(merged_user)
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 0)
def test_employment(self):
user = factories.UserFactory(fullname='Helga Finn')
user.save()
institution = 'Finn\'s Fine Filers'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.jobs.append({
'institution': institution,
'title': 'The Big Finn',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_education(self):
user = factories.UserFactory(fullname='Henry Johnson')
user.save()
institution = 'Henry\'s Amazing School!!!'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.schools.append({
'institution': institution,
'degree': 'failed all classes',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_name_fields(self):
names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
user = factories.UserFactory(fullname=names[0])
user.given_name = names[1]
user.middle_names = names[2]
user.family_name = names[3]
user.suffix = names[4]
user.save()
docs = [query_user(name)['results'] for name in names]
assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
assert_true(all([user._id == doc[0]['id'] for doc in docs]))
class TestProject(OsfTestCase):
def setUp(self):
super(TestProject, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.project = factories.ProjectFactory(title='Red Special', creator=self.user)
def test_new_project_private(self):
# Verify that a private project is not present in Elastic Search.
docs = query(self.project.title)['results']
assert_equal(len(docs), 0)
def test_make_public(self):
# Make project public, and verify that it is present in Elastic
# Search.
with run_celery_tasks():
self.project.set_privacy('public')
docs = query(self.project.title)['results']
assert_equal(len(docs), 1)
class TestNodeSearch(OsfTestCase):
def setUp(self):
super(TestNodeSearch, self).setUp()
with run_celery_tasks():
self.node = factories.ProjectFactory(is_public=True, title='node')
self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child')
self.private_child = factories.ProjectFactory(parent=self.node, title='private_child')
self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True)
self.node.node_license = factories.NodeLicenseRecordFactory()
self.node.save()
self.query = 'category:project & category:component'
@retry_assertion()
def test_node_license_added_to_search(self):
docs = query(self.query)['results']
node = [d for d in docs if d['title'] == self.node.title][0]
assert_in('license', node)
assert_equal(node['license']['id'], self.node.node_license.license_id)
@unittest.skip("Elasticsearch latency seems to be causing theses tests to fail randomly.")
@retry_assertion(retries=10)
def test_node_license_propogates_to_children(self):
docs = query(self.query)['results']
child = [d for d in docs if d['title'] == self.public_child.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
child = [d for d in docs if d['title'] == self.public_subchild.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
@unittest.skip("Elasticsearch latency seems to be causing theses tests to fail randomly.")
@retry_assertion(retries=10)
def test_node_license_updates_correctly(self):
other_license = NodeLicense.objects.get(name='MIT License')
new_license = factories.NodeLicenseRecordFactory(node_license=other_license)
self.node.node_license = new_license
self.node.save()
docs = query(self.query)['results']
for doc in docs:
assert_equal(doc['license'].get('id'), new_license.license_id)
class TestRegistrationRetractions(OsfTestCase):
def setUp(self):
super(TestRegistrationRetractions, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
creator=self.user,
is_public=True,
)
self.registration = factories.RegistrationFactory(project=self.project, is_public=True)
@mock.patch('website.project.tasks.update_node_share')
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_is_searchable(self, mock_registration_updated):
self.registration.retract_registration(self.user)
self.registration.retraction.state = Retraction.APPROVED
self.registration.retraction.save()
self.registration.save()
self.registration.retraction._on_complete(self.user)
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_pending_retraction_wiki_content_is_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.registration.create_or_update_node_wiki(name=key, content=value, auth=self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
with run_celery_tasks():
self.registration.save()
self.registration.reload()
        # Query and ensure unique string in wiki still shows up (retraction is only pending)
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_wiki_content_is_not_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.registration.create_or_update_node_wiki(name=key, content=value, auth=self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
self.registration.retraction.state = Retraction.APPROVED
with run_celery_tasks():
self.registration.retraction.save()
self.registration.save()
self.registration.update_search()
# Query and ensure unique string in wiki doesn't show up
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 0)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
class TestPublicNodes(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestPublicNodes, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.component = factories.NodeFactory(
parent=self.project,
description='',
title=self.title,
creator=self.user,
is_public=True
)
self.registration = factories.RegistrationFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration.archive_job.target_addons = []
self.registration.archive_job.status = 'SUCCESS'
self.registration.archive_job.save()
def test_make_private(self):
# Make project public, then private, and verify that it is not present
# in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.component.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_search_node_partial(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Blue')['results']
assert_equal(len(find), 1)
def test_search_node_partial_with_sep(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Express')['results']
assert_equal(len(find), 1)
def test_search_node_not_name(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Green Flyer-Slow')['results']
assert_equal(len(find), 0)
def test_public_parent_title(self):
self.project.set_title('hello & world', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['parent_title'], 'hello & world')
assert_true(docs[0]['parent_url'])
def test_make_parent_private(self):
# Make parent of component, public, then private, and verify that the
# component still appears but doesn't link to the parent in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_false(docs[0]['parent_title'])
assert_false(docs[0]['parent_url'])
def test_delete_project(self):
with run_celery_tasks():
self.component.remove_node(self.consolidate_auth)
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.remove_node(self.consolidate_auth)
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_change_title(self):
title_original = self.project.title
with run_celery_tasks():
self.project.set_title(
'Blue Ordinary', self.consolidate_auth, save=True
)
docs = query('category:project AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:project AND ' + self.project.title)['results']
assert_equal(len(docs), 1)
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.project.add_tag(tag, self.consolidate_auth, save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
self.project.remove_tag(tag, self.consolidate_auth, save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_update_wiki(self):
"""Add text to a wiki page, then verify that project is found when
searching for wiki text.
"""
wiki_content = {
'home': 'Hammer to fall',
'swag': '#YOLO'
}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.update_node_wiki(
key, value, self.consolidate_auth,
)
docs = query(value)['results']
assert_equal(len(docs), 1)
def test_clear_wiki(self):
# Add wiki text to page, then delete, then verify that project is not
# found when searching for wiki text.
wiki_content = 'Hammer to fall'
self.project.update_node_wiki(
'home', wiki_content, self.consolidate_auth,
)
with run_celery_tasks():
self.project.update_node_wiki('home', '', self.consolidate_auth)
docs = query(wiki_content)['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.add_contributor(user2, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2, save=True)
self.project.remove_contributor(user2, self.consolidate_auth)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2)
with run_celery_tasks():
self.project.set_visible(user2, False, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.set_visible(user2, True, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_wrong_order_search(self):
title_parts = self.title.split(' ')
title_parts.reverse()
title_search = ' '.join(title_parts)
docs = query(title_search)['results']
assert_equal(len(docs), 3)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
docs = query(self.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
class TestAddContributor(OsfTestCase):
# Tests of the search.search_contributor method
def setUp(self):
self.name1 = 'Roger1 Taylor1'
self.name2 = 'John2 Deacon2'
self.name3 = u'j\xc3\xb3ebert3 Smith3'
self.name4 = u'B\xc3\xb3bbert4 Jones4'
with run_celery_tasks():
super(TestAddContributor, self).setUp()
self.user = factories.UserFactory(fullname=self.name1)
self.user3 = factories.UserFactory(fullname=self.name3)
def test_unreg_users_dont_show_in_search(self):
unreg = factories.UnregUserFactory()
contribs = search.search_contributor(unreg.fullname)
assert_equal(len(contribs['users']), 0)
def test_unreg_users_do_show_on_projects(self):
with run_celery_tasks():
unreg = factories.UnregUserFactory(fullname='Robert Paulson')
self.project = factories.ProjectFactory(
title='Glamour Rock',
creator=unreg,
is_public=True,
)
results = query(unreg.fullname)['results']
assert_equal(len(results), 1)
def test_search_fullname(self):
# Searching for full name yields exactly one result.
contribs = search.search_contributor(self.name1)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2)
assert_equal(len(contribs['users']), 0)
def test_search_firstname(self):
# Searching for first name yields exactly one result.
contribs = search.search_contributor(self.name1.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial(self):
# Searching for part of first name yields exactly one
# result.
contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_fullname_special_character(self):
# Searching for a fullname with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4)
assert_equal(len(contribs['users']), 0)
    def test_search_firstname_special_character(self):
# Searching for a first name with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial_special_character(self):
# Searching for a partial name with a special character yields
        # exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_profile(self):
orcid = '123456'
user = factories.UserFactory()
user.social['orcid'] = orcid
user.save()
contribs = search.search_contributor(orcid)
assert_equal(len(contribs['users']), 1)
assert_equal(len(contribs['users'][0]['social']), 1)
assert_equal(contribs['users'][0]['social']['orcid'], user.social_links['orcid'])
class TestProjectSearchResults(OsfTestCase):
def setUp(self):
self.singular = 'Spanish Inquisition'
self.plural = 'Spanish Inquisitions'
self.possessive = 'Spanish\'s Inquisition'
with run_celery_tasks():
super(TestProjectSearchResults, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project_singular = factories.ProjectFactory(
title=self.singular,
creator=self.user,
is_public=True,
)
self.project_plural = factories.ProjectFactory(
title=self.plural,
creator=self.user,
is_public=True,
)
self.project_possessive = factories.ProjectFactory(
title=self.possessive,
creator=self.user,
is_public=True,
)
self.project_unrelated = factories.ProjectFactory(
title='Cardinal Richelieu',
creator=self.user,
is_public=True,
)
def test_singular_query(self):
# Verify searching for singular term includes singular,
# possessive and plural versions in results.
time.sleep(1)
results = query(self.singular)['results']
assert_equal(len(results), 3)
def test_plural_query(self):
        # Verify searching for plural term includes singular,
        # possessive and plural versions in results.
results = query(self.plural)['results']
assert_equal(len(results), 3)
def test_possessive_query(self):
# Verify searching for possessive term includes singular,
# possessive and plural versions in results.
results = query(self.possessive)['results']
assert_equal(len(results), 3)
def job(**kwargs):
keys = [
'title',
'institution',
'department',
'location',
'startMonth',
'startYear',
'endMonth',
'endYear',
'ongoing',
]
job = {}
for key in keys:
if key[-5:] == 'Month':
job[key] = kwargs.get(key, 'December')
elif key[-4:] == 'Year':
job[key] = kwargs.get(key, '2000')
else:
job[key] = kwargs.get(key, 'test_{}'.format(key))
return job
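# Illustrative sketch of the dict job() builds with no overrides ('Month'/'Year'
# keys get calendar defaults, everything else a 'test_' placeholder):
#
#   job() == {
#       'title': 'test_title', 'institution': 'test_institution',
#       'department': 'test_department', 'location': 'test_location',
#       'startMonth': 'December', 'startYear': '2000',
#       'endMonth': 'December', 'endYear': '2000',
#       'ongoing': 'test_ongoing',
#   }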
class TestUserSearchResults(OsfTestCase):
def setUp(self):
with run_celery_tasks():
super(TestUserSearchResults, self).setUp()
self.user_one = factories.UserFactory(jobs=[job(institution='Oxford'),
job(institution='Star Fleet')],
fullname='Date Soong')
self.user_two = factories.UserFactory(jobs=[job(institution='Grapes la Picard'),
job(institution='Star Fleet')],
fullname='Jean-Luc Picard')
self.user_three = factories.UserFactory(jobs=[job(institution='Star Fleet'),
job(institution='Federation Medical')],
fullname='Beverly Crusher')
self.user_four = factories.UserFactory(jobs=[job(institution='Star Fleet')],
fullname='William Riker')
self.user_five = factories.UserFactory(jobs=[job(institution='Traveler intern'),
job(institution='Star Fleet Academy'),
job(institution='Star Fleet Intern')],
fullname='Wesley Crusher')
for i in range(25):
factories.UserFactory(jobs=[job()])
self.current_starfleet = [
self.user_three,
self.user_four,
]
self.were_starfleet = [
self.user_one,
self.user_two,
self.user_three,
self.user_four,
self.user_five
]
    @unittest.skip('Cannot guarantee this always passes')
def test_current_job_first_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
current_starfleet_names = [u.fullname for u in self.current_starfleet]
for name in result_names[:2]:
assert_in(name, current_starfleet_names)
def test_had_job_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
were_starfleet_names = [u.fullname for u in self.were_starfleet]
for name in result_names:
assert_in(name, were_starfleet_names)
class TestSearchExceptions(OsfTestCase):
# Verify that the correct exception is thrown when the connection is lost
@classmethod
def setUpClass(cls):
logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
super(TestSearchExceptions, cls).setUpClass()
if settings.SEARCH_ENGINE == 'elastic':
cls._client = search.search_engine.CLIENT
search.search_engine.CLIENT = None
@classmethod
def tearDownClass(cls):
super(TestSearchExceptions, cls).tearDownClass()
if settings.SEARCH_ENGINE == 'elastic':
search.search_engine.CLIENT = cls._client
@requires_search
def test_connection_error(self):
# Ensures that saving projects/users doesn't break as a result of connection errors
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project = factories.ProjectFactory(
title="Tom Sawyer",
creator=self.user,
is_public=True,
)
self.user.save()
self.project.save()
class TestSearchMigration(OsfTestCase):
# Verify that the correct indices are created/deleted during migration
@classmethod
def tearDownClass(cls):
super(TestSearchMigration, cls).tearDownClass()
search.create_index(settings.ELASTIC_INDEX)
def setUp(self):
super(TestSearchMigration, self).setUp()
populate_institutions('test')
self.es = search.search_engine.CLIENT
search.delete_index(settings.ELASTIC_INDEX)
search.create_index(settings.ELASTIC_INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
self.project = factories.ProjectFactory(
title=settings.ELASTIC_INDEX,
creator=self.user,
is_public=True
)
def test_first_migration_no_remove(self):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_no_remove(self):
for n in xrange(1, 21):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
def test_first_migration_with_remove(self):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys()[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_with_remove(self):
for n in xrange(1, 21, 2):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys()[0], settings.ELASTIC_INDEX)
assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
def test_migration_institutions(self):
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
institution_bucket_found = False
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type=None, search_type='count', body=count_query)
for bucket in res['aggregations']['counts']['buckets']:
if bucket['key'] == u'institution':
institution_bucket_found = True
assert_equal(institution_bucket_found, True)
def test_migration_collections(self):
provider = factories.CollectionProviderFactory()
collection_one = factories.CollectionFactory(is_public=True, provider=provider)
collection_two = factories.CollectionFactory(is_public=True, provider=provider)
node = factories.NodeFactory(creator=self.user, title='Ali Bomaye', is_public=True)
collection_one.collect_object(node, self.user)
collection_two.collect_object(node, self.user)
assert node.is_collected
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type='collectionSubmission', search_type='count', body=count_query)
assert res['hits']['total'] == 2
class TestSearchFiles(OsfTestCase):
def setUp(self):
super(TestSearchFiles, self).setUp()
self.node = factories.ProjectFactory(is_public=True, title='Otis')
self.osf_storage = self.node.get_addon('osfstorage')
self.root = self.osf_storage.get_root()
def test_search_file(self):
self.root.append_file('Shake.wav')
find = query_file('Shake.wav')['results']
assert_equal(len(find), 1)
def test_search_file_name_without_separator(self):
self.root.append_file('Shake.wav')
find = query_file('Shake')['results']
assert_equal(len(find), 1)
def test_delete_file(self):
file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav')
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 1)
file_.delete()
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 0)
def test_add_tag(self):
file_ = self.root.append_file('That\'s How Strong My Love Is.mp3')
tag = Tag(name='Redding')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Redding')['results']
assert_equal(len(find), 1)
def test_remove_tag(self):
file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3')
tag = Tag(name='Blue')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 1)
file_.tags.remove(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 0)
def test_make_node_private(self):
file_ = self.root.append_file('Change_Gonna_Come.wav')
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 1)
self.node.is_public = False
with run_celery_tasks():
self.node.save()
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 0)
def test_make_private_node_public(self):
self.node.is_public = False
self.node.save()
file_ = self.root.append_file('Try a Little Tenderness.flac')
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 0)
self.node.is_public = True
with run_celery_tasks():
self.node.save()
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 1)
def test_delete_node(self):
node = factories.ProjectFactory(is_public=True, title='The Soul Album')
osf_storage = node.get_addon('osfstorage')
root = osf_storage.get_root()
root.append_file('The Dock of the Bay.mp3')
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 1)
node.is_deleted = True
with run_celery_tasks():
node.save()
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 0)
def test_file_download_url_guid(self):
file_ = self.root.append_file('Timber.mp3')
file_guid = file_.get_guid(create=True)
file_.save()
find = query_file('Timber.mp3')['results']
assert_equal(find[0]['guid_url'], '/' + file_guid._id + '/')
def test_file_download_url_no_guid(self):
file_ = self.root.append_file('Timber.mp3')
path = OsfStorageFile.objects.get(node=file_.node).path
deep_url = '/' + file_.node._id + '/files/osfstorage' + path + '/'
find = query_file('Timber.mp3')['results']
assert_not_equal(file_.path, '')
assert_equal(file_.path, path)
assert_equal(find[0]['guid_url'], None)
assert_equal(find[0]['deep_url'], deep_url)
def test_quickfiles_files_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 1)
assert find[0]['node_url'] == '/{}/quickfiles/'.format(quickfiles.creator._id)
def test_qatest_quickfiles_files_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
file = quickfiles_root.append_file('GreenLight.mp3')
tag = Tag(name='qatest')
tag.save()
file.tags.add(tag)
file.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
def test_quickfiles_spam_user_files_do_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
self.node.creator.disable_account()
self.node.creator.add_system_tag('spam_confirmed')
self.node.creator.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
|
icereval/osf.io
|
osf_tests/test_elastic_search.py
|
Python
|
apache-2.0
| 47,974
| 0.001772
|
#!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf)
#print new_pdf
#print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
except Exception,e:
raise Exception("Field %s: %s"%(k,str(e)))
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
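# A minimal client-side sketch (hypothetical field names; assumes the XML-RPC
# server registered below is running on localhost:9999):
#   import xmlrpclib
#   serv = xmlrpclib.ServerProxy("http://localhost:9999")
#   serv.pdf_fill("invoice_template.pdf", "invoice_filled.pdf",
#                 {"customer_name": "..."})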
def pdf_merge(pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
t0=time.time()
pdf=pdf1
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return pdf
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
|
jeffery9/mixprint_addons
|
ineco_thai_account/report/jy_serv.py
|
Python
|
agpl-3.0
| 1,612
| 0.031638
|
from __future__ import print_function
import math, nltk
from termcolor import colored
from analyze import generate_stopwords, sanitize
from vector import Vector
class NaiveBayesClassifier():
def __init__(self):
"""
        Creates the per-class frequency vectors, priors, stopword list and
        feature set, then trains on the labeled positive/negative tweet files.
"""
self.c = {"+" : Vector(), "-" : Vector()}
for vector in self.c.values():
vector.default = 1
self.classes = ["+", "-"]
self.prior = {"+" : 0.55, "-" : 0.45}
self.stopwords = generate_stopwords()
self.features = set()
f = open("data/features.txt", "r")
for line in f:
self.features.add(line.strip().lower())
f.close()
# Begin training
f_pos = open("data/train_pos.txt", "r")
f_neg = open("data/train_neg.txt", "r")
self.train("+", f_pos)
self.train("-", f_neg)
f_pos.close()
f_neg.close()
def train(self, sentiment, tweets):
"""
@param {string} sentiment = "+" || "-"
{iterable} tweets = file_with_tagged_tweets
@return None
"""
freq = self.c[sentiment]
total = 0.0
for tweet in tweets:
total += 1
words = sanitize(tweet, self.stopwords)
for word in words:
if word in self.features: # word in our pre-made features list
freq[word] += 100
for word in freq:
freq[word] = freq[word] / total
freq.default = 1/total
def posterior(self, sentiment, sanitized_tweet):
"""
Computes the posterior (Bayesian Probability term) of a sanitized tweet
Probability model for a classifier is a conditional model
        p(C|F1,...,Fn) = ( p(C) p(F1,...,Fn|C) ) / p(F1,...,Fn)
...
In English, using Bayesian Probability terminology, the equation can be written as
prior * likelihood
posterior = --------------------
evidence
in our case, we have:
        p(sentiment | sanitized_tweet)
@param {string} sentiment = "+" or "-"
{set} sanitized_tweet = set of sanitized words in tweet
@return {float}
"""
#print "sanitized tweet = %s" % sanitized_tweet
#print math.log(self.prior[sentiment])
#print "self.prior[sentiment] = %s" % self.prior[sentiment]
p = math.log(self.prior[sentiment])
values = self.c[sentiment]
#print "%s : original p: %f" % (sentiment, p)
for word in sanitized_tweet:
if word in self.features: # word is in the features list, so apply the score for the feature based on the sentiment
p += math.log(values[word])
# print "%s : %f" % (word, math.log(values[word]))
else:
p += math.log(.1 - values[word])
# print "%s : %f" % (word, math.log(.1 - values[word]))
#print p
return p
'''
for feature in self.features:
#print "c[%s] = %s" % (feature, c[feature])
if feature in sanitized_tweet:
p += math.log(1 - c[feature]) # add feature's score per the sentiment
else:
p += math.log(1 - c[feature])
return p
'''
def classify(self, tweet, verbose=False, eval=False):
"""
        Classifies a text's sentiment given the posterior of its class
Picks the largest posterior between that of "+" and "-"
        However, if there is not enough confidence (i.e. if posterior(c1|tweet) < 2*posterior(c2|tweet)),
then we classify as neutral ("~") because we don't have conclusive evidence
@param {string} tweet
@return {string} sentiment = "+" || "-" || "~"
"""
sanitized = sanitize(tweet, self.stopwords)
# print sanitized
sentiment = {}
bigrams = nltk.bigrams(sanitized)
trigrams = nltk.trigrams(sanitized)
if len(sanitized) <= 22:
for s in self.classes:
sentiment[s] = self.posterior(s, sanitized) # Calculate posterior for positive and negative sentiment
if verbose: print(s, sanitized, self.posterior(s, sanitized))
elif len(sanitized) == 23:
for s in self.classes:
for pair in bigrams:
sentiment[s] = self.posterior(s, pair)
if verbose: print (s, pair, self.posterior(s, pair))
else:
# use trigram model
for s in self.classes:
for tri in trigrams:
sentiment[s] = self.posterior(s, tri)
if verbose: print (s, tri, self.posterior(s, tri))
positive = sentiment["+"] # Get calculated posterior of positive sentiment
negative = sentiment["-"] # Get calculated posterior fo negative sentiment
#print "positive: %s negative: %s" % (positive, negative)
if "not" in sanitized or "despite" in sanitized:
            if positive > math.log(1.3) + negative:
negative = abs(negative)
elif negative > math.log(9) + positive:
positive = abs(positive)
if verbose: print("positive: %f negative: %f" % (positive, negative))
        if positive > math.log(1.3) + negative:
if eval: return "+"
else: print(colored('+', 'green'))
elif negative > math.log(.9)+positive:
if eval: return "-"
else: print(colored('-', 'red'))
else:
if eval: return "~"
else: print(colored('~', 'white'))
def evaluate(self):
totalp = totaln = 0
t = w = 0 # total = 0, wrong = 0
fp = fn = 0 # false positive = 0, false negative = 0
for tweet in open("data/verify_pos.txt"):
t += 1.0
totalp += 1.0
e = self.classify(tweet, False, eval=True)
if e != "+":
if e == "-": fn += 1
w += 1.0
tp = t - w # true positive
print(colored('Positive', 'green'), end="")
print(" - accuracy: %.2f%%" % self.accuracy(w, t)) # make function that displays values correctly
t = w = 0
for tweet in open("data/verify_neg.txt"):
t += 1.0
totaln += 1.0
e = self.classify(tweet, False, eval=True)
if e != "-":
if e == "+": fp += 1
w += 1.0
tn = t - w # true negative
print(colored('Negative', 'red'), end="")
print(" - accuracy: %.2f%%" % self.accuracy(w, t))
w = t = 0
for tweet in open("data/verify_neutral.txt"):
t += 1.0
if "~" != self.classify(tweet, verbose=False, eval=True):
w += 1.0
# print "Neutral - accuracy: %s" % self.accuracy(w, t)
# Precision
# = TP / (TP + FP)
precision = (tp / (tp + fp))
print(colored("\nPrecision: ", "magenta") + "%.2f" % precision)
# Recall
# = TP / (TP + FN)
recall = (tp / (tp + fn))
print(colored("Recall: ", "magenta") + "%.2f" % recall)
# Accuracy
# = (TP + TN) / (P + N)
accuracy = (tp + tn) / (totalp + totaln) * 100
print(colored("Accuracy: ", "magenta") + "%.2f%%" % accuracy)
# F-score
# measure of test's accuracy - considers both the precision and recall
f_score = 2 * (precision*recall) / (precision+recall)
print(colored("\nF-Measure: ", "cyan") + "%.2f" % f_score)
def accuracy(self, w, t):
return (1 - (w/t)) * 100
def __repr__(self):
pass
c = NaiveBayesClassifier()
|
trivedi/sentapy
|
NaiveBayes.py
|
Python
|
mit
| 7,810
| 0.009091
|
# -*- coding: utf-8 -*-
# © 2015 Eficent Business and IT Consulting Services S.L. -
# Jordi Ballester Alomar
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import models
|
Eficent/odoo-operating-unit
|
account_invoice_merge_operating_unit/__init__.py
|
Python
|
agpl-3.0
| 202
| 0
|
'''
compile_test.py - check pyximport functionality with pysam
==========================================================
test script for checking if compilation against
pysam and tabix works.
'''
# clean up previous compilation
import os
import unittest
import pysam
from TestUtils import make_data_files, BAM_DATADIR, TABIX_DATADIR
def setUpModule():
make_data_files(BAM_DATADIR)
make_data_files(TABIX_DATADIR)
try:
os.unlink('tests/_compile_test.c')
os.unlink('tests/_compile_test.pyxbldc')
except OSError:
pass
import pyximport
pyximport.install(build_in_temp=False)
import _compile_test
class BAMTest(unittest.TestCase):
input_filename = os.path.join(BAM_DATADIR, "ex1.bam")
def testCount(self):
nread = _compile_test.testCountBAM(
pysam.Samfile(self.input_filename))
self.assertEqual(nread, 3270)
class GTFTest(unittest.TestCase):
input_filename = os.path.join(TABIX_DATADIR, "example.gtf.gz")
def testCount(self):
nread = _compile_test.testCountGTF(
pysam.Tabixfile(self.input_filename))
self.assertEqual(nread, 237)
if __name__ == "__main__":
unittest.main()
|
pysam-developers/pysam
|
tests/compile_test.py
|
Python
|
mit
| 1,181
| 0.001693
|
""" Title: Ch3LpfPlotResponse - Chapter 3: Plot filter response
Author: Ricardo Alejos
Date: 2016-09-20
Description: Plots the micro-strip filter response against the specifications
Version: 1.0.0
Comments: -
"""
# Import Python's built-in modules
import csv as _csv
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import math as _math
# Add project root directory to sys.path so other modules can be imported
_projectRoot = _os.path.abspath(__file__ + "\\..\\..\\..")
if _projectRoot not in _sys.path:
_sys.path.insert(0, _projectRoot)
_strThisFileName = _os.path.splitext(_os.path.basename(__file__))[0]
import pkg.Algorithm.SimAnnMin as _sam
import pkg.ObjectiveFunctions.MsLpf as _lpf
import pkg.Helpers.MatlabFunctions as _mf
def _initLogger():
global logger
logger = _logging.getLogger(_strThisFileName)
logger.setLevel(_logging.DEBUG)
map(logger.removeHandler, logger.handlers[:])
ch = _logging.StreamHandler(_sys.stdout)
ch.setLevel(_logging.INFO)
fh = _logging.FileHandler(_strThisFileName + ".log")
fh.setLevel(_logging.DEBUG)
formatter = _logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
logger.debug("A new logger session has started.")
_initLogger()
cases = (
dict(
title = ("Filter response using","\\it{n}\\rm=2 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
n = 2,
w1 = 1.222,
l1 = 5.4050,
w2 = 2.5944,
filename = "ch3_fresp_n2_x0"
),
dict(
title = ("Filter response using","\\it{n}\\rm=4 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
n = 4,
w1 = 1.222,
l1 = 5.4050,
w2 = 2.5944,
filename = "ch3_fresp_n4_x0"
),
dict(
title = ("Filter response using","\\it{n}\\rm=8 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
n = 8,
w1 = 1.222,
l1 = 5.4050,
w2 = 2.5944,
filename = "ch3_fresp_n8_x0"
),
dict(
title = ("Filter response using","\\it{n}\\rm=2 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
n = 2,
w1 = 1.5242,
l1 = 4.9000,
w2 = 2.4500,
filename = "ch3_fresp_n2_xopt"
),
dict(
title = ("Filter response using","\\it{n}\\rm=4 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
n = 4,
w1 = 1.4564,
l1 = 4.9000,
w2 = 2.4500,
filename = "ch3_fresp_n4_xopt"
),
dict(
title = ("Filter response using","\\it{n}\\rm=8 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
n = 8,
w1 = 1.3750,
l1 = 4.9000,
w2 = 3.0625,
filename = "ch3_fresp_n8_xopt"
),
)
def PlotResponse(w1, l1, w2, n, title, filename = None):
resp = _lpf.getRawResponseData([w1, l1, w2], n)
freq = resp["freq"]
s21m = [_math.sqrt(resp["s21r"][i]**2+resp["s21i"][i]**2) for i in range(len(freq))]
spec = (([0,0,6e9,6e9],[0.85,0.9,0.9,0.85]), ([8e9,8e9,10e9,10e9],[0.15,0.1,0.1,0.15]))
_mf.PlotVsSpecs(
freq,
s21m,
spec,
title,
"Frequency (Hz)",
"|S_{21}|",
filename
)
def main():
with open(_strThisFileName + "_" + _time.strftime('%Y%m%d%H%M%S') + ".csv", "wb") as fhReport:
lRptFld = [
"k",
"iter",
"ui",
"uo"
]
cwReport = _csv.DictWriter(fhReport, lRptFld)
cwReport.writeheader()
lstSaCfg = ["TTT"]
numItn = 50
dicTmeFun = dict(
T = _sam.TmeFns.typical(numItn),
F = _sam.TmeFns.fast(numItn),
S = _sam.TmeFns.slow(numItn)
)
dicSseFun = dict(
T = _sam.SseFns.typical,
F = _sam.SseFns.fast,
S = _sam.SseFns.slow
)
dicAceFun = dict(
T = _sam.AceFns.typical,
F = _sam.AceFns.fast,
S = _sam.AceFns.slow
)
lstK = [8] #[2,4,8]
for strSaCfg in lstSaCfg:
for k in lstK:
uk = _lpf.getInterfaceFunction(k)
logger.info("Running SAM using the %s configuration."%strSaCfg)
dReportRow = dict((key, None) for key in lRptFld)
dReportRow["k"] = k
SamObj = _sam.SimAnnMin()
SamObj.setObeFun(uk)
SamObj.setTmeLst(dicTmeFun[strSaCfg[0]])
SamObj.setSseFun(dicSseFun[strSaCfg[1]])
SamObj.setAceFun(dicAceFun[strSaCfg[2]])
SamObj.setX0([-0.7,0.5,0.1])
SamObj.runAll()
lstUi = SamObj._lUi
lstUo = SamObj._lUo
lstIter = range(len(lstUi))
_mf.Plot(lstIter,
lstUi,
"Evolution of \\it{u}\\rm_{%d} during SA optimiziation."%(k),
"Iteration",
"\\it{u}\\rm_{%d}"%(k),
"sa-evol_u%dall"%(k))
_mf.Plot(lstIter,
lstUo,
"Evolution of \\it{u}\\rm_{%d}* during SA optimization"%(k),
"Iteration",
"\\it{u}\\rm_{%d}"%(k),
"sa-evol_u%dopt"%(k))
for iter in lstIter:
dReportRow["iter"] = iter
dReportRow["ui"] = "%0.4f"%lstUi[iter]
dReportRow["uo"] = "%0.4f"%lstUo[iter]
logger.info("Finished processing of u%d"%k)
cwReport.writerow(dReportRow)
main()
|
ricardoalejos/RalejosMsrElcDsn
|
SmdAngPtnSnt/pkg/ExpFlows/Ch3LpfPlotSaEvoVsRes.py
|
Python
|
mit
| 5,718
| 0.03148
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
# Support for fake joystick/gamepad during development
# if no 'real' joystick/gamepad is available use keyboard emulation
# 'ctrl' + 'alt' + numberKey
from __future__ import absolute_import, division, print_function
from psychopy import event
class Joystick(object):
def __init__(self, device_number):
self.device_number = device_number
self.numberKeys = ['0','1','2','3','4','5','6','7','8','9']
self.modifierKeys = ['ctrl','alt']
self.mouse = event.Mouse()
def getNumButtons(self):
return(len(self.numberKeys))
def getAllButtons(self):
keys = event.getKeys(keyList=self.numberKeys, modifiers=True)
values = [key for key, modifiers in keys if all([modifiers[modKey] for modKey in self.modifierKeys])]
self.state = [key in values for key in self.numberKeys]
mouseButtons = self.mouse.getPressed()
self.state[:len(mouseButtons)] = [a or b != 0 for (a,b) in zip(self.state, mouseButtons)]
return(self.state)
def getX(self):
(x, y) = self.mouse.getPos()
return x
def getY(self):
(x, y) = self.mouse.getPos()
return y
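# Usage sketch (assumes a PsychoPy window/event loop is already polling input):
#   joy = Joystick(0)
#   buttons = joy.getAllButtons()   # e.g. ctrl+alt+3 emulates button 3
#   x, y = joy.getX(), joy.getY()   # mouse position stands in for the stick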
|
hoechenberger/psychopy
|
psychopy/experiment/components/joystick/mouseJoystick.py
|
Python
|
gpl-3.0
| 1,348
| 0.009644
|
# -*- coding: Latin-1 -*-
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See License.txt for complete terms.
# file object -> CybOX File Object mappings
file_object_mappings = {'file_format': 'file_format',
'type': 'type',
'file_name': 'file_name',
'file_path': 'file_path',
'size': 'size_in_bytes',
'magic_number': 'magic_number',
'file_extension': 'file_extension',
'entropy': 'peak_entropy'}
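# Usage sketch (hypothetical pefile-derived dict): the mapping renames source
# keys to their CybOX File Object equivalents, e.g.
#   cybox_fields = dict((file_object_mappings[k], v)
#                       for k, v in parsed.items() if k in file_object_mappings)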
|
MAECProject/pefile-to-maec
|
pefile_to_maec/mappings/file_object.py
|
Python
|
bsd-3-clause
| 586
| 0.001706
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###################################################################
#
# Company: Squeeze Studio Animation
#
# Author: Danilo Pinheiro
# Date: 2014-02-10
# Updated: 2014-02-24
#
# sqStickyLipsSetup.py
#
# This script will create a Sticky Lips setup.
#
#######################################
# importing libraries:
import maya.cmds as cmds
import maya.mel as mel
from functools import partial
# global variables to this module:
CLASS_NAME = "StickyLips"
TITLE = "m061_stickyLips"
DESCRIPTION = "m062_stickyLipsDesc"
ICON = "/Icons/sq_stickyLips.png"
SQSL_VERSION = "1.0"
class StickyLips():
def __init__(self, dpUIinst, langDic, langName):
# redeclaring variables
self.dpUIinst = dpUIinst
self.langDic = langDic
self.langName = langName
# call main function
self.dpMain(self)
def dpMain(self, *args):
self.edgeList = []
self.baseCurve = []
self.baseCurveA = []
self.baseCurveB = []
self.mainCurveA = []
self.mainCurveB = []
self.curveLenght = 0
self.maxIter = 0
self.clusterList = []
self.receptList = []
self.optionCtrl = "Option_Ctrl"
self.wireNodeList = []
if cmds.window('sqStickyLipsWindow', query=True, exists=True):
cmds.deleteUI('sqStickyLipsWindow', window=True)
cmds.window('sqStickyLipsWindow', title='sqStickyLips - v'+str(SQSL_VERSION)+' - UI', widthHeight=(300, 200), menuBar=False, sizeable=False, minimizeButton=True, maximizeButton=False)
cmds.showWindow('sqStickyLipsWindow')
slLayoutColumn = cmds.columnLayout('slLayoutColumn', adjustableColumn=True)
cmds.text("Load meshes:", align="left", parent=slLayoutColumn)
slLayoutA = cmds.rowColumnLayout('slLayoutA', numberOfColumns=2, columnWidth=[(1, 100), (2, 160)], parent=slLayoutColumn)
cmds.button(label="Recept A >>", command=partial(self.sqSLLoad, "A"), parent=slLayoutA)
self.receptA_TF = cmds.textField(parent=slLayoutA)
cmds.button(label="Recept B >>", command=partial(self.sqSLLoad, "B"), parent=slLayoutA)
self.receptB_TF = cmds.textField(parent=slLayoutA)
cmds.text("Select a closed edgeLoop and press the run button", parent=slLayoutColumn)
cmds.button(label="RUN - Generate Sticky Lips", command=self.sqGenerateStickyLips, backgroundColor=[0.3, 1, 0.7], parent=slLayoutColumn)
def sqSLLoad(self, recept, *args):
if recept == "A":
cmds.textField(self.receptA_TF, edit=True, text=cmds.ls(selection=True)[0])
if recept == "B":
cmds.textField(self.receptB_TF, edit=True, text=cmds.ls(selection=True)[0])
def sqGetRecepts(self, receptA=None, receptB=None, *args):
self.receptList = []
self.receptList.append(receptA)
self.receptList.append(receptB)
if receptA == None:
receptAName = cmds.textField(self.receptA_TF, query=True, text=True)
if cmds.objExists(receptAName):
self.receptList[0] = receptAName
if receptB == None:
receptBName = cmds.textField(self.receptB_TF, query=True, text=True)
if cmds.objExists(receptBName):
self.receptList[1] = receptBName
def sqGenerateCurves(self, *args):
self.edgeList = cmds.ls(selection=True, flatten=True)
if not self.edgeList == None and not self.edgeList == [] and not self.edgeList == "":
self.baseCurve = cmds.polyToCurve(name="baseCurve", form=2, degree=1)[0]
cmds.select(self.baseCurve+".ep[*]")
cmds.insertKnotCurve(cmds.ls(selection=True, flatten=True), constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
toDeleteList = []
p = 2
for k in range((sideA+2), (sideB-1)):
if p%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(k)+"]")
toDeleteList.append(self.baseCurve+".cv["+str(k+len(pointListA)-1)+"]")
p = p+1
q = 2
m = sideA-2
if m >= 0:
while m >= 0:
if not m == sideA and not m == sideB:
if q%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(m)+"]")
m = m-1
q = q+1
cmds.delete(toDeleteList)
cmds.insertKnotCurve([self.baseCurve+".u["+str(len(pointListA)-1)+"]", self.baseCurve+".ep["+str(len(pointListA)-1)+"]"], constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
posListA, posListB = [], []
for i in range(0, len(pointListA)-1):
posListA.append(cmds.xform(pointListA[i], query=True, worldSpace=True, translation=True))
posListB.append(cmds.xform(pointListB[i], query=True, worldSpace=True, translation=True))
self.mainCurveA = cmds.curve(name="StickyLips_Main_A_Crv", degree=1, point=posListA)
self.mainCurveB = cmds.curve(name="StickyLips_Main_B_Crv", degree=1, point=posListB)
cmds.rename(cmds.listRelatives(self.mainCurveA, children=True, shapes=True)[0], self.mainCurveA+"Shape")
cmds.rename(cmds.listRelatives(self.mainCurveB, children=True, shapes=True)[0], self.mainCurveB+"Shape")
cmds.select(self.mainCurveA+".cv[*]")
self.curveLenght = len(cmds.ls(selection=True, flatten=True))
cmds.select(clear=True)
self.sqCheckCurveDirection(self.mainCurveA)
self.sqCheckCurveDirection(self.mainCurveB)
self.baseCurveA = cmds.duplicate(self.mainCurveA, name=self.mainCurveA.replace("_Main_", "_Base_"))[0]
self.baseCurveB = cmds.duplicate(self.mainCurveB, name=self.mainCurveB.replace("_Main_", "_Base_"))[0]
cmds.delete(self.baseCurve)
self.maxIter = len(posListA)
cmds.group(self.mainCurveA, self.mainCurveB, self.baseCurveA, self.baseCurveB, name="StickyLips_StaticData_Grp")
else:
mel.eval("warning \"Please, select an closed edgeLoop.\";")
def sqCheckCurveDirection(self, thisCurve, *args):
posMinX = cmds.xform(thisCurve+".cv[0]", query=True, worldSpace=True, translation=True)[0]
posMaxX = cmds.xform(thisCurve+".cv["+str(self.curveLenght-1)+"]", query=True, worldSpace=True, translation=True)[0]
if posMinX > posMaxX:
cmds.reverseCurve(thisCurve, constructionHistory=False, replaceOriginal=True)
def sqGetPointLists(self, *args):
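        # Locate the two lip-corner CVs (minimum and maximum world-space X)
        # and split the closed loop into an upper point list (A) and a lower
        # point list (B) running between them.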
cmds.select(self.baseCurve+".cv[*]")
pointList = cmds.ls(selection=True, flatten=True)
minX = 0
maxX = 0
sideA = 0
sideB = 0
for i in range(0, len(pointList)):
pointPosX = cmds.xform(pointList[i], query=True, worldSpace=True, translation=True)[0]
if pointPosX < minX:
minX = pointPosX
sideA = i
elif pointPosX > maxX:
maxX = pointPosX
sideB = i
if sideA > sideB:
sideC = sideA
sideA = sideB
sideB = sideC
pointListA = pointList[sideA:(sideB+1)]
pointListB = pointList[sideB:]
for j in range(0, (sideA+1)):
pointListB.append(pointList[j])
return pointListA, pointListB, sideA, sideB
def sqCreateClusters(self, curveA, curveB, *args):
self.clusterList = []
for i in range(1, self.curveLenght-1):
self.clusterList.append(cmds.cluster([curveA+".cv["+str(i)+"]", curveB+".cv["+str(i)+"]"], name="StickyLips_"+str(`i-1`)+"_Cls")[1])
return self.clusterList
def sqGenerateMuscleLocators(self, *args):
muscleLoaded = True
if not cmds.pluginInfo('MayaMuscle.mll', query=True, loaded=True):
muscleLoaded = False
try:
cmds.loadPlugin('MayaMuscle.mll')
muscleLoaded = True
except:
print "Error: Can not load the Maya Muscle plugin!"
pass
if muscleLoaded:
minIndex = 0
minPosX = 1000000000000000 # just to avoid non centered characters
minPosId = 0
vertexPairList = []
muscleLocatorList = []
for e, edgeName in enumerate(self.edgeList):
tempCompList = cmds.polyListComponentConversion(edgeName, fromEdge=True, toVertex=True)
tempExpList = cmds.filterExpand(tempCompList, selectionMask=31, expand=True)
vertexPairList.append(tempExpList)
edgePosA = cmds.xform(tempExpList[0], query=True, worldSpace=True, translation=True)[0]
edgePosB = cmds.xform(tempExpList[1], query=True, worldSpace=True, translation=True)[0]
if edgePosA < minPosX:
minIndex = e
minPosX = edgePosA
minPosId = 0
if edgePosB < minPosX:
minIndex = e
minPosX = edgePosB
minPosId = 1
usedIndexList = []
usedIndexList.append(minIndex)
lastIndexUp = minIndex
lastIndexDown = 0
upEdgeList = []
upEdgeList.append(self.edgeList[minIndex])
downEdgeList = []
for i in range(0, len(vertexPairList)-1):
if not i == minIndex:
if vertexPairList[i][0] in vertexPairList[minIndex][minPosId] or vertexPairList[i][1] in vertexPairList[minIndex][minPosId]:
downEdgeList.append(self.edgeList[i])
usedIndexList.append(i)
lastIndexDown = i
for i in range(0, self.maxIter-2):
for j in range(0, len(vertexPairList)):
if not j in usedIndexList:
if vertexPairList[j][0] in vertexPairList[lastIndexUp] or vertexPairList[j][1] in vertexPairList[lastIndexUp]:
upEdgeList.append(self.edgeList[j])
usedIndexList.append(j)
lastIndexUp = j
break
for j in range(0, len(vertexPairList)):
if not j in usedIndexList:
if vertexPairList[j][0] in vertexPairList[lastIndexDown] or vertexPairList[j][1] in vertexPairList[lastIndexDown]:
downEdgeList.append(self.edgeList[j])
usedIndexList.append(j)
lastIndexDown = j
break
upMinusDown = len(upEdgeList) - len(downEdgeList)
downMinusUp = len(downEdgeList) - len(upEdgeList)
if upMinusDown > 1:
for i in range(0, upMinusDown):
if not len(upEdgeList) == (self.maxIter-3):
downEdgeList.append(upEdgeList[len(upEdgeList)-1])
upEdgeList = upEdgeList[:-1]
if downMinusUp > 1:
for i in range(0, downMinusUp):
if not len(upEdgeList) == (self.maxIter-3):
upEdgeList.append(downEdgeList[len(downEdgeList)-1])
downEdgeList = downEdgeList[:-1]
upEdgeList = upEdgeList[:self.maxIter-1]
downEdgeList = downEdgeList[:self.maxIter-1]
for k in range(0, self.maxIter-2):
cmds.select([upEdgeList[k], downEdgeList[k]])
# cmds.refresh()
# cmds.pause(seconds=1)
mel.eval("cMuscleSurfAttachSetup();")
msa = cmds.rename("StickLips_"+str(k)+"_MSA")
cmds.disconnectAttr(msa+"Shape.outRotate", msa+".rotate")
cmds.setAttr(msa+".rotateX", 0)
cmds.setAttr(msa+".rotateY", 0)
cmds.setAttr(msa+".rotateZ", 0)
muscleLocatorList.append(msa)
cmds.parent(self.clusterList[k], msa, absolute=True)
    def sqSetClustersZeroScale(self, *args):
if self.clusterList:
for item in self.clusterList:
cmds.setAttr(item+".scaleX", 0)
cmds.setAttr(item+".scaleY", 0)
cmds.setAttr(item+".scaleZ", 0)
def sqCreateStikyLipsDeformers(self, *args):
baseMesh = None
mainCurveList = [self.mainCurveA, self.mainCurveB]
for mainCurve in mainCurveList:
if baseMesh == None:
baseMesh = cmds.duplicate(self.receptList[0], name=self.receptList[0]+"Base")[0]
cmds.setAttr(baseMesh+".visibility", 0)
wrapNode = cmds.deformer(mainCurve, name="StickyLips_Wrap", type="wrap")[0]
try:
cmds.connectAttr(self.receptList[0]+".dropoff", wrapNode+".dropoff[0]", force=True)
cmds.connectAttr(self.receptList[0]+".inflType", wrapNode+".inflType[0]", force=True)
cmds.connectAttr(self.receptList[0]+".smoothness", wrapNode+".smoothness[0]", force=True)
cmds.connectAttr(self.receptList[0]+"Shape.worldMesh[0]", wrapNode+".driverPoints[0]", force=True)
except:
pass
cmds.connectAttr(baseMesh+"Shape.worldMesh[0]", wrapNode+".basePoints[0]", force=True)
cmds.connectAttr(mainCurve+"Shape.worldMatrix[0]", wrapNode+".geomMatrix", force=True)
cmds.setAttr(wrapNode+".maxDistance", 1)
cmds.setAttr(wrapNode+".autoWeightThreshold", 1)
cmds.setAttr(wrapNode+".exclusiveBind", 1)
baseCurveList = [self.baseCurveA, self.baseCurveB]
for c, baseCurve in enumerate(baseCurveList):
wireNode = cmds.wire(self.receptList[1], name=baseCurve+"_Wire", groupWithBase=False, crossingEffect=0, localInfluence=0)[0]
cmds.connectAttr(mainCurveList[c]+"Shape.worldSpace[0]", wireNode+".baseWire[0]", force=True)
cmds.connectAttr(baseCurve+"Shape.worldSpace[0]", wireNode+".deformedWire[0]", force=True)
self.wireNodeList.append(wireNode)
wireLocList = []
for i in range(0, self.maxIter):
wireLocList.append(baseCurve+".u["+str(i)+"]")
cmds.dropoffLocator(1, 1, wireNode, wireLocList)
def sqCreateStickyLipsCtrlAttr(self, *args):
if not cmds.objExists(self.optionCtrl):
cmds.circle(name=self.optionCtrl, constructionHistory=False)
cmds.addAttr(self.optionCtrl, longName='stickyLips', attributeType='bool')
cmds.setAttr(self.optionCtrl+'.stickyLips', edit=True, keyable=True)
for i in range(0, self.maxIter):
cmds.addAttr(self.optionCtrl, longName="stickyLipsWireLocator"+str(i), attributeType='float', keyable=False)
for i in range(0, self.maxIter):
for wireNode in self.wireNodeList:
cmds.connectAttr(self.optionCtrl+".stickyLipsWireLocator"+str(i), wireNode+".wireLocatorEnvelope["+str(i)+"]")
slTextCurve = cmds.textCurves(ch=False, font="Arial|w400|h-08", text="StickyLips", name="StickyLips_Label_Txt")[0]
if "Shape" in slTextCurve:
slTextCurve = cmds.rename(slTextCurve, slTextCurve[:slTextCurve.find("Shape")])
t = 0
slCharTransformList = cmds.listRelatives(slTextCurve, children=True, type="transform")
for charTransform in slCharTransformList:
txValue = cmds.getAttr(charTransform+".tx")
sLTextShapeList = cmds.listRelatives(charTransform, allDescendents=True, type="nurbsCurve")
for i, textShape in enumerate(sLTextShapeList):
textShape = cmds.rename(textShape, "StickyLips_Txt_"+str(t)+"Shape")
cmds.parent(textShape, slTextCurve, shape=True, relative=True)
cmds.move(txValue, 0, 0, textShape+".cv[:]", relative=True)
t = t+1
cmds.delete(charTransform)
cmds.setAttr(slTextCurve+".translateX", -0.1)
cmds.setAttr(slTextCurve+".translateY", 0.25)
cmds.setAttr(slTextCurve+".scaleX", 0.1)
cmds.setAttr(slTextCurve+".scaleY", 0.1)
cmds.setAttr(slTextCurve+".scaleZ", 0.1)
cmds.setAttr(slTextCurve+".template", 1)
cmds.makeIdentity(slTextCurve, apply=True)
sideNameList = ["L", "R"]
for side in sideNameList:
bg = cmds.circle(name=side+"_StickyLips_Bg", normal=(0,0,1), radius=1, degree=1, sections=4, constructionHistory=False)[0]
cmds.setAttr(bg+".rotateZ", 45)
cmds.setAttr(bg+".translateX", 0.5)
cmds.makeIdentity(bg, apply=True)
cmds.setAttr(bg+".scaleX", 0.85)
cmds.setAttr(bg+".scaleY", 0.15)
cmds.makeIdentity(bg, apply=True)
cmds.setAttr(bg+".template", 1)
self.sliderCtrl = cmds.circle(name=side+"_StickyLips_Ctrl", normal=(0,0,1), radius=0.1, degree=3, constructionHistory=False)[0]
attrToHideList = ['ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']
for attr in attrToHideList:
cmds.setAttr(self.sliderCtrl+"."+attr, edit=True, lock=True, keyable=False)
cmds.transformLimits(self.sliderCtrl, translationX=(0,1), enableTranslationX=(1,1))
distPos = 1.0 / self.maxIter
for i in range(0, self.maxIter):
lPosA = (i * distPos)
lPosB = (lPosA + distPos)
rPosB = 1 - (i * distPos)
rPosA = (rPosB - distPos)
if i > 0:
lPosA = lPosA - (distPos*0.33)
rPosA = rPosA - (distPos*0.33)
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[0]+"_StickyLips_Ctrl.translateX", driverValue=lPosA, value=0, inTangentType="linear", outTangentType="linear")
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[0]+"_StickyLips_Ctrl.translateX", driverValue=lPosB, value=1, inTangentType="linear", outTangentType="linear")
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[1]+"_StickyLips_Ctrl.translateX", driverValue=rPosA, value=0, inTangentType="linear", outTangentType="linear")
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[1]+"_StickyLips_Ctrl.translateX", driverValue=rPosB, value=1, inTangentType="linear", outTangentType="linear")
lSliderGrp = cmds.group(sideNameList[0]+"_StickyLips_Ctrl", sideNameList[0]+"_StickyLips_Bg", name=sideNameList[0]+"_StickyLips_Ctrl_Grp")
rSliderGrp = cmds.group(sideNameList[1]+"_StickyLips_Ctrl", sideNameList[1]+"_StickyLips_Bg", name=sideNameList[1]+"_StickyLips_Ctrl_Grp")
cmds.setAttr(rSliderGrp+".rotateZ", 180)
cmds.setAttr(rSliderGrp+".translateY", -0.25)
sliderGrp = cmds.group(lSliderGrp, rSliderGrp, slTextCurve, name="StickyLips_Ctrl_Grp")
def sqGenerateStickyLips(self, *args):
self.sqGetRecepts()
if self.receptList[0] == None or self.receptList[1] == None:
mel.eval("warning \"Please, load ReceptA and ReceptB targets to continue.\";")
else:
self.sqGenerateCurves()
self.sqCreateClusters(self.baseCurveA, self.baseCurveB)
self.sqSetClustersZeroScale()
self.sqGenerateMuscleLocators()
self.sqCreateStikyLipsDeformers()
self.sqCreateStickyLipsCtrlAttr()
cmds.select(clear=True)
|
SqueezeStudioAnimation/dpAutoRigSystem
|
dpAutoRigSystem/Extras/sqStickyLipsSetup.py
|
Python
|
gpl-2.0
| 20,524
| 0.007698
|
#!/usr/bin/env python3
import arrow
import math
from . import statnett
from . import ENTSOE
from . import DK
import logging
import pandas as pd
import requests
def fetch_production(zone_key='NL', session=None, target_datetime=None,
logger=logging.getLogger(__name__), energieopwek_nl=True):
if target_datetime is None:
target_datetime = arrow.utcnow()
else:
target_datetime = arrow.get(target_datetime)
r = session or requests.session()
consumptions = ENTSOE.fetch_consumption(zone_key=zone_key,
session=r,
target_datetime=target_datetime,
logger=logger)
if not consumptions:
return
for c in consumptions:
del c['source']
df_consumptions = pd.DataFrame.from_dict(consumptions).set_index(
'datetime')
# NL has exchanges with BE, DE, NO, GB, DK-DK1
exchanges = []
for exchange_key in ['BE', 'DE', 'GB']:
zone_1, zone_2 = sorted([exchange_key, zone_key])
exchange = ENTSOE.fetch_exchange(zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger)
if not exchange:
return
exchanges.extend(exchange or [])
# add NO data, fetch once for every hour
# This introduces an error, because it doesn't use the average power flow
# during the hour, but rather only the value during the first minute of the
# hour!
zone_1, zone_2 = sorted(['NO', zone_key])
exchange_NO = [statnett.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=dt.datetime,
logger=logger)
for dt in arrow.Arrow.range(
'hour',
arrow.get(min([e['datetime']
for e in exchanges])).replace(minute=0),
arrow.get(max([e['datetime']
for e in exchanges])).replace(minute=0))]
exchanges.extend(exchange_NO)
# add DK1 data (only for dates after operation)
if target_datetime > arrow.get('2019-08-24', 'YYYY-MM-DD') :
zone_1, zone_2 = sorted(['DK-DK1', zone_key])
df_dk = pd.DataFrame(DK.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=target_datetime,
logger=logger))
        # Because other exchanges and consumption data are only available per hour,
        # we floor the timestamp to the hour and group by hour, averaging netFlow
df_dk['datetime'] = df_dk['datetime'].dt.floor('H')
exchange_DK = df_dk.groupby(['datetime']).aggregate({'netFlow' : 'mean',
'sortedZoneKeys': 'max', 'source' : 'max'}).reset_index()
# because averaging with high precision numbers leads to rounding errors
exchange_DK = exchange_DK.round({'netFlow': 3})
exchanges.extend(exchange_DK.to_dict(orient='records'))
# We want to know the net-imports into NL, so if NL is in zone_1 we need
# to flip the direction of the flow. E.g. 100MW for NL->DE means 100MW
# export to DE and needs to become -100MW for import to NL.
for e in exchanges:
if(e['sortedZoneKeys'].startswith('NL->')):
e['NL_import'] = -1 * e['netFlow']
else:
e['NL_import'] = e['netFlow']
del e['source']
del e['netFlow']
df_exchanges = pd.DataFrame.from_dict(exchanges).set_index('datetime')
# Sum all exchanges to NL imports
df_exchanges = df_exchanges.groupby('datetime').sum()
# Fill missing values by propagating the value forward
df_consumptions_with_exchanges = df_consumptions.join(df_exchanges).fillna(
method='ffill', limit=3) # Limit to 3 x 15min
# Load = Generation + netImports
# => Generation = Load - netImports
df_total_generations = (df_consumptions_with_exchanges['consumption']
- df_consumptions_with_exchanges['NL_import'])
# Fetch all production
# The energieopwek_nl parser is backwards compatible with ENTSOE parser.
# Because of data quality issues we switch to using energieopwek, but if
# data quality of ENTSOE improves we can switch back to using a single
# source.
productions_ENTSOE = ENTSOE.fetch_production(zone_key=zone_key, session=r,
target_datetime=target_datetime, logger=logger)
if energieopwek_nl:
productions_eopwek = fetch_production_energieopwek_nl(session=r,
target_datetime=target_datetime, logger=logger)
# For every production value we look up the corresponding ENTSOE
# values and copy the nuclear, gas, coal, biomass and unknown production.
productions = []
for p in productions_eopwek:
entsoe_value = next((pe for pe in productions_ENTSOE
if pe["datetime"] == p["datetime"]), None)
if entsoe_value:
p["production"]["nuclear"] = entsoe_value["production"]["nuclear"]
p["production"]["gas"] = entsoe_value["production"]["gas"]
p["production"]["coal"] = entsoe_value["production"]["coal"]
p["production"]["biomass"] = entsoe_value["production"]["biomass"]
p["production"]["unknown"] = entsoe_value["production"]["unknown"]
productions.append(p)
else:
productions = productions_ENTSOE
if not productions:
return
# Flatten production dictionaries (we ignore storage)
for p in productions:
        # if for some reason there's no unknown value
if not 'unknown' in p['production'] or p['production']['unknown'] == None:
p['production']['unknown'] = 0
Z = sum([x or 0 for x in p['production'].values()])
# Only calculate the difference if the datetime exists
# If total ENTSOE reported production (Z) is less than total generation
# (calculated from consumption and imports), then there must be some
# unknown production missing, so we add the difference.
# The difference can actually be negative, because consumption is based
# on TSO network load, but locally generated electricity may never leave
# the DSO network and be substantial (e.g. Solar).
if p['datetime'] in df_total_generations and Z < df_total_generations[p['datetime']]:
p['production']['unknown'] = round((
df_total_generations[p['datetime']] - Z + p['production']['unknown']), 3)
# Filter invalid
# We should probably add logging to this
return [p for p in productions if p['production']['unknown'] > 0]
def fetch_production_energieopwek_nl(session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
if target_datetime is None:
target_datetime = arrow.utcnow()
# Get production values for target and target-1 day
df_current = get_production_data_energieopwek(
target_datetime, session=session)
df_previous = get_production_data_energieopwek(
target_datetime.shift(days=-1), session=session)
# Concat them, oldest first to keep chronological order intact
df = pd.concat([df_previous, df_current])
output = []
base_time = arrow.get(target_datetime.date(), 'Europe/Paris').shift(days=-1).to('utc')
for i, prod in enumerate(df.to_dict(orient='records')):
output.append(
{
'zoneKey': 'NL',
'datetime': base_time.shift(minutes=i*15).datetime,
'production': prod,
'source': 'energieopwek.nl, entsoe.eu'
}
)
return output
def get_production_data_energieopwek(date, session=None):
r = session or requests.session()
    # The API returns values per day: from local midnight up to the most recent
    # round 10 minutes if the requested date is today, or for the entire day if
    # it is in the past. 'sid' can be anything.
url = 'http://energieopwek.nl/jsonData.php?sid=2ecde3&Day=%s' % date.format('YYYY-MM-DD')
response = r.get(url)
obj = response.json()
production_input = obj['TenMin']['Country']
# extract the power values in kW from the different production types
# we only need column 0, 1 and 3 contain energy sum values
df_solar = pd.DataFrame(production_input['Solar']) .drop(['1','3'], axis=1).astype(int).rename(columns={"0" : "solar"})
df_offshore = pd.DataFrame(production_input['WindOffshore']).drop(['1','3'], axis=1).astype(int)
df_onshore = pd.DataFrame(production_input['Wind']) .drop(['1','3'], axis=1).astype(int)
# We don't differentiate between onshore and offshore wind so we sum them
    # together and build a single data frame with named columns
df_wind = df_onshore.add(df_offshore).rename(columns={"0": "wind"})
df = pd.concat([df_solar, df_wind], axis=1)
# resample from 10min resolution to 15min resolution to align with ENTSOE data
# we duplicate every row and then group them per 3 and take the mean
df = pd.concat([df]*2).sort_index(axis=0).reset_index(drop=True).groupby(by=lambda x : math.floor(x/3)).mean()
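    # e.g. six 10-minute rows duplicated to twelve, then grouped in threes,
    # yields four 15-minute rows covering the same hour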
# Convert kW to MW with kW resolution
df = df.apply(lambda x: round(x / 1000, 3))
return df
if __name__ == '__main__':
print(fetch_production())
|
corradio/electricitymap
|
parsers/NL.py
|
Python
|
gpl-3.0
| 9,768
| 0.005119
|
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from sklearn.preprocessing import StandardScaler, normalize
import sys
import common
FACT = 'pmi' # nmf/pmi_wl/pmi_wp/pmi_wlp
DIM = 200
DATASET = 'MSDmm'
WINDOW = 1
NSAMPLES = 'all' #all
MAX_N_SCALER = 300000
N_PATCHES = 3
def scale(X, scaler=None, max_N=MAX_N_SCALER):
shape = X.shape
X.shape = (shape[0], shape[2] * shape[3])
if not scaler:
scaler = StandardScaler()
N = pd.np.min([len(X), max_N]) # Limit the number of patches to fit
scaler.fit(X[:N])
X = scaler.transform(X)
X.shape = shape
return X, scaler
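# Usage sketch: X holds patches shaped (n_patches, 1, bins, frames). scale()
# flattens each patch, fits a StandardScaler on at most MAX_N_SCALER rows,
# standardizes everything, then restores the original shape:
#   X, scaler = scale(X)                # fit on (a subset of) X, then transform
#   X_val, _ = scale(X_val, scaler)     # reuse the fitted scaler later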
def load_X(args):
data_path = '../data/patches_%s_%s/' % (DATASET, args.window)
progress_update = 1
data_files = glob.glob(os.path.join(data_path, "*.npy"))
#songs_in = set(open(common.DATASETS_DIR+'/trainset_%s.tsv' %
# (args.dataset)).read().splitlines())
if len(data_files) == 0:
raise ValueError("Error: Empty directory %s" % data_path)
index_factors = set(open(common.DATASETS_DIR+'/items_index_train_'+DATASET+'.tsv').read().splitlines())
data_files_in = []
for file in data_files:
filename = file[file.rfind('/')+1:-4]
item_id, npatch = filename.split('_')
if int(npatch) < args.npatches and item_id in index_factors:
data_files_in.append(file)
all_X = []
songs_dataset = []
X_mbatch = np.load(data_files_in[0])
X = np.zeros((len(data_files_in),1,X_mbatch.shape[0],X_mbatch.shape[1]))
for i, data_file in enumerate(data_files_in):
song_id = data_file[data_file.rfind('/')+1:data_file.rfind('_')]
X_mbatch = np.load(data_file)
X[i,0,:,:] = X_mbatch
#if len(all_Y) == 0:
# plt.imshow(X_mbatch,interpolation='nearest',aspect='equal')
# plt.show()
#all_X.append(X_mbatch.reshape(-1,X_mbatch.shape[0],X_mbatch.shape[1]))
songs_dataset.append(song_id)
if i % progress_update == 0:
sys.stdout.write("\rLoading Data: %.2f%%" % (100 * i / float(len(data_files_in))))
sys.stdout.flush()
sys.stdout.write("\rLoading Data: 100%")
sys.stdout.flush()
print "X data loaded"
output_suffix_X = '%s_%sx%s' % (args.dataset,args.npatches,args.window)
scaler_file=common.DATASETS_DIR+'/train_data/scaler_%s.pk' % output_suffix_X
X,scaler = scale(X)
pickle.dump(scaler,open(scaler_file,'wb'))
X_file = common.DATASETS_DIR+'/train_data/X_train_'+output_suffix_X
np.save(X_file,X)
fw=open(common.DATASETS_DIR+'/train_data/index_train_'+output_suffix_X+'.tsv','w')
fw.write("\n".join(songs_dataset))
def load_Y(args):
progress_update = 1
output_suffix_X = '%s_%sx%s' % (args.dataset,args.npatches,args.window)
index_X=open(common.DATASETS_DIR+'/train_data/index_train_'+output_suffix_X+'.tsv').read().splitlines()
song_factors=np.load(common.DATASETS_DIR+'/item_factors_%s_%s_%s.npy' % (args.fact,args.dim,args.dataset))
song_index=open(common.DATASETS_DIR+'/items_index_%s.tsv' % (args.dataset)).read().splitlines()
#print common.DATASETS_DIR+'/song_factors_%s_%s_%s.npy' % (args.fact,args.dim,args.dataset)
print len(song_index)
inv_song_index = dict()
for i,song_id in enumerate(song_index):
inv_song_index[song_id] = i
# Read all data into memory (this might need to change if data too large)
all_Y = []
songs_dataset = []
Y = np.zeros((len(index_X), int(args.dim)))
for i, song_id in enumerate(index_X):
# all_Y.append(song_factors[inv_song_index[song_id]])
Y[i, :] = song_factors[inv_song_index[song_id]]
if i % progress_update == 0:
sys.stdout.write("\rLoading Data: %.2f%%" %
(100 * i / float(len(index_X))))
sys.stdout.flush()
sys.stdout.write("\rLoading Data: 100%")
sys.stdout.flush()
print "Y data loaded"
output_suffix_Y = '%s_%s_%s_%sx%s' % (args.fact, args.dim, args.dataset,
args.npatches, args.window)
normalize(Y, copy=False)
Y_file = common.DATASETS_DIR+'/train_data/Y_train_'+output_suffix_Y
np.save(Y_file, Y)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Trains the model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d',
'--dataset',
dest="dataset",
type=str,
help='Dataset name',
default=DATASET)
parser.add_argument('-f',
'--fact',
dest="fact",
type=str,
help='Factorization method',
default=FACT)
parser.add_argument('-dim',
'--dim',
dest="dim",
type=str,
help='Factors dimensions',
default=DIM)
parser.add_argument('-w',
'--window',
dest="window",
type=str,
help='Patches window size in seconds',
default=WINDOW)
parser.add_argument('-np',
'--npatches',
dest="npatches",
type=str,
help='Number of patches',
default=N_PATCHES)
parser.add_argument('-x',
'--loadx',
dest="loadX",
help='Load X',
action='store_true',
default=False)
parser.add_argument('-y',
'--loady',
dest="loadY",
help='Load Y',
action='store_true',
default=False)
parser.add_argument('-all',
'--all',
dest="all_data",
help='All data, test and train set together',
action='store_true',
default=False)
args = parser.parse_args()
if args.loadX:
load_X(args)
if args.loadY:
load_Y(args)
|
sergiooramas/tartarus
|
src/load.py
|
Python
|
mit
| 6,438
| 0.005747
|
import sys
print("Hello, World!")
|
bikoheke/hacktoberfest
|
scripts/hello_world_amlaanb.py
|
Python
|
gpl-3.0
| 34
| 0.029412
|
from cantilever_divingboard import *
# We need to scale the parameters before applying the optimization algorithm
# Normally there are about 20 orders of magnitude between the dimensions and
# the doping concentration, so this is a critical step
# Run the script
freq_min = 1e3
freq_max = 1e5
omega_min = 100e3
initial_guess = (50e-6, 1e-6, 1e-6,
30e-6, 1e-6, 1e-6, 500e-9, 5., 1e15)
constraints = ((30e-6, 100e-6), (500e-9, 20e-6), (1e-6, 10e-6),
(2e-6, 100e-6), (500e-9, 5e-6), (500e-9, 20e-6), (30e-9, 10e-6),
(1., 10.), (1e15, 4e19))
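# The nine entries of initial_guess pair one-to-one with the nine constraint
# ranges above; judging by the scaling comment, the last entry (~1e15) is the
# doping concentration and the rest are cantilever dimensions.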
x = optimize_cantilever(initial_guess, constraints, freq_min, freq_max, omega_min)
c = cantilever_divingboard(freq_min, freq_max, x)
c.print_performance()
|
jcdoll/PiezoD
|
python/archive/lbfgs.py
|
Python
|
gpl-3.0
| 752
| 0.009309
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows the complex DAG structure.
"""
from datetime import datetime
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
with models.DAG(
dag_id="example_complex",
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example', 'example2', 'example3'],
) as dag:
# Create
create_entry_group = BashOperator(task_id="create_entry_group", bash_command="echo create_entry_group")
create_entry_group_result = BashOperator(
task_id="create_entry_group_result", bash_command="echo create_entry_group_result"
)
create_entry_group_result2 = BashOperator(
task_id="create_entry_group_result2", bash_command="echo create_entry_group_result2"
)
create_entry_gcs = BashOperator(task_id="create_entry_gcs", bash_command="echo create_entry_gcs")
create_entry_gcs_result = BashOperator(
task_id="create_entry_gcs_result", bash_command="echo create_entry_gcs_result"
)
create_entry_gcs_result2 = BashOperator(
task_id="create_entry_gcs_result2", bash_command="echo create_entry_gcs_result2"
)
create_tag = BashOperator(task_id="create_tag", bash_command="echo create_tag")
create_tag_result = BashOperator(task_id="create_tag_result", bash_command="echo create_tag_result")
create_tag_result2 = BashOperator(task_id="create_tag_result2", bash_command="echo create_tag_result2")
create_tag_template = BashOperator(task_id="create_tag_template", bash_command="echo create_tag_template")
create_tag_template_result = BashOperator(
task_id="create_tag_template_result", bash_command="echo create_tag_template_result"
)
create_tag_template_result2 = BashOperator(
task_id="create_tag_template_result2", bash_command="echo create_tag_template_result2"
)
create_tag_template_field = BashOperator(
task_id="create_tag_template_field", bash_command="echo create_tag_template_field"
)
create_tag_template_field_result = BashOperator(
task_id="create_tag_template_field_result", bash_command="echo create_tag_template_field_result"
)
create_tag_template_field_result2 = BashOperator(
task_id="create_tag_template_field_result2", bash_command="echo create_tag_template_field_result"
)
# Delete
delete_entry = BashOperator(task_id="delete_entry", bash_command="echo delete_entry")
create_entry_gcs >> delete_entry
delete_entry_group = BashOperator(task_id="delete_entry_group", bash_command="echo delete_entry_group")
create_entry_group >> delete_entry_group
delete_tag = BashOperator(task_id="delete_tag", bash_command="echo delete_tag")
create_tag >> delete_tag
delete_tag_template_field = BashOperator(
task_id="delete_tag_template_field", bash_command="echo delete_tag_template_field"
)
delete_tag_template = BashOperator(task_id="delete_tag_template", bash_command="echo delete_tag_template")
# Get
get_entry_group = BashOperator(task_id="get_entry_group", bash_command="echo get_entry_group")
get_entry_group_result = BashOperator(
task_id="get_entry_group_result", bash_command="echo get_entry_group_result"
)
get_entry = BashOperator(task_id="get_entry", bash_command="echo get_entry")
get_entry_result = BashOperator(task_id="get_entry_result", bash_command="echo get_entry_result")
get_tag_template = BashOperator(task_id="get_tag_template", bash_command="echo get_tag_template")
get_tag_template_result = BashOperator(
task_id="get_tag_template_result", bash_command="echo get_tag_template_result"
)
# List
list_tags = BashOperator(task_id="list_tags", bash_command="echo list_tags")
list_tags_result = BashOperator(task_id="list_tags_result", bash_command="echo list_tags_result")
# Lookup
lookup_entry = BashOperator(task_id="lookup_entry", bash_command="echo lookup_entry")
lookup_entry_result = BashOperator(task_id="lookup_entry_result", bash_command="echo lookup_entry_result")
# Rename
rename_tag_template_field = BashOperator(
task_id="rename_tag_template_field", bash_command="echo rename_tag_template_field"
)
# Search
search_catalog = BashOperator(task_id="search_catalog", bash_command="echo search_catalog")
search_catalog_result = BashOperator(
task_id="search_catalog_result", bash_command="echo search_catalog_result"
)
# Update
update_entry = BashOperator(task_id="update_entry", bash_command="echo update_entry")
update_tag = BashOperator(task_id="update_tag", bash_command="echo update_tag")
update_tag_template = BashOperator(task_id="update_tag_template", bash_command="echo update_tag_template")
update_tag_template_field = BashOperator(
task_id="update_tag_template_field", bash_command="echo update_tag_template_field"
)
# Create
create_tasks = [
create_entry_group,
create_entry_gcs,
create_tag_template,
create_tag_template_field,
create_tag,
]
chain(*create_tasks)
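    # chain(*create_tasks) wires the list sequentially, i.e.:
    # create_entry_group >> create_entry_gcs >> create_tag_template
    # >> create_tag_template_field >> create_tag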
create_entry_group >> delete_entry_group
create_entry_group >> create_entry_group_result
create_entry_group >> create_entry_group_result2
create_entry_gcs >> delete_entry
create_entry_gcs >> create_entry_gcs_result
create_entry_gcs >> create_entry_gcs_result2
create_tag_template >> delete_tag_template_field
create_tag_template >> create_tag_template_result
create_tag_template >> create_tag_template_result2
create_tag_template_field >> delete_tag_template_field
create_tag_template_field >> create_tag_template_field_result
create_tag_template_field >> create_tag_template_field_result2
create_tag >> delete_tag
create_tag >> create_tag_result
create_tag >> create_tag_result2
# Delete
delete_tasks = [
delete_tag,
delete_tag_template_field,
delete_tag_template,
delete_entry_group,
delete_entry,
]
chain(*delete_tasks)
# Get
create_tag_template >> get_tag_template >> delete_tag_template
get_tag_template >> get_tag_template_result
create_entry_gcs >> get_entry >> delete_entry
get_entry >> get_entry_result
create_entry_group >> get_entry_group >> delete_entry_group
get_entry_group >> get_entry_group_result
# List
create_tag >> list_tags >> delete_tag
list_tags >> list_tags_result
# Lookup
create_entry_gcs >> lookup_entry >> delete_entry
lookup_entry >> lookup_entry_result
# Rename
create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field
# Search
chain(create_tasks, search_catalog, delete_tasks)
search_catalog >> search_catalog_result
# Update
create_entry_gcs >> update_entry >> delete_entry
create_tag >> update_tag >> delete_tag
create_tag_template >> update_tag_template >> delete_tag_template
create_tag_template_field >> update_tag_template_field >> rename_tag_template_field
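    # Note (illustrative, not from the original DAG): chain(*create_tasks)
    # above wires the listed tasks sequentially, i.e. it is equivalent to
    # create_entry_group >> create_entry_gcs >> create_tag_template
    # >> create_tag_template_field >> create_tag.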
|
apache/incubator-airflow
|
airflow/example_dags/example_complex.py
|
Python
|
apache-2.0
| 7,913
| 0.004929
|
def pytest_addoption(parser):
parser.addoption(
'--integration',
action='store_true',
help='run integration tests',
)
def pytest_ignore_collect(path, config):
if not config.getoption('integration') and 'integration' in str(path):
return True
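# Usage note (illustrative, not part of the original file): a plain
# `pytest` run skips every test whose path contains 'integration', while
# `pytest --integration` collects those tests as well.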
|
malinoff/amqproto
|
tests/conftest.py
|
Python
|
apache-2.0
| 288
| 0
|
def read_logfile_by_line(logfile):
"""generator function that yields the log file content line by line"""
with open(logfile, 'r') as f:
for line in f:
yield line
yield None
def parse_commands(log_content):
"""
parse cwl commands from the line-by-line generator of log file content and
returns the commands as a list of command line lists, each corresponding to a step run.
"""
command_list = []
command = []
in_command = False
line = next(log_content)
    while line:
line = line.strip('\n')
if '[job' in line and line.endswith('docker \\'):
line = 'docker \\' # remove the other stuff
in_command = True
if in_command:
command.append(line.strip('\\').rstrip(' '))
if not line.endswith('\\'):
in_command = False
command_list.append(command)
command = []
line = next(log_content)
    return command_list
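if __name__ == '__main__':
    # Hedged usage sketch: 'cwl_output.log' is a placeholder path, not a
    # file referenced by the original module.
    for step_command in parse_commands(read_logfile_by_line('cwl_output.log')):
        print(' '.join(step_command))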
|
4dn-dcic/tibanna
|
awsf3/log.py
|
Python
|
mit
| 1,003
| 0.004985
|
import copy
import json
import logging
import threading
import uuid
from flask import Flask, abort, jsonify, request
import kubernetes
app = Flask(__name__)
app.secret_key = "mega secret key"
JOB_DB = {}
def get_config(experiment):
with open('config_template.json', 'r') as config:
return json.load(config)[experiment]
def filter_jobs(job_db):
job_db_copy = copy.deepcopy(job_db)
for job_name in job_db_copy:
del(job_db_copy[job_name]['obj'])
del(job_db_copy[job_name]['deleted'])
if job_db_copy[job_name].get('pod'):
del(job_db_copy[job_name]['pod'])
return job_db_copy
@app.route('/api/v1.0/jobs', methods=['GET'])
def get_jobs():
return jsonify({"jobs": filter_jobs(JOB_DB)}), 200
@app.route('/api/v1.0/k8sjobs', methods=['GET'])
def get_k8sjobs():
return jsonify({"jobs": kubernetes.get_jobs()}), 200
@app.route('/api/v1.0/jobs', methods=['POST'])
def create_job():
    if not request.json \
            or 'experiment' not in request.json \
            or 'docker-img' not in request.json:
print(request.json)
abort(400)
cmd = request.json['cmd'] if 'cmd' in request.json else None
env_vars = (request.json['env-vars']
if 'env-vars' in request.json else {})
experiment_config = get_config(request.json['experiment'])
k8s_volume = experiment_config['k8s_volume']
job_id = str(uuid.uuid4())
job_obj = kubernetes.create_job(job_id,
request.json['docker-img'],
cmd,
[(k8s_volume, '/data')],
env_vars,
request.json['experiment'])
if job_obj:
job = copy.deepcopy(request.json)
job['job-id'] = job_id
job['status'] = 'started'
job['restart_count'] = 0
job['max_restart_count'] = 3
job['obj'] = job_obj
job['deleted'] = False
JOB_DB[job_id] = job
return jsonify({'job-id': job_id}), 201
else:
return jsonify({'job': 'Could not be allocated'}), 500
@app.route('/api/v1.0/jobs/<job_id>', methods=['GET'])
def get_job(job_id):
if job_id in JOB_DB:
job_copy = copy.deepcopy(JOB_DB[job_id])
del(job_copy['obj'])
del(job_copy['deleted'])
if job_copy.get('pod'):
del(job_copy['pod'])
return jsonify({'job': job_copy}), 200
else:
abort(404)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(threadName)s - %(levelname)s: %(message)s'
)
job_event_reader_thread = threading.Thread(target=kubernetes.watch_jobs,
args=(JOB_DB,))
job_event_reader_thread.start()
pod_event_reader_thread = threading.Thread(target=kubernetes.watch_pods,
args=(JOB_DB,))
pod_event_reader_thread.start()
app.run(debug=True, port=5000,
host='0.0.0.0')
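# Hedged usage sketch (not part of the original app): exercising the REST
# endpoints above with the `requests` package; 'demo' and 'busybox' are
# placeholder values, not entries from the real config template.
#
#   import requests
#   resp = requests.post('http://localhost:5000/api/v1.0/jobs',
#                        json={'experiment': 'demo',
#                              'docker-img': 'busybox',
#                              'cmd': 'echo hello'})
#   job_id = resp.json()['job-id']
#   print(requests.get('http://localhost:5000/api/v1.0/jobs/%s'
#                      % job_id).json())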
|
diegodelemos/cap-reuse
|
step-broker/app.py
|
Python
|
gpl-3.0
| 3,081
| 0
|
# encoding: utf-8
# This file is part of Guacamole.
#
# Copyright 2012-2015 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Guacamole is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3,
# as published by the Free Software Foundation.
#
# Guacamole is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Guacamole. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the cmdtree module."""
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from guacamole.core import Bowl
from guacamole.ingredients.cmdtree import CommandTreeBuilder
from guacamole.recipes.cmd import Command
class _sub(Command):
spices = ('mustard',)
class _cmd(Command):
spices = ('salt', 'pepper')
sub_commands = (('sub', _sub),)
class CommandTreeBuilderTests(unittest.TestCase):
"""Tests for the CommandTreeBuilder class."""
def setUp(self):
"""Common initialization method."""
self.bowl = Bowl([CommandTreeBuilder(_cmd())])
self.bowl.eat()
def test_build_command_tree(self):
"""check if a correct command tree is built."""
cmd_obj = self.bowl.context.cmd_tree[1]
sub_obj = self.bowl.context.cmd_tree[2][0][1]
self.assertIsInstance(cmd_obj, _cmd)
self.assertIsInstance(sub_obj, _sub)
self.assertEqual(
self.bowl.context.cmd_tree,
(None, cmd_obj, (('sub', sub_obj, ()),)))
def test_collect_spices(self):
"""check if spices are collected from top-level command only."""
self.assertTrue(self.bowl.has_spice('salt'))
self.assertTrue(self.bowl.has_spice('pepper'))
self.assertFalse(self.bowl.has_spice('mustard'))
|
zyga/guacamole
|
guacamole/ingredients/test_cmdtree.py
|
Python
|
gpl-3.0
| 2,078
| 0
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import current_app, redirect, request
from werkzeug.exceptions import NotFound
from indico.modules.attachments.controllers.util import SpecificAttachmentMixin
from indico.modules.attachments.models.legacy_mapping import LegacyAttachmentFolderMapping, LegacyAttachmentMapping
from indico.modules.events import LegacyEventMapping
from indico.util.string import is_legacy_id
from indico.web.flask.util import url_for
from MaKaC.webinterface.rh.base import RHSimple, RH
def _clean_args(kwargs):
if 'event_id' not in kwargs:
raise NotFound
if is_legacy_id(kwargs['event_id']):
mapping = LegacyEventMapping.find(legacy_event_id=kwargs['event_id']).first_or_404()
kwargs['event_id'] = mapping.event_id
if 'contrib_id' in kwargs:
kwargs['contribution_id'] = kwargs.pop('contrib_id')
if 'subcontrib_id' in kwargs:
kwargs['subcontribution_id'] = kwargs.pop('subcontrib_id')
# extension is just to make the links prettier
kwargs.pop('ext', None)
# session id is only used for actual sessions, not for stuff inside them
if 'contribution_id' in kwargs:
kwargs.pop('session_id', None)
@RHSimple.wrap_function
def compat_folder(**kwargs):
_clean_args(kwargs)
folder = LegacyAttachmentFolderMapping.find(**kwargs).first_or_404().folder
if folder.is_deleted:
raise NotFound
return redirect(url_for('attachments.list_folder', folder), 302 if current_app.debug else 301)
def compat_folder_old():
mapping = {'confId': 'event_id',
'sessionId': 'session_id',
'contribId': 'contrib_id',
'subContId': 'subcontrib_id',
'materialId': 'material_id'}
kwargs = {mapping[k]: v for k, v in request.args.iteritems() if k in mapping}
return compat_folder(**kwargs)
def _redirect_to_note(**kwargs):
del kwargs['material_id']
del kwargs['resource_id']
kwargs['confId'] = kwargs.pop('event_id')
return redirect(url_for('event_notes.view', **kwargs), 302 if current_app.debug else 301)
@RHSimple.wrap_function
def compat_attachment(**kwargs):
_clean_args(kwargs)
mapping = LegacyAttachmentMapping.find_first(**kwargs)
if mapping is None:
if kwargs['material_id'] == 'minutes' and kwargs['resource_id'] == 'minutes':
return _redirect_to_note(**kwargs)
raise NotFound
attachment = mapping.attachment
if attachment.is_deleted or attachment.folder.is_deleted:
raise NotFound
return redirect(attachment.download_url, 302 if current_app.debug else 301)
class RHCompatAttachmentNew(SpecificAttachmentMixin, RH):
normalize_url_spec = dict(SpecificAttachmentMixin.normalize_url_spec,
endpoint='attachments.download')
def _process(self):
raise Exception('This RH should only perform URL normalization!')
|
belokop/indico_bare
|
indico/modules/attachments/controllers/compat.py
|
Python
|
gpl-3.0
| 3,649
| 0.001644
|
import sys
[_, ms, _, ns] = list(sys.stdin)
ms = set(int(m) for m in ms.split(' '))
ns = set(int(n) for n in ns.split(' '))
print(sep='\n', *sorted(ms.difference(ns).union(ns.difference(ms))))
|
alexander-matsievsky/HackerRank
|
All_Domains/Python/Sets/symmetric-difference.py
|
Python
|
mit
| 194
| 0
|
import os
from whylog.log_reader.exceptions import EmptyFile, OffsetBiggerThanFileSize
class ReadUtils(object):
STANDARD_BUFFER_SIZE = 512
@classmethod
def size_of_opened_file(cls, fh):
prev_position = fh.tell()
fh.seek(0, os.SEEK_END)
size = fh.tell()
fh.seek(prev_position)
return size
@classmethod
def _read_content(cls, fd, position, buf_size):
fd.seek(position)
return fd.read(buf_size)
@classmethod
def _read_split_lines(cls, fd, position, buf_size):
content = cls._read_content(fd, position, buf_size)
return content.split('\n')
@classmethod
def _join_results(cls, first_part, second_part):
if not first_part:
if not second_part:
return []
return second_part
if not second_part:
return first_part
return first_part[:-1] + ["".join((first_part[-1], second_part[0]))] + second_part[1:]
@classmethod
def _expand_after(cls, fd, position):
fd.seek(position)
line = fd.readline()
if not line:
raise OffsetBiggerThanFileSize(position)
return line.rstrip('\n')
@classmethod
def _expand_before(cls, fd, position, buf_size):
before = []
while len(before) < 2:
position -= buf_size
if position <= 0:
lines = cls._read_split_lines(fd, 0, position + buf_size)
before = cls._join_results(lines, before)
break
lines = cls._read_split_lines(fd, position, buf_size)
before = cls._join_results(lines, before)
if not before:
raise EmptyFile()
return before[-1]
@classmethod
def _read_entire_line(cls, fd, offset, buf_size):
after = cls._expand_after(fd, offset)
before = cls._expand_before(fd, offset, buf_size)
return before + after, offset - len(before), offset + len(after)
@classmethod
def get_line_containing_offset(cls, fd, offset, buf_size):
"""
        Returns the line which contains the specified offset,
        together with the offsets of the first and last character of that line.
        If there is a '\n' at the specified offset, the previous line is returned.
"""
return cls._read_entire_line(fd, offset, buf_size)
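# Hedged usage sketch (illustrative only; 'app.log' is a placeholder path):
#
#   with open('app.log') as fd:
#       line, first, last = ReadUtils.get_line_containing_offset(
#           fd, offset=42, buf_size=ReadUtils.STANDARD_BUFFER_SIZE)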
|
andrzejgorski/whylog
|
whylog/log_reader/read_utils.py
|
Python
|
bsd-3-clause
| 2,366
| 0.000423
|
#!python
# -*- coding: utf-8 -*-
from os import path
import shutil
def install():
filename = 'ilmaruuvi.service'
install_path = path.join('/etc/systemd/system', filename)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, filename), 'r') as f:
service = f.read()
service = service.format(working_dir=here, exec_start=shutil.which('ilmaruuvi'))
with open(install_path, 'w') as f:
f.write(service)
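# Hedged usage note (not part of the original module): install() writes to
# /etc/systemd/system, so it needs root privileges, e.g.:
#
#   if __name__ == '__main__':
#       install()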
|
juhi24/ilmaruuvi
|
ilmaruuvi/systemd_service.py
|
Python
|
mit
| 455
| 0.006593
|
"""
This scripts specifies all PTX special objects.
"""
from __future__ import print_function, absolute_import, division
import operator
import numpy
import llvmlite.llvmpy.core as lc
from numba import types, ir, typing, macro
from .cudadrv import nvvm
class Stub(object):
    '''A stub object to represent special objects which are meaningless
outside the context of CUDA-python.
'''
_description_ = '<ptx special value>'
__slots__ = () # don't allocate __dict__
def __new__(cls):
raise NotImplementedError("%s is not instantiable" % cls)
def __repr__(self):
return self._description_
#-------------------------------------------------------------------------------
# SREG
SREG_SIGNATURE = typing.signature(types.int32)
class threadIdx(Stub):
'''
The thread indices in the current thread block, accessed through the
attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
range from 0 inclusive to the corresponding value of the attribute in
:attr:`numba.cuda.blockDim` exclusive.
'''
_description_ = '<threadIdx.{x,y,z}>'
x = macro.Macro('tid.x', SREG_SIGNATURE)
y = macro.Macro('tid.y', SREG_SIGNATURE)
z = macro.Macro('tid.z', SREG_SIGNATURE)
class blockIdx(Stub):
'''
The block indices in the grid of thread blocks, accessed through the
attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
range from 0 inclusive to the corresponding value of the attribute in
:attr:`numba.cuda.gridDim` exclusive.
'''
_description_ = '<blockIdx.{x,y,z}>'
x = macro.Macro('ctaid.x', SREG_SIGNATURE)
y = macro.Macro('ctaid.y', SREG_SIGNATURE)
z = macro.Macro('ctaid.z', SREG_SIGNATURE)
class blockDim(Stub):
'''
The shape of a block of threads, as declared when instantiating the
kernel. This value is the same for all threads in a given kernel, even
if they belong to different blocks (i.e. each block is "full").
'''
x = macro.Macro('ntid.x', SREG_SIGNATURE)
y = macro.Macro('ntid.y', SREG_SIGNATURE)
z = macro.Macro('ntid.z', SREG_SIGNATURE)
class gridDim(Stub):
'''
    The shape of the grid of blocks, accessed through the attributes ``x``,
``y``, and ``z``.
'''
_description_ = '<gridDim.{x,y,z}>'
x = macro.Macro('nctaid.x', SREG_SIGNATURE)
y = macro.Macro('nctaid.y', SREG_SIGNATURE)
z = macro.Macro('nctaid.z', SREG_SIGNATURE)
#-------------------------------------------------------------------------------
# Grid Macro
def _ptx_grid1d(): pass
def _ptx_grid2d(): pass
def grid_expand(ndim):
"""grid(ndim)
Return the absolute position of the current thread in the entire
grid of blocks. *ndim* should correspond to the number of dimensions
declared when instantiating the kernel. If *ndim* is 1, a single integer
is returned. If *ndim* is 2 or 3, a tuple of the given number of
integers is returned.
Computation of the first integer is as follows::
cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
and is similar for the other two indices, but using the ``y`` and ``z``
attributes.
"""
if ndim == 1:
fname = "ptx.grid.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.grid.2d"
restype = types.UniTuple(types.int32, 2)
elif ndim == 3:
fname = "ptx.grid.3d"
restype = types.UniTuple(types.int32, 3)
else:
raise ValueError('argument can only be 1, 2, 3')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
grid = macro.Macro('ptx.grid', grid_expand, callable=True)
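# Illustrative kernel sketch for the grid macro (hedged; this assumes the
# public numba.cuda API of this era and is not part of this module):
#
#   @cuda.jit
#   def increment_by_one(an_array):
#       pos = cuda.grid(1)
#       if pos < an_array.size:
#           an_array[pos] += 1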
#-------------------------------------------------------------------------------
# Gridsize Macro
def gridsize_expand(ndim):
"""
Return the absolute size (or shape) in threads of the entire grid of
blocks. *ndim* should correspond to the number of dimensions declared when
instantiating the kernel.
Computation of the first integer is as follows::
cuda.blockDim.x * cuda.gridDim.x
and is similar for the other two indices, but using the ``y`` and ``z``
attributes.
"""
if ndim == 1:
fname = "ptx.gridsize.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.gridsize.2d"
restype = types.UniTuple(types.int32, 2)
elif ndim == 3:
fname = "ptx.gridsize.3d"
restype = types.UniTuple(types.int32, 3)
else:
raise ValueError('argument can only be 1, 2 or 3')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
gridsize = macro.Macro('ptx.gridsize', gridsize_expand, callable=True)
#-------------------------------------------------------------------------------
# syncthreads
class syncthreads(Stub):
'''
Synchronize all threads in the same thread block. This function implements
the same pattern as barriers in traditional multi-threaded programming: this
function waits until all threads in the block call it, at which point it
returns control to all its callers.
'''
    _description_ = '<syncthreads()>'
# -------------------------------------------------------------------------------
# memory fences
class threadfence_block(Stub):
'''
A memory fence at thread block level
'''
_description_ = '<threadfence_block()>'
class threadfence_system(Stub):
'''
A memory fence at system level: across devices
'''
_description_ = '<threadfence_system()>'
class threadfence(Stub):
'''
A memory fence at device level
'''
_description_ = '<threadfence()>'
# -------------------------------------------------------------------------------
# shared
def _legalize_shape(shape):
if isinstance(shape, tuple):
return shape
elif isinstance(shape, int):
return (shape,)
else:
raise TypeError("invalid type for shape; got {0}".format(type(shape)))
def shared_array(shape, dtype):
shape = _legalize_shape(shape)
ndim = len(shape)
fname = "ptx.smem.alloc"
restype = types.Array(dtype, ndim, 'C')
sig = typing.signature(restype, types.UniTuple(types.intp, ndim), types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class shared(Stub):
"""
Shared memory namespace.
"""
_description_ = '<shared>'
array = macro.Macro('shared.array', shared_array, callable=True,
argnames=['shape', 'dtype'])
'''
Allocate a shared array of the given *shape* and *type*. *shape* is either
an integer or a tuple of integers representing the array's dimensions.
*type* is a :ref:`Numba type <numba-types>` of the elements needing to be
stored in the array.
The returned array-like object can be read and written to like any normal
device array (e.g. through indexing).
'''
#-------------------------------------------------------------------------------
# local array
def local_array(shape, dtype):
shape = _legalize_shape(shape)
ndim = len(shape)
fname = "ptx.lmem.alloc"
restype = types.Array(dtype, ndim, 'C')
sig = typing.signature(restype, types.UniTuple(types.intp, ndim), types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class local(Stub):
'''
Local memory namespace.
'''
_description_ = '<local>'
array = macro.Macro('local.array', local_array, callable=True,
argnames=['shape', 'dtype'])
'''
Allocate a local array of the given *shape* and *type*. The array is private
to the current thread, and resides in global memory. An array-like object is
returned which can be read and written to like any standard array (e.g.
through indexing).
'''
#-------------------------------------------------------------------------------
# const array
def const_array_like(ndarray):
fname = "ptx.cmem.arylike"
from .descriptor import CUDATargetDesc
aryty = CUDATargetDesc.typingctx.resolve_argument_type(ndarray)
sig = typing.signature(aryty, aryty)
return ir.Intrinsic(fname, sig, args=[ndarray])
class const(Stub):
'''
Constant memory namespace.
'''
_description_ = '<const>'
array_like = macro.Macro('const.array_like', const_array_like,
callable=True, argnames=['ary'])
'''
Create a const array from *ary*. The resulting const array will have the
same shape, type, and values as *ary*.
'''
#-------------------------------------------------------------------------------
# atomic
class atomic(Stub):
"""Namespace for atomic operations
"""
_description_ = '<atomic>'
class add(Stub):
"""add(ary, idx, val)
Perform atomic ary[idx] += val. Supported on int32, float32, and
float64 operands only.
"""
class max(Stub):
"""max(ary, idx, val)
Perform atomic ary[idx] = max(ary[idx], val). NaN is treated as a
missing value, so max(NaN, n) == max(n, NaN) == n. Note that this
differs from Python and Numpy behaviour, where max(a, b) is always
a when either a or b is a NaN.
Supported on float64 operands only.
"""
|
stefanseefeld/numba
|
numba/cuda/stubs.py
|
Python
|
bsd-2-clause
| 9,284
| 0.002801
|
"""
sampyl.samplers.NUTS
~~~~~~~~~~~~~~~~~~~~
This module implements No-U-Turn Sampler (NUTS).
:copyright: (c) 2015 by Mat Leonard.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division
import collections
from ..core import np
from .base import Sampler
from .hamiltonian import energy, leapfrog, initial_momentum
class NUTS(Sampler):
""" No-U-Turn sampler (Hoffman & Gelman, 2014) for sampling from a
probability distribution defined by a log P(theta) function.
For technical details, see the paper:
http://www.stat.columbia.edu/~gelman/research/published/nuts.pdf
:param logp: log P(X) function for sampling distribution
:param start:
Dictionary of starting state for the sampler. Should have one
element for each argument of logp.
:param grad_logp: (optional)
Function or list of functions that calculate grad log P(theta).
Pass functions here if you don't want to use autograd for the
gradients. If logp has multiple parameters, grad_logp must be
a list of gradient functions w.r.t. each parameter in logp.
If you wish to use a logp function that returns both the logp
value and the gradient, set grad_logp = True.
:param scale: (optional)
Dictionary with same format as start. Scaling for initial
momentum in Hamiltonian step.
:param step_size: (optional) *float.*
Initial step size for the deterministic proposals.
:param adapt_steps: (optional) *int.*
Integer number of steps used for adapting the step size to
achieve a target acceptance rate.
:param Emax: (optional) *float.* Maximum energy.
:param target_accept: (optional) *float.* Target acceptance rate.
:param gamma: (optional) *float.*
:param k: (optional) *float.* Scales the speed of step size
adaptation.
:param t0: (optional) *float.* Slows initial step size adaptation.
Example ::
def logp(x, y):
...
start = {'x': x_start, 'y': y_start}
nuts = sampyl.NUTS(logp, start)
chain = nuts.sample(1000)
"""
def __init__(self, logp, start,
step_size=0.25,
adapt_steps=100,
Emax=1000.,
target_accept=0.65,
gamma=0.05,
k=0.75,
t0=10.,
**kwargs):
super(NUTS, self).__init__(logp, start, **kwargs)
self.step_size = step_size / len(self.state.tovector())**(1/4.)
self.adapt_steps = adapt_steps
self.Emax = Emax
self.target_accept = target_accept
self.gamma = gamma
self.k = k
self.t0 = t0
self.Hbar = 0.
self.ebar = 1.
self.mu = np.log(self.step_size*10)
def step(self):
""" Perform one NUTS step."""
H = self.model.logp
dH = self.model.grad
x = self.state
r0 = initial_momentum(x, self.scale)
u = np.random.uniform()
e = self.step_size
xn, xp, rn, rp, y = x, x, r0, r0, x
j, n, s = 0, 1, 1
while s == 1:
v = bern(0.5)*2 - 1
if v == -1:
xn, rn, _, _, x1, n1, s1, a, na = buildtree(xn, rn, u, v, j, e, x, r0,
H, dH, self.Emax)
else:
_, _, xp, rp, x1, n1, s1, a, na = buildtree(xp, rp, u, v, j, e, x, r0,
H, dH, self.Emax)
if s1 == 1 and bern(np.min(np.array([1, n1/n]))):
y = x1
dx = (xp - xn).tovector()
s = s1 * (np.dot(dx, rn.tovector()) >= 0) * \
(np.dot(dx, rp.tovector()) >= 0)
n = n + n1
j = j + 1
if self._sampled >= self.adapt_steps:
self.step_size = self.ebar
else:
# Adapt step size
m = self._sampled + 1
w = 1./(m + self.t0)
self.Hbar = (1 - w)*self.Hbar + w*(self.target_accept - a/na)
log_e = self.mu - (m**.5/self.gamma)*self.Hbar
self.step_size = np.exp(log_e)
z = m**(-self.k)
self.ebar = np.exp(z*log_e + (1 - z)*np.log(self.ebar))
self.state = y
self._sampled += 1
return y
def bern(p):
return np.random.uniform() < p
def buildtree(x, r, u, v, j, e, x0, r0, H, dH, Emax):
if j == 0:
x1, r1 = leapfrog(x, r, v*e, dH)
E = energy(H, x1, r1)
E0 = energy(H, x0, r0)
dE = E - E0
n1 = (np.log(u) - dE <= 0)
s1 = (np.log(u) - dE < Emax)
return x1, r1, x1, r1, x1, n1, s1, np.min(np.array([1, np.exp(dE)])), 1
else:
xn, rn, xp, rp, x1, n1, s1, a1, na1 = \
buildtree(x, r, u, v, j-1, e, x0, r0, H, dH, Emax)
if s1 == 1:
if v == -1:
xn, rn, _, _, x2, n2, s2, a2, na2 = \
buildtree(xn, rn, u, v, j-1, e, x0, r0, H, dH, Emax)
else:
_, _, xp, rp, x2, n2, s2, a2, na2 = \
buildtree(xp, rp, u, v, j-1, e, x0, r0, H, dH, Emax)
if bern(n2/max(n1 + n2, 1.)):
x1 = x2
a1 = a1 + a2
na1 = na1 + na2
dx = (xp - xn).tovector()
s1 = s2 * (np.dot(dx, rn.tovector()) >= 0) * \
(np.dot(dx, rp.tovector()) >= 0)
n1 = n1 + n2
return xn, rn, xp, rp, x1, n1, s1, a1, na1
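# Hedged end-to-end sketch, mirroring the class docstring (np here is
# sampyl's autograd-backed numpy; the sample count is arbitrary):
#
#   def logp(x):
#       return -0.5 * np.sum(x ** 2)  # standard normal, up to a constant
#
#   nuts = NUTS(logp, {'x': np.zeros(3)})
#   chain = nuts.sample(100)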
|
mcleonard/sampyl
|
sampyl/samplers/NUTS.py
|
Python
|
mit
| 5,706
| 0.002103
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2017 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIME-Type Parser.
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared
against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a
list of candidates.
"""
from functools import reduce
__version__ = "0.1.2"
__author__ = "Joe Gregorio"
__email__ = "joe@bitworking.org"
__credits__ = ""
# TODO: Can probably delete this module.
def parse_mime_type(mime_type):
"""Carves up a mime-type and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/xhtml;q=0.5' would get parsed into:
    ('application', 'xhtml', {'q': '0.5'})
"""
parts = mime_type.split(";")
params = dict([tuple([s.strip() for s in param.split("=")]) for param in parts[1:]])
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a single "*"
# Turn it into a legal wildcard.
if full_type == "*":
full_type = "*/*"
(type, subtype) = full_type.split("/")
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Carves up a media range and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/\*;q=0.5' would get parsed into:
    ('application', '\*', {'q': '0.5'})
In addition this function also guarantees that there
is a value for 'q' in the params dictionary, filling it
in with a proper default if necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if (
"q" not in params
or "q" not in params
or not float(params["q"])
or float(params["q"]) > 1
or float(params["q"]) < 0
):
params["q"] = "1"
return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a given mime-type against a list of media_ranges that
have already been parsed by parse_media_range().
Returns a tuple of the fitness value and the value of the 'q' quality parameter of
the best match, or (-1, 0) if no match was found. Just as for quality_parsed(),
'parsed_ranges' must be a list of parsed media ranges.
"""
best_fitness = -1
best_fit_q = 0
(target_type, target_subtype, target_params) = parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
if (type == target_type or type == "*" or target_type == "*") and (
subtype == target_subtype or subtype == "*" or target_subtype == "*"
):
param_matches = reduce(
lambda x, y: x + y,
[
1
for (key, value) in list(target_params.items())
if key != "q" and key in params and value == params[key]
],
0,
)
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params["q"]
return best_fitness, float(best_fit_q)
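# Worked example of the weighting above (hedged illustration): matching
# 'text/html' against 'text/*;q=0.3, text/html;q=0.7' scores 'text/*' as
# 100 (type match only) and 'text/html' as 110 (type + subtype match), so
# the best fit carries q=0.7 -- consistent with the quality() doctest below.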
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a given mime-type against a list of media_ranges that
have already been parsed by parse_media_range().
Returns the 'q' quality parameter of the best match, 0 if no match was found. This
function behaves the same as quality() except that 'parsed_ranges' must be a list of
parsed media ranges.
"""
return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
def quality(mime_type, ranges):
"""Returns the quality 'q' of a mime-type when compared against the media- ranges in
ranges. For example:
>>> quality('text/html', 'text/*;q=0.3, text/html;q=0.7, text/html;level=1,
text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(",")]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Takes a list of supported mime-types and finds the best match for all the media-
ranges listed in header. The value of header must be a string that conforms to the
format of the HTTP Accept: header. The value of 'supported' is a list of mime-types.
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
parsed_header = [parse_media_range(r) for r in header.split(",")]
weighted_matches = [
(fitness_and_quality_parsed(mime_type, parsed_header), mime_type)
for mime_type in supported
]
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][1] or ""
|
DataONEorg/d1_python
|
lib_common/src/d1_common/ext/mimeparser.py
|
Python
|
apache-2.0
| 6,325
| 0.003794
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import re
import os
from typing import (
NamedTuple, Optional
)
# project
from kiwi.command import Command
from kiwi.exceptions import KiwiKernelLookupError
kernel_type = NamedTuple(
'kernel_type', [
('name', str),
('filename', str),
('version', str)
]
)
xen_hypervisor_type = NamedTuple(
'xen_hypervisor_type', [
('filename', str),
('name', str)
]
)
class Kernel:
"""
    **Implements kernel lookup and extraction from a given root tree**
:param str root_dir: root directory path name
:param list kernel_names: list of kernel names to search for
functions.sh::suseStripKernel() provides a normalized
file so that we do not have to search for many different
names in this code
"""
def __init__(self, root_dir: str):
self.root_dir = root_dir
self.kernel_names = self._setup_kernel_names_for_lookup()
def get_kernel(
self, raise_on_not_found: bool = False
) -> Optional[kernel_type]:
"""
Lookup kernel files and provide filename and version
:param bool raise_on_not_found: sets the method to raise an exception
if the kernel is not found
:raises KiwiKernelLookupError: if raise_on_not_found flag is active
and kernel is not found
:return: tuple with filename, kernelname and version
:rtype: tuple|None
"""
for kernel_name in self.kernel_names:
kernel_file = os.sep.join(
[self.root_dir, 'boot', kernel_name]
)
if os.path.exists(kernel_file):
version_match = re.match(
'.*?-(.*)', os.path.basename(kernel_file)
)
if version_match:
version = version_match.group(1)
return kernel_type(
name=os.path.basename(os.path.realpath(kernel_file)),
filename=kernel_file,
version=version
)
if raise_on_not_found:
raise KiwiKernelLookupError(
'No kernel found in {0}, searched for {1}'.format(
os.sep.join([self.root_dir, 'boot']),
','.join(self.kernel_names)
)
)
return None
def get_xen_hypervisor(self) -> Optional[xen_hypervisor_type]:
"""
Lookup xen hypervisor and provide filename and hypervisor name
:return: tuple with filename and hypervisor name
:rtype: tuple|None
"""
xen_hypervisor = self.root_dir + '/boot/xen.gz'
if os.path.exists(xen_hypervisor):
return xen_hypervisor_type(
filename=xen_hypervisor,
name='xen.gz'
)
return None
def copy_kernel(self, target_dir: str, file_name: str = None) -> None:
"""
Copy kernel to specified target
If no file_name is given the target filename is set
as kernel-<kernel.version>.kernel
:param str target_dir: target path name
        :param str file_name: base filename in target
"""
kernel = self.get_kernel()
if kernel:
if not file_name:
file_name = 'kernel-' + kernel.version + '.kernel'
target_file = ''.join(
[target_dir, '/', file_name]
)
Command.run(['cp', kernel.filename, target_file])
def copy_xen_hypervisor(
self, target_dir: str, file_name: str = None
) -> None:
"""
Copy xen hypervisor to specified target
If no file_name is given the target filename is set
as hypervisor-<xen.name>
:param str target_dir: target path name
        :param str file_name: base filename in target
"""
xen = self.get_xen_hypervisor()
if xen:
if not file_name:
file_name = 'hypervisor-' + xen.name
target_file = ''.join(
[target_dir, '/', file_name]
)
Command.run(['cp', xen.filename, target_file])
def _setup_kernel_names_for_lookup(self):
"""
The kernel image name is different per arch and distribution
This method returns a list of possible kernel image names in
order to search and find one of them
:return: list of kernel image names
:rtype: list
"""
kernel_names = []
kernel_dirs = sorted(
os.listdir(''.join([self.root_dir, '/lib/modules']))
)
if kernel_dirs:
# append lookup for the real kernel image names
# depending on the arch and os they are different
# in their prefix
kernel_prefixes = [
'uImage', 'Image', 'zImage', 'vmlinuz', 'image', 'vmlinux'
]
kernel_name_pattern = '{prefix}-{name}'
for kernel_prefix in kernel_prefixes:
for kernel_dir in kernel_dirs:
kernel_names.append(
kernel_name_pattern.format(
prefix=kernel_prefix, name=kernel_dir
)
)
return kernel_names
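# Hedged usage sketch (illustrative only; the root path is a placeholder):
#
#   kernel = Kernel('/var/tmp/image-root').get_kernel(raise_on_not_found=True)
#   print(kernel.filename, kernel.version)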
|
SUSE/kiwi
|
kiwi/system/kernel.py
|
Python
|
gpl-3.0
| 5,997
| 0
|
import medic
from maya import OpenMaya
class FaceAssigned(medic.PyTester):
def __init__(self):
super(FaceAssigned, self).__init__()
def Name(self):
return "FaceAssigned"
def Description(self):
return "Face assigned mesh(s)"
def Match(self, node):
return node.object().hasFn(OpenMaya.MFn.kMesh) or node.object().hasFn(OpenMaya.MFn.kNurbsSurfaceGeom)
@staticmethod
def __TestObjGrp(node, parentPlug, childPlug):
dg = node.dg()
if not dg.hasAttribute(parentPlug) or not dg.hasAttribute(childPlug):
return False
io_plug = node.dg().findPlug(parentPlug)
og_obj = node.dg().attribute(childPlug)
for i in range(io_plug.numElements()):
elm = io_plug.elementByPhysicalIndex(i)
og_plug = elm.child(og_obj)
if not og_plug.numConnectedElements():
continue
for j in range(og_plug.numElements()):
gelm = og_plug.elementByPhysicalIndex(j)
arr = OpenMaya.MPlugArray()
if not gelm.connectedTo(arr, False, True):
continue
for n in range(arr.length()):
if arr[n].node().hasFn(OpenMaya.MFn.kShadingEngine):
return True
return False
def test(self, node):
if FaceAssigned.__TestObjGrp(node, "compInstObjGroups", "compObjectGroups"):
return medic.PyReport(node)
if FaceAssigned.__TestObjGrp(node, "instObjGroups", "objectGroups"):
return medic.PyReport(node)
return None
def Create():
return FaceAssigned()
|
sol-ansano-kim/medic
|
plugins/Tester/faceAssigned.py
|
Python
|
mit
| 1,666
| 0.001801
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gobject
import clutter
from text import TextContainer
from roundrect import RoundRectangle, OutlinedRoundRectangle
from clutter import cogl
class ClassicButton(TextContainer):
__gtype_name__ = 'ClassicButton'
def __init__(self, label=' ', margin=0, padding=6, texture=None, rounded=True, crypted=False):
TextContainer.__init__(self, label, margin=margin, padding=padding, texture=texture, rounded=rounded, crypted=crypted)
self.set_reactive(True)
def set_lock(self, lock):
self.set_reactive(not lock)
self.set_opacity(128 if lock else 255)
class ImageButton(ClassicButton):
__gtype_name__ = 'ImageButton'
def __init__(self, label=' ', image_src=None, margin=0, padding=10, spacing=10, texture=None, has_text=True, expand=False):
ClassicButton.__init__(self, label, margin=margin, padding=padding, texture=texture)
self.spacing = spacing
self._has_text = has_text
self._expand = expand
self.image = clutter.Texture()
if image_src:
self.image.set_from_file(image_src)
self.image.set_parent(self)
self.set_font_name('16')
self.set_font_color('#000000ff')
self.set_inner_color('#aaaaaaff')
self.set_border_color('#888888ff')
def set_image_src(self, image_src):
self.image.set_from_file(image_src)
def do_allocate(self, box, flags):
btn_width = box.x2 - box.x1
btn_height = box.y2 - box.y1
inner_width = btn_width - 2*self._padding.x
inner_height = btn_height - 2*self._padding.y
# allocate background
self._allocate_rect(0, 0, btn_width, btn_height, flags)
# allocate image
if self._has_text:
label_height = ClassicButton.do_get_preferred_height(self, for_width=inner_width)[1]
remaining_height = btn_height - label_height - self.spacing
else:
label_height = 0
remaining_height = inner_height
image_preferred_size = self.image.get_preferred_size()
if image_preferred_size[3] > 0:
image_ratio = float(image_preferred_size[2]) / float(image_preferred_size[3])
if self._expand:
image_height = remaining_height
image_width = round(float(image_height) * float(image_ratio))
if image_width > inner_width:
image_width = inner_width
image_height = round(float(image_width) / float(image_ratio))
else:
image_height = image_preferred_size[3]
if remaining_height < image_height:
image_height = remaining_height
image_width = round(float(image_height) * float(image_ratio))
if image_width > inner_width:
image_width = inner_width
image_height = round(float(image_width) / float(image_ratio))
else:
image_width = 0
image_height = 0
x_padding = round((inner_width - image_width) / 2.0)
y_padding = round((remaining_height - image_height) / 2.0)
image_box = clutter.ActorBox()
image_box.x1 = self._padding.x + x_padding
image_box.y1 = self._padding.y + y_padding
image_box.x2 = image_box.x1 + image_width
image_box.y2 = image_box.y1 + image_height
self.image.allocate(image_box, flags)
# allocate label
if self._has_text:
base_y = image_height + self.spacing
label_height = btn_height - base_y
self._allocate_label(0, base_y, btn_width, label_height, flags)
clutter.Actor.do_allocate(self, box, flags)
def do_set_property(self, pspec, value):
return ClassicButton.do_set_property(self, pspec, value)
def do_get_property(self, pspec):
return ClassicButton.do_get_property(self, pspec)
def do_paint(self):
self.rect.paint()
self.image.paint()
if self._has_text:
self.label.paint()
def do_foreach(self, func, data=None):
ClassicButton.do_foreach(self, func, data)
func(self.image, data)
def do_destroy(self):
self.unparent()
if hasattr(self, 'image'):
if self.image:
self.image.unparent()
self.image.destroy()
try:
ClassicButton.do_destroy(self)
except:
pass
gobject.type_register(ImageButton)
if __name__ == '__main__':
from flowbox import FlowBox
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
#toto = cogl.Material()
texture_path = '/home/aviolo/sources/easycast/unstable/easycast/images/buttons/copy.png'
texture = clutter.cogl.texture_new_from_file(texture_path, clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
#toto.set_layer(0, texture)
#stage.add(toto)
t = ClassicButton('test efopkzekfopzf opfzeopfkz opfzegjzeh guzehiug ezhgiozeghizeogh eziogzeoighze oigzeiogzeig opg jzeopgjzepogzzeogjze zeigergre ergerg', texture = texture, rounded = True)
t.set_size(640, 480)
stage.add(t)
'''
# Main flowbox
box0 = FlowBox()
box0.set_size(640, 640)
# Invisible rectangle for top margin
r = clutter.Rectangle()
r.set_size(640, 1)
box0.add(r)
# Button at natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
b.set_size(*b.get_preferred_size()[2:])
box0.add(b)
# Button larger than natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
b.set_size(630, 50)
box0.add(b)
# Intermediate flowbox to force line wrapping
box1 = FlowBox()
box1.set_size(640, 50)
box0.add(box1)
# Button fitter than natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.')
b.set_size(420, 50)
box1.add(b)
# Button more fitter than natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.')
b.set_size(210, 50)
box0.add(b)
# Intermediate flowbox to force line wrapping
box2 = FlowBox()
box2.set_size(640, 50)
box0.add(box2)
# Button at minimal size (just suspension marks)
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
b.set_size(*b.get_preferred_size()[:2])
box2.add(b)
# Invisible rectangle for bottom margin
r = clutter.Rectangle()
r.set_size(640, 1)
box0.add(r)
# Testing buttons
b = ClassicButton('A')
b.set_size(15, 15)
b.set_position(5, 450)
stage.add(b)
b = ClassicButton('B')
b.set_size(25, 25)
b.set_position(50, 425)
stage.add(b)
b = ClassicButton('C')
b.set_font_color('Yellow')
b.set_size(50, 50)
b.set_position(125, 375)
stage.add(b)
b = ClassicButton('D')
b.set_border_width(10)
b.set_border_color('Green')
b.set_size(100, 100)
b.set_position(250, 325)
stage.add(b)
b = ClassicButton('E', texture=texture)
b.set_inner_color('Pink')
b.set_size(170, 170)
b.set_position(425, 210)
stage.add(b)
stage.add(box0)
'''
test_memory_usage = False
if test_memory_usage:
import gc
gc.set_debug(gc.DEBUG_LEAK)
from pprint import pprint
max_count = 5000
#texture_path = '/home/sdiemer/sources/candies/main/candies2/effect_light.png'
texture = clutter.cogl.texture_new_from_file(texture_path, clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
texture = None
def create_test_object():
t = ClassicButton('test efopkzekfopzf opfzeopfkz opfzegjzeh guzehiug ezhgiozeghizeogh eziogzeoighze oigzeiogzeig opg jzeopgjzepogzzeogjze zeigergre ergerg', texture = texture, rounded = True)
return t
def remove_test_object(obj, stage):
obj.destroy()
return False
def test_memory(stage, counter, max_count):
if counter < max_count or max_count == 0:
counter += 1
print counter
tested_object = create_test_object()
stage.add(tested_object)
gobject.timeout_add(2, remove_tested_object, tested_object, stage, counter)
return False
def remove_tested_object(tested_object, stage, counter):
remove_test_object(tested_object, stage)
gc.collect()
pprint(gc.garbage)
gobject.timeout_add(2, test_memory, stage, counter, max_count)
return False
gobject.timeout_add(10, test_memory, stage, 0, max_count)
stage.show()
clutter.main()
|
UbiCastTeam/candies
|
candies2/buttons.py
|
Python
|
lgpl-3.0
| 9,259
| 0.007884
|
import logging
from . import generic
from .elfreloc import ELFReloc
l = logging.getLogger(name=__name__)
# http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.pdf
arch = 'PPC64'
class R_PPC64_JMP_SLOT(ELFReloc):
def relocate(self):
if self.owner.is_ppc64_abiv1:
# R_PPC64_JMP_SLOT
# http://osxr.org/glibc/source/sysdeps/powerpc/powerpc64/dl-machine.h?v=glibc-2.15#0405
# copy an entire function descriptor struct
addr = self.resolvedby.owner.memory.unpack_word(self.resolvedby.relative_addr)
toc = self.resolvedby.owner.memory.unpack_word(self.resolvedby.relative_addr + 8)
aux = self.resolvedby.owner.memory.unpack_word(self.resolvedby.relative_addr + 16)
self.owner.memory.pack_word(self.relative_addr, addr)
self.owner.memory.pack_word(self.relative_addr + 8, toc)
self.owner.memory.pack_word(self.relative_addr + 16, aux)
else:
self.owner.memory.pack_word(self.relative_addr, self.resolvedby.rebased_addr)
return True
class R_PPC64_RELATIVE(generic.GenericRelativeReloc):
pass
class R_PPC64_IRELATIVE(generic.GenericIRelativeReloc):
pass
class R_PPC64_ADDR64(generic.GenericAbsoluteAddendReloc):
pass
class R_PPC64_GLOB_DAT(generic.GenericJumpslotReloc):
pass
class R_PPC64_DTPMOD64(generic.GenericTLSModIdReloc):
pass
class R_PPC64_DTPREL64(generic.GenericTLSDoffsetReloc):
pass
class R_PPC64_TPREL64(generic.GenericTLSOffsetReloc):
pass
class R_PPC64_REL24(ELFReloc):
"""
Relocation Type: 10
Calculation: (S + A - P) >> 2
Field: low24*
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
P = self.rebased_addr
return (S + A - P) >> 2
def relocate(self):
if not self.resolved:
return False
instr = self.owner.memory.unpack_word(self.relative_addr, size=4) & 0b11111100000000000000000000000011
imm = self.value & 0xFFFFFF
self.owner.memory.pack_word(self.relative_addr, instr | (imm << 2), size=4)
return True
class R_PPC64_TOC16_LO(ELFReloc):
"""
Relocation Type: 48
Calculation: #lo(S + A - .TOC.)
Field: half16
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return (S + A) & 0xFFFF
TOC = self.owner.ppc64_initial_rtoc
return (S + A - TOC) & 0xFFFF
def relocate(self):
if not self.resolved:
return False
self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
return True
class R_PPC64_TOC16_HI(ELFReloc):
"""
Relocation Type: 49
Calculation: #hi(S + A - .TOC.)
Field: half16
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return ((S + A) >> 16) & 0xFFFF
TOC = self.owner.ppc64_initial_rtoc
return ((S + A - TOC) >> 16) & 0xFFFF
def relocate(self):
if not self.resolved:
return False
self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
return True
class R_PPC64_TOC16_HA(ELFReloc):
"""
Relocation Type: 50
Calculation: #ha(S + A - .TOC.)
Field: half16
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return ((((S + A) >> 16) + (1 if ((S + A) & 0x8000) else 0)) & 0xFFFF)
TOC = self.owner.ppc64_initial_rtoc
return ((((S + A - TOC) >> 16) + (1 if ((S + A - TOC) & 0x8000) else 0)) & 0xFFFF)
def relocate(self):
if not self.resolved:
return False
self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
return True
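# Worked example of the #ha carry adjustment above (hedged illustration):
# for S + A - TOC == 0x12348000, bit 15 is set, so #ha yields
# 0x1234 + 1 == 0x1235 while #lo yields 0x8000; the low half is
# sign-extended to -0x8000 when consumed, and
# 0x12350000 - 0x8000 == 0x12348000 restores the full offset.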
class R_PPC64_TOC(ELFReloc):
"""
Relocation Type: 51
Calculation: .TOC.
Field: doubleword64
"""
@property
def value(self):
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return 0
return self.owner.ppc64_initial_rtoc
|
angr/cle
|
cle/backends/elf/relocation/pcc64.py
|
Python
|
bsd-2-clause
| 4,448
| 0.004946
|
def flatten(x):
"""
    Takes an arbitrarily nested list of lists like [[a, b], [c, [d, e]], [f]]
and returns a single list [a,b,c,d,e,f]
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, str):
result.extend(flatten(el))
else:
result.append(el)
return result
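if __name__ == '__main__':
    # Minimal self-check (follows directly from the docstring example).
    assert flatten([[1, 2], [3, [4, 5]], [6]]) == [1, 2, 3, 4, 5, 6]
    assert flatten(['ab', ['cd']]) == ['ab', 'cd']  # strings are not split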
|
ai-se/Transfer-Learning
|
src/utils/misc_utils.py
|
Python
|
unlicense
| 344
| 0
|
"""
For a detailed gene table and a summary gene table
"""
#!/usr/bin/env python
from collections import defaultdict
filename = 'detailed_gene_table_v75'
detailed_out = open(filename, 'w')
file = 'summary_gene_table_v75'
summary_out = open(file, 'w')
# write out files for detailed and summary gene table
detailed_out.write("\t".join(["Chromosome","Gene_name","Is_hgnc","Ensembl_gene_id","Ensembl_transcript_id","Biotype",
"Transcript_status","CCDS_id","HGNC_id","CDS_length","Protein_length",
"Transcript_start","Transcript_end","strand","Synonyms",
"Rvis_pct","entrez_gene_id","mammalian_phenotype_id"]))
detailed_out.write("\n")
summary_out.write("\t".join(["Chromosome","Gene_name","Is_hgnc","Ensembl_gene_id",
"HGNC_id","Synonyms", "Rvis_pct","Strand","Transcript_min_start","Transcript_max_end","Mammalian_phenotype_id"]))
summary_out.write("\n")
mouse_phenotype = defaultdict(list)
genic_intolerance = defaultdict(list)
keygene = []
list_hgnc = []
#initializing values for the summary gene table
transcript_min = defaultdict(list)
transcript_max = defaultdict(list)
lines_seen = set()
for line in open("genic_intolerance_dataset2", 'r'):
if line.startswith("#") is False:
field = line.strip().split("\t")
name = str(field[0])
score = str(field[1])
percentile = str(field[2])
(key,value) = (name, percentile)
genic_intolerance[name].append(percentile)
#Phenotype data from MGI - Jax
for row in open("HMD_HumanPhenotype", 'r'):
col = row.strip().split("\t")
#Remove leading white spaces in the column
entrez_id = str(col[1]).lstrip()
#Remove leading white spaces in the column & join MP terms with a comma
mph = str(col[5]).lstrip().replace(' ',',') if str(col[5]) != '' else None
(key,value) = (entrez_id, mph)
mouse_phenotype[entrez_id].append(mph)
# Dictionary for summary gene table to handle transcript min, max co-ordinates
for each in open("raw_gene_table", 'r'):
if each.startswith("Chromosome") is False:
k = each.strip().split("\t")
chr = "chr"+str((k[0]))
ens = str(k[2])
start = str(k[10])
end = str(k[11])
transcript_min[(chr,ens)].append(start)
transcript_max[(chr,ens)].append(end)
for each in open("raw_gene_table", 'r'):
if each.startswith("Chromosome") is False:
k = each.strip().split("\t")
chrom = "chr"+str((k[0]))
hgnc = str(k[1])
ens_geneid = str(k[2])
ens_transid = str(k[3])
trans_biotype = str(k[4])
status = str(k[5])
ccds_id = str(k[6]) #these id's are unique to transcripts
hgnc_id = str(k[7])
cds_len = str(k[8])
protein_len = str(k[9])
transcript_start = str(k[10])
transcript_end = str(k[11])
strand = str(k[12])
#remove space between names
previous = str(k[13]).replace(" ","")
synonyms = str(k[14]).replace(" ","")
entrez = str(k[15])
# sort all transcript start and end positions for a gene (use ens_geneid, since HGNC is not always true)
# Capture the first and the last position from the sorted list to give min, max
if (chrom,ens_geneid) in transcript_min:
minmum = sorted(transcript_min[(chrom,ens_geneid)])[0]
if (chrom,ens_geneid) in transcript_max:
maxmum = sorted(transcript_max[(chrom,ens_geneid)])[-1]
rvis = genic_intolerance[hgnc][0] if hgnc in genic_intolerance else None
pheno = mouse_phenotype[entrez] if entrez in mouse_phenotype else None
if pheno is not None and len(pheno) == 1:
phenotype = pheno[0]
elif pheno is None:
phenotype = "None"
else:
if len(pheno) > 1:
#convert the list to a string
string = ",".join(pheno)
# store a None for multiple Nones
if "None" in string and "MP:" not in string:
phenotype = None
#remove redundancy in MP terms
if "None" not in string and "MP:" in string:
phenotype = ",".join(set(string.split(",")))
#remove nones when MP terms are available
if "None" in string and "MP:" in string:
phen = string.split(",")
phenotype = ",".join([x for x in phen if x != "None"])
if hgnc != "None":
list_hgnc.append(hgnc)
#we don't want string of Nones
if "None" in previous and "None" in synonyms and "None" in hgnc:
string = None
else:
# We would like all genes names to be put together
gene_string = hgnc+","+previous+","+synonyms
#get rid of Nones in gene strings
if gene_string.startswith("None"):
string = gene_string.replace("None,","")
else:
string = gene_string.replace(",None","")
#Nonetype object has no attribute split
if string is not None:
genes = set(string.split(","))
if len(genes) > 1:
# We would like to represent each member of the gene list as a key and the remainder as synonyms each time
for each in genes:
keygene = set([each])
synonym = genes.difference(keygene)
gene_name = ','.join(keygene)
other_names = ','.join(synonym)
hgnc_flag = "1" if gene_name in list_hgnc else "0"
# only when the gene is a HGNC name, it would have an hgnc id
is_hgnc_id = hgnc_id if gene_name in list_hgnc else "None"
# handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
# writing to outfile for the summary gene table
line = "\t".join([chrom,gene_name,hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# Writing to out for detailed gene table
detailed_out.write("\t".join([chrom,gene_name,hgnc_flag,ens_geneid,ens_transid,trans_biotype,
status,ccds_id,is_hgnc_id,cds_len,protein_len,transcript_start,
transcript_end,strand,other_names,str(rvis),entrez,str(phenotype)]))
detailed_out.write("\n")
# if there is one gene name in the list, we just want it to be the key
elif len(genes) == 1:
gene_name = ','.join(genes)
other_names = "None"
hgnc_flag = "1" if gene_name in list_hgnc else "0"
is_hgnc_id = hgnc_id if gene_name in list_hgnc else "None"
# handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
# writing to outfile for the summary gene table
line = "\t".join([chrom,str(gene_name),hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# write to out for detailed gene table
detailed_out.write("\t".join([chrom,str(gene_name),hgnc_flag,ens_geneid,ens_transid,trans_biotype,
status,ccds_id,is_hgnc_id,cds_len,protein_len,transcript_start,
transcript_end,strand,other_names,str(rvis),entrez,str(phenotype)]))
detailed_out.write("\n")
# if there are no HGNC, previous or synonyms names for an ensembl entry, just return None
elif string is None:
gene_name = "None"
other_names = "None"
hgnc_flag = "0"
is_hgnc_id = "None"
#handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
#writing to outfile for the summary gene table
line = "\t".join([chrom,gene_name,hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# probably we still want to print these lines where gene is none since ensembl gene id has value
detailed_out.write("\t".join([chrom,gene_name,hgnc_flag,ens_geneid,ens_transid,trans_biotype,status,
ccds_id,is_hgnc_id,cds_len,protein_len,transcript_start,transcript_end,
strand,other_names,str(rvis),entrez,str(phenotype)]))
detailed_out.write("\n")
detailed_out.close()
summary_out.close()
|
brentp/gemini
|
gemini/annotation_provenance/gene_table/combined_gene_table.py
|
Python
|
mit
| 9,693
| 0.018983
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0014_generalsetting_titulo'),
]
operations = [
migrations.AlterField(
model_name='imagen',
name='img',
field=models.ImageField(upload_to=b'imgenEvento', verbose_name=b'Ruta'),
),
]
|
nicolas471/Lecole
|
main/migrations/0015_auto_20160404_1648.py
|
Python
|
gpl-3.0
| 433
| 0.002309
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def mymodules2():
print("test module2!")
mymodules2()
|
kmahyyg/learn_py3
|
modules/mymodule2/__init__.py
|
Python
|
agpl-3.0
| 107
| 0.018692
|
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import traceback
import types
import linecache
from code import InteractiveInterpreter
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,
lineno, line=line))
except IOError:
            pass  # file (probably __stderr__) is invalid; warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
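# Editor's hedged illustration (assumes only the stdlib): after the patch,
# entries keyed like '<pyshell#0>' survive a checkcache pass, while ordinary
# file-backed entries are still revalidated.
#
#   linecache.cache['<pyshell#0>'] = (4, None, ['x = 1\n'], '<pyshell#0>')
#   linecache.checkcache()     # the '<pyshell#0>' entry is saved and restored
#   assert '<pyshell#0>' in linecache.cache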
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error as err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print('*** Error in script or command!\n', file=tkerr)
print('Traceback (most recent call last):', file=tkerr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
            lineno += 1  # mark end of the offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
self.color = color = self.ColorDelegator()
self.per.insertfilter(color)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use a textView someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '', s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
        self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
        except:
            # 11Aug07 KBK: if we are expecting exceptions here, let's find out
            # what they are and catch them specifically.
            raise
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
def isatty(self):
return True
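# Editor's hedged aside: PseudoFile only needs write/writelines/flush/isatty
# to stand in for sys.stdout; print() forwards each chunk through write(), so
# the text reaches the shell tagged "stdout". A hypothetical sketch:
#
#   fake = PseudoFile(shell, "stdout")   # 'shell' must provide .write(s, tags)
#   print("hi", file=fake)               # shell.write("hi", "stdout"), then "\n"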
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = True
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
enable_shell = False
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
            if not os.path.isfile(script):
                print("No script file:", script)
                sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
    if shell and (cmd or script):
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
# Check for problematic OS X Tk versions and print a warning message
# in the IDLE shell window; this is less intrusive than always opening
# a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
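# Editor's hedged usage note: with the idlelib package importable, this module
# can be launched directly, e.g.:
#   python3 -m idlelib.PyShell -n    # -n runs IDLE without the subprocess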
|
wdv4758h/ZipPy
|
lib-python/3/idlelib/PyShell.py
|
Python
|
bsd-3-clause
| 52,145
| 0.001285
|
from __future__ import absolute_import
from Queue import Empty
from random import randint
from time import sleep
import os
from unittest import TestCase, skipUnless
from signal import SIGINT, SIGCHLD
from select import error as select_error
from os import getpid
from mock import MagicMock, patch, PropertyMock
from psycopg2 import OperationalError
from hermes.client import Client
from hermes.components import Component
from hermes.connectors import PostgresConnector
from hermes.exceptions import InvalidConfigurationException
from hermes.strategies import TERMINATE
_WATCH_PATH = '/tmp/hermes_test'
_FAILOVER_FILES = ('recovery.conf', 'recovery.done')
_POSTGRES_DSN = {
'database': 'test_hermes'
}
class RunningClientTestCase(TestCase):
def setUp(self):
# Create the folder
if not os.path.exists(_WATCH_PATH):
os.makedirs(_WATCH_PATH)
self.client = Client(_POSTGRES_DSN, _WATCH_PATH, _FAILOVER_FILES)
self.client.log = MagicMock()
def tearDown(self):
if self.client.is_alive():
self.client.terminate()
# Remove the folder
        if os.path.exists(_WATCH_PATH):
os.removedirs(_WATCH_PATH)
@skipUnless(os.environ.get('ALL_TESTS', False),
"Unittests only")
def test_client_directory_watcher_when_server_master(self):
# We have to monkey patch the 'is_server_master' function to ensure
# we can control the test path
old_func = PostgresConnector.is_server_master
PostgresConnector.is_server_master = MagicMock(return_value=True)
self.client._start_components = MagicMock(return_value=None)
# Start the client and allow to settle
self.client._start_observer()
sleep(3)
        # Create a file and check whether the client has been informed
file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
with open(file_path, 'a'):
os.utime(file_path, None)
# Give the event time to emit
sleep(3)
self.assertTrue(self.client._start_components.called)
PostgresConnector.is_server_master = old_func
@skipUnless(os.environ.get('ALL_TESTS', False),
"Unittests only")
def test_client_directory_watcher_when_server_slave(self):
# We have to monkey patch the 'is_server_master' function to ensure
# we can control the test path
old_func = PostgresConnector.is_server_master
PostgresConnector.is_server_master = MagicMock(return_value=False)
# Start the observer and allow to settle
self.client.directory_observer.start()
sleep(3)
        # Create a file and check whether the client has been informed
file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
with open(file_path, 'a'):
os.utime(file_path, None)
# Give the event time to emit
sleep(3)
self.assertFalse(self.client.is_alive())
PostgresConnector.is_server_master = old_func
def test_client_directory_watcher_when_file_incorrect(self):
# We have to monkey patch the 'is_server_master' function to ensure
# we can control the test path
old_func = PostgresConnector.is_server_master
PostgresConnector.is_server_master = MagicMock(return_value=True)
# Start the observer and allow to settle
self.client.directory_observer.start()
sleep(3)
        # Create a file and check whether the client has been informed
file_path = '{}/{}'.format(_WATCH_PATH, 'random_file.rand')
with open(file_path, 'a'):
os.utime(file_path, None)
# Give the event time to emit
sleep(3)
self.assertFalse(PostgresConnector.is_server_master.called)
PostgresConnector.is_server_master = old_func
class ClientComponentTestCase(TestCase):
def test_add_listener_throws_on_non_component(self):
client = Client(MagicMock(), MagicMock())
self.assertRaises(InvalidConfigurationException,
client.add_listener,
3)
def test_add_processor_throws_on_non_component(self):
client = Client(MagicMock(), MagicMock())
self.assertRaises(InvalidConfigurationException,
client.add_processor,
3)
def test_add_listener_accepts_component(self):
client = Client(MagicMock())
client.add_listener(Component(MagicMock(),
MagicMock(),
MagicMock()))
self.assertIsInstance(client._listener, Component)
def test_add_processor_accepts_component(self):
client = Client(MagicMock(), MagicMock())
client.add_processor(Component(MagicMock(),
MagicMock(),
MagicMock()))
self.assertIsInstance(client._processor, Component)
class ValidateComponentsTestCase(TestCase):
def test_throws_on_non_listener(self):
client = Client(MagicMock())
client._processor = 3
self.assertRaises(InvalidConfigurationException,
client._validate_components)
def test_throws_on_non_processor(self):
client = Client(MagicMock())
client._listener = 3
self.assertRaises(InvalidConfigurationException,
client._validate_components)
def test_throws_on_different_queue(self):
client = Client(MagicMock())
client._listener = MagicMock()
client._processor = MagicMock()
        client._listener.error_queue = MagicMock(
            return_value=True
        )
        client._processor.error_queue = MagicMock(
            return_value=False
        )
self.assertRaises(InvalidConfigurationException,
client._validate_components)
class WatchdogObserverTestCase(TestCase):
def setUp(self):
self.client = Client(MagicMock())
self.client.directory_observer = MagicMock()
    def test_start_schedules_observer_if_watch_path(self):
self.client._watch_path = randint(50, 1000)
self.client._start_observer()
self.client.directory_observer.schedule.assert_called_once_with(
self.client, self.client._watch_path, recursive=False
)
self.client.directory_observer.start.assert_called_once_with()
def test_start_not_schedule_observer_if_none_watch_path(self):
self.client._watch_path = None
self.client._start_observer()
self.assertEqual(self.client.directory_observer.schedule.call_count, 0)
self.assertEqual(self.client.directory_observer.start.call_count, 0)
def test_stop_stops_observer_if_watch_path_and_observer(self):
self.client.directory_observer.is_alive.return_value = True
self.client._watch_path = True
self.client._stop_observer()
self.client.directory_observer.stop.assert_called_once_with()
def test_stop_does_not_stop_observer_on_none(self):
self.client._watch_path = None
self.client._stop_observer()
self.assertEqual(self.client.directory_observer.stop.call_count, 0)
def test_stop_does_not_stop_on_dead(self):
self.client._watch_path = True
self.client.directory_observer.is_alive.return_value = False
self.client._stop_observer()
self.assertEqual(self.client.directory_observer.stop.call_count, 0)
class ClientStartupTestCase(TestCase):
def test_startup_functions_are_called(self):
with patch('multiprocessing.Process.start') as mock_process_start:
with patch('hermes.client.signal') as mock_signal:
client = Client(MagicMock())
client._validate_components = MagicMock()
client.start()
self.assertEqual(mock_signal.call_count, 2)
client._validate_components.assert_called_once_with()
mock_process_start.assert_called_once_with()
def test_initial_start_components(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._processor.is_alive.return_value = False
client._listener = MagicMock()
client._listener.is_alive.return_value = False
client._start_components()
client._listener.start.assert_called_once_with()
client._processor.start.assert_called_once_with()
def test_start_components_when_components_running(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._processor.is_alive.return_value = True
client._listener = MagicMock()
client._listener.is_alive.return_value = True
client._start_components()
self.assertEqual(client._listener.start.call_count, 0)
self.assertEqual(client._processor.start.call_count, 0)
def test_join_is_called_on_restart(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._processor.is_alive.return_value = False
client._processor.ident.return_value = True
client._listener = MagicMock()
client._listener.is_alive.return_value = False
client._listener.ident.return_value = True
client._start_components(restart=True)
client._listener.join.assert_called_once_with()
client._processor.join.assert_called_once_with()
class ClientShutdownTestCase(TestCase):
def test_shutdown(self):
client = Client(MagicMock())
client.log = MagicMock()
client._stop_components = MagicMock()
client._stop_observer = MagicMock()
client._should_run = True
client._shutdown()
client._stop_components.assert_called_once_with()
client._stop_observer.assert_called_once_with()
self.assertFalse(client._should_run)
def test_stop_terminates(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._listener = MagicMock()
client._processor.ident.return_value = True
client._listener.ident.return_value = True
client._processor.is_alive.return_value = True
client._listener.is_alive.return_value = True
client._stop_components()
client._processor.terminate.assert_called_once_with()
client._listener.terminate.assert_called_once_with()
client._listener.join.assert_called_once_with()
client._processor.join.assert_called_once_with()
def test_handle_terminate_when_same_process(self):
with patch('hermes.client.Client.ident',
new_callable=PropertyMock) as mock_ident:
client = Client(MagicMock())
client._shutdown = MagicMock()
mock_ident.return_value = getpid()
client._handle_terminate(None, None)
client._shutdown.assert_called_once_with()
def test_handle_terminate_when_different_process(self):
with patch('hermes.client.Client.ident',
new_callable=PropertyMock) as mock_ident:
client = Client(MagicMock())
client._exit_queue = MagicMock()
client._shutdown = MagicMock()
current_pid = getpid()
mock_ident.return_value = current_pid + 1
client._handle_terminate(None, None)
client._exit_queue.put_nowait.assert_called_once_with(True)
def test_handle_sigchld_when_should_not_run(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._should_run = False
client._handle_sigchld(None, None)
self.assertEqual(
client._processor.error_queue.get_nowait.call_count, 0
)
def test_handle_sigchld_when_expected_error_and_terminate(self):
client = Client(MagicMock())
client._processor = MagicMock()
client.execute_role_based_procedure = MagicMock()
client._processor.error_queue.get_nowait.return_value = (
True, TERMINATE
)
client._should_run = True
client._exception_raised = False
client._handle_sigchld(SIGCHLD, None)
client._processor.error_queue.get_nowait.assert_called_once_with()
self.assertTrue(client._exception_raised)
client.execute_role_based_procedure.assert_called_once_with()
def test_handle_sigchld_when_not_expected(self):
client = Client(MagicMock())
client.log = MagicMock()
client._processor = MagicMock()
client._shutdown = MagicMock()
client._processor.error_queue.get_nowait.return_value = (
False, TERMINATE
)
client._should_run = True
client._exception_raised = False
client._handle_sigchld(SIGCHLD, None)
client._processor.error_queue.get_nowait.assert_called_once_with()
self.assertTrue(client._exception_raised)
client._shutdown.assert_called_once_with()
def test_handle_sigchld_when_queue_is_empty(self):
client = Client(MagicMock())
client._start_components = MagicMock()
client._processor = MagicMock()
client._processor.error_queue.get_nowait.side_effect = Empty
client._should_run = True
client._exception_raised = False
client._handle_sigchld(SIGCHLD, None)
client._processor.error_queue.get_nowait.assert_called_once_with()
self.assertFalse(client._exception_raised)
self.assertTrue(client._child_interrupted)
client._start_components.assert_called_once_with(restart=True)
class ClientRunProcedureTestCase(TestCase):
def test_initial_run_funcs(self):
with patch('hermes.log.get_logger'):
with patch('hermes.client.signal') as mock_signal:
with patch('select.select') as mock_select:
mock_select.side_effect = Exception
client = Client(MagicMock())
client._start_observer = MagicMock()
client.execute_role_based_procedure = MagicMock()
self.assertRaises(Exception, client.run)
mock_signal.assert_called_once_with(
SIGCHLD, client._handle_sigchld
)
client.execute_role_based_procedure.assert_called_once_with()
def test_role_based_procedures_are_called_outside_of_main_loop(self):
with patch('hermes.log.get_logger'):
with patch('hermes.client.signal'):
client = Client(MagicMock())
random_raised = randint(1, 10000)
client._exception_raised = random_raised
client._start_observer = MagicMock()
client.execute_role_based_procedure = MagicMock(
side_effect=Exception
)
self.assertRaises(Exception, client.run)
client.execute_role_based_procedure.assert_called_once_with()
# Raised value should be the same as that which we set
self.assertEqual(client._exception_raised, random_raised)
def test_client_calls_terminate_on_exit_queue(self):
with patch('hermes.log.get_logger'):
with patch('hermes.client.signal'):
client = Client(MagicMock())
client.execute_role_based_procedure = MagicMock()
client._start_observer = MagicMock()
client.terminate = MagicMock(side_effect=Exception)
self.assertRaises(Empty, client._exit_queue.get_nowait)
client._exit_queue.put(True)
self.assertRaises(Exception, client.run)
client.terminate.assert_called_once_with()
def test_client_sets_run_flag_on_interrupt(self):
with patch('hermes.log.get_logger'):
with patch('select.select', side_effect=select_error):
client = Client(MagicMock())
client.execute_role_based_procedure = MagicMock()
client.run()
self.assertFalse(client._should_run)
class RoleBasedProceduresTestCase(TestCase):
def test_when_server_is_master(self):
client = Client(MagicMock())
client.log = MagicMock()
client._start_components = MagicMock()
client.master_pg_conn = MagicMock()
client.master_pg_conn.is_server_master.return_value = True
client.execute_role_based_procedure()
client._start_components.assert_called_once_with(restart=True)
def test_when_server_is_slave(self):
client = Client(MagicMock())
client.log = MagicMock()
client._stop_components = MagicMock()
client.master_pg_conn = MagicMock()
client.master_pg_conn.is_server_master.return_value = False
client.execute_role_based_procedure()
client._stop_components.assert_called_once_with()
def test_when_server_is_down_and_no_backoff(self):
with patch('hermes.client.sleep') as mock_sleep:
mock_sleep.side_effect = Exception('Break out of loop')
client = Client(MagicMock())
client.log = MagicMock()
client._stop_components = MagicMock()
client.master_pg_conn = MagicMock()
client.master_pg_conn.is_server_master.side_effect = OperationalError
self.assertRaises(Exception, client.execute_role_based_procedure)
client._stop_components.assert_called_once_with()
mock_sleep.assert_called_once_with(1)
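# Editor's hedged aside (not part of the original tests): the manual
# save/restore monkey patching above can also be written with
# mock.patch.object, which restores the attribute automatically even if the
# test body raises:
#
#   with patch.object(PostgresConnector, 'is_server_master',
#                     return_value=True):
#       ...  # exercise the client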
|
transifex/hermes
|
test_hermes/test_client.py
|
Python
|
bsd-3-clause
| 17,562
| 0.000228
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import random
class BackoffTimer(object):
def __init__(self, ratio=1, max_interval=None, min_interval=None):
self.c = 0
self.ratio = ratio
self.max_interval = max_interval
self.min_interval = min_interval
def is_reset(self):
return self.c == 0
def reset(self):
self.c = 0
return self
def success(self):
self.c = max(self.c - 1, 0)
return self
def failure(self):
self.c += 1
return self
    def get_interval(self):
        # Randomized binary exponential backoff: the window k = 2**c - 1 grows
        # with each recorded failure; sample uniformly and scale by ratio.
        k = pow(2, self.c) - 1
        interval = random.random() * k * self.ratio
if self.max_interval is not None:
interval = min(interval, self.max_interval)
if self.min_interval is not None:
interval = max(interval, self.min_interval)
return interval
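# Editor's hedged usage sketch (assumes nothing beyond this module): each
# failure() widens the randomized window k = 2**c - 1 before sampling.
if __name__ == '__main__':
    timer = BackoffTimer(ratio=0.5, max_interval=30, min_interval=0.1)
    for _ in range(4):
        timer.failure()
        print(timer.get_interval())   # uniform sample, clamped to [0.1, 30]
    timer.success()                   # one success steps the counter back down
    print(timer.is_reset())           # False until the counter returns to zero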
|
wtolson/gnsq
|
gnsq/backofftimer.py
|
Python
|
bsd-3-clause
| 899
| 0
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.log import logger
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.util import normalize_path
from pip.wheel import WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
class WheelCommand(Command):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not recompiling your software during every install.
For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] <vcs project url> ...
%prog [options] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.")
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# confirm requirements
try:
import wheel.bdist_wheel
except ImportError:
raise CommandError("'pip wheel' requires the 'wheel' package. To fix this, run: pip install wheel")
try:
import pkg_resources
except ImportError:
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info support."
" To fix this, run: pip install --upgrade setuptools"
)
else:
if not hasattr(pkg_resources, 'DistInfoDistribution'):
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info "
"support. To fix this, run: pip install --upgrade "
"setuptools"
)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
if options.use_mirrors:
logger.deprecated("1.7",
"--use-mirrors has been deprecated and will be removed"
" in the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
if options.mirrors:
logger.deprecated("1.7",
"--mirrors has been deprecated and will be removed in "
" the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
index_urls += options.mirrors
session = self._build_session(options)
finder = PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_unverified=options.allow_all_unverified,
allow_all_prereleases=options.pre,
process_dependency_links=
options.process_dependency_links,
session=session,
)
options.build_dir = os.path.abspath(options.build_dir)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=None,
download_dir=None,
download_cache=options.download_cache,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
session=session,
wheel_download_dir=options.wheel_dir
)
# make the wheelhouse
if not os.path.exists(options.wheel_dir):
os.makedirs(options.wheel_dir)
        # Parse args and/or requirements files
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for filename in options.requirements:
for req in parse_requirements(
filename,
finder=finder,
options=options,
session=session):
if req.editable:
logger.notify("ignoring %s" % req.url)
continue
requirement_set.add_requirement(req)
        # Fail if no requirements
if not requirement_set.has_requirements:
opts = {'name': self.name}
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.error(msg)
return
try:
            # Build wheels
            wb = WheelBuilder(
                requirement_set,
                finder,
                options.wheel_dir,
                build_options=options.build_options or [],
                global_options=options.global_options or []
            )
wb.build()
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
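# CLI usage sketch (editor's addition): build wheels for pinned requirements
# into the default wheelhouse directory.
#
#   pip wheel --wheel-dir=wheelhouse -r requirements.txt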
|
tesb/flask-crystal
|
venv/Lib/site-packages/pip/commands/wheel.py
|
Python
|
apache-2.0
| 7,402
| 0.003513
|
# -*- coding: utf-8 -*-
import os
import lxml.etree
import io
from . import pipeline_item
import core.docvert_exception
class GeneratePostConversionEditorFiles(pipeline_item.pipeline_stage):
    def stage(self, pipeline_value):
        # Placeholder stage: returns the pipeline value unchanged
        return pipeline_value
|
holloway/docvert-python3
|
core/pipeline_type/generatepostconversioneditorfiles.py
|
Python
|
gpl-3.0
| 263
| 0.003802
|
import random
from plugin import Plugin
class Flatter(Plugin):
def help_text(self, bot):
return bot.translate("flatter_help")
    def on_msg(self, bot, user_nick, host, channel, message):
        if message.lower().startswith(bot.translate("flatter_cmd")):
            if len(message.split()) >= 2:
                target = message.split()[1]
                if bot.getlanguage() == "de":
                    path = 'lists/flattery.txt'
                elif bot.getlanguage() == "en":
                    # Source http://www.pickuplinesgalore.com/cheesy.html
                    path = 'lists/flattery_en.txt'
                else:
                    return
                # Use a context manager so the file handle is closed promptly
                with open(path) as f:
                    bot.send_message(channel, target + ", " + random.choice(f.readlines()), user_nick)
|
k4cg/Rezeptionistin
|
plugins/flatter.py
|
Python
|
mit
| 687
| 0.016012
|
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
import matplotlib.lines as lines
import h5py
from matplotlib.font_manager import FontProperties
import matplotlib.ticker as ticker
from scipy.fftpack import fft
axial_label_font = FontProperties()
axial_label_font.set_family('sans-serif')
axial_label_font.set_style('normal')
axial_label_font.set_weight('bold')
# axial_label_font.set_size('x-large')
axial_label_font.set_size(20)
legend_label_font = FontProperties()
legend_label_font.set_family('sans-serif')
legend_label_font.set_style('normal')
legend_label_font.set_weight('normal')
# legend_label_font.set_size('large')
legend_label_font.set_size(16)
def node_response_extraction_sequential(node_ID, file_name, num_DOF):
h5_file = h5py.File(file_name, 'r');
Time = h5_file['time'][:];
displacement_index = int(h5_file['Model/Nodes/Index_to_Generalized_Displacements'][node_ID]);
displacement_component = h5_file['Model/Nodes/Generalized_Displacements'][int(displacement_index):int(displacement_index+num_DOF), :];
acceleration_component = h5_file['Model/Nodes/Generalized_Accelerations'][int(displacement_index):int(displacement_index+num_DOF), :];
for x1 in xrange(0,num_DOF):
        displacement_component[x1,:] = displacement_component[x1,:]-displacement_component[x1,0]; ### subtract the initial value so a self-weight loading stage yields relative displacement
return Time, displacement_component, acceleration_component;
numbercol = 1;
surface_node_ID = 252; ## 252, 250, 249, 251
node_ID = [252, 212, 172, 132, 92, 52, 12]; ## node ID from surface to bottom
depth = [0, 2, 4, 6, 8, 10, 12];
bottom_node_ID = 6; ## node just beyond DRM layer
file_name = 'Motion1C_DRM_propagation.h5.feioutput' ##
parameteric_case = 'Motion1C_Northridge' ##
### ==========================================================================
postfix = '.feioutput';
middle_name_less_than_ten = '0';
num_DOF = 3;
Time, displacement_component_surface, acceleration_component_surface = node_response_extraction_sequential(surface_node_ID, file_name, num_DOF);
Time, displacement_component_bottom, acceleration_component_bottom = node_response_extraction_sequential(bottom_node_ID, file_name, num_DOF);
# surface_acc = np.loadtxt('Kobe_acc.txt');
# surface_disp = np.loadtxt('Kobe_disp.txt');
surface_acc = np.loadtxt('scaled_northridge_acc.dat');
surface_disp = np.loadtxt('scaled_northridge_dis.dat');
########################################################################################
#######===== Print acceleration of nodes ===== ######
########################################################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(surface_acc[:, 0], surface_acc[:, 1], '-r', label='surface analytical', linewidth= 1.5);
ax.plot(Time[200:]-2.0, acceleration_component_surface[0, 200:], '-k', label='DRM propagation', linewidth= 0.5);
plt.gca().set_xlim([0,38]);
# plt.gca().set_ylim([-10,10]);
# plt.gca().get_xaxis().set_ticks(np.arange(0, 60.1, 10))
# plt.gca().get_yaxis().set_ticks(np.arange(-15, 3.1, 3))
plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
plt.xlabel('Time [s]', fontproperties=axial_label_font);
plt.ylabel('Acc. [$m/s^2$]', fontproperties=axial_label_font);
plt.grid(True);
plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
filename = 'acc_check_'+ parameteric_case + '.pdf'
plt.savefig(filename, bbox_inches='tight');
plt.show();
# # # ########################################################################################
# # # #######======================== Print Time series response along the depth ===== ######
# # # ########################################################################################
# print "Plot acceleration records along depth!";
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # scale_meter = 7;
# # plt.gca().text(32.7, 1.25, '$1g$', fontsize=20)
# # l1 = lines.Line2D([32, 32], [0.5, 0.5+10/scale_meter], color='k', linewidth=2.0)
# # l2 = lines.Line2D([31.7, 32.3], [0.5, 0.5], color='k', linewidth=0.5)
# # l3 = lines.Line2D([31.7, 32.3], [0.5+10/scale_meter, 0.5+10/scale_meter], color='k', linewidth=0.5)
# # plt.gca().add_line(l1);
# # plt.gca().add_line(l2);
# # plt.gca().add_line(l3);
# PGA_depth = sp.zeros(len(depth));
# for x in xrange(0,len(node_ID)):
# current_node = node_ID[x];
# current_depth = depth[x];
# Time, current_displacement_component, current_acceleration_component = node_response_extraction_sequential(current_node, file_name, num_DOF);
# plot_current_acceleration = current_depth + current_acceleration_component/15.0; ## scale acceleration
# PGA_depth[x] = max(abs(current_acceleration_component[0, :]));
# ax.plot(Time, plot_current_acceleration[0, :], '-k', linewidth= 1);
# plt.gca().set_ylim([-1,13]);
# plt.gca().invert_yaxis()
# # plt.gca().get_xaxis().set_ticks(np.arange(0, 60.1, 10))
# # plt.gca().get_yaxis().set_ticks(np.arange(-15, 3.1, 3))
# plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
# plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
# plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
# plt.xlabel('Time [s]', fontproperties=axial_label_font);
# plt.ylabel('Depth. [m]', fontproperties=axial_label_font);
# plt.grid(True);
# plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
# filename = 'acc_depth_'+ parameteric_case + '.pdf'
# plt.savefig(filename, bbox_inches='tight');
# plt.show();
|
BorisJeremic/Real-ESSI-Examples
|
motion_one_component/Deconvolution_DRM_Propagation_Northridge/python_plot_parameteric_study.py
|
Python
|
cc0-1.0
| 5,870
| 0.019591
|
# -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/build/work/GCC-7-build/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/work/GCC-7-build/install-native/arm-none-eabi/lib/thumb/v7-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
jocelynmass/nrf51
|
toolchain/arm_cm0/arm-none-eabi/lib/thumb/v7-m/libstdc++.a-gdb.py
|
Python
|
gpl-2.0
| 2,482
| 0.006446
|
"""Run Monte Carlo simulations."""
from joblib import Parallel, delayed
from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint
from datetime import datetime
from copy import deepcopy
from glob import glob
import frbpoppy.paths
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import uuid
POP_SIZE = 5e7
class SimulationOverview:
"""Given values, return uid
Load from file, or make."""
def __init__(self, load_csv=True):
p = frbpoppy.paths.populations()
        self.filename = f'{p}mc/simulation_overview.csv'
if load_csv and os.path.isfile(self.filename):
self.load()
else:
self.df = pd.DataFrame()
def load(self):
self.df = pd.read_csv(self.filename, index_col=0)
self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')]
def save(self):
self.df.to_csv(self.filename)
def append(self, df):
self.df = self.df.append(df, ignore_index=True)
def map_surveys(self, ix, names):
mapping = dict(zip(ix, names))
self.df.replace({"survey": mapping}, inplace=True)
class MonteCarlo:
def __init__(self, pop_size=1e2, load_csv=True):
self.survey_names = ['parkes-htru',
'chime-frb',
'askap-incoh',
'wsrt-apertif']
self.load_csv = load_csv
self.pop_size = pop_size
self.survey_ix = [i for i in range(len(self.survey_names))]
self.surveys = self.set_up_surveys()
self.so = SimulationOverview(load_csv=self.load_csv)
self.set_up_dirs()
def set_up_surveys(self):
"""Set up surveys."""
surveys = []
for name in self.survey_names:
survey = Survey(name=name)
survey.set_beam(model='airy', n_sidelobes=1)
if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
survey.set_beam(model=name)
surveys.append(survey)
return surveys
def set_up_dirs(self, run=np.nan):
"""Create subdirectory for saving populations.
Returns True if directory had to be set up."""
f = f'{frbpoppy.paths.populations()}mc/'
if not os.path.isdir(f):
os.mkdir(f)
return True
if not np.isnan(run):
f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
if not os.path.isdir(f):
os.mkdir(f)
return True
return False
def gen_par_set_1(self,
parallel=True,
lum_min=np.nan,
lum_max=np.nan,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=0):
alphas = np.linspace(-2.5, -1, 11)
sis = np.linspace(-2, 2, 11)
lis = np.linspace(-2, 0, 11)
# Put all options into a dataframe
if 'run' in self.so.df:
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
df['run'] = run
df['par_set'] = 1
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
def iter_alpha(i):
alpha = alphas[i]
pop = CosmicPopulation.complex(self.pop_size)
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
for si in sis:
pop.set_si(model='constant', value=si)
pop.gen_si()
for li in lis:
pop.set_lum(model='powerlaw',
low=1e40,
high=1e45, power=li)
                    if not np.isnan(lum_min):
                        # 'power=' matches the other powerlaw set_lum calls above
                        pop.set_lum(model='powerlaw', low=lum_min,
                                    high=lum_max, power=li)
pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 1)
mask &= (self.so.df.run == run)
mask &= (self.so.df.alpha == alpha)
mask &= (self.so.df.si == si)
mask &= (self.so.df.li == li)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
if parallel:
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
r = range(len(alphas))
Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
else:
[iter_alpha(i) for i in tqdm(range(len(alphas)))]
def gen_par_set_2(self,
parallel=True,
alpha=-1.5,
si=0,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
lis = np.linspace(-1.5, 0, 11)
lum_mins = 10**np.linspace(38, 46, 11)
lum_maxs = 10**np.linspace(38, 46, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
cols = ('li', 'lum_min', 'lum_max', 'survey')
df = pd.DataFrame(options, columns=cols)
df['par_set'] = 2
df['run'] = run
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
df = df[~(df.lum_max < df.lum_min)]
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
li, lum_min, lum_max = e
if lum_max < lum_min:
return
t_pop = deepcopy(pop)
t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
power=li)
t_pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 2)
mask &= (self.so.df.run == run)
mask &= (self.so.df.li == li)
mask &= (self.so.df.lum_min == lum_min)
mask &= (self.so.df.lum_max == lum_max)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(lis, lum_mins, lum_maxs)
loop = np.array(mg).T.reshape(-1, 3)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_3(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
w_means = 10**np.linspace(-3, 1, 11)
w_stds = np.linspace(0, 3, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(w_means, w_stds, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('w_mean', 'w_std', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 3
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, power=li)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
w_mean, w_std = e
t_pop = deepcopy(pop)
t_pop.set_w(model='lognormal', mean=w_mean, std=w_std)
t_pop.gen_w()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 3)
                mask &= (self.so.df.run == run)
mask &= (self.so.df.w_mean == w_mean)
mask &= (self.so.df.w_std == w_std)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(w_means, w_stds)
loop = np.array(mg).T.reshape(-1, 2)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_4(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
w_mean=np.nan,
w_std=np.nan,
run=np.nan):
dm_igm_slopes = np.linspace(800, 1200, 11)
dm_hosts = np.linspace(0, 500, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(dm_igm_slopes, dm_hosts, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('dm_igm_slope', 'dm_host', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 4
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, power=li)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
pop.generate()
def adapt_pop(e):
dm_igm_slope, dm_host = e
t_pop = deepcopy(pop)
t_pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
t_pop.gen_dm_igm()
t_pop.set_dm_host(model='constant', value=dm_host)
t_pop.gen_dm_host()
t_pop.frbs.dm = t_pop.frbs.dm_mw + t_pop.frbs.dm_igm
t_pop.frbs.dm += t_pop.frbs.dm_host
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 4)
mask &= (self.so.df.run == run)
mask &= (self.so.df.dm_igm_slope == dm_igm_slope)
mask &= (self.so.df.dm_host == dm_host)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([4, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(dm_igm_slopes, dm_hosts)
loop = np.array(mg).T.reshape(-1, 2)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
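# --- Usage sketch (editor's addition; assumes frbpoppy and its survey
# models are installed) ---
if __name__ == '__main__':
    # Dry run of the first parameter sweep: small population, serial loop.
    mc = MonteCarlo(pop_size=1e4, load_csv=False)
    mc.gen_par_set_1(parallel=False, run=0)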
|
davidgardenier/frbpoppy
|
tests/monte_carlo/simulations.py
|
Python
|
mit
| 14,711
| 0
|
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
row, col = len(M), len(M[0])
ans = [[0]*col for i in xrange(row)]
for i in xrange(row):
for j in xrange(col):
cnt = 0
val = 0
for p in xrange(-1, 2):
for q in xrange(-1, 2):
if ((i+p)<0) or ((i+p)>=row) or ((j+q)<0) or ((j+q)>=col):
continue
cnt += 1
val += M[i+p][j+q]
ans[i][j] = val / cnt
return ans
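# Quick sanity check (editor's addition; Python 2, matching the xrange above).
# Every 3x3 neighborhood average of this grid floors to 0.
if __name__ == "__main__":
    print Solution().imageSmoother([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    # -> [[0, 0, 0], [0, 0, 0], [0, 0, 0]]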
|
YiqunPeng/Leetcode-pyq
|
solutions/661ImageSmoother.py
|
Python
|
gpl-3.0
| 684
| 0.010234
|
"""Help to make choice"""
# PYTHON STUFFS #######################################################
import random
import shlex
from nemubot import context
from nemubot.exception import IMException
from nemubot.hooks import hook
from nemubot.module.more import Response
# MODULE INTERFACE ####################################################
@hook.command("choice")
def cmd_choice(msg):
if not len(msg.args):
raise IMException("indicate some terms to pick!")
return Response(random.choice(msg.args),
channel=msg.channel,
nick=msg.frm)
@hook.command("choicecmd")
def cmd_choicecmd(msg):
if not len(msg.args):
raise IMException("indicate some command to pick!")
choice = shlex.split(random.choice(msg.args))
return [x for x in context.subtreat(context.subparse(msg, choice))]
@hook.command("choiceres")
def cmd_choiceres(msg):
if not len(msg.args):
raise IMException("indicate some command to pick a message from!")
rl = [x for x in context.subtreat(context.subparse(msg, " ".join(msg.args)))]
if len(rl) <= 0:
return rl
r = random.choice(rl)
if isinstance(r, Response):
for i in range(len(r.messages) - 1, -1, -1):
if isinstance(r.messages[i], list):
r.messages = [ random.choice(random.choice(r.messages)) ]
elif isinstance(r.messages[i], str):
r.messages = [ random.choice(r.messages) ]
return r
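# IRC usage sketch (editor's addition; command names are illustrative):
#   !choice heads tails          -> replies with one of the two terms at random
#   !choicecmd "!cmd1" "!cmd2"   -> picks one command at random and runs it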
|
nbr23/nemubot
|
modules/rnd.py
|
Python
|
agpl-3.0
| 1,491
| 0.003353
|
from datetime import datetime
import uuid
class Torrent(object):
def __init__(self):
self.tracker = None
self.url = None
self.title = None
self.magnet = None
self.seeders = None
self.leechers = None
self.size = None
self.date = None
self.details = None
self.uuid = uuid.uuid4().hex
self._remove = False
@property
def human_age(self):
if self.date:
age = datetime.now() - self.date
return "%s days" % (int(age.total_seconds()/(60*60*24)))
else:
return "Unknown"
@property
    def human_size(self):
        if self.size:
            if self.size > 1000000000:
                return "%.2f GB" % (self.size / 1000000000)
            elif self.size > 1000000:
                return "%.2f MB" % (self.size/1000000)
            else:
                return "%s KB" % (self.size/1000)
        else:
            # Mirror human_age's fallback instead of silently returning None
            return "Unknown"
@property
def html_friendly_title(self):
return self.title.replace('.', '.​').replace('[', '​[').replace(']', ']​')
def __unicode__(self):
return "%s Size: %s Seeders: %s Age: %s %s" % (self.title.ljust(60)[0:60], str(self.human_size).ljust(12),
str(self.seeders).ljust(6), self.human_age,
self.tracker)
def __str__(self):
return self.__unicode__()
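# Illustrative check (editor's addition): sizes use decimal (SI) units.
# Under Python 3 this prints '1.50 GB'; under Python 2 the integer division
# above truncates, giving '1.00 GB'.
if __name__ == '__main__':
    t = Torrent()
    t.size = 1500000000
    print(t.human_size)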
|
stopstop/duvet
|
duvet/objects.py
|
Python
|
gpl-3.0
| 1,485
| 0.003367
|
from . import services
def prep_rules(rules):
prepped = []
for rule in rules:
if rule['enabled']:
prepped.append(prep_rule(rule))
return prepped
def prep_rule(raw_rule):
rule = dict(raw_rule)
if rule['service'] != 'custom':
proto, port = services.decode_service(rule['service'])
if not (proto and port):
raise ValueError("Unknown service: {service}".format(
service=rule['service']
))
rule['proto'] = proto
rule['port'] = port
if not rule['comment']:
rule['comment'] = "{service} service ({proto}:{port})".format(
service=rule['service'],
proto=proto,
port=port
)
return rule
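# Illustrative call (editor's addition; assumes services.decode_service maps
# e.g. 'ssh' to ('tcp', 22)):
#
#   prep_rule({'enabled': True, 'service': 'ssh', 'comment': ''})
#   -> {'enabled': True, 'service': 'ssh', 'proto': 'tcp', 'port': 22,
#       'comment': 'ssh service (tcp:22)'}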
|
Kromey/piroute
|
iptables/utils.py
|
Python
|
mit
| 808
| 0.001238
|
"""
The parser:
1. gets an expression
2. parses it
3. handles all boolean logic
4. delegates operator and rvalue parsing to the OperatorMap
SchemaFreeOperatorMap
supports all mongo operators for all fields.
SchemaAwareOperatorMap
1. verifies fields exist.
2. verifies operators are applied to fields of correct type.
currently unsupported:
1. $where - deliberately omitted, to guard against injection
2. geospatial
"""
import ast
import bson
import datetime
import dateutil.parser
from calendar import timegm
def parse_date(node):
if hasattr(node, 'n'): # it's a number!
return datetime.datetime.fromtimestamp(node.n)
try:
return dateutil.parser.parse(node.s)
except Exception as e:
raise ParseError('Error parsing date: ' + str(e), col_offset=node.col_offset)
class AstHandler(object):
def get_options(self):
return [f.replace('handle_', '') for f in dir(self) if f.startswith('handle_')]
def resolve(self, thing):
thing_name = thing.__class__.__name__
try:
handler = getattr(self, 'handle_' + thing_name)
except AttributeError:
            raise ParseError('Unsupported syntax ({0}).'.format(thing_name),
                             col_offset=thing.col_offset if hasattr(thing, 'col_offset') else None,
                             options=self.get_options())
return handler
def handle(self, thing):
return self.resolve(thing)(thing)
def parse(self, string):
ex = ast.parse(string, mode='eval')
return self.handle(ex.body)
class ParseError(Exception):
def __init__(self, message, col_offset, options=[]):
super(ParseError, self).__init__(message)
self.message = message
self.col_offset = col_offset
self.options = options
def __str__(self):
if self.options:
return '{0} options: {1}'.format(self.message, self.options)
return self.message
class Parser(AstHandler):
def __init__(self, operator_map):
self._operator_map = operator_map
def get_options(self):
return self._operator_map.get_options()
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_And(self, op):
'''and'''
return '$and'
def handle_Or(self, op):
'''or'''
return '$or'
def handle_UnaryOp(self, op):
operator = self.handle(op.operand)
field, value = list(operator.items())[0]
return {field: {self.handle(op.op): value}}
def handle_Not(self, not_node):
'''not'''
return '$not'
def handle_Compare(self, compare):
if len(compare.comparators) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(compare.comparators)),
col_offset=compare.comparators[1].col_offset)
return self._operator_map.handle(left=compare.left,
operator=compare.ops[0],
right=compare.comparators[0])
class SchemaFreeParser(Parser):
def __init__(self):
super(SchemaFreeParser, self).__init__(SchemaFreeOperatorMap())
class SchemaAwareParser(Parser):
def __init__(self, *a, **k):
super(SchemaAwareParser, self).__init__(SchemaAwareOperatorMap(*a, **k))
class FieldName(AstHandler):
def handle_Str(self, node):
return node.s
def handle_Name(self, name):
return name.id
def handle_Attribute(self, attr):
return '{0}.{1}'.format(self.handle(attr.value), attr.attr)
class OperatorMap(object):
def resolve_field(self, node):
return FieldName().handle(node)
def handle(self, operator, left, right):
field = self.resolve_field(left)
return {field: self.resolve_type(field).handle_operator_and_right(operator, right)}
class SchemaFreeOperatorMap(OperatorMap):
def get_options(self):
return None
def resolve_type(self, field):
return GenericField()
class SchemaAwareOperatorMap(OperatorMap):
def __init__(self, field_to_type):
self._field_to_type = field_to_type
def resolve_field(self, node):
field = super(SchemaAwareOperatorMap, self).resolve_field(node)
try:
self._field_to_type[field]
except KeyError:
raise ParseError('Field not found: {0}.'.format(field),
col_offset=node.col_offset,
options=self._field_to_type.keys())
return field
def resolve_type(self, field):
return self._field_to_type[field]
#---Function-Handlers---#
class Func(AstHandler):
@staticmethod
def get_arg(node, index):
if index > len(node.args) - 1:
raise ParseError('Missing argument in {0}.'.format(node.func.id),
col_offset=node.col_offset)
return node.args[index]
@staticmethod
def parse_arg(node, index, field):
return field.handle(Func.get_arg(node, index))
def handle(self, node):
try:
handler = getattr(self, 'handle_' + node.func.id)
except AttributeError:
raise ParseError('Unsupported function ({0}).'.format(node.func.id),
col_offset=node.col_offset,
options=self.get_options())
return handler(node)
def handle_exists(self, node):
return {'$exists': self.parse_arg(node, 0, BoolField())}
def handle_type(self, node):
return {'$type': self.parse_arg(node, 0, IntField())}
class StringFunc(Func):
def handle_regex(self, node):
result = {'$regex': self.parse_arg(node, 0, StringField())}
try:
result['$options'] = self.parse_arg(node, 1, StringField())
except ParseError:
pass
return result
class IntFunc(Func):
def handle_mod(self, node):
return {'$mod': [self.parse_arg(node, 0, IntField()),
self.parse_arg(node, 1, IntField())]}
class ListFunc(Func):
def handle_size(self, node):
return {'$size': self.parse_arg(node, 0, IntField())}
def handle_all(self, node):
return {'$all': self.parse_arg(node, 0, ListField())}
def handle_match(self, node):
return {'$elemMatch': self.parse_arg(node, 0, DictField())}
class DateTimeFunc(Func):
def handle_date(self, node):
return parse_date(self.get_arg(node, 0))
class IdFunc(Func):
def handle_id(self, node):
return self.parse_arg(node, 0, IdField())
class EpochFunc(Func):
def handle_epoch(self, node):
return self.parse_arg(node, 0, EpochField())
class EpochUTCFunc(Func):
def handle_epoch_utc(self, node):
return self.parse_arg(node, 0, EpochUTCField())
class GeoShapeFuncParser(Func):
def handle_Point(self, node):
return {'$geometry':
{'type': 'Point',
'coordinates': [self.parse_arg(node, 0, IntField()),
self.parse_arg(node, 1, IntField())]}}
def handle_LineString(self, node):
return {'$geometry':
{'type': 'LineString',
'coordinates': self.parse_arg(node, 0, ListField(ListField(IntField())))}}
def handle_Polygon(self, node):
return {'$geometry':
{'type': 'Polygon',
'coordinates': self.parse_arg(node, 0, ListField(ListField(ListField(IntField()))))}}
def handle_box(self, node):
return {'$box': self.parse_arg(node, 0, ListField(ListField(IntField())))}
def handle_polygon(self, node):
return {'$polygon': self.parse_arg(node, 0, ListField(ListField(IntField())))}
def _any_center(self, node, center_name):
return {center_name: [self.parse_arg(node, 0, ListField(IntField())),
self.parse_arg(node, 1, IntField())]}
def handle_center(self, node):
return self._any_center(node, '$center')
def handle_centerSphere(self, node):
return self._any_center(node, '$centerSphere')
class GeoShapeParser(AstHandler):
def handle_Call(self, node):
return GeoShapeFuncParser().handle(node)
def handle_List(self, node):
'''
This is a legacy coordinate pair. consider supporting box, polygon, center, centerSphere
'''
return ListField(IntField()).handle(node)
class GeoFunc(Func):
def _any_near(self, node, near_name):
shape = GeoShapeParser().handle(self.get_arg(node, 0))
result = bson.SON({near_name: shape}) # use SON because mongo expects the command before the arguments
if len(node.args) > 1:
distance = self.parse_arg(node, 1, IntField()) # meters
if isinstance(shape, list): # legacy coordinate pair
result['$maxDistance'] = distance
else:
shape['$maxDistance'] = distance
return result
def handle_near(self, node):
return self._any_near(node, '$near')
def handle_nearSphere(self, node):
return self._any_near(node, '$nearSphere')
def handle_geoIntersects(self, node):
return {'$geoIntersects': GeoShapeParser().handle(self.get_arg(node, 0))}
def handle_geoWithin(self, node):
return {'$geoWithin': GeoShapeParser().handle(self.get_arg(node, 0))}
class GenericFunc(StringFunc, IntFunc, ListFunc, DateTimeFunc,
IdFunc, EpochFunc, EpochUTCFunc, GeoFunc):
pass
#---Operators---#
class Operator(AstHandler):
def __init__(self, field):
self.field = field
def handle_Eq(self, node):
'''=='''
return self.field.handle(node)
def handle_NotEq(self, node):
'''!='''
return {'$ne': self.field.handle(node)}
def handle_In(self, node):
'''in'''
try:
elts = node.elts
except AttributeError:
raise ParseError('Invalid value type for `in` operator: {0}'.format(node.__class__.__name__),
col_offset=node.col_offset)
return {'$in': list(map(self.field.handle, elts))}
def handle_NotIn(self, node):
'''not in'''
try:
elts = node.elts
except AttributeError:
raise ParseError('Invalid value type for `not in` operator: {0}'.format(node.__class__.__name__),
col_offset=node.col_offset)
return {'$nin': list(map(self.field.handle, elts))}
class AlgebricOperator(Operator):
def handle_Gt(self, node):
'''>'''
return {'$gt': self.field.handle(node)}
    def handle_Lt(self, node):
'''<'''
return {'$lt': self.field.handle(node)}
def handle_GtE(self, node):
'''>='''
return {'$gte': self.field.handle(node)}
def handle_LtE(self, node):
'''<='''
return {'$lte': self.field.handle(node)}
#---Field-Types---#
class Field(AstHandler):
OP_CLASS = Operator
SPECIAL_VALUES = {'None': None,
'null': None}
def handle_NameConstant(self,node):
try:
return self.SPECIAL_VALUES[str(node.value)]
except KeyError:
raise ParseError('Invalid name: {0}'.format(node.value), node.col_offset, options=list(self.SPECIAL_VALUES))
def handle_Name(self, node):
try:
return self.SPECIAL_VALUES[node.id]
except KeyError:
raise ParseError('Invalid name: {0}'.format(node.id), node.col_offset, options=list(self.SPECIAL_VALUES))
def handle_operator_and_right(self, operator, right):
return self.OP_CLASS(self).resolve(operator)(right)
class GeoField(Field):
def handle_Call(self, node):
return GeoFunc().handle(node)
class AlgebricField(Field):
OP_CLASS = AlgebricOperator
class StringField(AlgebricField):
def handle_Call(self, node):
return StringFunc().handle(node)
def handle_Str(self, node):
return node.s
class IntField(AlgebricField):
def handle_Num(self, node):
return node.n
def handle_Call(self, node):
return IntFunc().handle(node)
class BoolField(Field):
SPECIAL_VALUES = dict(Field.SPECIAL_VALUES,
**{'False': False,
'True': True,
'false': False,
'true': True})
class ListField(Field):
def __init__(self, field=None):
self._field = field
def handle_List(self, node):
return list(map((self._field or GenericField()).handle, node.elts))
def handle_Call(self, node):
return ListFunc().handle(node)
class DictField(Field):
def __init__(self, field=None):
self._field = field
def handle_Dict(self, node):
return dict((StringField().handle(key), (self._field or GenericField()).handle(value))
for key, value in zip(node.keys, node.values))
class DateTimeField(AlgebricField):
def handle_Str(self, node):
return parse_date(node)
def handle_Num(self, node):
return parse_date(node)
def handle_Call(self, node):
return DateTimeFunc().handle(node)
class EpochField(AlgebricField):
def handle_Str(self, node):
return float(parse_date(node).strftime('%s.%f'))
def handle_Num(self, node):
return node.n
def handle_Call(self, node):
return EpochFunc().handle(node)
class EpochUTCField(AlgebricField):
def handle_Str(self, node):
return timegm(parse_date(node).timetuple())
def handle_Num(self, node):
return node.n
def handle_Call(self, node):
return EpochUTCFunc().handle(node)
class IdField(AlgebricField):
def handle_Str(self, node):
return bson.ObjectId(node.s)
def handle_Call(self, node):
return IdFunc().handle(node)
class GenericField(IntField, BoolField, StringField, ListField, DictField, GeoField):
def handle_Call(self, node):
return GenericFunc().handle(node)
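# --- Usage sketch (editor's addition; works on Python versions where ast
# still yields Num/Str nodes, which this module's handlers expect) ---
if __name__ == '__main__':
    # Translate a Python-style expression into a MongoDB query document.
    parser = SchemaFreeParser()
    print(parser.parse("a > 1 and b == 'foo'"))
    # -> {'$and': [{'a': {'$gt': 1}}, {'b': 'foo'}]}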
|
alonho/pql
|
pql/matching.py
|
Python
|
bsd-3-clause
| 14,159
| 0.00678
|
from __future__ import unicode_literals
from . import exceptions
DEFAULT_HOST = 'http://api.acoustid.org/'
FORMATS = ('json', 'jsonp', 'xml')
META = (
'recordings', 'recordingids', 'releases', 'releaseids',
'releasegroups', 'releasegroupids', 'tracks', 'compress',
'usermeta', 'sources'
)
ERRORS = {
1: exceptions.UnknownFormat,
2: exceptions.MissingParameter,
3: exceptions.InvalidFingerprint,
4: exceptions.InvalidClientKey,
5: exceptions.InternalError,
6: exceptions.InvalidUserApiKey,
7: exceptions.InvalidUUID,
8: exceptions.InvalidDuration,
9: exceptions.InvalidBitrate,
10: exceptions.InvalidForeignID,
11: exceptions.InvalidMaxDurationDiff,
12: exceptions.NotAllowed,
13: exceptions.ServiceUnavailable,
14: exceptions.TooManyRequests,
}
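# Illustrative lookup (editor's addition; raise_for_code is a hypothetical
# helper, not part of the original module):
#
#   def raise_for_code(code, message):
#       raise ERRORS.get(code, Exception)(message)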
|
mattdennewitz/python-acoustid-api
|
acoustid_api/consts.py
|
Python
|
mit
| 819
| 0
|
import sys
import time
from naoqi import ALProxy
IP = "nao.local"
PORT = 9559
if (len(sys.argv) < 2):
print "Usage: 'python RecordAudio.py nume'"
sys.exit(1)
fileName = "/home/nao/" + sys.argv[1] + ".wav"
aur = ALProxy("ALAudioRecorder", IP, PORT)
channels = [0,0,1,0]
aur.startMicrophonesRecording(fileName, "wav", 160000, channels)
c=raw_input("Sfarsit?")
aur.stopMicrophonesRecording()
c=raw_input("play?")
aup = ALProxy("ALAudioPlayer", IP, PORT)
#Launchs the playing of a file
aup.playFile(fileName,0.5,-1.0)
c=raw_input("gata?")
#Launchs the playing of a file
#aup.playFile("/usr/share/naoqi/wav/random.wav")
#Launchs the playing of a file on the left speaker to a volume of 50%
#aup.playFile("/usr/share/naoqi/wav/random.wav",0.5,-1.0)
|
ioanaantoche/muhaha
|
ioana/RecordAudio.py
|
Python
|
gpl-2.0
| 757
| 0.018494
|
import os, sys, shutil
import zipfile
from zipfile import ZipFile
from urllib import urlretrieve
from subprocess import Popen, PIPE
from distutils.cmd import Command
def zip_directory(dir, zip_file):
zip = ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED)
root_len = len(os.path.abspath(dir))
for root, dirs, files in os.walk(dir):
archive_root = os.path.abspath(root)[root_len:]
for f in files:
fullpath = os.path.join(root, f)
archive_name = os.path.join(archive_root, f)
zip.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
zip.close()
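# Illustrative call (editor's addition; paths are hypothetical):
#   zip_directory('build/PyMT-0.5-w32', 'dist/PyMT-0.5-w32.zip')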
class WindowsPortableBuild(Command):
description = "custom build command that builds portable win32 package"
user_options = [
('dist-dir=', None,
"path of dist directory to use for building portable pymt, the end result will be output to this driectory. default to cwd."),
('deps-url=', None,
"url of binary dependancies for portable pymt package default: http://pymt.googlecode.com/files/portable-deps-win32.zip"),
('no-cext', None,
"flag to disable building of c extensions"),
('no-mingw', None,
"flag to disable bundling of mingw compiler for compiling c/cython extensions")
]
def initialize_options(self):
self.dist_dir = None
self.deps_url = None
self.no_cext = None
self.no_mingw = None
def finalize_options(self):
if not self.deps_url:
self.deps_url = 'http://pymt.googlecode.com/files/portable-deps-win32.zip'
if not self.dist_dir:
self.dist_dir = os.getcwd()
self.src_dir = os.path.dirname(sys.modules['__main__'].__file__)
        self.dist_name = self.distribution.get_fullname() # e.g. PyMT-0.5 (name and version passed to setup())
self.build_dir = os.path.join(self.dist_dir, self.dist_name+'-w32')
def run(self):
print "---------------------------------"
print "Building PyMT Portable for Win 32"
print "---------------------------------"
print "\nPreparing Build..."
print "---------------------------------------"
if os.path.exists(self.build_dir):
print "*Cleaning old build dir"
shutil.rmtree(self.build_dir, ignore_errors=True)
print "*Creating build directory:"
print " "+self.build_dir
os.makedirs(self.build_dir)
print "\nGetting binary dependencies..."
print "---------------------------------------"
print "*Downloading:", self.deps_url
        # report_hook is called every time a piece of the file is downloaded, to print progress
def report_hook(block_count, block_size, total_size):
p = block_count*block_size*100.0/total_size
print "\b\b\b\b\b\b\b\b\b", "%06.2f"%p +"%",
print " Progress: 000.00%",
        urlretrieve(self.deps_url, # location of binary dependencies needed for portable pymt
                    os.path.join(self.build_dir,'deps.zip'), # tmp file to store the archive
reporthook=report_hook)
print " [Done]"
print "*Extracting binary dependencies..."
zf = ZipFile(os.path.join(self.build_dir,'deps.zip'))
zf.extractall(self.build_dir)
zf.close()
if self.no_mingw:
print "*Excluding MinGW from portable distribution (--no-mingw option is set)"
shutil.rmtree(os.path.join(self.build_dir, 'MinGW'), ignore_errors=True)
print "\nPutting pymt into portable environment"
print "---------------------------------------"
print "*Building pymt source distribution"
sdist_cmd = [sys.executable, #path to python.exe
os.path.join(self.src_dir,'setup.py'), #path to setup.py
'sdist', #make setup.py create a src distribution
'--dist-dir=%s'%self.build_dir] #put it into build folder
Popen(sdist_cmd, stdout=PIPE, stderr=PIPE).communicate()
print "*Placing pymt source distribution in portable context"
src_dist = os.path.join(self.build_dir,self.dist_name)
zf = ZipFile(src_dist+'.zip')
zf.extractall(self.build_dir)
zf.close()
if self.no_mingw or self.no_cext:
print "*Skipping C Extension build (either --no_cext or --no_mingw option set)"
else:
print "*Compiling C Extensions inplace for portable distribution"
cext_cmd = [sys.executable, #path to python.exe
'setup.py',
'build_ext', #make setup.py create a src distribution
'--inplace'] #do it inplace
            # this time it runs the setup.py inside the source distribution
            # that has been generated inside the build dir (to generate ext
            # for the target, instead of the source we're building from)
Popen(cext_cmd, cwd=src_dist, stdout=PIPE, stderr=PIPE).communicate()
print "\nFinalizing pymt portable distribution..."
print "---------------------------------------"
print "*Copying scripts and resources"
#copy launcher script and readme to portable root dir/build dir
pymt_bat = os.path.join(src_dist,'pymt','tools','packaging','win32', 'pymt.bat')
shutil.copy(pymt_bat, os.path.join(self.build_dir, 'pymt.bat'))
readme = os.path.join(src_dist,'pymt','tools','packaging','win32', 'README.txt')
shutil.copy(readme, os.path.join(self.build_dir, 'README.txt'))
#rename pymt directory to "pymt"
os.rename(src_dist, os.path.join(self.build_dir,'pymt'))
print "*Removing intermediate file"
os.remove(os.path.join(self.build_dir,'deps.zip'))
os.remove(os.path.join(self.build_dir,src_dist+'.zip'))
print "*Compressing portable distribution target"
target = os.path.join(self.dist_dir, self.dist_name+"-w32.zip")
zip_directory(self.build_dir, target)
print "*Writing target:", target
print "*Removing build dir"
shutil.rmtree(self.build_dir, ignore_errors=True)
|
nuigroup/pymt-widgets
|
pymt/tools/packaging/win32/build.py
|
Python
|
lgpl-3.0
| 6,313
| 0.010771
|
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' Nuage specific exceptions '''
from neutron.common import exceptions as n_exc
class OperationNotSupported(n_exc.InvalidConfigurationOption):
message = _("Nuage Plugin does not support this operation: %(msg)s")
class NuageBadRequest(n_exc.BadRequest):
message = _("Bad request: %(msg)s")
|
samsu/neutron
|
plugins/nuage/common/exceptions.py
|
Python
|
apache-2.0
| 919
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-26 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Occupancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room_name', models.CharField(max_length=255)),
('occupancy', models.IntegerField()),
('timestamp', models.DateField()),
],
),
]
|
christianknu/eitu
|
eitu/migrations/0001_initial.py
|
Python
|
mit
| 662
| 0.001511
|
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
if subm.grading.means_passed:
return green_label
else:
return red_label
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
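# Template usage sketch (editor's addition):
#   {% load projecttags %}
#   <span class="{{ submission|state_label_css }}">{{ submission.state }}</span>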
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
return {'submission': submission}
|
troeger/opensubmit
|
web/opensubmit/templatetags/projecttags.py
|
Python
|
agpl-3.0
| 1,869
| 0.000535
|
"""
This script is a starting point for new Blocks users already familiar with
Machine Learning and Theano.
We demonstrate how to use blocks to train a generic set of parameters (theano
shared variables) that influence some arbitrary cost function (theano
symbolic variable), so you can start using blocks features (e.g. monitoring,
extensions, training algorithms) with your Theano code today.
To run an experiment, we simply construct a main_loop.MainLoop and call its
run() method. It suffices to pass the MainLoop a blocks.model.Model
(which needs the cost), a blocks.algorithms.TrainingAlgorithm (which needs the
cost and parameters), and a fuel.streams.DataStream*
As it is, the script will run indefinitely, with no output. You can interrupt
training at any time with Ctrl+C, or termination conditions can be added
via extensions.
*The DataStream object is part of the partner library Fuel
(https://github.com/mila-udem/fuel).
"""
import numpy
np = numpy
import theano
import theano.tensor as T
# (Here we make a toy dataset of two 2D gaussians with different means.)
num_examples = 1000
batch_size = 100
means = np.array([[-1., -1.], [1, 1]])
std = 0.5
labels = np.random.randint(size=num_examples, low=0, high=2)  # high is exclusive: labels in {0, 1}
features = means[labels, :] + std * np.random.normal(size=(num_examples, 2))
labels = labels.reshape((num_examples, 1)).astype(theano.config.floatX)
features = features.astype(theano.config.floatX)
# Define "data_stream"
from collections import OrderedDict
from fuel.datasets import IndexableDataset
# The names here (e.g. 'name1') need to match the names of the variables which
# are the roots of the computational graph for the cost.
dataset = IndexableDataset(
OrderedDict([('name1', features), ('name2', labels)]))
from fuel.streams import DataStream, ForceFloatX
from fuel.schemes import SequentialScheme
data_stream = ForceFloatX(DataStream(dataset,
iteration_scheme=SequentialScheme(
dataset.num_examples, batch_size)))
# Define "cost" and "parameters"
# (We use logistic regression to classify points by distribution)
inputs = T.matrix('name1')
targets = T.matrix('name2')
ninp, nout = 2, 1
W = theano.shared(.01*np.random.uniform(
size=((ninp, nout))).astype(theano.config.floatX))
b = theano.shared(np.zeros(nout).astype(theano.config.floatX))
output = T.nnet.sigmoid(T.dot(inputs, W) + b)
# a theano symbolic expression
cost = T.mean(T.nnet.binary_crossentropy(output, targets))
# a list of theano.shared variables
parameters = [W, b]
# wrap everything in Blocks objects and run!
from blocks.model import Model
model = Model([cost])
from blocks.algorithms import GradientDescent, Scale
algorithm = GradientDescent(cost=cost,
parameters=parameters,
step_rule=Scale(learning_rate=.01))
from blocks.main_loop import MainLoop
my_loop = MainLoop(model=model,
data_stream=data_stream,
algorithm=algorithm)
my_loop.run()
|
capybaralet/Blocks_quickstart
|
basic_blocks_script.py
|
Python
|
mit
| 3,082
| 0.00292
|
# -----------------------------------------------------------------------------
# File name: main.py #
# Date created: 3/20/2014 #
# Date last modified: 1/18/2015 #
# #
# Author: Tony Wu (Xiangbo) #
# Email: xb.wu@mail.utoronto.ca #
# #
# Python version: developed under 3.4, additionally tested under 2.7 #
# Dependencies: Pygame 1.9.2, rsclasses.py #
# #
# License: GNU GPL v2.0 #
# #
# Copyright (c) 2014-2015 [Tony Wu], All Right Reserved #
# -----------------------------------------------------------------------------
if __name__ == "__main__":
import pygame
import sys
import time
import pygame.mixer
from math import *
from pygame.locals import *
pygame.init()
pygame.mixer.init(frequency=44100, size=-16, channels=2, buffer=4096)
    # rsclasses.py must be present
from rsclasses import *
# Constants - use default value unless debugging
HORI_RES = 800 # Horizontal Resolution
VERT_RES = 600 # Vertical Resolution
FONT = "timesnewroman" # Game font
FPS = 60 # Frames-per-second
    # The following image asset files must be present
bg = "background.jpg"
wstar = "whitestar.png"
rstar = "redstar.png"
ystar = "yellowstar.png"
bstar = "bluestar.png"
bkship1 = "minienemy1.png"
pship = "pship.png"
pshipfl = "pshipfirelaser.png"
pshipfly = "pshipfly.png"
pshipflyback = "pshipflyback.png"
pro1 = "projectile1.png"
pro1f = "projectile1flash.png"
las1 = "laser1.png"
lasr = "laserred.png"
em1 = "enemy1.png"
em2 = "enemy2.png"
em3 = "enemy3.png"
em3f = "enemy3fire.png"
em4 = "enemy4.png"
ex1 = "explosion.png"
bs1 = "boss1.png"
bs2 = "boss2.png"
bs2shoot = "boss2shoot.png"
bs3 = "boss3.png"
bs4 = "boss4.png"
bs4r = "boss4ram.png"
bf = "bossfinalyellow.png"
bfr = "bossfinal.png"
isplash = "introsplash.jpg"
isplash2 = "poweredbysource.jpg"
sscreen = "startscreen.jpg"
hscreen = "helpscreen.jpg"
b1w = "boss1red.png"
b2w = "boss2red.png"
b2sw = "boss2shootred.png"
b3w = "boss3red.png"
b4w = "boss4red.png"
hbar = "healthbar.png"
ebar = "energybar.png"
eunit = "energyunit.png"
eunitred = "energyunitred.png"
efire = "enginefire.png"
efireb = "enginefireblue.png"
menus = "menuselector.png"
menusf = "menuselectorflash.png"
creds = "creditscreen.jpg"
dscreen = "deathscreen.jpg"
efl = "enginefirelow.png"
wscrn = "winscreen.png"
    # The following sound asset files must be present
introsound = pygame.mixer.Sound("introlow.wav")
menutheme = pygame.mixer.Sound("menutheme.wav")
bossfight = pygame.mixer.Sound("bossfight.wav")
boss2fight = pygame.mixer.Sound("boss2theme.wav")
explosionS = pygame.mixer.Sound("explosion.wav")
laserFX = pygame.mixer.Sound("laserfx.wav")
leveltheme = pygame.mixer.Sound("leveltheme.wav")
boss3fight = pygame.mixer.Sound("boss3theme.wav")
boss4fight = pygame.mixer.Sound("boss4theme.wav")
bombFX = pygame.mixer.Sound("nuke.wav")
explosionS.set_volume(0.15)
laserFX.set_volume(1.0)
# Setting up game window
screen = pygame.display.set_mode((HORI_RES, VERT_RES), 0, 32)
# Setting up fonts
stdfont = pygame.font.SysFont(FONT, 24)
stdfont_bold = pygame.font.SysFont(FONT, 24)
stdfont_bold.set_bold(True)
# Generating pygame surfaces
# Stars
background = pygame.image.load(bg).convert()
whitestar = pygame.image.load(wstar).convert_alpha()
redstar = pygame.image.load(rstar).convert_alpha()
yellowstar = pygame.image.load(ystar).convert_alpha()
bluestar = pygame.image.load(bstar).convert_alpha()
# Ships and projectiles
backgroundship1 = pygame.image.load(bkship1).convert_alpha()
playership = pygame.image.load(pship).convert_alpha()
playershipfirelaser = pygame.image.load(pshipfl).convert_alpha()
playershipfly = pygame.image.load(pshipfly).convert_alpha()
playershipflyback = pygame.image.load(pshipflyback).convert_alpha()
rocket = pygame.image.load(pro1).convert_alpha()
rocketflash = pygame.image.load(pro1f).convert_alpha()
enemy1 = pygame.image.load(em1).convert_alpha()
enemy2 = pygame.image.load(em2).convert_alpha()
enemy3 = pygame.image.load(em3).convert_alpha()
enemy3fire = pygame.image.load(em3f).convert_alpha()
enemy4 = pygame.image.load(em4).convert_alpha()
explosion = pygame.image.load(ex1).convert_alpha()
boss1 = pygame.image.load(bs1).convert_alpha()
boss2 = pygame.image.load(bs2).convert_alpha()
boss2shoot = pygame.image.load(bs2shoot).convert_alpha()
boss3 = pygame.image.load(bs3).convert_alpha()
boss4 = pygame.image.load(bs4).convert_alpha()
boss4ram = pygame.image.load(bs4r).convert_alpha()
bossfinal = pygame.image.load(bf).convert_alpha()
bossfinalred = pygame.image.load(bfr).convert_alpha()
introsplash = pygame.image.load(isplash).convert()
introsplash2 = pygame.image.load(isplash2).convert()
startscreen = pygame.image.load(sscreen).convert()
helpscreen = pygame.image.load(hscreen).convert()
boss1white = pygame.image.load(b1w).convert_alpha()
boss2white = pygame.image.load(b2w).convert_alpha()
boss2shootwhite = pygame.image.load(b2sw).convert_alpha()
boss3white = pygame.image.load(b3w).convert_alpha()
boss4white = pygame.image.load(b4w).convert_alpha()
laser1 = pygame.image.load(las1).convert_alpha()
laserred = pygame.image.load(lasr).convert_alpha()
laserredver = pygame.transform.rotate(pygame.image.load(lasr).
convert_alpha(), 90)
enginefire = pygame.image.load(efire).convert_alpha()
enginefireblue = pygame.image.load(efireb).convert_alpha()
enginefirebig = pygame.transform.scale2x(enginefire).convert_alpha()
enginefirelow = pygame.image.load(efl).convert_alpha()
# In-game UI
ui_healthbar = pygame.image.load(hbar).convert_alpha()
ui_energybar = pygame.image.load(ebar).convert_alpha()
ui_energyunit = pygame.image.load(eunit).convert_alpha()
ui_energyunitred = pygame.image.load(eunitred).convert_alpha()
# Menu UI
ui_menuselector = pygame.image.load(menus).convert_alpha()
ui_menuselectorflash = pygame.image.load(menusf).convert_alpha()
creditscreen = pygame.image.load(creds).convert()
deathscreen = pygame.image.load(dscreen).convert()
winscreen = pygame.image.load(wscrn).convert()
clock = pygame.time.Clock()
pause = False
a = cstar(30, HORI_RES, VERT_RES)
laser = claser()
# For movement
wkey = False
akey = False
skey = False
dkey = False
win = False
# For missile weapon
isfire = False
# For laser weapon
islaser = False
timer = -400
(ex, ey) = (0, 0) # Used for temp store of explosion locations
score = 0 # Player's score
hitinframe = False # Used to trigger collision warning
collidelabeldelay = 0
stage = 0
# 1 -- FIRST WAVE
# 2 -- BOSS (#1)
# 3 -- SECOND WAVE
# 4 -- BOSS (#2)
# 5 -- THIRD WAVE
# 6 -- BOSS (#3)
# 7 -- FOURTH WAVE
# 8 -- BOSS (#4)
quota = 0
flash = 0
# introsplash
pygame.display.set_caption("REDSHIFT v1.1")
introsound.set_volume(1.0)
introsound.play()
for i in range(0, 50):
screen.blit(background, (0, 0))
pygame.display.update()
clock.tick(FPS)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
for i in range(0, 240):
screen.blit(introsplash, (0, 0))
pygame.display.update()
clock.tick(FPS)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
for i in range(0, 45):
screen.blit(background, (0, 0))
pygame.display.update()
clock.tick(FPS)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
for i in range(0, 180):
screen.blit(introsplash2, (0, 0))
pygame.display.update()
clock.tick(FPS)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
for i in range(0, 30):
screen.blit(background, (0, 0))
pygame.display.update()
clock.tick(FPS)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
lab_start = stdfont_bold.render("START GAME", True, (0, 160, 80))
lab_help = stdfont_bold.render("VIEW HELP", True, (0, 160, 80))
lab_credits = stdfont_bold.render("CREDITS", True, (0, 160, 80))
lab_start_red = stdfont_bold.render("START GAME", True, (200, 80, 80))
lab_help_red = stdfont_bold.render("VIEW HELP", True, (200, 80, 80))
lab_credits_red = stdfont_bold.render("CREDITS", True, (200, 80, 80))
start = False
men = C_Menu(1, 1, 0)
menutheme.set_volume(0.15)
menutheme.play()
# A trigger variable for starting the game
gamestart = False
while not start:
# This loop runs while in the main menu
clock.tick(FPS)
men.timer += 1
if men.location == 1:
screen.blit(startscreen, (0, 0))
if men.location == 2:
screen.blit(helpscreen, (0, 0))
if men.location == 3:
screen.blit(creditscreen, (0, 0))
if men.location == 1:
if men.point == 1:
if men.timer % 6 < 3:
screen.blit(ui_menuselector, (305, 330))
else:
screen.blit(ui_menuselectorflash, (305, 330))
if men.point == 2:
if men.timer % 6 < 3:
screen.blit(ui_menuselector, (305, 380))
else:
screen.blit(ui_menuselectorflash, (305, 380))
if men.point == 3:
if men.timer % 6 < 3:
screen.blit(ui_menuselector, (305, 430))
else:
screen.blit(ui_menuselectorflash, (305, 430))
if men.location == 1:
if men.point == 1:
screen.blit(lab_start_red, (335, 330))
else:
screen.blit(lab_start, (335, 330))
if men.point == 2:
screen.blit(lab_help_red, (342, 380))
else:
screen.blit(lab_help, (342, 380))
if men.point == 3:
screen.blit(lab_credits_red, (359, 430))
else:
screen.blit(lab_credits, (359, 430))
pygame.display.update()
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_DOWN:
if men.point < 3:
men.point += 1
laserFX.play()
if event.key == K_UP:
if men.point > 1:
men.point -= 1
laserFX.play()
if event.key == K_RETURN:
if men.point == 1:
start = True
menutheme.stop()
leveltheme.play()
if men.point == 2:
men.location = 2
if men.point == 3:
men.location = 3
if event.key == K_ESCAPE:
if men.location == 2 or men.location == 3:
men.location = 1
if event.type == QUIT:
pygame.quit()
sys.exit()
playerobj = cplayer(100, 250, 100, 100)
# Initializing messages
getreadymessage1 = stdfont.render("The Battle Approaches...",
True, (255, 255, 255))
getreadymessage = stdfont.render("PREPARE YOURSELF!", True, (255, 255, 0))
endmsg1 = stdfont.render("People die in war.", True, (0, 0, 0))
endmsg2 = stdfont.render("Good night, sweet prince.", True, (0, 0, 0))
# Initializing enemy objects
e1 = cenemy1(HORI_RES, VERT_RES, 5)
e2 = cenemy2(HORI_RES, VERT_RES, 3)
e3 = cenemy3(HORI_RES, VERT_RES)
b1 = cboss1(HORI_RES, VERT_RES)
b2 = cboss2(HORI_RES, VERT_RES)
e4 = cenemy4(HORI_RES, VERT_RES, 2)
# Explosion object
expobj = C_Explosion()
# Detector
detect = cDetect()
# Main game loop
while True:
scorelabel = stdfont.render("Hits: " + str(score), True,
(255, 255, 255))
healthlabel = stdfont.render("Health: " + str(playerobj.health), True,
(255, 255, 255))
energylabel = stdfont.render("Energy: " + str(playerobj.energy), True,
(225, 225, 255))
collidelabel = stdfont_bold.render("Collision Detected!", True,
(225, 0, 0))
clock.tick(FPS)
timer += 1
if flash < 11:
flash += 1
# -----------------------------------------------------
# start of stage control code
# Starting the game after the initial delay
if stage == 0 and timer > 0:
stage = 1
    # Refreshing enemy 1 during STAGE 1 (enemy type 1 reappears in STAGE 3)
    # Five instances of enemy type 1, staggered in by the timer
if stage == 1:
if timer > 0:
if not e1.death[0]:
e1.refresh(0)
if e1.xpos[0] < -40:
e1.respawn(0)
if timer > 60:
if not e1.death[1]:
e1.refresh(1)
if e1.xpos[1] < -40:
e1.respawn(1)
if timer > 80:
if not e1.death[2]:
e1.refresh(2)
if e1.xpos[2] < -40:
e1.respawn(2)
if timer > 100:
if not e1.death[3]:
e1.refresh(3)
if e1.xpos[3] < -40:
e1.respawn(3)
if timer > 120:
if not e1.death[4]:
e1.refresh(4)
if e1.xpos[4] < -40:
e1.respawn(4)
# When kill quota is reached in stage 1
if stage == 1 and quota > 10:
for ennum in range(0, 5):
expobj.addexplosion(e1.xpos[ennum], e1.ypos[ennum])
quota = 0
b1 = cboss1(HORI_RES, VERT_RES)
stage = 2
leveltheme.stop()
bossfight.play()
# When boss1 dies during STAGE 2
if stage == 2:
b1.refresh()
if b1.health < 1:
stage = 3
e1.respawn(0)
e2.respawn(0)
delay1 = 0
bossfight.stop()
leveltheme.play()
# Refreshing enemy2 during STAGE 3
if stage == 3:
if not e1.death[0]:
e1.refresh(0)
if e1.xpos[0] < -40:
e1.respawn(0)
if not e2.death[0]:
e2.refresh(0)
if e2.xpos[0] < -40:
e2.respawn(0)
if delay1 < 76:
delay1 += 1
if delay1 == 50:
e2.respawn(1)
if delay1 == 75:
e2.respawn(2)
delay1 = 76
if delay1 > 50:
if not e2.death[1]:
e2.refresh(1)
if e2.xpos[1] < -40:
e2.respawn(1)
if delay1 > 75:
if not e2.death[2]:
e2.refresh(2)
if e2.xpos[2] < -40:
e2.respawn(2)
# When kill quota is reached during STAGE 3
if stage == 3 and quota > 10:
expobj.addexplosion(e1.xpos[0], e1.ypos[0])
for ennum in range(0, 3):
expobj.addexplosion(e2.xpos[ennum], e2.ypos[ennum])
quota = 0
b2 = cboss2(HORI_RES, VERT_RES)
stage = 4
leveltheme.stop()
boss2fight.play()
# Refreshing boss2 during STAGE 4
if stage == 4:
b2.refresh()
if b2.health < 1:
stage = 5
quota = 0
e3.respawn()
e2.respawn(0)
boss2fight.stop()
leveltheme.play()
if stage == 5:
if not e3.death:
e3.refresh()
if e3.xpos < -40:
e3.respawn()
if not e2.death[0]:
e2.refresh(0)
if e2.xpos[0] < -40:
e2.respawn(0)
if quota > 10:
stage = 6
quota = 0
b3 = cboss3(HORI_RES, VERT_RES)
leveltheme.stop()
boss3fight.play()
if stage == 6:
b3.refresh()
if b3.health < 1:
quota = 0
stage = 7
delay1 = 0
e4.respawn(0)
boss3fight.stop()
leveltheme.play()
if stage == 7:
if delay1 < 50:
delay1 += 1
if delay1 == 50:
e4.respawn(1)
delay1 = 51
if not e4.death[0]:
e4.refresh(0, playerobj.ypos)
if e4.xpos[0] < -40:
e4.respawn(0)
if delay1 == 51:
if not e4.death[1]:
e4.refresh(1, playerobj.ypos)
if e4.xpos[1] < -40:
e4.respawn(1)
if quota > 10:
stage = 8
quota = 0
b4 = cboss4(HORI_RES, VERT_RES)
leveltheme.stop()
boss4fight.play()
if stage == 8:
b4.refresh()
if b4.health < 1:
stage = 9
quota = 0
e2.respawn(0)
e3.respawn()
e4.respawn(0)
boss4fight.stop()
leveltheme.play()
if stage == 9:
if not e2.death[0]:
e2.refresh(0)
if not e3.death:
e3.refresh()
if not e4.death[0]:
e4.refresh(0, playerobj.ypos)
if e2.xpos[0] < -40:
e2.respawn(0)
if e3.xpos < -40:
e3.respawn()
if e4.xpos[0] < -40:
e4.respawn(0)
if quota > 10:
stage = 10
quota = 0
b5 = cbossf(HORI_RES, VERT_RES)
bombFX.play()
if stage == 10:
b5.refresh()
if b5.done:
win = True
# End of stage control code
# -----------------------------------------------------
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_w:
wkey = True
if event.key == K_s:
skey = True
if event.key == K_a:
akey = True
if event.key == K_d:
dkey = True
if (event.key == K_SPACE) and (not isfire):
r = cmissile(playerobj.xpos, playerobj.ypos)
isfire = True
if event.key == K_p:
if playerobj.energy > 0:
laser.show = True
islaser = True
if event.type == KEYUP:
if event.key == K_w:
wkey = False
if event.key == K_s:
skey = False
if event.key == K_a:
akey = False
if event.key == K_d:
dkey = False
if event.key == K_p:
laser.show = False
islaser = False
a.refresh()
if playerobj.energy < 1:
laser.show = False
islaser = False
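    # Passively regenerate energy (1 point every 3 frames) while the laser is off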
if (not islaser) and playerobj.energy < 100:
if timer % 3 == 0:
playerobj.energy += 1
if isfire:
r.refresh()
screen.blit(background, (0, 0))
for n in range(0, 18):
screen.blit(whitestar, (a.xcors[n], a.ycors[n]))
for n in range(18, 21):
screen.blit(redstar, (a.xcors[n], a.ycors[n]))
for n in range(21, 24):
screen.blit(yellowstar, (a.xcors[n], a.ycors[n]))
for n in range(24, 28):
screen.blit(bluestar, (a.xcors[n], a.ycors[n]))
for n in range(28, 30):
screen.blit(backgroundship1, (a.xcors[n], a.ycors[n]))
# Ship movement
if akey:
playerobj.moveleft()
if dkey:
playerobj.moveright()
if wkey:
playerobj.moveup()
if skey:
playerobj.movedown()
# Laser following
laser.refresh(playerobj.xpos, playerobj.ypos+13)
if (laser.show):
if (timer % 6 == 0 or timer % 6 == 1 or timer % 6 == 2):
screen.blit(laser1, (laser.xpos, laser.ypos))
if (timer % 6 == 0):
laserFX.play()
# When missile hits
if (isfire):
if (r.death):
isfire = False
    # Missile is fired / collision detection
if (isfire or laser.show) and stage > 0:
if isfire:
if (timer % 4 < 2):
screen.blit(rocket, (r.xpos, r.ypos))
else:
screen.blit(rocketflash, (r.xpos, r.ypos))
if islaser:
playerobj.energy -= 1
if stage == 1:
if isfire:
for ennum in range(0, 5):
if abs(r.xpos - e1.xpos[ennum]) < 20 and \
abs(r.ypos - e1.ypos[ennum] - 10) < 20:
e1.health[ennum] -= 10
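        # Laser hits are resolved on vertical distance only (the beam is treated as full-width)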
if islaser:
for ennum in range(0, 5):
if abs(laser.ypos - e1.ypos[ennum] - 10) < 20:
e1.health[ennum] -= 1
for ennum in range(0, 5):
if e1.health[ennum] < 1:
expobj.addexplosion(e1.xpos[ennum], e1.ypos[ennum])
e1.death[ennum] = True
if isfire:
r.death = True
score += 1
quota += 1
if stage == 1:
e1.respawn(ennum)
e1.death[ennum] = False
if stage == 2:
if isfire:
if abs(r.xpos - b1.xpos) < 20 and \
abs(r.ypos - b1.ypos - 28) < 30:
b1.health -= 1
r.death = True
score += 1
flash = 0
expobj.addexplosion(b1.xpos, b1.ypos)
if stage == 3:
if isfire:
for ennum in range(0, 3):
if abs(r.xpos - e2.xpos[ennum]) < 20 and \
abs(r.ypos - e2.ypos[ennum] - 10) < 20:
e2.health[ennum] -= 15
if abs(r.xpos - e1.xpos[0]) < 20 and \
abs(r.ypos - e1.ypos[0] - 10) < 20:
e1.health[0] -= 10
if islaser:
for ennum in range(0, 3):
if abs(laser.ypos - e2.ypos[ennum] - 10) < 20:
e2.health[ennum] -= 1
if abs(laser.ypos - e1.ypos[0] - 10) < 20:
e1.health[0] -= 1
if e1.health[0] < 1:
expobj.addexplosion(e1.xpos[0], e1.ypos[0])
e1.death[0] = True
if isfire:
r.death = True
score += 1
quota += 1
if stage == 3:
e1.respawn(0)
e1.death[0] = False
for ennum in range(0, 3):
if e2.health[ennum] < 1:
expobj.addexplosion(e2.xpos[ennum], e2.ypos[ennum])
e2.death[ennum] = True
                    if isfire:
                        r.death = True
score += 1
quota += 1
if stage == 3:
e2.respawn(ennum)
e2.death[ennum] = False
if stage == 4:
if isfire:
if abs(r.xpos - b2.xpos) < 20 and \
abs(r.ypos - b2.ypos - 28) < 30:
b2.health -= 1
r.death = True
score += 1
flash = 0
expobj.addexplosion(b2.xpos, b2.ypos)
if stage == 5:
if isfire:
if abs(r.xpos - e3.xpos) < 20 and \
abs(r.ypos - e3.ypos - 10) < 20:
e3.health -= 10
if abs(r.xpos - e2.xpos[0]) < 20 and \
abs(r.ypos - e2.ypos[0] - 10) < 20:
e2.health[0] -= 15
if islaser:
if abs(laser.ypos - e3.ypos - 10) < 20:
e3.health -= 1
if abs(laser.ypos - e2.ypos[0] - 10) < 20:
e2.health[0] -= 1
if e3.health < 1:
expobj.addexplosion(e3.xpos, e3.ypos)
e3.death = True
                if isfire:
                    r.death = True
score += 1
quota += 1
if stage == 5:
e3.respawn()
e3.death = False
if e2.health[0] < 1:
expobj.addexplosion(e2.xpos[0], e2.ypos[0])
e2.death[0] = True
                if isfire:
                    r.death = True
score += 1
quota += 1
if stage == 5:
e2.respawn(0)
e2.death[0] = False
if stage == 6:
if isfire:
if abs(r.xpos - b3.xpos) < 40 and \
abs(r.ypos - b3.ypos - 70) < 40:
b3.health -= 1
r.death = True
score += 1
flash = 0
expobj.addexplosion(b3.xpos, b3.ypos + 30)
if stage == 7:
if isfire:
if abs(r.xpos - e4.xpos[0]) < 20 and \
                    abs(r.ypos - e4.ypos[0] - 10) < 20:
e4.health[0] -= 10
if abs(r.xpos - e4.xpos[1]) < 20 and \
abs(r.ypos - e4.ypos[1] - 10) < 20:
e4.health[1] -= 10
if islaser:
if abs(laser.ypos - e4.ypos[0] - 10) < 20:
e4.health[0] -= 1
if abs(laser.ypos - e4.ypos[1] - 10) < 20:
e4.health[1] -= 1
if e4.health[0] < 1:
expobj.addexplosion(e4.xpos[0], e4.ypos[0])
e4.death[0] = True
if isfire:
r.death = True
score += 1
quota += 1
if stage == 7:
e4.respawn(0)
e4.death[0] = False
if e4.health[1] < 1:
expobj.addexplosion(e4.xpos[1], e4.ypos[1])
e4.death[1] = True
if isfire:
r.death = True
score += 1
quota += 1
if stage == 7:
e4.respawn(1)
e4.death[1] = False
if stage == 8:
if isfire:
if abs(r.xpos - b4.xpos) < 40 and \
abs(r.ypos - b4.ypos - 70) < 40:
b4.health -= 1
r.death = True
score += 1
flash = 0
expobj.addexplosion(b4.xpos, b4.ypos + 30)
if stage == 9:
if isfire:
if abs(r.xpos - e2.xpos[0]) < 20 and \
abs(r.ypos - e2.ypos[0] - 10) < 20:
e2.health[0] -= 15
if abs(r.xpos - e3.xpos) < 20 and \
abs(r.ypos - e3.ypos - 10) < 20:
e3.health -= 10
if abs(r.xpos - e4.xpos[0]) < 20 and \
abs(r.ypos - e4.ypos[0] - 10) < 20:
e4.health[0] -= 10
if islaser:
if abs(laser.ypos - e2.ypos[0] - 10) < 20:
e2.health[0] -= 1
if abs(laser.ypos - e3.ypos - 10) < 20:
e3.health -= 1
if abs(laser.ypos - e4.ypos[0] - 10) < 20:
e4.health[0] -= 1
if e2.health[0] < 1:
expobj.addexplosion(e2.xpos[0], e2.ypos[0])
e2.death[0] = True
                if isfire:
                    r.death = True
score += 1
quota += 1
if stage == 9:
e2.respawn(0)
e2.death[0] = False
if e4.health[0] < 1:
expobj.addexplosion(e4.xpos[0], e4.ypos[0])
e4.death[0] = True
if isfire:
r.death = True
score += 1
quota += 1
if stage == 9:
e4.respawn(0)
e4.death[0] = False
if e3.health < 1:
expobj.addexplosion(e3.xpos, e3.ypos)
e3.death = True
                if isfire:
                    r.death = True
score += 1
quota += 1
if stage == 9:
e3.respawn()
e3.death = False
# When explosion happens
if stage > 0:
expobj.refresh()
if expobj.hasexplosion():
for i in range(expobj.numofexplosion):
screen.blit(explosion,
(expobj.ongoingxp[i][0],
expobj.ongoingxp[i][1] - 20))
explosionS.play()
# Rendering enemies during stage 1
if stage == 1:
# 5 instances of interceptor
for ennum in range(0, 5):
if not e1.death[ennum]:
screen.blit(enemy1, (e1.xpos[ennum], e1.ypos[ennum]))
if timer % 4 < 2:
screen.blit(enginefire,
(e1.xpos[ennum] + 43, e1.ypos[ennum] + 20))
screen.blit(enginefire,
(e1.xpos[ennum] + 43, e1.ypos[ennum]))
# Rendering enemies during stage 3
if stage == 3:
# 1 instance of interceptor
if not e1.death[0]:
screen.blit(enemy1, (e1.xpos[0], e1.ypos[0]))
if timer % 4 < 2:
screen.blit(enginefire, (e1.xpos[0] + 43, e1.ypos[0] + 20))
screen.blit(enginefire, (e1.xpos[0] + 43, e1.ypos[0]))
# 3 instances of wave
for ennum in range(0, 3):
if not e2.death[ennum]:
screen.blit(enemy2, (e2.xpos[ennum], e2.ypos[ennum]))
if timer % 4 < 2:
screen.blit(enginefirebig,
(e2.xpos[ennum] + 38, e2.ypos[ennum] + 4))
    # Render the player ship
if not laser.show:
if dkey:
screen.blit(playershipfly,
(playerobj.xpos, playerobj.ypos))
else:
if akey:
screen.blit(playershipflyback,
(playerobj.xpos, playerobj.ypos))
else:
screen.blit(playership,
(playerobj.xpos, playerobj.ypos))
else:
screen.blit(playershipfirelaser,
(playerobj.xpos, playerobj.ypos))
if wkey or akey or skey or dkey:
if timer % 4 < 2:
screen.blit(enginefire, (playerobj.xpos, playerobj.ypos - 3))
screen.blit(enginefire, (playerobj.xpos, playerobj.ypos + 18))
else:
if timer % 7 < 3:
screen.blit(enginefirelow,
(playerobj.xpos, playerobj.ypos - 3))
screen.blit(enginefirelow,
(playerobj.xpos, playerobj.ypos + 18))
# Rendering the first boss during the first boss fight (stage 2)
if stage == 2:
if b1.xpos < 555:
if timer % 4 < 2:
if b1.direction == 4 or b1.direction == 7:
screen.blit(enginefire, (b1.xpos + 50, b1.ypos + 0))
if b1.direction == 1:
screen.blit(enginefire, (b1.xpos + 50, b1.ypos + 55))
if b1.direction == 2:
screen.blit(enginefirebig,
(b1.xpos + 71, b1.ypos + 20))
screen.blit(laserredver, (b1.xpos + 40, 0))
if b1.direction == 5:
screen.blit(enginefirebig,
(b1.xpos + 71, b1.ypos + 20))
screen.blit(laserredver, (b1.xpos + 40, 0))
if b1.direction == 8:
screen.blit(enginefirebig,
(b1.xpos + 71, b1.ypos + 20))
screen.blit(laserredver, (b1.xpos + 40, 0))
else:
if timer % 4 < 2:
screen.blit(enginefirebig, (b1.xpos + 71, b1.ypos + 20))
if flash > 10:
screen.blit(boss1, (b1.xpos, b1.ypos))
else:
screen.blit(boss1white, (b1.xpos, b1.ypos))
if stage == 4:
if b2.limit < 50 and b2.limit > 10:
if timer % 6 < 3:
screen.blit(laserred, (-390, b2.ypos + 34))
if flash > 10:
screen.blit(boss2shoot, (b2.xpos, b2.ypos))
else:
screen.blit(boss2shootwhite, (b2.xpos, b2.ypos))
else:
if flash > 10:
screen.blit(boss2, (b2.xpos, b2.ypos))
else:
screen.blit(boss2white, (b2.xpos, b2.ypos))
if timer % 4 < 2:
screen.blit(enginefireblue, (b2.xpos + 50, b2.ypos + 3))
screen.blit(enginefireblue, (b2.xpos + 56, b2.ypos + 28))
screen.blit(enginefireblue, (b2.xpos + 50, b2.ypos + 53))
if stage == 5:
if not e2.death[0]:
screen.blit(enemy2, (e2.xpos[0], e2.ypos[0]))
if timer % 4 < 2:
screen.blit(enginefirebig,
(e2.xpos[0] + 38, e2.ypos[0] + 4))
if not e3.death:
if e3.delay < 50 and e3.delay > 10:
screen.blit(enemy3fire, (e3.xpos, e3.ypos))
if (timer % 6 == 0 or timer % 6 == 1 or timer % 6 == 2):
screen.blit(laserred, (e3.xpos-HORI_RES, e3.ypos + 16))
else:
screen.blit(enemy3, (e3.xpos, e3.ypos))
if timer % 4 < 2:
screen.blit(enginefire, (e3.xpos + 45, e3.ypos - 1))
screen.blit(enginefire, (e3.xpos + 45, e3.ypos + 22))
if stage == 6:
if b3.fire:
if (timer % 6 == 0 or timer % 6 == 1 or timer % 6 == 2):
screen.blit(laserred, (b3.xpos - 780, b3.ypos + 15))
else:
screen.blit(laserred, (b3.xpos - 780, b3.ypos + 91))
if flash > 10:
screen.blit(boss3, (b3.xpos, b3.ypos))
else:
screen.blit(boss3white, (b3.xpos, b3.ypos))
if timer % 4 < 2:
screen.blit(enginefire, (b3.xpos + 94, b3.ypos + 15))
screen.blit(enginefire, (b3.xpos + 94, b3.ypos + 80))
screen.blit(enginefireblue, (b3.xpos + 20, b3.ypos + 20))
screen.blit(enginefireblue, (b3.xpos + 20, b3.ypos + 75))
screen.blit(enginefireblue, (b3.xpos + 60, b3.ypos + 48))
screen.blit(enginefirebig, (b3.xpos + 90, b3.ypos + 40))
if stage == 7:
if not e4.death[0]:
screen.blit(enemy4, (e4.xpos[0], e4.ypos[0]))
if timer % 4 < 2:
screen.blit(enginefire, (e4.xpos[0] + 43, e4.ypos[0] + 20))
screen.blit(enginefire, (e4.xpos[0] + 43, e4.ypos[0]))
        if not e4.death[1]:
screen.blit(enemy4, (e4.xpos[1], e4.ypos[1]))
if timer % 4 < 2:
screen.blit(enginefire, (e4.xpos[1] + 43, e4.ypos[1] + 20))
screen.blit(enginefire, (e4.xpos[1] + 43, e4.ypos[1]))
if stage == 8:
if timer % 4 < 2:
screen.blit(enginefireblue, (b4.xpos + 96, b4.ypos - 1))
screen.blit(enginefireblue, (b4.xpos + 96, b4.ypos + 75))
screen.blit(enginefireblue, (b4.xpos + 96, b4.ypos + 20))
screen.blit(enginefireblue, (b4.xpos + 96, b4.ypos + 96))
screen.blit(enginefirebig, (b4.xpos + 91, b4.ypos + 40))
if b4.direction == 2 or b4.direction == 5:
if flash > 10:
screen.blit(boss4ram, (b4.xpos, b4.ypos))
else:
screen.blit(boss4white, (b4.xpos, b4.ypos))
else:
if flash > 10:
screen.blit(boss4, (b4.xpos, b4.ypos))
else:
screen.blit(boss4white, (b4.xpos, b4.ypos))
if stage == 9:
if not e2.death[0]:
screen.blit(enemy2, (e2.xpos[0], e2.ypos[0]))
if timer % 4 < 2:
screen.blit(enginefirebig,
(e2.xpos[0] + 38, e2.ypos[0] + 4))
if not e3.death:
if e3.delay < 50 and e3.delay > 10:
screen.blit(enemy3fire, (e3.xpos, e3.ypos))
if timer % 4 < 2:
screen.blit(laserred, (e3.xpos-HORI_RES, e3.ypos+16))
else:
screen.blit(enemy3, (e3.xpos, e3.ypos))
if timer % 4 < 2:
screen.blit(enginefire, (e3.xpos + 45, e3.ypos - 1))
screen.blit(enginefire, (e3.xpos + 45, e3.ypos + 22))
if not e4.death[0]:
screen.blit(enemy4, (e4.xpos[0], e4.ypos[0]))
if timer % 4 < 2:
screen.blit(enginefire, (e4.xpos[0] + 43, e4.ypos[0] + 20))
screen.blit(enginefire, (e4.xpos[0] + 43, e4.ypos[0]))
if stage == 10:
if timer % 30 > 1 and timer % 30 < 15:
screen.blit(bossfinal, (b5.xpos, b5.ypos))
else:
screen.blit(bossfinalred, (b5.xpos, b5.ypos))
# Detects if player is in contact with enemy1
if (stage == 1):
for ennum in range(0, 5):
if not e1.death[ennum]:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e1.xpos[ennum], e1.ypos[ennum],
e1.ocxpos[ennum], e1.ocypos[ennum])):
playerobj.health -= 1
hitinframe = True
if (not e1.death[0]) and (stage == 3):
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e1.xpos[0], e1.ypos[0], e1.ocxpos[0],
e1.ocypos[0])):
playerobj.health -= 1
hitinframe = True
# Detects if player is in contact with boss1
if stage == 2:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
b1.xpos, b1.ypos, b1.ocxpos, b1.ocypos)):
playerobj.health -= 1
hitinframe = True
elif b1.direction == 2 or b1.direction == 5 or b1.direction == 8:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
b1.xpos + 40, 0, b1.xpos + 41, VERT_RES)):
playerobj.health -= 1
hitinframe = True
# Detects if player is in contact with enemy2
    if (stage == 3 or stage == 5 or stage == 9) and (not e2.death[0]):
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e2.xpos[0], e2.ypos[0], e2.ocxpos[0],
e2.ocypos[0])):
playerobj.health -= 2
hitinframe = True
if stage == 3 and (not e2.death[1]):
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e2.xpos[1], e2.ypos[1], e2.ocxpos[1],
e2.ocypos[1])):
playerobj.health -= 2
hitinframe = True
    if stage == 3 and not e2.death[2]:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e2.xpos[2], e2.ypos[2], e2.ocxpos[2],
e2.ocypos[2])):
playerobj.health -= 2
hitinframe = True
# Detects if player is in contact with boss2
if stage == 4:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
b2.xpos, b2.ypos, b2.ocxpos, b2.ocypos)):
playerobj.health -= 1
hitinframe = True
if b2.limit < 50 and b2.limit > 10:
if abs(playerobj.ypos - b2.ypos - 25) < 30 \
and playerobj.xpos < b2.xpos:
playerobj.health -= 1
hitinframe = True
# Detects contact with sniper's beam
if stage == 5 or stage == 9:
if e3.delay < 50 and e3.delay > 10:
if abs(playerobj.ypos - e3.ypos - 15) < 30:
playerobj.health -= 1
hitinframe = True
# Detects contact with boss3
if stage == 6:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
b3.xpos, b3.ypos, b3.ocxpos, b3.ocypos)):
playerobj.health -= 1
hitinframe = True
if b3.fire:
if (abs(playerobj.ypos - b3.ypos)) < 6 or \
(abs(playerobj.ypos - b3.ypos - 76)) < 6:
playerobj.health -= 1
hitinframe = True
# Detects contact with tracker
if (not e4.death[0]) and (stage == 7 or stage == 9):
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e4.xpos[0], e4.ypos[0], e4.ocxpos[0],
e4.ocypos[0])):
playerobj.health -= 1
hitinframe = True
if (not e4.death[1]) and stage == 7:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
e4.xpos[1], e4.ypos[1], e4.ocxpos[1],
e4.ocypos[1])):
playerobj.health -= 1
hitinframe = True
# Detects contact with boss 4
if stage == 8:
if (detect.isColli(playerobj.xpos, playerobj.ypos,
playerobj.ocxpos, playerobj.ocypos,
b4.xpos, b4.ypos, b4.ocxpos, b4.ocypos)):
playerobj.health -= 1
hitinframe = True
if playerobj.health < 1:
ender = False
while not ender:
screen.blit(deathscreen, (0, 0))
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.quit()
sys.exit()
if win:
screen.blit(winscreen, (0, 0))
pygame.display.update()
ender = False
a = 0
while not ender:
clock.tick(FPS)
a += 1
if a > 90:
screen.blit(endmsg1, (100, 150))
if a > 180:
screen.blit(endmsg2, (200, 350))
if a > 270:
screen.blit(endtime, (400, 250))
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.quit()
sys.exit()
# --------------------------------------------------------------------
# Start of game UI code
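    # Convert the frame counter to seconds (timer / 60; assumes FPS == 60)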
truetime = float(timer / 6)
truetime = truetime / 10
if truetime >= 0:
lab_clock = stdfont.render("Time: " + str(int(floor(truetime))),
True, (255, 255, 255))
else:
lab_clock = stdfont.render("Time: 0", True, (255, 255, 255))
screen.blit(lab_clock, (140, 30))
endtime = stdfont.render("Your clear time was " +
str(int(floor(truetime))) +
" seconds.", True, (0, 0, 0))
if hitinframe:
screen.blit(collidelabel, (230, 55))
collidelabeldelay += 1
if collidelabeldelay > 30:
hitinframe = False
collidelabeldelay = 0
if timer < 0:
screen.blit(getreadymessage1, (300, 260))
if timer > -200 and timer < 0:
screen.blit(getreadymessage, (300, 300))
screen.blit(scorelabel, (30, 30))
screen.blit(ui_healthbar, (400, 0))
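    # One bar unit per 5 points of health; units turn red once health drops to 33 or below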
for i in range(0, int(floor(playerobj.health/5))):
if playerobj.health > 33:
screen.blit(ui_energyunit, (513 + i * 12, 12))
else:
screen.blit(ui_energyunitred, (513 + i * 12, 12))
screen.blit(ui_energybar, (200, 555))
for i in range(0, int(floor(playerobj.energy / 5))):
if playerobj.energy > 33:
screen.blit(ui_energyunit, (313 + i * 12, 570))
else:
screen.blit(ui_energyunitred, (313 + i * 12, 570))
# End of game UI code
# --------------------------------------------------------------------
pygame.display.update()
|
TonyWu386/redshift-game
|
main.py
|
Python
|
gpl-2.0
| 49,393
| 0.000121
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
# Datetime ############################
dt = datetime.datetime.now()
print(dt)
dt = datetime.datetime(year=2018, month=8, day=30, hour=13, minute=30)
print(dt)
print(dt.isoformat())
# Date ################################
d = datetime.date.today()
print(d)
d = datetime.datetime.now().date()
print(d)
d = datetime.date(year=2018, month=8, day=30)
print(d)
print(d.isoformat())
# Time ################################
t = datetime.datetime.now().time()
print(t)
t = datetime.time(hour=1, minute=30)
print(t)
print(t.isoformat())
|
jeremiedecock/snippets
|
python/datetime_snippets.py
|
Python
|
mit
| 605
| 0
|
"""AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if aci.available:
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.'
% target.name)
|
azaghal/ansible
|
test/lib/ansible_test/_internal/cloud/aws.py
|
Python
|
gpl-3.0
| 3,937
| 0.002286
|
from Sire.IO import *
from Sire.MM import *
from Sire.System import *
from Sire.Mol import *
from Sire.Maths import *
from Sire.FF import *
from Sire.Move import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Qt import *
import os
coul_cutoff = 20 * angstrom
lj_cutoff = 10 * angstrom
amber = Amber()
(molecules, space) = amber.readCrdTop("test/io/waterbox.crd", "test/io/waterbox.top")
system = System()
swapwaters = MoleculeGroup("swapwaters")
waters = MoleculeGroup("waters")
molnums = molecules.molNums();
for molnum in molnums:
water = molecules[molnum].molecule()
if water.residue().number() == ResNum(2025):
center_water = water
swapwaters.add(center_water)
center_point = center_water.evaluate().center()
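# Waters within a distance of 7.5 from the centre water join the swap group; the rest stay bulk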
for molnum in molnums:
if molnum != center_water.number():
water = molecules[molnum].molecule()
if Vector.distance(center_point, water.evaluate().center()) < 7.5:
water = water.residue().edit().setProperty("PDB-residue-name", "SWP").commit()
swapwaters.add(water)
else:
waters.add(water)
system.add(swapwaters)
system.add(waters)
gridff = GridFF("gridff")
gridff.setCombiningRules("arithmetic")
print("Combining rules are %s" % gridff.combiningRules())
gridff.setBuffer(2 * angstrom)
gridff.setGridSpacing( 0.5 * angstrom )
gridff.setLJCutoff(lj_cutoff)
gridff.setCoulombCutoff(coul_cutoff)
gridff.setShiftElectrostatics(True)
#gridff.setUseAtomisticCutoff(True)
#gridff.setUseReactionField(True)
cljgridff = CLJGrid()
cljgridff.setCLJFunction( CLJShiftFunction(coul_cutoff,lj_cutoff) )
cljgridff.setFixedAtoms( CLJAtoms(waters.molecules()) )
cljatoms = CLJAtoms(swapwaters.molecules())
cljgridff.setGridDimensions( cljatoms, 0.5 * angstrom, 2 * angstrom )
print("Grid box equals %s" % cljgridff.grid())
cljboxes = CLJBoxes(cljatoms)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
cljgridff.setUseGrid(False)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
gridff.add(swapwaters, MGIdx(0))
gridff.add(waters, MGIdx(1))
gridff.setSpace( Cartesian() )
gridff2 = GridFF2("gridff2")
gridff2.setCombiningRules("arithmetic")
gridff2.setBuffer(2*angstrom)
gridff2.setGridSpacing( 0.5 * angstrom )
gridff2.setLJCutoff(lj_cutoff)
gridff2.setCoulombCutoff(coul_cutoff)
gridff2.setShiftElectrostatics(True)
#gridff2.setUseAtomisticCutoff(True)
#gridff2.setUseReactionField(True)
gridff2.add( swapwaters, MGIdx(0) )
gridff2.addFixedAtoms(waters.molecules())
gridff2.setSpace( Cartesian() )
testff = TestFF()
testff.add( swapwaters.molecules() )
testff.addFixedAtoms(waters.molecules())
testff.setCutoff(coul_cutoff, lj_cutoff)
cljff = InterGroupCLJFF("cljff")
cljff.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff.add(swapwaters, MGIdx(0))
cljff.add(waters, MGIdx(1))
cljff.setShiftElectrostatics(True)
#cljff.setUseAtomisticCutoff(True)
#cljff.setUseReactionField(True)
cljff.setSpace( Cartesian() )
cljff2 = InterCLJFF("cljff2")
cljff2.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff2.add(waters)
cljff2.setShiftElectrostatics(True)
cljff2.setSpace( Cartesian() )
print(gridff.energies())
print(gridff2.energies())
print("\nEnergies")
print(gridff.energies())
print(gridff2.energies())
t = QTime()
t.start()
nrgs = cljff.energies()
ms = t.elapsed()
print(cljff.energies())
print("Took %d ms" % ms)
testff.calculateEnergy()
t.start()
nrgs = cljff2.energies()
ms = t.elapsed()
print("\nExact compare")
print(cljff2.energies())
print("Took %d ms" % ms)
|
chryswoods/SireTests
|
unittests/SireMM/testgridff2.py
|
Python
|
gpl-2.0
| 3,699
| 0.011895
|
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import math
import time
from lxml import etree
from oslo_log import log as logging
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
self.ssl_verify = ssl_verify
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
Raises a DotHillRequestError error if the return code is not 0
or if the return code is None.
"""
# Get the return code for the operation, raising an exception
# if it is not present.
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if not return_code:
raise exception.DotHillRequestError(message="No status found")
# If no error occurred, just return.
if return_code == '0':
return
# Format a message for the status code.
msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"),
return_code)
raise exception.DotHillRequestError(message=msg)
def _build_request_url(self, path, *args, **kargs):
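        # Keyword arguments become '/key/value' path segments (underscores
        # replaced by dashes); positional arguments are appended verbatim.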
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
LOG.debug("DotHill Request URL: %s", url)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers, verify=self.ssl_verify)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
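        # Convert a count of 512-byte blocks to whole gigabytes (10**9 bytes), rounding up.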
return int(math.ceil(float(size) * 512 / (10 ** 9)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
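        # Return the lowest LUN number not already mapped to this host.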
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def linear_copy_volume(self, src_name, dest_name, dest_bknd_name):
"""Copy a linear volume."""
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
# The copy has started; now monitor until the operation completes.
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
break
time.sleep(1)
count += 1
time.sleep(5)
def copy_volume(self, src_name, dest_name, dest_bknd_name,
backend_type='virtual'):
"""Copy a linear or virtual volume."""
if backend_type == 'linear':
return self.linear_copy_volume(src_name, dest_name, dest_bknd_name)
# Copy a virtual volume to another in the same pool.
self._request("/copy/volume", src_name, name=dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s started.",
{'src_name': src_name, 'dest_name': dest_name, })
# Loop until this volume copy is no longer in progress.
while self.volume_copy_in_progress(src_name):
time.sleep(5)
# Once the copy operation is finished, check to ensure that
# the volume was not deleted because of a subsequent error. An
# exception will be raised if the named volume is not present.
self._request("/show/volumes", dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s completed.",
{'src_name': src_name, 'dest_name': dest_name, })
def volume_copy_in_progress(self, src_name):
"""Check if a volume copy is in progress for the named volume."""
# 'show volume-copies' always succeeds, even if none in progress.
tree = self._request("/show/volume-copies")
# Find 0 or 1 job(s) with source volume we're interested in
q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name
joblist = tree.xpath(q)
if len(joblist) == 0:
return False
LOG.debug("Volume copy of volume: %(src_name)s is "
"%(pc)s percent completed.",
{'src_name': src_name,
'pc': joblist[0].findtext("PROPERTY[@name='progress']"), })
return True
def _check_host(self, host):
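        # Return 0 if the host id is already registered on the array, -1 otherwise.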
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (32 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name, backend_type):
if backend_type == 'linear':
tree = self._request("/show/vdisks", backend_name)
else:
tree = self._request("/show/pools", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
|
nikesh-mahalka/cinder
|
cinder/volume/drivers/dothill/dothill_client.py
|
Python
|
apache-2.0
| 14,785
| 0
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Moyal."""
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
class _MoyalTest(object):
def make_tensor(self, x):
x = tf.cast(x, self.dtype)
return tf1.placeholder_with_default(
x, shape=x.shape if self.use_static_shape else None)
def testMoyalShape(self):
loc = np.array([3.0] * 5, dtype=self.dtype)
scale = np.array([3.0] * 5, dtype=self.dtype)
moyal = tfd.Moyal(loc=loc, scale=scale, validate_args=True)
self.assertEqual((5,), self.evaluate(moyal.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([5]), moyal.batch_shape)
self.assertAllEqual([], self.evaluate(moyal.event_shape_tensor()))
self.assertEqual(tf.TensorShape([]), moyal.event_shape)
def testInvalidScale(self):
scale = [-.01, 0., 2.]
with self.assertRaisesOpError('Argument `scale` must be positive.'):
moyal = tfd.Moyal(loc=0., scale=scale, validate_args=True)
self.evaluate(moyal.mean())
scale = tf.Variable([.01])
self.evaluate(scale.initializer)
moyal = tfd.Moyal(loc=0., scale=scale, validate_args=True)
self.assertIs(scale, moyal.scale)
self.evaluate(moyal.mean())
with tf.control_dependencies([scale.assign([-.01])]):
with self.assertRaisesOpError('Argument `scale` must be positive.'):
self.evaluate(moyal.mean())
def testMoyalLogPdf(self):
batch_size = 6
loc = np.array([0.] * batch_size, dtype=self.dtype)
scale = np.array([3.] * batch_size, dtype=self.dtype)
x = np.array([2., 3., 4., 5., 6., 7.], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_pdf = moyal.log_prob(self.make_tensor(x))
self.assertAllClose(
stats.moyal.logpdf(x, loc=loc, scale=scale),
self.evaluate(log_pdf))
pdf = moyal.prob(x)
self.assertAllClose(
stats.moyal.pdf(x, loc=loc, scale=scale), self.evaluate(pdf))
def testMoyalLogPdfMultidimensional(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=self.dtype).T
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_pdf = moyal.log_prob(self.make_tensor(x))
self.assertAllClose(
self.evaluate(log_pdf), stats.moyal.logpdf(x, loc=loc, scale=scale))
pdf = moyal.prob(self.make_tensor(x))
self.assertAllClose(
self.evaluate(pdf), stats.moyal.pdf(x, loc=loc, scale=scale))
def testMoyalCDF(self):
batch_size = 6
loc = np.array([0.] * batch_size, dtype=self.dtype)
scale = np.array([3.] * batch_size, dtype=self.dtype)
x = np.array([2., 3., 4., 5., 6., 7.], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_cdf = moyal.log_cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(log_cdf), stats.moyal.logcdf(x, loc=loc, scale=scale))
cdf = moyal.cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(cdf), stats.moyal.cdf(x, loc=loc, scale=scale))
def testMoyalCdfMultidimensional(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=self.dtype).T
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_cdf = moyal.log_cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(log_cdf),
stats.moyal.logcdf(x, loc=loc, scale=scale))
cdf = moyal.cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(cdf),
stats.moyal.cdf(x, loc=loc, scale=scale))
def testMoyalMean(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.mean()),
stats.moyal.mean(loc=loc, scale=scale))
def testMoyalVariance(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.variance()),
stats.moyal.var(loc=loc, scale=scale))
def testMoyalStd(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.stddev()),
stats.moyal.std(loc=loc, scale=scale))
def testMoyalMode(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.mode()), self.evaluate(moyal.loc))
def testMoyalSample(self):
loc = self.dtype(4.0)
scale = self.dtype(1.0)
n = int(3e5)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
samples = moyal.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
stats.moyal.mean(loc=loc, scale=scale),
sample_values.mean(), rtol=.01)
self.assertAllClose(
stats.moyal.var(loc=loc, scale=scale),
sample_values.var(), rtol=.01)
def testMoyalSampleMultidimensionalMean(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0, 0.8, 0.5], dtype=self.dtype)
n = int(2e5)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
samples = moyal.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
# TODO(b/157561663): Remove the masking once tf.math.special.erfcinv exists.
sample_values = np.ma.masked_invalid(sample_values)
self.assertAllClose(
stats.moyal.mean(loc=loc, scale=scale),
sample_values.mean(axis=0),
rtol=.03,
atol=0)
def testMoyalSampleMultidimensionalVar(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0, 0.8, 0.5], dtype=self.dtype)
n = int(1e5)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
samples = moyal.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
# TODO(b/157561663): Remove the masking once tf.math.special.erfcinv exists.
sample_values = np.ma.masked_invalid(sample_values)
self.assertAllClose(
stats.moyal.var(loc=loc, scale=scale),
sample_values.var(axis=0),
rtol=.03,
atol=0)
def testMoyalMoyalKL(self):
a_loc = np.arange(-2.0, 3.0, 1.0)
a_scale = np.arange(0.5, 2.5, 0.5)
b_loc = 2 * np.arange(-2.0, 3.0, 1.0)
b_scale = np.arange(0.5, 2.5, 0.5)
# This reshape is intended to expand the number of test cases.
a_loc = a_loc.reshape((len(a_loc), 1, 1, 1))
a_scale = a_scale.reshape((1, len(a_scale), 1, 1))
b_loc = b_loc.reshape((1, 1, len(b_loc), 1))
b_scale = b_scale.reshape((1, 1, 1, len(b_scale)))
a = tfd.Moyal(loc=a_loc, scale=a_scale, validate_args=True)
b = tfd.Moyal(loc=b_loc, scale=b_scale, validate_args=True)
kl = tfd.kl_divergence(a, b)
x = a.sample(int(3e5), seed=test_util.test_seed())
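    # Monte Carlo estimate of KL(a || b), compared below against the analytic value.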
kl_sample = tf.reduce_mean(a.log_prob(x) - b.log_prob(x), axis=0)
kl_, kl_sample_ = self.evaluate([kl, kl_sample])
self.assertAllClose(kl_, kl_sample_, atol=1e-15, rtol=1e-1)
zero_kl = tfd.kl_divergence(a, a)
true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(zero_kl), zero_kl])
self.assertAllClose(true_zero_kl_, zero_kl_)
@test_util.test_all_tf_execution_regimes
class MoyalTestStaticShape(test_util.TestCase, _MoyalTest):
dtype = np.float32
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class MoyalTestDynamicShape(test_util.TestCase, _MoyalTest):
dtype = np.float32
use_static_shape = False
@test_util.test_all_tf_execution_regimes
class MoyalTestFloat64StaticShape(test_util.TestCase, _MoyalTest):
dtype = np.float64
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class MoyalTestFloat64DynamicShape(test_util.TestCase, _MoyalTest):
dtype = np.float64
use_static_shape = False
if __name__ == '__main__':
test_util.main()
|
tensorflow/probability
|
tensorflow_probability/python/distributions/moyal_test.py
|
Python
|
apache-2.0
| 10,137
| 0.002861
|
""" UnitTests for the SimpleHTTPServer
"""
import mock
import unittest
class TestHTTPServerHandler(unittest.TestCase):
"""
"""
def setUp(self):
self.handler = mock.Mock()
def test_do_GET(self):
pass
def test_do_POST(self):
pass
def tearDown(self):
self.handler()
if __name__ == "__main__":
unittest.main()
|
martynbristow/gabbi-examples
|
test_server.py
|
Python
|
mit
| 367
| 0.016349
|
#a=[int(x) for x in input().split()]
#print (a)
x=5
y=10
b=[int(y) for y in input().split()]
#a=[int(x) for x in input().split()]
dir(__builtins__)
|
krishnakantkumar0/Simple-Python
|
13.py
|
Python
|
gpl-3.0
| 147
| 0.040816
|
a = "python"
print(a*2)
try:
print(a[-10])
except IndexError as e:
print("์ธ๋ฑ์ค ๋ฒ์๋ฅผ ์ด๊ณผ ํ์ต๋๋ค.")
print(e)
print(a[0:4])
print(a[1:-2])
# %-10s left-aligns "hi" in a 10-character field
print("%-10sjane." % "hi")
b = "Python is best choice."
print(b.find("b"))
print(b.find("B"))
try:
print(b.index("B"))
except ValueError as e:
print(e)
c = "hi"
print(c.upper())
a = " hi"
print("kk",a.lstrip())
a = " hi "
print(a.strip())
|
JaeGyu/PythonEx_1
|
20170106.py
|
Python
|
mit
| 455
| 0.004728
|
# 3rd party imports
from reportlab.platypus import Image, Paragraph, PageBreak, Table, Spacer
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
# Django imports
from django.conf import settings
# Project imports
from .arabic_reshaper import reshape
from .pdf_canvas import NumberedCanvas, getArabicStyle, getHeaderStyle, getTableStyle, \
get_hnec_logo_fname, drawHnecLogo
from .strings import STRINGS
from .utils import chunker, format_name, CountingDocTemplate, build_copy_info, \
truncate_center_name, out_of_disk_space_handler_context
from libya_elections.constants import MALE, FEMALE
def generate_pdf(filename, center, voter_roll, gender, center_book=False):
# filename: the file to which the PDF will be written
# center: a data_pull.Center instance
# voter_roll: list of registration dicts --
# {national_id, first_name, father_name, grandfather_name, family_name, gender}
# gender: one of the MALE/FEMALE constants. UNISEX is not valid.
# center_book: ???
#
# separates by gender code using one of the constants in utils.Gender
# sorts by name fields in query
# assembles display string from parts
# writes to filename
#
# returns number of pages in the PDF
if gender not in (MALE, FEMALE):
raise ValueError("generate_pdf() gender must be MALE or FEMALE")
# set styles
styles = getArabicStyle()
# get strings
mf_string = STRINGS['female'] if (gender == FEMALE) else STRINGS['male']
cover_string = STRINGS['center_book_cover'] if center_book else STRINGS['center_list_cover']
header_string = STRINGS['center_book_header'] if center_book else STRINGS['center_list_header']
# cover page
center_name = reshape(center.name)
template = '%s: %s / %s'
subconstituency_name = reshape(center.subconstituency.name_arabic)
params = (STRINGS['subconstituency_name'], center.subconstituency.id, subconstituency_name)
subconstituency = template % params
center_info = {
'gender': '%s: %s' % (STRINGS['gender'], mf_string),
'number': '%s: %d' % (STRINGS['center_number'], center.center_id),
'name': '%s: %s' % (STRINGS['center_name'], center_name),
'name_trunc': '%s: %s' % (STRINGS['center_name'], truncate_center_name(center_name)),
'subconstituency': subconstituency,
'copy_info': build_copy_info(center),
}
# create document
doc = CountingDocTemplate(filename, pagesize=A4, topMargin=1 * cm, bottomMargin=1 * cm,
leftMargin=1.5 * cm, rightMargin=2.54 * cm)
# elements, cover page first
with open(get_hnec_logo_fname(), 'rb') as hnec_f:
elements = [
Image(hnec_f, width=10 * cm, height=2.55 * cm),
Spacer(48, 48),
Paragraph(cover_string, styles['Title']),
Spacer(18, 18),
Paragraph(center_info['gender'], styles['CoverInfo-Bold']),
Paragraph(center_info['number'], styles['CoverInfo']),
Paragraph(center_info['name'], styles['CoverInfo']),
Paragraph(center_info['copy_info'], styles['CoverInfo']),
Paragraph(center_info['subconstituency'], styles['CoverInfo']),
PageBreak(),
]
# Focus on one specific gender.
voter_roll = [voter for voter in voter_roll if voter.gender == gender]
# We wrap the page header in a table because we want the header's gray background to extend
# margin-to-margin and that's easy to do with a table + background color. It's probably
# possible with Paragraphs alone, but I'm too lazy^w busy to figure out how.
# It's necessary to wrap the table cell text in Paragraphs to ensure the base text direction
# is RTL. See https://github.com/hnec-vr/libya-elections/issues/1197
para_prefix = Paragraph(STRINGS['center_header_prefix'], styles['InnerPageHeader'])
para_header = Paragraph(header_string, styles['InnerPageHeader'])
page_header = Table([[para_prefix], [para_header]], 15 * cm, [16, 24])
page_header.setStyle(getHeaderStyle())
n_pages = 0
for page in chunker(voter_roll, settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION):
n_pages += 1
elements.append(page_header)
elements += [Paragraph(center_info['gender'], styles['CenterInfo-Bold']),
Paragraph(center_info['number'], styles['CenterInfo']),
Paragraph(center_info['name_trunc'], styles['CenterInfo']),
]
elements.append(Spacer(10, 10))
# The contents of each table cell are wrapped in a Paragraph to set the base text
# direction.
# See https://github.com/hnec-vr/libya-elections/issues/1197
data = [[Paragraph(reshape(format_name(voter)), styles['TableCell'])] for voter in page]
# Insert header before the data.
data.insert(0, [Paragraph(STRINGS['the_names'], styles['TableCell'])])
table = Table(data, 15 * cm, 0.825 * cm)
table.setStyle(getTableStyle())
elements.append(table)
elements.append(Paragraph(mf_string, styles['PageBottom']))
elements.append(PageBreak())
if not n_pages:
# When there are no pages (==> no registrants for this gender), we need to emit a page
# that states that.
elements.append(page_header)
key = 'no_male_registrants' if gender == MALE else 'no_female_registrants'
elements.append(Paragraph(STRINGS[key], styles['BlankPageNotice']))
with out_of_disk_space_handler_context():
doc.build(elements, canvasmaker=NumberedCanvas, onLaterPages=drawHnecLogo)
return doc.n_pages
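# A minimal usage sketch (illustrative only; `center` and `voter_roll` are
# hypothetical stand-ins for the objects described in the comments above):
#
#     from libya_elections.constants import FEMALE
#     n_pages = generate_pdf('/tmp/roll.pdf', center, voter_roll, FEMALE)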
|
SmartElect/SmartElect
|
rollgen/generate_pdf.py
|
Python
|
apache-2.0
| 5,841
| 0.003938
|
from .analysis import *
from .toolbox import *
from . import utils
|
Geosyntec/python-tidegates
|
tidegates/__init__.py
|
Python
|
bsd-3-clause
| 67
| 0
|
"""Utilities for extracting common archive formats"""
import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from distutils.errors import DistutilsError
if "__PEX_UNVENDORED__" in __import__("os").environ:
from pkg_resources import ensure_directory # vendor:skip
else:
from pex.third_party.pkg_resources import ensure_directory
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
class UnrecognizedFormat(DistutilsError):
"""Couldn't recognize the archive type"""
def default_filter(src, dst):
    """The default progress/filter callback; keeps every file by returning
    its destination path unchanged."""
    return dst
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
drivers=None):
"""Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
`progress_filter` is a function taking two arguments: a source path
internal to the archive ('/'-separated), and a filesystem path where it
will be extracted. The callback must return the desired extract path
(which may be the same as the one passed in), or else ``None`` to skip
that file or directory. The callback can thus be used to report on the
progress of the extraction, as well as to filter the items extracted or
alter their extraction paths.
`drivers`, if supplied, must be a non-empty sequence of functions with the
same signature as this function (minus the `drivers` argument), that raise
``UnrecognizedFormat`` if they do not support extracting the designated
archive type. The `drivers` are tried in sequence until one is found that
does not raise an error, or until all are exhausted (in which case
``UnrecognizedFormat`` is raised). If you do not supply a sequence of
drivers, the module's ``extraction_drivers`` constant will be used, which
means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
order.
"""
for driver in drivers or extraction_drivers:
try:
driver(filename, extract_dir, progress_filter)
except UnrecognizedFormat:
continue
else:
return
else:
raise UnrecognizedFormat(
"Not a recognized archive type: %s" % filename
)
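# Illustrative sketch (not part of setuptools): a progress_filter that skips
# VCS metadata during extraction; the names below are hypothetical.
#
#     def skip_vcs(src, dst):
#         # Returning None skips the entry; returning dst keeps the default path.
#         if any(part in ('.git', '.svn') for part in src.split('/')):
#             return None
#         return dst
#
#     unpack_archive('pkg.tar.gz', 'build/', progress_filter=skip_vcs)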
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
""""Unpack" a directory, using the same interface as for archives
Raises ``UnrecognizedFormat`` if `filename` is not a directory
"""
if not os.path.isdir(filename):
raise UnrecognizedFormat("%s is not a directory" % filename)
paths = {
filename: ('', extract_dir),
}
for base, dirs, files in os.walk(filename):
src, dst = paths[base]
for d in dirs:
paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
for f in files:
target = os.path.join(dst, f)
target = progress_filter(src + f, target)
if not target:
                # skip entries rejected by the progress filter
continue
ensure_directory(target)
f = os.path.join(base, f)
shutil.copyfile(f, target)
shutil.copystat(f, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
if not zipfile.is_zipfile(filename):
raise UnrecognizedFormat("%s is not a zip file" % (filename,))
with zipfile.ZipFile(filename) as z:
for info in z.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name.split('/'):
continue
target = os.path.join(extract_dir, *name.split('/'))
target = progress_filter(name, target)
if not target:
continue
if name.endswith('/'):
# directory
ensure_directory(target)
else:
# file
ensure_directory(target)
data = z.read(info.filename)
with open(target, 'wb') as f:
f.write(data)
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise UnrecognizedFormat(
"%s is not a compressed or uncompressed tar file" % (filename,)
)
with contextlib.closing(tarobj):
# don't do any chowning!
tarobj.chown = lambda *args: None
for member in tarobj:
name = member.name
# don't extract absolute paths or ones with .. in them
if not name.startswith('/') and '..' not in name.split('/'):
prelim_dst = os.path.join(extract_dir, *name.split('/'))
# resolve any links and to extract the link targets as normal
# files
while member is not None and (member.islnk() or member.issym()):
linkpath = member.linkname
if member.issym():
base = posixpath.dirname(member.name)
linkpath = posixpath.join(base, linkpath)
linkpath = posixpath.normpath(linkpath)
member = tarobj._getmember(linkpath)
if member is not None and (member.isfile() or member.isdir()):
final_dst = progress_filter(name, prelim_dst)
if final_dst:
if final_dst.endswith(os.sep):
final_dst = final_dst[:-1]
try:
# XXX Ugh
tarobj._extract_member(member, final_dst)
except tarfile.ExtractError:
# chown/chmod/mkfifo/mknode/makedev failed
pass
return True
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
|
pantsbuild/pex
|
pex/vendor/_vendored/setuptools/setuptools/archive_util.py
|
Python
|
apache-2.0
| 6,730
| 0.000594
|
# coding: utf-8
# These tests are taken from astropy, as with the astrodynamics.constant.Constant
# class. It retains the original license (see licenses/ASTROPY_LICENSE.txt)
from __future__ import absolute_import, division, print_function
import copy
import astropy.units as u
from astropy.units import Quantity
import astrodynamics.constants as const
from astrodynamics.constants import J2, Constant
def test_units():
"""Confirm that none of the constants defined in astrodynamics have invalid
units.
"""
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
copied = copy.deepcopy(J2)
assert copied == J2
copied = copy.copy(J2)
assert copied == J2
def test_view():
"""Check that Constant and Quantity views can be taken."""
x = J2
x2 = x.view(Constant)
assert x2 == x
assert x2.value == x.value
# make sure it has the necessary attributes and they're not blank
assert x2.uncertainty
assert x2.name == x.name
assert x2.reference == x.reference
assert x2.unit == x.unit
q1 = x.view(Quantity)
assert q1 == x
assert q1.value == x.value
assert type(q1) is Quantity
assert not hasattr(q1, 'reference')
q2 = Quantity(x)
assert q2 == x
assert q2.value == x.value
assert type(q2) is Quantity
assert not hasattr(q2, 'reference')
x3 = Quantity(x, subok=True)
assert x3 == x
assert x3.value == x.value
# make sure it has the necessary attributes and they're not blank
assert x3.uncertainty
assert x3.name == x.name
assert x3.reference == x.reference
assert x3.unit == x.unit
x4 = Quantity(x, subok=True, copy=False)
assert x4 is x
def test_repr():
a = Constant('the name', value=1, unit='m2', uncertainty=0.1, reference='me')
s = ("Constant(name='the name', value=1, unit='m2', uncertainty=0.1, "
"reference='me')")
assert repr(a) == s
|
python-astrodynamics/astrodynamics
|
tests/test_constants.py
|
Python
|
mit
| 2,079
| 0.000962
|
# coding: utf-8
# # Query `apiso:ServiceType`
# In[43]:
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import numpy as np
# The GetCaps request for these services looks like this:
# http://catalog.data.gov/csw-all/csw?SERVICE=CSW&VERSION=2.0.2&REQUEST=GetCapabilities
# In[56]:
endpoint = 'http://data.ioos.us/csw' # FAILS apiso:ServiceType
#endpoint = 'http://catalog.data.gov/csw-all' # FAILS apiso:ServiceType
#endpoint = 'http://geoport.whoi.edu/csw' # SUCCEEDS apiso:ServiceType
csw = CatalogueServiceWeb(endpoint,timeout=60)
print(csw.version)
# In[57]:
csw.get_operation_by_name('GetRecords').constraints
# Search first for records containing the text "COAWST" and "experimental".
# In[45]:
val = 'coawst'
filter1 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [ filter1 ]
# In[46]:
val = 'experimental'
filter2 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2])]
# In[47]:
csw.getrecords2(constraints=filter_list,maxrecords=100,esn='full')
print(len(csw.records.keys()))
for rec in list(csw.records.keys()):
    print(csw.records[rec].title)
# Now let's print out the references (service endpoints) to see what types of services are available
# In[48]:
choice=np.random.choice(list(csw.records.keys()))
print(csw.records[choice].title)
csw.records[choice].references
# In[49]:
csw.records[choice].xml
# We see that the `OPeNDAP` service is available, so let's see if we can add that to the query, returning only datasets that have text "COAWST" and "experimental" and that have an "opendap" service available.
#
# We should get the same number of records, as all COAWST records have OPeNDAP service endpoints. If we get no records, something is wrong with the CSW server.
# In[50]:
val = 'OPeNDAP'
filter3 = fes.PropertyIsLike(propertyname='apiso:ServiceType',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2, filter3])]
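# fes.And requires every constraint to match: AnyText containing "coawst",
# AnyText containing "experimental", and apiso:ServiceType containing "OPeNDAP".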
csw.getrecords2(constraints=filter_list, maxrecords=1000)
# In[51]:
print(len(csw.records.keys()))
for rec in list(csw.records.keys()):
print('title:'+csw.records[rec].title)
print('identifier:'+csw.records[rec].identifier)
print('modified:'+csw.records[rec].modified)
print(' ')
# In[53]:
print(csw.request)
# In[ ]:
|
rsignell-usgs/notebook
|
CSW/data.ioos.us-pycsw.py
|
Python
|
mit
| 2,570
| 0.01323
|
from __future__ import unicode_literals
import json
from django.utils import six
from kgb import SpyAgency
from reviewboard.hostingsvcs.github import GitHub
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.repository import RemoteRepository
from reviewboard.hostingsvcs.utils.paginator import APIPaginator
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
remote_repository_item_mimetype,
remote_repository_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_remote_repository_item_url,
get_remote_repository_list_url)
def _compare_item(self, item_rsp, remote_repository):
self.assertEqual(item_rsp['id'], remote_repository.id)
self.assertEqual(item_rsp['name'], remote_repository.name)
self.assertEqual(item_rsp['owner'], remote_repository.owner)
self.assertEqual(item_rsp['scm_type'], remote_repository.scm_type)
self.assertEqual(item_rsp['path'], remote_repository.path)
self.assertEqual(item_rsp['mirror_path'], remote_repository.mirror_path)
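# Paginator stub that serves canned results instead of calling out to the
# hosting service, keeping these webapi tests network-free.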
class RemoteRepositoryTestPaginator(APIPaginator):
def __init__(self, results):
self.results = results
super(RemoteRepositoryTestPaginator, self).__init__(client=None,
url='')
def fetch_url(self, url):
return {
'data': self.results,
}
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(SpyAgency, BaseWebAPITestCase):
"""Testing the RemoteRepositoryResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'hosting-service-accounts/<id>/remote-repositories/'
resource = resources.remote_repository
basic_get_use_admin = True
compare_item = _compare_item
def setup_http_not_allowed_list_test(self, user):
account = HostingServiceAccount.objects.create(service_name='github',
username='bob')
return get_remote_repository_list_url(account)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
account = HostingServiceAccount.objects.create(
service_name='github',
username='bob',
local_site=self.get_local_site_or_none(name=local_site_name),
data=json.dumps({
'authorization': {
'token': '123',
},
}))
service = account.service
remote_repositories = [
RemoteRepository(service,
repository_id='123',
name='repo1',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo1',
mirror_path='https://example.com/repo1'),
RemoteRepository(service,
repository_id='456',
name='repo2',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo2',
mirror_path='https://example.com/repo2'),
]
paginator = RemoteRepositoryTestPaginator(remote_repositories)
self.spy_on(GitHub.get_remote_repositories,
owner=GitHub,
call_fake=lambda *args, **kwargs: paginator)
return (get_remote_repository_list_url(account, local_site_name),
remote_repository_list_mimetype,
remote_repositories)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(SpyAgency, BaseWebAPITestCase):
"""Testing the RemoteRepositoryResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'hosting-service-accounts/<id>/remote-repositories/<id>/'
resource = resources.remote_repository
basic_get_use_admin = True
compare_item = _compare_item
def setup_http_not_allowed_item_test(self, user):
account = HostingServiceAccount.objects.create(service_name='github',
username='bob')
remote_repository = RemoteRepository(
account.service,
repository_id='123',
name='repo1',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo1')
return get_remote_repository_item_url(remote_repository)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
account = HostingServiceAccount.objects.create(
service_name='github',
username='bob',
local_site=self.get_local_site_or_none(name=local_site_name),
data=json.dumps({
'authorization': {
'token': '123',
},
}))
remote_repository = RemoteRepository(
account.service,
repository_id='123',
name='repo1',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo1',
mirror_path='https://example.com/repo1')
self.spy_on(GitHub.get_remote_repository,
owner=GitHub,
call_fake=lambda *args, **kwargs: remote_repository)
return (get_remote_repository_item_url(remote_repository,
local_site_name),
remote_repository_item_mimetype,
remote_repository)
|
chipx86/reviewboard
|
reviewboard/webapi/tests/test_remote_repository.py
|
Python
|
mit
| 5,858
| 0
|
"""
Support to interface with Sonos players (via SoCo).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.sonos/
"""
import datetime
import logging
from os import path
import socket
import urllib
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, STATE_OFF,
ATTR_ENTITY_ID)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['SoCo==0.12']
_LOGGER = logging.getLogger(__name__)
# The soco library is excessively chatty when it comes to logging and
# causes a LOT of spam in the logs due to making a http connection to each
# speaker every 10 seconds. Quiet it down a bit to just actual problems.
_SOCO_LOGGER = logging.getLogger('soco')
_SOCO_LOGGER.setLevel(logging.ERROR)
_REQUESTS_LOGGER = logging.getLogger('requests')
_REQUESTS_LOGGER.setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA |\
SUPPORT_SEEK | SUPPORT_CLEAR_PLAYLIST | SUPPORT_SELECT_SOURCE
SERVICE_GROUP_PLAYERS = 'sonos_group_players'
SERVICE_UNJOIN = 'sonos_unjoin'
SERVICE_SNAPSHOT = 'sonos_snapshot'
SERVICE_RESTORE = 'sonos_restore'
SERVICE_SET_TIMER = 'sonos_set_sleep_timer'
SERVICE_CLEAR_TIMER = 'sonos_clear_sleep_timer'
SUPPORT_SOURCE_LINEIN = 'Line-in'
SUPPORT_SOURCE_TV = 'TV'
# Service call validation schemas
ATTR_SLEEP_TIME = 'sleep_time'
SONOS_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
SONOS_SET_TIMER_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_SLEEP_TIME): vol.All(vol.Coerce(int),
vol.Range(min=0, max=86399))
})
# List of devices that have been registered
DEVICES = []
# pylint: disable=unused-argument, too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Sonos platform."""
import soco
global DEVICES
if discovery_info:
player = soco.SoCo(discovery_info)
        # skip the player if it was already added (e.g. discovered via config)
        if any(player.uid == device.unique_id for device in DEVICES):
return True
if player.is_visible:
device = SonosDevice(hass, player)
add_devices([device])
if not DEVICES:
register_services(hass)
DEVICES.append(device)
return True
return False
players = None
hosts = config.get('hosts', None)
if hosts:
        # Support backward compatibility with a comma separated list of
        # hosts from config
hosts = hosts.split(',') if isinstance(hosts, str) else hosts
players = []
for host in hosts:
players.append(soco.SoCo(socket.gethostbyname(host)))
if not players:
players = soco.discover(interface_addr=config.get('interface_addr',
None))
if not players:
_LOGGER.warning('No Sonos speakers found.')
return False
DEVICES = [SonosDevice(hass, p) for p in players]
add_devices(DEVICES)
register_services(hass)
_LOGGER.info('Added %s Sonos speakers', len(players))
return True
def register_services(hass):
"""Register all services for sonos devices."""
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_GROUP_PLAYERS,
_group_players_service,
descriptions.get(SERVICE_GROUP_PLAYERS),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_UNJOIN,
_unjoin_service,
descriptions.get(SERVICE_UNJOIN),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SNAPSHOT,
_snapshot_service,
descriptions.get(SERVICE_SNAPSHOT),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_RESTORE,
_restore_service,
descriptions.get(SERVICE_RESTORE),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_TIMER,
_set_sleep_timer_service,
descriptions.get(SERVICE_SET_TIMER),
schema=SONOS_SET_TIMER_SCHEMA)
hass.services.register(DOMAIN, SERVICE_CLEAR_TIMER,
_clear_sleep_timer_service,
descriptions.get(SERVICE_CLEAR_TIMER),
schema=SONOS_SCHEMA)
def _apply_service(service, service_func, *service_func_args):
"""Internal func for applying a service."""
entity_ids = service.data.get('entity_id')
if entity_ids:
_devices = [device for device in DEVICES
if device.entity_id in entity_ids]
else:
_devices = DEVICES
for device in _devices:
service_func(device, *service_func_args)
device.update_ha_state(True)
def _group_players_service(service):
"""Group media players, use player as coordinator."""
_apply_service(service, SonosDevice.group_players)
def _unjoin_service(service):
"""Unjoin the player from a group."""
_apply_service(service, SonosDevice.unjoin)
def _snapshot_service(service):
"""Take a snapshot."""
_apply_service(service, SonosDevice.snapshot)
def _restore_service(service):
"""Restore a snapshot."""
_apply_service(service, SonosDevice.restore)
def _set_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.set_sleep_timer,
service.data[ATTR_SLEEP_TIME])
def _clear_sleep_timer_service(service):
    """Clear the sleep timer."""
_apply_service(service,
SonosDevice.clear_sleep_timer)
def only_if_coordinator(func):
"""Decorator for coordinator.
If used as decorator, avoid calling the decorated method if player is not
a coordinator. If not, a grouped speaker (not in coordinator role) will
throw soco.exceptions.SoCoSlaveException.
Also, partially catch exceptions like:
soco.exceptions.SoCoUPnPException: UPnP Error 701 received:
Transition not available from <player ip address>
"""
def wrapper(*args, **kwargs):
"""Decorator wrapper."""
if args[0].is_coordinator:
from soco.exceptions import SoCoUPnPException
try:
func(*args, **kwargs)
except SoCoUPnPException:
_LOGGER.error('command "%s" for Sonos device "%s" '
'not available in this mode',
func.__name__, args[0].name)
else:
_LOGGER.debug('Ignore command "%s" for Sonos device "%s" (%s)',
func.__name__, args[0].name, 'not coordinator')
return wrapper
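# Applied below to turn_off, turn_on, unjoin, snapshot, restore and the
# sleep-timer methods, all of which must run on the group coordinator.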
# pylint: disable=too-many-instance-attributes, too-many-public-methods
# pylint: disable=abstract-method
class SonosDevice(MediaPlayerDevice):
"""Representation of a Sonos device."""
# pylint: disable=too-many-arguments
def __init__(self, hass, player):
"""Initialize the Sonos device."""
from soco.snapshot import Snapshot
self.hass = hass
self.volume_increment = 5
self._player = player
self._speaker_info = None
self._name = None
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self.update()
self.soco_snapshot = Snapshot(self._player)
@property
def should_poll(self):
"""Polling needed."""
return True
def update_sonos(self, now):
"""Update state, called by track_utc_time_change."""
self.update_ha_state(True)
@property
def unique_id(self):
"""Return an unique ID."""
return self._player.uid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status == 'PAUSED_PLAYBACK':
return STATE_PAUSED
if self._status == 'PLAYING':
return STATE_PLAYING
if self._status == 'STOPPED':
return STATE_IDLE
if self._status == 'OFF':
return STATE_OFF
return STATE_UNKNOWN
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._player.is_coordinator
def update(self):
"""Retrieve latest state."""
self._speaker_info = self._player.get_speaker_info()
self._name = self._speaker_info['zone_name'].replace(
' (R)', '').replace(' (L)', '')
if self.available:
self._status = self._player.get_current_transport_info().get(
'current_transport_state')
trackinfo = self._player.get_current_track_info()
if trackinfo['uri'].startswith('x-rincon:'):
# this speaker is a slave, find the coordinator
# the uri of the track is 'x-rincon:{coordinator-id}'
coordinator_id = trackinfo['uri'][9:]
coordinators = [device for device in DEVICES
if device.unique_id == coordinator_id]
self._coordinator = coordinators[0] if coordinators else None
else:
self._coordinator = None
if not self._coordinator:
mediainfo = self._player.avTransport.GetMediaInfo([
('InstanceID', 0)
])
duration = trackinfo.get('duration', '0:00')
# if the speaker is playing from the "line-in" source, getting
# track metadata can return NOT_IMPLEMENTED, which breaks the
# volume logic below
if duration == 'NOT_IMPLEMENTED':
duration = None
else:
duration = sum(60 ** x[0] * int(x[1]) for x in enumerate(
reversed(duration.split(':'))))
media_image_url = trackinfo.get('album_art', None)
media_artist = trackinfo.get('artist', None)
media_album_name = trackinfo.get('album', None)
media_title = trackinfo.get('title', None)
if media_image_url in ('', 'NOT_IMPLEMENTED', None):
# fallback to asking the speaker directly
media_image_url = \
'http://{host}:{port}/getaa?s=1&u={uri}'.format(
host=self._player.ip_address,
port=1400,
uri=urllib.parse.quote(mediainfo['CurrentURI'])
)
if media_artist in ('', 'NOT_IMPLEMENTED', None):
# if listening to a radio stream the media_artist field
# will be empty and the title field will contain the
# filename that is being streamed
current_uri_metadata = mediainfo["CurrentURIMetaData"]
if current_uri_metadata not in \
('', 'NOT_IMPLEMENTED', None):
# currently soco does not have an API for this
import soco
current_uri_metadata = soco.xml.XML.fromstring(
soco.utils.really_utf8(current_uri_metadata))
md_title = current_uri_metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
if md_title not in ('', 'NOT_IMPLEMENTED', None):
media_artist = ''
media_title = md_title
self._media_content_id = trackinfo.get('title', None)
self._media_duration = duration
self._media_image_url = media_image_url
self._media_artist = media_artist
self._media_album_name = media_album_name
self._media_title = media_title
else:
self._status = 'OFF'
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player.volume / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player.mute
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._coordinator:
return self._coordinator.media_content_id
else:
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._coordinator:
return self._coordinator.media_duration
else:
return self._media_duration
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._coordinator:
return self._coordinator.media_image_url
else:
return self._media_image_url
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_artist
else:
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_album_name
else:
return self._media_album_name
@property
def media_title(self):
"""Title of current playing media."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
if self._coordinator:
return self._coordinator.media_title
else:
return self._media_title
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
if not self.source_list:
# some devices do not allow source selection
return SUPPORT_SONOS ^ SUPPORT_SELECT_SOURCE
return SUPPORT_SONOS
def volume_up(self):
"""Volume up media player."""
self._player.volume += self.volume_increment
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self.volume_increment
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._player.volume = str(int(volume * 100))
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._player.mute = mute
def select_source(self, source):
"""Select input source."""
if source == SUPPORT_SOURCE_LINEIN:
self._player.switch_to_line_in()
elif source == SUPPORT_SOURCE_TV:
self._player.switch_to_tv()
@property
def source_list(self):
"""List of available input sources."""
model_name = self._speaker_info['model_name']
if 'PLAY:5' in model_name:
return [SUPPORT_SOURCE_LINEIN]
elif 'PLAYBAR' in model_name:
return [SUPPORT_SOURCE_LINEIN, SUPPORT_SOURCE_TV]
@property
def source(self):
"""Name of the current input source."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
return None
@only_if_coordinator
def turn_off(self):
"""Turn off media player."""
self._player.pause()
def media_play(self):
"""Send play command."""
if self._coordinator:
self._coordinator.media_play()
else:
self._player.play()
def media_pause(self):
"""Send pause command."""
if self._coordinator:
self._coordinator.media_pause()
else:
self._player.pause()
def media_next_track(self):
"""Send next track command."""
if self._coordinator:
self._coordinator.media_next_track()
else:
self._player.next()
def media_previous_track(self):
"""Send next track command."""
if self._coordinator:
self._coordinator.media_previous_track()
else:
self._player.previous()
def media_seek(self, position):
"""Send seek command."""
if self._coordinator:
self._coordinator.media_seek(position)
else:
self._player.seek(str(datetime.timedelta(seconds=int(position))))
def clear_playlist(self):
"""Clear players playlist."""
if self._coordinator:
self._coordinator.clear_playlist()
else:
self._player.clear_queue()
@only_if_coordinator
def turn_on(self):
"""Turn the media player on."""
self._player.play()
def play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if self._coordinator:
self._coordinator.play_media(media_type, media_id, **kwargs)
else:
if kwargs.get(ATTR_MEDIA_ENQUEUE):
from soco.exceptions import SoCoUPnPException
try:
self._player.add_uri_to_queue(media_id)
except SoCoUPnPException:
_LOGGER.error('Error parsing media uri "%s", '
"please check it's a valid media resource "
'supported by Sonos', media_id)
else:
self._player.play_uri(media_id)
def group_players(self):
"""Group all players under this coordinator."""
if self._coordinator:
self._coordinator.group_players()
else:
self._player.partymode()
@only_if_coordinator
def unjoin(self):
"""Unjoin the player from a group."""
self._player.unjoin()
@only_if_coordinator
def snapshot(self):
"""Snapshot the player."""
self.soco_snapshot.snapshot()
@only_if_coordinator
def restore(self):
"""Restore snapshot for the player."""
self.soco_snapshot.restore(True)
@only_if_coordinator
def set_sleep_timer(self, sleep_time):
"""Set the timer on the player."""
self._player.set_sleep_timer(sleep_time)
@only_if_coordinator
def clear_sleep_timer(self):
"""Clear the timer on the player."""
self._player.set_sleep_timer(None)
@property
def available(self):
"""Return True if player is reachable, False otherwise."""
try:
sock = socket.create_connection(
address=(self._player.ip_address, 1443),
timeout=3)
sock.close()
return True
except socket.error:
return False
|
betrisey/home-assistant
|
homeassistant/components/media_player/sonos.py
|
Python
|
mit
| 20,355
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from shutil import which
from unittest.mock import patch
from urllib.parse import quote
from libthumbor import CryptoURL
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, RequestParameters, ServerParameters
from thumbor.importer import Importer
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
class ImageOperationsWithAutoWebPTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY="ACME-SEC")
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which("gifsicle")
return ctx
async def get_as_webp(self, url):
return await self.async_fetch(
url, headers={"Accept": "image/webp,*/*;q=0.8"}
)
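    # Clients advertise WebP support via the Accept header; with AUTO_WEBP on,
    # the handler negotiates the output format, which the Vary/Accept
    # assertions below verify.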
@gen_test
async def test_can_auto_convert_jpeg(self):
response = await self.get_as_webp("/unsafe/image.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_should_not_convert_animated_gifs_to_webp(self):
response = await self.get_as_webp("/unsafe/animated.gif")
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_gif()
@gen_test
async def test_should_convert_image_with_small_width_and_no_height(self):
response = await self.get_as_webp("/unsafe/0x0:1681x596/1x/image.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_should_convert_monochromatic_jpeg(self):
response = await self.get_as_webp("/unsafe/grayscale.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_should_convert_cmyk_jpeg(self):
response = await self.get_as_webp("/unsafe/cmyk.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_shouldnt_convert_cmyk_jpeg_if_format_specified(self):
response = await self.get_as_webp(
"/unsafe/filters:format(png)/cmyk.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_png()
@gen_test
async def test_shouldnt_convert_cmyk_jpeg_if_gif(self):
response = await self.get_as_webp(
"/unsafe/filters:format(gif)/cmyk.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_gif()
@gen_test
async def test_shouldnt_convert_if_format_specified(self):
response = await self.get_as_webp(
"/unsafe/filters:format(gif)/image.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_gif()
@gen_test
async def test_shouldnt_add_vary_if_format_specified(self):
response = await self.get_as_webp(
"/unsafe/filters:format(webp)/image.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_webp()
@gen_test
async def test_should_add_vary_if_format_invalid(self):
response = await self.get_as_webp(
"/unsafe/filters:format(asdf)/image.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_converting_return_etags(self):
response = await self.get_as_webp("/unsafe/image.jpg")
expect(response.headers).to_include("Etag")
class ImageOperationsWithAutoWebPWithResultStorageTestCase(
BaseImagingTestCase
):
def get_request(self, *args, **kwargs):
return RequestParameters(*args, **kwargs)
def get_context(self):
cfg = Config(SECURITY_KEY="ACME-SEC")
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = "thumbor.result_storages.file_storage"
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
ctx = Context(server, cfg, importer)
ctx.request = self.get_request()
ctx.server.gifsicle_path = which("gifsicle")
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
async def get_as_webp(self, url):
return await self.async_fetch(
url, headers={"Accept": "image/webp,*/*;q=0.8"}
)
@patch("thumbor.handlers.Context")
@gen_test
async def test_can_auto_convert_jpeg_from_result_storage(
self, context_mock
): # NOQA
context_mock.return_value = self.context
crypto = CryptoURL("ACME-SEC")
url = crypto.generate(
image_url=quote("http://test.com/smart/image.jpg")
)
self.context.request = self.get_request(url=url, accepts_webp=True)
with open("./tests/fixtures/images/image.webp", "rb") as fixture:
await self.context.modules.result_storage.put(fixture.read())
response = await self.get_as_webp(url)
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@patch("thumbor.handlers.Context")
@gen_test
async def test_can_auto_convert_unsafe_jpeg_from_result_storage(
self, context_mock
):
context_mock.return_value = self.context
self.context.request = self.get_request(accepts_webp=True)
response = await self.get_as_webp("/unsafe/image.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
|
thumbor/thumbor
|
tests/handlers/test_base_handler_with_auto_webp.py
|
Python
|
mit
| 7,701
| 0.00013
|
#!/usr/bin/env python
import os
import sys
import string
import random
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
kernels = {
'aes-aes' : 'gf_alog,gf_log,gf_mulinv,rj_sbox,rj_xtime,aes_subBytes,aes_addRoundKey,aes_addRoundKey_cpy,aes_shiftRows,aes_mixColumns,aes_expandEncKey,aes256_encrypt_ecb',
'backprop-backprop':'sigmoid,update_layer,update,propagate_error_out,propagate_error_layer,update_weights,propagate_errors,comp_error,backprop',
'bfs-bulk' : 'bfs',
'bfs-queue' : 'bfs',
'kmp-kmp' : 'CPF,kmp',
'fft-strided' : 'fft',
'fft-transpose':'twiddles8,loadx8,loady8,fft1D_512',
'gemm-blocked': 'bbgemm',
'gemm-ncubed' : 'gemm',
'md-grid':'md',
'md-knn':'md_kernel',
'nw-nw' : 'needwun',
'sort-merge' : 'merge,mergesort',
'sort-radix' : 'local_scan,sum_scan,last_step_scan,init,hist,update,ss_sort',
'spmv-crs' : 'spmv',
'spmv-ellpack' : 'ellpack',
'stencil-stencil2d' : 'stencil',
'stencil-stencil3d' : 'stencil3d',
'viterbi-viterbi' : 'viterbi',
}
def main (directory, bench, source):
if not 'TRACER_HOME' in os.environ:
raise Exception('Set TRACER_HOME directory as an environment variable')
if not 'MACH_HOME' in os.environ:
raise Exception('Set MACH_HOME directory as an environment variable')
#id = id_generator()
os.chdir(directory)
obj = source + '.llvm'
opt_obj = source + '-opt.llvm'
executable = source + '-instrumented'
os.environ['WORKLOAD']=kernels[bench]
test = os.getenv('MACH_HOME')+'/common/harness.c'
test_obj = source + '_test.llvm'
source_file = source + '.c'
#for key in os.environ.keys():
# print "%30s %s" % (key,os.environ[key])
print directory
print '======================================================================'
command = 'clang -g -O1 -S -I' + os.environ['ALADDIN_HOME'] + \
' -fno-slp-vectorize -fno-vectorize -fno-unroll-loops ' + \
' -fno-inline -fno-builtin -emit-llvm -o ' + obj + ' ' + source_file
print command
os.system(command)
command = 'clang -g -O1 -S -I' + os.environ['ALADDIN_HOME'] + \
' -fno-slp-vectorize -fno-vectorize -fno-unroll-loops ' + \
' -fno-inline -fno-builtin -emit-llvm -o ' + test_obj + ' ' + test
print command
os.system(command)
command = 'opt -S -load=' + os.getenv('TRACER_HOME') + \
'/full-trace/full_trace.so -fulltrace ' + obj + ' -o ' + opt_obj
print command
os.system(command)
command = 'llvm-link -o full.llvm ' + opt_obj + ' ' + test_obj + ' ' + \
os.getenv('TRACER_HOME') + '/profile-func/trace_logger.llvm'
print command
os.system(command)
command = 'llc -O0 -disable-fp-elim -filetype=asm -o full.s full.llvm'
print command
os.system(command)
command = 'gcc -O0 -fno-inline -o ' + executable + ' full.s -lm -lz'
print command
os.system(command)
command = './' + executable + ' input.data check.data'
print command
os.system(command)
print '======================================================================'
if __name__ == '__main__':
directory = sys.argv[1]
bench = sys.argv[2]
source = sys.argv[3]
print directory, bench, source
main(directory, bench, source)
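# Pipeline summary: clang emits LLVM IR for the benchmark source and the
# MachSuite harness, the full_trace opt pass instruments the kernels named in
# WORKLOAD, llvm-link merges in the trace_logger runtime, llc lowers the result
# to assembly, gcc links the instrumented executable, and running it emits the
# dynamic trace.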
|
giosalv/526-aladdin
|
MachSuite/script/llvm_compile.py
|
Python
|
apache-2.0
| 3,257
| 0.023641
|
import Linked_List
import sys
import random
def split_list(lst, a, b):
if lst.length % 2 == 1:
first_length = (lst.length / 2) + 1
else:
first_length = lst.length / 2
list_iterator = lst.head
count = 0
while count < first_length:
a.append(list_iterator.data)
list_iterator = list_iterator.next
count += 1
while list_iterator != None:
b.append(list_iterator.data)
list_iterator = list_iterator.next
lst = Linked_List.LinkedList()
for iterator in range(0, int(sys.argv[1])):
lst.push(random.randint(1, 101))
print "\nOriginal List:"
lst.print_list()
a = Linked_List.LinkedList()
b = Linked_List.LinkedList()
split_list(lst, a, b)
print "\nSplitted List A:"
a.print_list()
print "\nSplitted List B:"
b.print_list()
# Performance
# ------------
#
# * Speed
#   The algorithm traverses the original list once and constructs both
#   output lists. The list construction operation (append) can be
#   implemented in O(1), so the overall time complexity is O(N).
#
#   Could it be faster? The split itself needs only a constant number of
#   pointer changes, but locating the midpoint still requires walking
#   half the list, so O(N) is the best achievable bound.
#
# * Memory
#   O(N) extra: the two new lists together copy all N elements, roughly
#   doubling the memory held by the original list.
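# A pointer-based variant (illustrative sketch; it operates on raw nodes with
# a `.next` attribute rather than the Linked_List wrapper used above): find
# the midpoint with slow/fast pointers and split by relinking, avoiding the
# element copies that split_list makes.
def front_back_split(head):
    if head is None or head.next is None:
        return head, None
    slow, fast = head, head.next
    while fast is not None and fast.next is not None:
        slow = slow.next           # advances one node per step
        fast = fast.next.next      # advances two nodes per step
    back = slow.next               # back half starts after the midpoint
    slow.next = None               # terminate the front half
    return head, back              # front gets the extra node on odd lengths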
|
afaquejam/Linked-List-Problems
|
Others/FrontBackSplit.py
|
Python
|
mit
| 1,330
| 0.003008
|
import re
from typing import Callable, Dict, List # noqa: F401
FormatText = Callable[[str], str]
ascii: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
r'[\]^_`'
'abcdefghijklmnopqrstuvwxyz'
'{|}~')
upper: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
r'[\]^_`'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'{|}~')
lower: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'abcdefghijklmnopqrstuvwxyz'
r'[\]^_`'
'abcdefghijklmnopqrstuvwxyz'
'{|}~')
full: str = ('''　！＂＃＄％＆＇（）＊＋，ー．／'''
             '０１２３４５６７８９'
             '：；〈＝〉？＠'
             'ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺ'
             '［＼］＾＿｀'
             'ａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ'
             '｛｜｝～')
parenthesized: str = (''' !"#$%&'()*+,-./'''
                      '0⑴⑵⑶⑷⑸⑹⑺⑻⑼'
                      ':;<=>?@'
                      '⒜⒝⒞⒟⒠⒡⒢⒣⒤⒥⒦⒧⒨⒩⒪⒫⒬⒭⒮⒯⒰⒱⒲⒳⒴⒵'
                      r'[\]^_`'
                      '⒜⒝⒞⒟⒠⒡⒢⒣⒤⒥⒦⒧⒨⒩⒪⒫⒬⒭⒮⒯⒰⒱⒲⒳⒴⒵'
                      '{|}~')
circled: str = (''' !"#$%&'()*+,-./'''
                '⓪①②③④⑤⑥⑦⑧⑨'
                ':;<=>?@'
                'ⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏ'
                '[\\]^_`'
                'ⓐⓑⓒⓓⓔⓕⓖⓗⓘⓙⓚⓛⓜⓝⓞⓟⓠⓡⓢⓣⓤⓥⓦⓧⓨⓩ'
                '{|}~')
smallcaps: str = (''' !"#$%&'()*+,-./'''
                  '0123456789'
                  ':;<=>?@'
                  'ᴀʙᴄᴅᴇꜰɢʜɪᴊᴋʟᴍɴᴏᴩQʀsᴛᴜᴠᴡxYᴢ'
                  r'[\]^_`'
                  'ᴀʙᴄᴅᴇꜰɢʜɪᴊᴋʟᴍɴᴏᴩqʀsᴛᴜᴠᴡxyᴢ'
                  '{|}~')
upsidedown: str = (''' ¡"#$%⅋,()*+‘-./'''
                   '0123456789'
                   ':;<=>¿@'
                   'ɐqɔpǝɟƃɥıɾʞןɯuodbɹsʇnʌʍxʎz'
                   r'[\]^_`'
                   'ɐqɔpǝɟƃɥıɾʞןɯuodbɹsʇnʌʍxʎz'
                   '{|}~')
serifBold: str = (''' !"#$%&'()*+,-./'''
                  '𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗'
                  ':;<=>?@'
                  '𝐀𝐁𝐂𝐃𝐄𝐅𝐆𝐇𝐈𝐉𝐊𝐋𝐌𝐍𝐎𝐏𝐐𝐑𝐒𝐓𝐔𝐕𝐖𝐗𝐘𝐙'
                  r'[\]^_`'
                  '𝐚𝐛𝐜𝐝𝐞𝐟𝐠𝐡𝐢𝐣𝐤𝐥𝐦𝐧𝐨𝐩𝐪𝐫𝐬𝐭𝐮𝐯𝐰𝐱𝐲𝐳'
                  '{|}~')
serifItalic: str = (''' !"#$%&'()*+,-./'''
                    '0123456789'
                    ':;<=>?@'
                    '𝐴𝐵𝐶𝐷𝐸𝐹𝐺𝐻𝐼𝐽𝐾𝐿𝑀𝑁𝑂𝑃𝑄𝑅𝑆𝑇𝑈𝑉𝑊𝑋𝑌𝑍'
                    r'[\]^_`'
                    '𝑎𝑏𝑐𝑑𝑒𝑓𝑔ℎ𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝑠𝑡𝑢𝑣𝑤𝑥𝑦𝑧'
                    '{|}~')
serifBoldItalic: str = (''' !"#$%&'()*+,-./'''
                        '𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗'
                        ':;<=>?@'
                        '𝑨𝑩𝑪𝑫𝑬𝑭𝑮𝑯𝑰𝑱𝑲𝑳𝑴𝑵𝑶𝑷𝑸𝑹𝑺𝑻𝑼𝑽𝑾𝑿𝒀𝒁'
                        r'[\]^_`'
                        '𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛'
                        '{|}~')
sanSerif: str = (''' !"#$%&'()*+,-./'''
                 '𝟢𝟣𝟤𝟥𝟦𝟧𝟨𝟩𝟪𝟫'
                 ':;<=>?@'
                 '𝖠𝖡𝖢𝖣𝖤𝖥𝖦𝖧𝖨𝖩𝖪𝖫𝖬𝖭𝖮𝖯𝖰𝖱𝖲𝖳𝖴𝖵𝖶𝖷𝖸𝖹'
                 r'[\]^_`'
                 '𝖺𝖻𝖼𝖽𝖾𝖿𝗀𝗁𝗂𝗃𝗄𝗅𝗆𝗇𝗈𝗉𝗊𝗋𝗌𝗍𝗎𝗏𝗐𝗑𝗒𝗓'
                 '{|}~')
sanSerifBold: str = (''' !"#$%&'()*+,-./'''
                     '𝟬𝟭𝟮𝟯𝟰𝟱𝟲𝟳𝟴𝟵'
                     ':;<=>?@'
                     '𝗔𝗕𝗖𝗗𝗘𝗙𝗚𝗛𝗜𝗝𝗞𝗟𝗠𝗡𝗢𝗣𝗤𝗥𝗦𝗧𝗨𝗩𝗪𝗫𝗬𝗭'
                     r'[\]^_`'
                     '𝗮𝗯𝗰𝗱𝗲𝗳𝗴𝗵𝗶𝗷𝗸𝗹𝗺𝗻𝗼𝗽𝗾𝗿𝘀𝘁𝘂𝘃𝘄𝘅𝘆𝘇'
                     '{|}~')
sanSerifItalic: str = (''' !"#$%&'()*+,-./'''
                       '𝟢𝟣𝟤𝟥𝟦𝟧𝟨𝟩𝟪𝟫'
                       ':;<=>?@'
                       '𝘈𝘉𝘊𝘋𝘌𝘍𝘎𝘏𝘐𝘑𝘒𝘓𝘔𝘕𝘖𝘗𝘘𝘙𝘚𝘛𝘜𝘝𝘞𝘟𝘠𝘡'
                       r'[\]^_`'
                       '𝘢𝘣𝘤𝘥𝘦𝘧𝘨𝘩𝘪𝘫𝘬𝘭𝘮𝘯𝘰𝘱𝘲𝘳𝘴𝘵𝘶𝘷𝘸𝘹𝘺𝘻'
                       '{|}~')
sanSerifBoldItalic: str = (''' !"#$%&'()*+,-./'''
                           '𝟬𝟭𝟮𝟯𝟰𝟱𝟲𝟳𝟴𝟵'
                           ':;<=>?@'
                           '𝘼𝘽𝘾𝘿𝙀𝙁𝙂𝙃𝙄𝙅𝙆𝙇𝙈𝙉𝙊𝙋𝙌𝙍𝙎𝙏𝙐𝙑𝙒𝙓𝙔𝙕'
                           r'[\]^_`'
                           '𝙖𝙗𝙘𝙙𝙚𝙛𝙜𝙝𝙞𝙟𝙠𝙡𝙢𝙣𝙤𝙥𝙦𝙧𝙨𝙩𝙪𝙫𝙬𝙭𝙮𝙯'
                           '{|}~')
script: str = (''' !"#$%&'()*+,-./'''
               '0123456789'
               ':;<=>?@'
               '𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵'
               r'[\]^_`'
               '𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏'
               '{|}~')
scriptBold: str = (''' !"#$%&'()*+,-./'''
                   '𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗'
                   ':;<=>?@'
                   '𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩'
                   r'[\]^_`'
                   '𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃'
                   '{|}~')
fraktur: str = (''' !"#$%&'()*+,-./'''
                '0123456789'
                ':;<=>?@'
                '𝔄𝔅ℭ𝔇𝔈𝔉𝔊ℌℑ𝔍𝔎𝔏𝔐𝔑𝔒𝔓𝔔ℜ𝔖𝔗𝔘𝔙𝔚𝔛𝔜ℨ'
                r'[\]^_`'
                '𝔞𝔟𝔠𝔡𝔢𝔣𝔤𝔥𝔦𝔧𝔨𝔩𝔪𝔫𝔬𝔭𝔮𝔯𝔰𝔱𝔲𝔳𝔴𝔵𝔶𝔷'
                '{|}~')
frakturBold: str = (''' !"#$%&'()*+,-./'''
                    '𝟎𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗'
                    ':;<=>?@'
                    '𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅'
                    r'[\]^_`'
                    '𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟'
                    '{|}~')
monospace: str = (''' !"#$%&'()*+,-./'''
                  '𝟶𝟷𝟸𝟹𝟺𝟻𝟼𝟽𝟾𝟿'
                  ':;<=>?@'
                  '𝙰𝙱𝙲𝙳𝙴𝙵𝙶𝙷𝙸𝙹𝙺𝙻𝙼𝙽𝙾𝙿𝚀𝚁𝚂𝚃𝚄𝚅𝚆𝚇𝚈𝚉'
                  r'[\]^_`'
                  '𝚊𝚋𝚌𝚍𝚎𝚏𝚐𝚑𝚒𝚓𝚔𝚕𝚖𝚗𝚘𝚙𝚚𝚛𝚜𝚝𝚞𝚟𝚠𝚡𝚢𝚣'
                  '{|}~')
doubleStruck: str = (''' !"#$%&'()*+,-./'''
                     '𝟘𝟙𝟚𝟛𝟜𝟝𝟞𝟟𝟠𝟡'
                     ':;<=>?@'
                     '𝔸𝔹ℂ𝔻𝔼𝔽𝔾ℍ𝕀𝕁𝕂𝕃𝕄ℕ𝕆ℙℚℝ𝕊𝕋𝕌𝕍𝕎𝕏𝕐ℤ'
                     r'[\]^_`'
                     '𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫'
                     '{|}~')
def _createAsciiTo(name: str,
toTable: str) -> FormatText:
table = str.maketrans(ascii, toTable)
def asciiTo(text: str) -> str:
return text.translate(table)
asciiTo.__name__ = name
return asciiTo
to_upper: FormatText = _createAsciiTo('to_upper', upper)
to_lower: FormatText = _createAsciiTo('to_lower', lower)
to_full_width: FormatText = _createAsciiTo('to_full_width', full)
to_parenthesized: FormatText = _createAsciiTo(
'to_parenthesized', parenthesized)
to_circled: FormatText = _createAsciiTo('to_circled', circled)
to_small_caps: FormatText = _createAsciiTo('to_small_caps', smallcaps)
_to_upside_down_reversed: FormatText = _createAsciiTo(
'to_upside_down', upsidedown)
def to_upside_down(text: str) -> str:
return _to_upside_down_reversed(text)[::-1]
to_serif_bold: FormatText = _createAsciiTo('to_serif_bold', serifBold)
to_serif_italic: FormatText = _createAsciiTo('to_serif_italic', serifItalic)
to_serif_bold_italic: FormatText = _createAsciiTo(
'to_serif_bold_italic', serifBoldItalic)
to_sanserif: FormatText = _createAsciiTo('to_sanserif', sanSerif)
to_sanserif_bold: FormatText = _createAsciiTo('to_sanserif_bold', sanSerifBold)
to_sanserif_italic: FormatText = _createAsciiTo(
'to_sanserif_italic', sanSerifItalic)
to_sanserif_bold_italic: FormatText = _createAsciiTo(
'to_sanserif_bold_italic', sanSerifBoldItalic)
to_script: FormatText = _createAsciiTo('to_script', script)
to_script_bold: FormatText = _createAsciiTo('to_script_bold', scriptBold)
to_fraktur: FormatText = _createAsciiTo('to_fraktur', fraktur)
to_fraktur_bold: FormatText = _createAsciiTo('to_fraktur_bold', frakturBold)
to_monospace: FormatText = _createAsciiTo('to_monospace', monospace)
to_double_struck: FormatText = _createAsciiTo('to_double_struck', doubleStruck)
def to_ascii(text: str) -> str:
fromTable: List[str]
fromTable = [full, parenthesized, circled, smallcaps, upsidedown,
serifBold, serifItalic, serifBoldItalic, sanSerif,
sanSerifBold, sanSerifItalic, sanSerifBoldItalic, script,
scriptBold, fraktur, frakturBold, monospace, doubleStruck,
ascii]
toTable: Dict[int, int] = {}
for table in fromTable:
toTable.update(str.maketrans(table, ascii))
return text.translate(toTable)
def format(string: str,
format_: str) -> str:
format_ = format_.lower()
strTable: Dict[str, FormatText] = {
'ascii': to_ascii,
'upper': to_upper,
'lower': to_lower,
'full': to_full_width,
'parenthesized': to_parenthesized,
'circled': to_circled,
'smallcaps': to_small_caps,
'upsidedown': to_upside_down,
'sanserif': to_sanserif,
'script': to_script,
'cursive': to_script,
'fraktur': to_fraktur,
'monospace': to_monospace,
'doublestruck': to_double_struck,
}
reTable: Dict[str, FormatText] = {
r'serif-?bold': to_serif_bold,
r'serif-?italic': to_serif_italic,
r'serif-?(bold-?italic|italic-?bold)': to_serif_bold_italic,
r'(sanserif-?)?bold': to_sanserif_bold,
r'(sanserif-?)?italic': to_sanserif_italic,
r'(sanserif-?)?(bold-?italic|italic-?bold)': to_sanserif_bold_italic,
r'(script|cursive)-?bold': to_script_bold,
r'fraktur-?bold': to_fraktur_bold,
}
if format_ in strTable:
return strTable[format_](string)
pattern: str
for pattern in reTable:
if re.fullmatch(pattern, format_):
return reTable[pattern](string)
return string
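# Minimal usage sketch (illustrative):
#
#     format('Hello', 'fraktur')    # -> 'ℌ𝔢𝔩𝔩𝔬'
#     to_full_width('abc 123')      # -> 'ａｂｃ　１２３'
#     to_ascii(to_circled('abc'))   # round-trips back to 'abc'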
|
MeGotsThis/BotGotsThis
|
lib/helper/textformat.py
|
Python
|
gpl-3.0
| 11,881
| 0
|