| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
mrpau/kolibri | kolibri/core/tasks/queue.py | Python | mit | 3,851 | 0.002337
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.job import State
from kolibri.core.tasks.storage import Storage
DEFAULT_QUEUE = "ICEQUBE_DEFAULT_QUEUE"
class Queue(object):
def __init__(self, queue=DEFAULT_QUEUE, connection=None):
if connection is None:
raise ValueError("Connection must be defined")
self.name = queue
self.storage = Storage(connection)
def __len__(self):
return self.storage.count_all_jobs(self.name)
@property
def job_ids(self):
return [job.job_id for job in self.storage.get_all_jobs(self.name)]
@property
def jobs(self):
"""
Return all the jobs scheduled, queued, running, failed or completed.
Returns: A list of all jobs.
"""
return self.storage.get_all_jobs(self.name)
def enqueue(self, func, *args, **kwargs):
"""
Enqueues a function func for execution.
One special parameter is track_progress. If passed in and not None, the func will be passed in a
keyword parameter called update_progress:
def update_progress(progress, total_progress, stage=""):
The running function can call the update_progress function to notify interested parties of the function's
current progress.
Another special parameter is the "cancellable" keyword parameter. When passed in and not None, a special
"check_for_cancel" parameter is passed in. When called, it raises an error when the user has requested a job
to be cancelled.
The caller can also pass in any pickleable object into the "extra_metadata" parameter. This data is stored
within the job and can be retrieved when the job status is queried.
All other parameters are directly passed to the function when it starts running.
:type func: callable or str
:param func: A callable object that will be scheduled for running.
:return: a string representing the job_id.
"""
# if the func is already a job object, just schedule that directly.
if isinstance(func, Job):
job = func
# else, turn it into a job first.
else:
job = Job(func, *args, **kwargs)
job.state = State.QUEUED
job_id = self.storage.enqueue_job(job, self.name)
return job_id
def cancel(self, job_id):
"""
Mark a job as canceling, and let the worker pick this up to initiate
the cancel of the job.
:param job_id: the job_id of the Job to cancel.
"""
self.storage.mark_job_as_canceling(job_id)
def fetch_job(self, job_id):
"""
Returns a Job object corresponding to the job_id. From there, you can query for the following attributes:
- function string to run
- its current state (see Job.State for the list of states)
- progress (returning an int), total_progress (returning an int), and percentage_progress
(derived from running job.progress/total_progress)
- the job.exception and job.traceback, if the job's function returned an error
:param job_id: the job_id to get the Job object for
:return: the Job object corresponding to the job_id
"""
return self.storage.get_job(job_id)
def empty(self):
"""
Clear all jobs.
"""
self.storage.clear(force=True, queue=self.name)
def clear(self):
"""
Clear all succeeded, failed, or cancelled jobs.
"""
self.storage.clear(force=False, queue=self.name)
def clear_job(self, job_id):
"""
Clear a job if it has succeeded, failed, or been cancelled.
:type job_id: str
:param job_id: id of job to clear.
"""
self.storage.clear(job_id=job_id, force=False)
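A minimal usage sketch of the Queue API above; the SQLAlchemy engine and the task function are illustrative assumptions, not part of the original file:
from sqlalchemy import create_engine

def add(a, b):
    return a + b

connection = create_engine("sqlite:///job_storage.sqlite3")  # hypothetical connection
q = Queue(connection=connection)
job_id = q.enqueue(add, 1, 2)     # returns the job_id string
job = q.fetch_job(job_id)         # Job object exposing state, progress, exception, ...
q.cancel(job_id)                  # mark the job as canceling for a worker to pick up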
PaleNeutron/EpubBuilder | my_mainwindow.py | Python | apache-2.0 | 3,933 | 0.002628
__author__ = 'PaleNeutron'
import os
from urllib.parse import urlparse, unquote
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
class MyMainWindow(QtWidgets.QMainWindow):
file_loaded = QtCore.pyqtSignal(str)
image_loaded = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self):
super(MyMainWindow, self).__init__()
self.windowList = []
self.text_path = ''
self.epub_path = ''
self.win_file_mime = "application/x-qt-windows-mime;value=\"FileNameW\""
self.text_uri_mime = "text/uri-list"
self.create_content_browser()
def create_content_browser(self):
self.content_browser = QtWidgets.QTextBrowser()
self.content_browser.setFontPointSize(12)
self.content_browser.setGeometry(QtCore.QRect(300, 150, 600, 400))
self.windowList.append(self.content_browser)
def dragEnterEvent(self, ev):
ev.accept()
def load_file(self, file_path):
self.file_loaded.emit(file_path)
# def image_loaded(self, file_path):
#     with open(file_path, "b") as f:
#         r = f.read()
#     with open("images/cover.jpg", "wb") as f:
#         f.write(r)
# def epub_loaded(self, file_path):
# self.epub_path = file_path
# self.file_loaded.emit(False, )
def uri_to_path(self, uri):
if sys.platform == "win32":
path = unquote(urlparse(uri).path)[1:]
elif sys.platform == "linux":
path = unquote(urlparse(uri).path)
else:
path = None
return path
def dropEvent(self, ev):
# formats = ev.mimeData().formats()
# for i in formats:
# print(i)
# if ev.mimeData().hasFormat(self.win_file_mime):
# ev.accept()
# file_path = bytes(ev.mimeData().data(self.win_file_mime).data())[:-2].decode('utf16')
# if file_path.endswith(".txt"):
# self.text_loaded(file_path)
# elif file_path.endswith(".jpg") or file_path.endswith(".jpeg") or file_path.endswith(".png"):
# self.image_loaded(file_path)
# elif file_path.endswith(".epub"):
# self.epub_loaded(file_path)
# print(file_path)
if ev.mimeData().hasImage():
self.image_loaded.emit(ev.mimeData().imageData())
if ev.mimeData().hasFormat(self.text_uri_mime):
uri = ev.mimeData().data(self.text_uri_mime).data().decode("utf8").strip()
file_path = self.uri_to_path(uri)
if uri.lower().endswith(".txt") or uri.lower().endswith(".epub"):
self.load_file(file_path)
elif uri.lower().endswith(".zip"):
# Open a zip archive and get the txt inside
import zipfile
zf = zipfile.ZipFile(file_path)
for filename in zf.namelist():
# If a txt file in the archive is larger than 10 KB, extract it to the current folder
if filename.lower().endswith(".txt") and zf.getinfo(filename).file_size > 10 * 1024:
zf.extract(filename)
# Send the file-location signal
self.load_file(os.curdir + os.sep + filename)
break
elif uri.lower().endswith(".rar"):
import rarfile
rf = rarfile.RarFile(file_path)
for filename in rf.namelist():
# If a txt file in the archive is larger than 10 KB, extract it to the current folder
if filename.lower().endswith(".txt") and rf.getinfo(filename).file_size > 10 * 1024:
rf.extract(filename)
# Send the file-location signal
self.load_file(os.curdir + os.sep + filename)
break
else:
ev.ignore()
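A short illustration of what uri_to_path returns for a file:// URI on the two handled platforms (the paths are made up):
# On Windows (sys.platform == "win32"):
#   "file:///C:/Users/test/My%20Book.txt"  ->  "C:/Users/test/My Book.txt"
# On Linux (sys.platform == "linux"):
#   "file:///home/test/My%20Book.txt"      ->  "/home/test/My Book.txt"
# Any other platform returns None.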
andreaso/ansible | lib/ansible/modules/system/seboolean.py | Python | gpl-3.0 | 7,200 | 0.003194
#!/usr/bin/python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure
required: true
default: null
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot
required: false
default: no
choices: [ "yes", "no" ]
state:
description:
- Desired boolean value
required: true
default: null
choices: [ 'yes', 'no' ]
notes:
- Not tested on any debian based system
requirements: [ libselinux-python, libsemanage-python ]
author: "Stephen Fromm (@sfromm)"
'''
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import semanage
HAVE_SEMANAGE=True
except ImportError:
HAVE_SEMANAGE=False
def has_boolean_value(module, name):
bools = []
try:
rc, bools = selinux.security_get_boolean_names()
except OSError:
module.fail_json(msg="Failed to get list of boolean names")
if to_bytes(name) in bools:
return True
else:
return False
def get_boolean_value(module, name):
state = 0
try:
state = selinux.security_get_boolean_active(name)
except OSError:
module.fail_json(msg="Failed to determine current state for boolean %s" % name)
if state == 1:
return True
else:
return False
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot..
def semanage_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
handle = semanage.semanage_handle_create()
if handle is None:
module.fail_json(msg="Failed to create semanage library handle")
try:
managed = semanage.semanage_is_managed(handle)
if managed < 0:
module.fail_json(msg="Failed to determine whether policy is manage")
if managed == 0:
if os.getuid() == 0:
module.fail_json(msg="Cannot set persistent booleans without managed policy")
else:
module.fail_json(msg="Cannot set persistent booleans; please try as root")
if semanage.semanage_connect(handle) < 0:
module.fail_json(msg="Failed to connect to semanage")
if semanage.semanage_begin_transaction(handle) < 0:
module.fail_json(msg="Failed to begin semanage transaction")
rc, sebool = semanage.semanage_bool_create(handle)
if rc < 0:
module.fail_json(msg="Failed to create seboolean with semanage")
if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
module.fail_json(msg="Failed to set seboolean name with semanage")
semanage.semanage_bool_set_value(sebool, value)
rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
if rc < 0:
module.fail_json(msg="Failed to extract boolean key with semanage")
if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to modify boolean key with semanage")
if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to set boolean key active with semanage")
semanage.semanage_bool_key_free(boolkey)
semanage.semanage_bool_free(sebool)
semanage.semanage_set_reload(handle, 0)
if semanage.semanage_commit(handle) < 0:
module.fail_json(msg="Failed to commit changes to semanage")
semanage.semanage_disconnect(handle)
semanage.semanage_handle_destroy(handle)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
return True
def set_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
try:
rc = selinux.security_set_boolean(name, value)
except OSError:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
if rc == 0:
return True
else:
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True),
persistent=dict(default='no', type='bool'),
state=dict(required=True, type='bool')
),
supports_check_mode=True
)
if not HAVE_SELINUX:
module.fail_json(msg="This module requires libselinux-python support")
if not HAVE_SEMANAGE:
module.fail_json(msg="This module requires libsemanage-python support")
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
name = module.params['name']
persistent = module.params['persistent']
state = module.params['state']
result = {}
result['name'] = name
if hasattr(selinux, 'selinux_boolean_sub'):
# selinux_boolean_sub allows sites to rename a boolean and alias the old name
# Feature only available in selinux library since 2012.
name = selinux.selinux_boolean_sub(name)
if not has_boolean_value(module, name):
module.fail_json(msg="SELinux boolean %s does not exist." % name)
cur_value = get_boolean_value(module, name)
if cur_value == state:
result['state'] = cur_value
result['changed'] = False
module.exit_json(**result)
if module.check_mode:
module.exit_json(changed=True)
if persistent:
r = semanage_boolean_value(module, name, state)
else:
r = set_boolean_value(module, name, state)
result['changed'] = r
if not r:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
try:
selinux.security_commit_booleans()
except:
module.fail_json(msg="Failed to commit pending boolean %s value" % name)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils._text import to_bytes
if __name__ == '__main__':
main()
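A minimal standalone sketch of the same libselinux calls the module uses above (requires the selinux Python bindings; the boolean name is just an example):
import selinux

name = "httpd_can_network_connect"
if selinux.is_selinux_enabled():
    current = selinux.security_get_boolean_active(name)      # 1 = on, 0 = off
    selinux.security_set_boolean(name, 0 if current else 1)  # runtime-only change
    selinux.security_commit_booleans()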
hackersql/sq1map | thirdparty/bottle/bottle.py | Python | gpl-3.0 | 152,507 | 0.001489
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command Line Interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to main() is at the end of the file.
def _cli_parse(args):
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [options] package.module:app")
opt = parser.add_option
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opts, args = parser.parse_args(args[1:])
return opts, args, parser
def _cli_patch(args):
opts, _, _ = _cli_parse(args)
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ###########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
from inspect import getargspec
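# A quick sanity check (not part of bottle): for a simple function the
# Signature-based shim above should match the old inspect.getargspec result:
#     def f(a, b=1, *args, **kw): pass
#     getargspec(f)  ->  (['a', 'b'], 'args', 'kw', (1,))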
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser, Error as ConfigParserError
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser, \
Error as ConfigParserError
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self):
pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __ge
ellisdg/3DUnetCNN | unet3d/utils/normalize.py | Python | mit | 7,176 | 0.002508
import numpy as np
def zero_mean_normalize_image_data(data, axis=(0, 1, 2)):
return np.divide(data - data.mean(axis=axis), data.std(axis=axis))
def foreground_zero_mean_normalize_image_data(data, channel_dim=4, background_value=0, tolerance=1e-5):
data = np.copy(data)
if data.ndim == channel_dim or data.shape[channel_dim] == 1:
# only 1 channel, so the std and mean calculations are straightforward
foreground_mask = np.abs(data) > (background_value + tolerance)
foreground = data[foreground_mask]
mean = foreground.mean()
std = foreground.std()
data[foreground_mask] = np.divide(foreground - mean, std)
return data
else:
# std and mean need to be calculated for each channel in the 4th dimension
for channel in range(data.shape[channel_dim]):
channel_data = data[..., channel]
channel_mask = np.abs(channel_data) > (background_value + tolerance)
channel_foreground = channel_data[channel_mask]
channel_mean = channel_foreground.mean()
channel_std = channel_foreground.std()
channel_data[channel_mask] = np.divide(channel_foreground - channel_mean, channel_std)
data[..., channel] = channel_data
return data
def zero_floor_normalize_image_data(data, axis=(0, 1, 2), floor_percentile=1, floor=0):
floor_threshold = np.percentile(data, floor_percentile, axis=axis)
if data.ndim != len(axis):
floor_threshold_shape = np.asarray(floor_threshold.shape * data.ndim)
floor_threshold_shape[np.asarray(axis)] = 1
floor_threshold = floor_threshold.reshape(floor_threshold_shape)
background = data <= floor_threshold
data = np.ma.masked_array(data - floor_threshold, mask=background)
std = data.std(axis=axis)
if data.ndim != len(axis):
std = std.reshape(floor_threshold_shape)
return np.divide(data, std).filled(floor)
def zero_one_window(data, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1, floor=0, ceiling=1,
channels_axis=None):
"""
:param data: Numpy ndarray.
:param axis:
:param ceiling_percentile: Percentile value of the foreground to set to the ceiling.
:param floor_percentile: Percentile value of the image to set to the floor.
:param floor: New minimum value.
:param ceiling: New maximum value.
:param channels_axis:
:return:
"""
data = np.copy(data)
if len(axis) != data.ndim:
floor_threshold = np.percentile(data, floor_percentile, axis=axis)
if channels_axis is None:
channels_axis = find_channel_axis(data.ndim, axis=axis)
data = np.moveaxis(data, channels_axis, 0)
for channel in range(data.shape[0]):
channel_data = data[channel]
# find the background
bg_mask = channel_data <= floor_threshold[channel]
# use background to find foreground
fg = channel_data[bg_mask == False]
# find threshold based on foreground percentile
ceiling_threshold = np.percentile(fg, ceiling_percentile)
# normalize the data for this channel
data[channel] = window_data(channel_data, floor_threshold=floor_threshold[channel],
ceiling_threshold=ceiling_threshold, floor=floor, ceiling=ceiling)
data = np.moveaxis(data, 0, channels_axis)
else:
floor_threshold = np.percentile(data, floor_percentile)
fg_mask = data > floor_threshold
fg = data[fg_mask]
ceiling_threshold = np.percentile(fg, ceiling_percentile)
data = window_data(data, floor_threshold=floor_threshold, ceiling_threshold=ceiling_threshold, floor=floor,
ceiling=ceiling)
return data
def find_channel_axis(ndim, axis):
for i in range(ndim):
if i not in axis and (i - ndim) not in axis:
# I don't understand the second part of this if statement
# answer: it is checking to make sure that the axis is not indexed in reverse (i.e. axis 3 might be
# indexed as -1)
channels_axis = i
return channels_axis
def static_windows(data, windows, floor=0, ceiling=1):
"""
Normalizes the data according to a set of predefined windows. This is helpful for CT normalization where the
units are static and radiologists often have a set of windowing parameters that they use to look at
different features in the image.
:param data: 3D numpy array.
:param windows:
:param floor: defaults to 0.
:param ceiling: defaults to 1.
:return: Array with data windows listed in the final dimension
"""
data = np.squeeze(data)
normalized_data = np.ones(data.shape + (len(windows),)) * floor
for i, (l, w) in enumerate(windows):
normalized_data[..., i] = radiology_style_windowing(data, l, w, floor=floor, ceiling=ceiling)
return normalized_data
def radiology_style_windowing(data, l, w, floor=0, ceiling=1):
upper = l + w/2
lower = l - w/2
return window_data(data, floor_threshold=lower, ceiling_threshold=upper, floor=floor, ceiling=ceiling)
def window_data(data, floor_threshold, ceiling_threshold, floor, ceiling):
data = (data - floor_threshold) / (ceiling_threshold - floor_threshold)
# set the data below the floor to equal the floor
data[data < floor] = floor
# set the data above the ceiling to equal the ceiling
data[data > ceiling] = ceiling
return data
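# Worked example (numbers are illustrative, not from this project): a typical CT
# soft-tissue window with level l=40 and width w=400 gives lower = 40 - 200 = -160
# and upper = 40 + 200 = 240, so radiology_style_windowing(data, 40, 400) linearly
# rescales the range [-160, 240] onto [0, 1] and clips everything outside it.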
def hist_match(source, template):
"""
Source: https://stackoverflow.com/a/33047048
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
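A minimal usage sketch of hist_match with synthetic arrays (shapes and intensities are arbitrary; numpy is already imported above as np):
source = np.random.normal(loc=100.0, scale=20.0, size=(64, 64))
template = np.random.normal(loc=500.0, scale=50.0, size=(128, 128))
matched = hist_match(source, template)
# matched keeps source's shape, but its intensity histogram now follows template's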
SickGear/SickGear | lib/enzyme/mpeg.py | Python | gpl-3.0 | 31,404 | 0.00035
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import os
import struct
import logging
import stat
from .exceptions import ParseError
from . import core
from six import byte2int, indexbytes
# get logging object
log = logging.getLogger(__name__)
# #------------------------------------------------------------------------
# # START_CODE
# #
# # Start Codes, with 'slice' occupying 0x01..0xAF
# #------------------------------------------------------------------------
START_CODE = {
0x00: 'picture_start_code',
0xB0: 'reserved',
0xB1: 'reserved',
0xB2: 'user_data_start_code',
0xB3: 'sequence_header_code',
0xB4: 'sequence_error_code',
0xB5: 'extension_start_code',
0xB6: 'reserved',
0xB7: 'sequence end',
0xB8: 'group of pictures',
}
for i in range(0x01, 0xAF):
START_CODE[i] = 'slice_start_code'
# #------------------------------------------------------------------------
# # START CODES
# #------------------------------------------------------------------------
PICTURE = 0x00
USERDATA = 0xB2
SEQ_HEAD = 0xB3
SEQ_ERR = 0xB4
EXT_START = 0xB5
SEQ_END = 0xB7
GOP = 0xB8
SEQ_START_CODE = 0xB3
PACK_PKT = 0xBA
SYS_PKT = 0xBB
PADDING_PKT = 0xBE
AUDIO_PKT = 0xC0
VIDEO_PKT = 0xE0
PRIVATE_STREAM1 = 0xBD
PRIVATE_STREAM2 = 0xBf
TS_PACKET_LENGTH = 188
TS_SYNC = 0x47
# #------------------------------------------------------------------------
# # FRAME_RATE
# #
# # A lookup table of all the standard frame rates. Some rates adhere to
# # a particular profile that ensures compatibility with VLSI capabilities
# # of the early to mid 1990s.
# #
# # CPB
# # Constrained Parameters Bitstreams, an MPEG-1 set of sampling and
# # bitstream parameters designed to normalize decoder computational
# # complexity, buffer size, and memory bandwidth while still addressing
# # the widest possible range of applications.
# #
# # Main Level
# # MPEG-2 Video Main Profile and Main Level is analogous to MPEG-1's
# # CPB, with sampling limits at CCIR 601 parameters (720x480x30 Hz or
# # 720x576x24 Hz).
# #
# #------------------------------------------------------------------------
FRAME_RATE = [
0,
24000.0 / 1001, # # 3-2 pulldown NTSC (CPB/Main Level)
24, # # Film (CPB/Main Level)
25, # # PAL/SECAM or 625/60 video
30000.0 / 1001, # # NTSC (CPB/Main Level)
30, # # drop-frame NTSC or component 525/60 (CPB/Main Level)
50, # # double-rate PAL
60000.0 / 1001, # # double-rate NTSC
60, # # double-rate, drop-frame NTSC/component 525/60 video
]
# #------------------------------------------------------------------------
# # ASPECT_RATIO -- INCOMPLETE?
# #
# # This lookup table maps the header aspect ratio index to a float value.
# # These are just the defined ratios for CPB I believe. As I understand
# # it, a stream that doesn't adhere to one of these aspect ratios is
# # technically considered non-compliant.
# #------------------------------------------------------------------------
ASPECT_RATIO = (None, # Forbidden
1.0, # 1/1 (VGA)
4.0 / 3, # 4/3 (TV)
16.0 / 9, # 16/9 (Widescreen)
2.21 # (Cinema)
)
class MPEG(core.AVContainer):
"""
Parser for various MPEG files. This includes MPEG-1 and MPEG-2
program streams, elementary streams and transport streams. The
reported length differs from the length reported by most video
players, but the length provided here is correct. An MPEG file has
no additional metadata like title, etc.; only codecs, length and
resolution are reported back.
"""
def __init__(self, file):
core.AVContainer.__init__(self)
self.sequence_header_offset = 0
self.mpeg_version = 2
self.get_time = None
self.audio = []
self.video = []
self.start = None
self.__seek_size__ = None
self.__sample_size__ = None
self.__search__ = None
self.filename = None
self.length = None
self.audio_ok = None
# detect TS (fast scan)
if not self.isTS(file):
# detect system mpeg (many infos)
if not self.isMPEG(file):
# detect PES
if not self.isPES(file):
# Maybe it's MPEG-ES
if self.isES(file):
# If isES() succeeds, we needn't do anything further.
return
if file.name.lower().endswith('mpeg') or \
file.name.lower().endswith('mpg'):
# This has to be an mpeg file. It could be a bad
# recording from an ivtv based hardware encoder with
# some bytes missing at the beginning.
# Do some more digging...
if not self.isMPEG(file, force=True) or \
not self.video or not self.audio:
# does not look like an mpeg at all
raise ParseError()
else:
# no mpeg at all
raise ParseError()
self.mime = 'video/mpeg'
if not self.video:
self.video.append(core.VideoStream())
if self.sequence_header_offset <= 0:
return
self.progressive(file)
for vi in self.video:
vi.width, vi.height = self.dxy(file)
vi.fps, vi.aspect = self.framerate_aspect(file)
vi.bitrate = self.bitrate(file)
if self.length:
vi.length = self.length
if not self.type:
self.type = 'MPEG Video'
# set fourcc codec for video and audio
vc, ac = 'MP2V', 'MP2A'
if self.mpeg_version == 1:
vc, ac = 'MPEG', 0x0050
for v in self.video:
v.codec = vc
for a in self.audio:
if not a.codec:
a.codec = ac
def dxy(self, file):
"""
get width and height of the video
"""
file.seek(self.sequence_header_offset + 4, 0)
v = file.read(4)
x = struct.unpack('>H', v[:2])[0] >> 4
y = struct.unpack('>H', v[1:3])[0] & 0x0FFF
return x, y
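# Layout note: the four bytes after the 32-bit sequence_header_code pack
# horizontal_size (12 bits) followed by vertical_size (12 bits), which is why
# the width is the top 12 bits of the first two bytes and the height is the
# low 12 bits of bytes 1-2.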
def framerate_aspect(self, file):
"""
read framerate and aspect ratio
"""
file.seek(self.sequence_header_offset + 7, 0)
v = struct.unpack('>B', file.read(1))[0]
try:
fps = FRAME_RATE[v & 0xf]
except IndexError:
fps = None
if v >> 4 < len(ASPECT_RATIO):
aspect = ASPECT_RATIO[v >> 4]
else:
aspect = None
return fps, aspect
def progressive(self, file):
"""
Try to find out with brute force if the mpeg is interlaced or not.
Search for the Sequence_Extension in the extension header (01B5)
"""
file.seek(0)
buffer = ''
count = 0
while 1:
if len(buffer) < 1000:
count += 1
if count > 1000:
break
buffer += file.read(1024)
if len(buffer) < 1000:
break
pos = buffer.find('\x00\x00\x01\xb5')
if pos == -1 o
tartakynov/enso | enso/config.py | Python | bsd-3-clause | 3,462 | 0.000578
# Configuration settings for Enso. Eventually this will take
# localization into account too (or we can make a separate module for
# such strings).
# The keys to start, exit, and cancel the quasimode.
# Their values are strings referring to the names of constants defined
# in the os-specific input module in use.
QUASIMODE_START_KEY = "KEYCODE_RCONTROL"
QUASIMODE_END_KEY = "KEYCODE_RETURN"
QUASIMODE_CANCEL_KEY1 = "KEYCODE_ESCAPE"
QUASIMODE_CANCEL_KEY2 = "KEYCODE_RCONTROL"
# Whether the Quasimode is actually modal ("sticky").
IS_QUASIMODE_MODAL = True
# Amount of time, in seconds (float), to wait from the time
# that the quasimode begins drawing to the time that the
# suggestion list begins to be displayed. Setting this to a
# value greater than 0 will effectively create a
# "spring-loaded suggestion list" beh
|
avior.
QUASIMODE_SUGGESTION_DELAY = 0.2
# The maximum number of suggestions to display in the quasimode.
QUASIMODE_MAX_SUGGESTIONS = 6
# The minimum number of characters the user must type before the
# auto-completion mechanism engages.
QUASIMODE_MIN_AUTOCOMPLETE_CHARS = 2
# The message displayed when the user types some text that is not a command.
BAD_COMMAND_MSG = "<p><command>%s</command> is not a command.</p>"\
"%s"
# Minimum number of characters that should have been typed into the
# quasimode for a bad command message to be shown.
BAD_COMMAND_MSG_MIN_CHARS = 2
# The captions for the above message, indicating commands that are related
# to the command the user typed.
ONE_SUGG_CAPTION = "<caption>Did you mean <command>%s</command>?</caption>"
# The string that is displayed in the quasimode window when the user
# first enters the quasimode.
QUASIMODE_DEFAULT_HELP = u"Welcome to Enso! Enter a command, " \
u"or type \u201chelp\u201d for assistance."
# The string displayed when the user has typed some characters but there
# is no matching command.
QUASIMODE_NO_COMMAND_HELP = "There is no matching command. "\
"Use backspace to delete characters."
# Message XML for the Splash message shown when Enso first loads.
OPENING_MSG_XML = "<p>Welcome to <command>Enso</command>!</p>" + \
"<caption>Copyright © 2008 Humanized, Inc.</caption>"
# Message XML displayed when the mouse hovers over a mini message.
MINI_MSG_HELP_XML = "<p>The <command>hide mini messages</command>" \
" and <command>put</command> commands control" \
" these mini-messages.</p>"
ABOUT_BOX_XML = u"<p><command>Enso</command> Community Edition</p>" \
"<caption> </caption>" \
"<p>Copyright © 2008 <command>Humanized, Inc.</command></p>" \
"<p>Copyright © 2008-2009 <command>Enso Community</command></p>" \
"<p>Version 1.0</p>"
# List of default platforms supported by Enso; platforms are specific
# types of providers that provide a suite of platform-specific
# functionality.
DEFAULT_PLATFORMS = ["enso.platform.win32"]
# List of modules/packages that support the provider interface to
# provide required platform-specific functionality to Enso.
PROVIDERS = []
PROVIDERS.extend(DEFAULT_PLATFORMS)
# List of modules/packages that support the plugin interface to
# extend Enso. The plugins are loaded in the order that they
# are specified in this list.
PLUGINS = ["enso.contrib.scriptotron",
"enso.contrib.help",
"enso.contrib.google",
"enso.contrib.evaluate"]
FONT_NAME = {"normal" : "Gentium (Humanized)", "italic" : "Gentium Italic"}
ricorx7/rti_python | ADCP/Predictor/Range.py | Python | bsd-3-clause | 30,092 | 0.007344
import math
import json
import os
import pytest
import rti_python.ADCP.AdcpCommands
def calculate_predicted_range(**kwargs):
"""
:param SystemFrequency=: System frequency for this configuration.
:param CWPON=: Flag if Water Profile is turned on.
:param CWPBL=: WP Blank in meters.
:param CWPBS=: WP bin size in meters.
:param CWPBN=: Number of bins.
:param CWPBB_LagLength=: WP lag length in meters.
:param CWPBB=: WP broadband or narrowband.
:param CWPP=: Number of pings to average.
:param CWPTBP=: Time between each ping in the average.
:param CBTON=: Is Bottom Track turned on.
:param CBTBB=: BT broadband or narrowband.
:param BeamAngle=: Beam angle in degrees. Default 20 degrees.
:param BeamDiameter=: The beam diameter in meters.
:param CyclesPerElement=: Cycles per element.
:param Salinity=: Salinity in ppt.
:param Temperature=: Temperature in C.
:param XdcrDepth=: Transducer Depth in meters.
:return: BT Range, WP Range, Range First Bin, Configured Ranges
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error opening JSON file Range", e)
return (0.0, 0.0, 0.0, 0.0)
return _calculate_predicted_range(kwargs.pop('CWPON', config['DEFAULT']['CWPON']),
kwargs.pop('CWPBB', config['DEFAULT']['CWPBB']),
kwargs.pop('CWPBS', config['DEFAULT']['CWPBS']),
kwargs.pop('CWPBN', config['DEFAULT']['CWPBN']),
kwargs.pop('CWPBL', config['DEFAULT']['CWPBL']),
kwargs.pop('CBTON', config['DEFAULT']['CBTON']),
kwargs.pop('CBTBB', config['DEFAULT']['CBTBB']),
kwargs.pop('SystemFrequency', config['DEFAULT']['SystemFrequency']),
kwargs.pop('BeamDiameter', config["BeamDiameter"]),
kwargs.pop('CyclesPerElement', config["CyclesPerElement"]),
kwargs.pop('BeamAngle', config["BeamAngle"]),
kwargs.pop('SpeedOfSound', config["SpeedOfSound"]),
kwargs.pop('CWPBB_LagLength', config["DEFAULT"]["CWPBB_LagLength"]),
kwargs.pop('BroadbandPower', config["BroadbandPower"]),
kwargs.pop('Salinity', config["Salinity"]),
kwargs.pop('Temperature', config["Temperature"]),
kwargs.pop('XdcrDepth', config["XdcrDepth"]))
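# A hypothetical call (values are illustrative; anything not passed falls back to
# the predictor.json defaults):
#   bt, wp, first_bin, cfg_range = calculate_predicted_range(SystemFrequency=288000.0,
#                                                            CWPON=True, CWPBS=4.0,
#                                                            CWPBN=30, CWPBL=1.0)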
def _calculate_predicted_range(_CWPON_, _CWPBB_TransmitPulseType_, _CWPBS_, _CWPBN_, _CWPBL_,
_CBTON_, _CBTBB_TransmitPulseType_,
_SystemFrequency_, _BeamDiameter_, _CyclesPerElement_,
_BeamAngle_, _SpeedOfSound_, _CWPBB_LagLength_, _BroadbandPower_,
_Salinity_, _Temperature_, _XdcrDepth_):
"""
Get the predicted ranges for the given setup. This will use the parameter given to calculate
the bottom track predicted range, the water profile predicted range, range to the first bin and
the configured range. All results are in meters.
All values with underscores before and after the name are given variables by the user. All caps
variables are given by the JSON configuration. All other variables are calculated.
:param _CWPON_: Flag if Water Profile is turned on.
:param _CWPBB_TransmitPulseType_: WP broadband or narrowband.
:param _CWPBB_LagLength_: WP lag length in meters.
:param _CWPBS_: Bin size in meters.
:param _CWPBN_: Number of bins.
:param _CWPBL_: Blank distance in meters.
:param _CBTON_: Flag if Bottom Track is turned on.
:param _CBTBB_TransmitPulseType_: BT broadband or narrowband.
:param _SystemFrequency_: System frequency in hz.
:param _BeamDiameter_: Beam diameter in meters.
:param _CyclesPerElement_: Cycles per element.
:param _BeamAngle_: Beam angle in degrees.
:param _SpeedOfSound_: Speed of sound in m/s.
:param _BroadbandPower_: Broadband power.
:param _Salinity_: Salinity in ppt.
:param _Temperature_: Temperature in C.
:param _XdcrDepth_: Transducer Depth in meter.
:return: BT Range, WP Range, Range First Bin, Configured Range
"""
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
# Get the configuration from the json file
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error getting the configuration file. Range", e)
return (0.0, 0.0, 0.0, 0.0)
# Speed of sound must be a value
if _SpeedOfSound_ == 0:
_SpeedOfSound_ = 1490
# Wave length
waveLength = _SpeedOfSound_ / _SystemFrequency_
# DI
dI = 0.0
if waveLength == 0:
dI = 0.0
else:
dI = 20.0 * math.log10(math.pi * _BeamDiameter_ / waveLength)
# Absorption
absorption = calc_absorption(_SystemFrequency_, _SpeedOfSound_, _Salinity_, _Temperature_, _XdcrDepth_)
# 1200khz
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
refBin_1200000 = 0.0
xmtW_1200000 = 0.0
rScale_1200000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["1200000"]["BEAM_ANGLE"] / 180.0 * math.pi);
dI_1200000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["1200000"]["DIAM"] / waveLength);
dB_1200000 = 0.0;
if (config["DEFAULT"]["1200000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_1200000 = 0.0
else:
dB_1200000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["1200000"]["BIN"]) + dI - dI_1200000 - 10.0 * math.log10(config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_)
absorption_range_1200000 = config["DEFAULT"]["1200000"]["RANGE"] + ((config["DEFAULT"]["1200000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["1200000"]["RANGE"])
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]:
# Ref in and xmt watt
refBin_1200000 = config["DEFAULT"]["1200000"]["BIN"]
xmtW_1200000 = config["DEFAULT"]["1200000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + 15.0 * config["DEFAULT"]["1200000"]["BIN"])
else:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
btRange_1200000 = 0.0
if _CWPON_:
# Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["1200000"]["BIN"])
else:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
wpRange_1200000 = 0.0
else:
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
# 600khz
btRange_600000 = 0.0
wpRange_600000 = 0.0
refBin_600000 = 0.0
xmtW_600000 = 0.0
rScale_600000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["600000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_600000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["600000"]["DIAM"] / waveLength)
dB_600000 = 0.0;
if config["DEFAULT"]["600000"]["BIN"] == 0 or _CyclesPe
GrognardsFromHell/TemplePlus | tpdatasrc/co8infra/scr/py00416standard_equipment_chest.py | Python | mit | 4,171 | 0.101415
from toee import *
from utilities import *
from Co8 import *
from py00439script_daemon import npc_set, npc_get
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
if (npc_get(attachee, 1) == 0):
triggerer.begin_dialog( attachee, 1 )
elif (npc_get(attachee, 1) == 1):
triggerer.begin_dialog( attachee, 100 )
return SKIP_DEFAULT
def san_start_combat( attachee, triggerer ):
leader = game.party[0]
StopCombat(attachee, 0)
leader.begin_dialog( attachee, 4000 )
return RUN_DEFAULT
def give_default_starting_equipment(x = 0):
for pc in game.party:
if pc.stat_level_get(stat_level_barbarian) > 0:
for aaa in [4074, 6059, 6011, 6216, 8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_bard) > 0:
for aaa in [4009, 6147, 6011, 4096 ,5005 ,5005 ,6012 ,6238 ,12564 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_druid) > 0:
for aaa in [6216 ,6217 ,4116 ,4115 ,5007 ,5007 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_cleric) > 0 or pc.divine_spell_level_can_cast() > 0:
for aaa in [6013 ,6011 ,6012 ,6059 ,4071 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_fighter) > 0:
for aaa in [6013 ,6010 ,6011 ,6012 ,6059 ,4062 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_monk) > 0:
if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
for aaa in [6205 ,6202 ,4060 ,8014]: # dagger (4060) instead of quarterstaff
create_item_in_inventory( aaa, pc )
else:
for aaa in [6205 ,6202 ,4110 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_paladin) > 0:
for aaa in [6013 ,6012 ,6011 ,6032 ,6059 ,4036 ,6124 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_ranger) > 0:
for aaa in [6013 ,6012 ,6011 ,6059 ,4049 ,4201 ,5004 ,5004 ,8014 ,6269]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_rogue) > 0:
for aaa in [6042 ,6045 ,6046 ,4049 ,4060 ,6233 ,8014 ,4096 ,5005 ,5005 ,8014 ,12012]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_swashbuckler) > 0:
for aaa in [6013 ,6045 ,6046 ,4009 ,4060 ,6238 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_sorcerer) > 0:
if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
for aaa in [6211 ,6045 ,6046 ,6124 ,4060 ,4115 ,5007 ,5007 ,8014]: # dagger (4060) instead of spear
create_item_in_inventory( aaa, pc )
else:
for aaa in [6211 ,6045 ,6046 ,6124 ,4117 ,4115 ,5007 ,5007 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_warmage) > 0:
if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
for aaa in [6013 ,6045 ,6046 ,6059, 4071 , 4115 ,5007 ,5007, 8014]: # mace (4071) instead of spear
create_item_in_inventory( aaa, pc )
else:
for aaa in [6013 ,6045 ,6046 ,6059, 4117 , 4115 ,5007 ,5007, 8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_beguiler) > 0:
for aaa in [6042 ,6045 ,6046 ,4049 ,4060 ,6233 ,8014 ,4096 ,5005 ,5005 ,8014 ,12012]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_wizard) > 0 or pc.arcane_spell_level_can_cast() > 0:
if pc.stat_level_get(stat_race) in [race_gnome, race_halfling]:
for aaa in [4060 ,4096 ,5005 ,5005 ,6081 ,6143 ,6038 ,6011 ,8014]:
create_item_in_inventory( aaa, pc )
else:
for aaa in [4110 ,4096 ,5005 ,5005 ,6081 ,6143 ,6038 ,6011 ,8014]:
create_item_in_inventory( aaa, pc )
elif pc.stat_level_get(stat_level_scout) > 0:
for aaa in [6013 ,6012 ,6011, 4049, 4201 ,5004 ,5004 ,8014, 6269, 12012]:
create_item_in_inventory( aaa, pc )
else: # default to rogue outfit
for aaa in [6042 ,6045 ,6046 ,4049 ,4060 ,6233 ,8014 ,4096 ,5005 ,5005 ,8014 ,12012]:
create_item_in_inventory( aaa, pc )
return
def defalt_equipment_autoequip():
for pc in game.party:
pc.item_wield_best_all()
raphaelvalentin/Utils | optimize/optlib2.py | Python | gpl-2.0 | 8,671 | 0.011302
from functions.science import rms, mae, average, nan, inf
from collections import OrderedDict
from rawdata.table import table
from numpy import array, log10
import cma
from time import time, strftime
__all__ = ['fmin', 'optimbox', 'box', 'array', 'log10', 'rms', 'mae', 'average', 'nan', 'inf']
def box(x, y, xmin=-inf, xmax=inf, ymin=-inf, ymax=inf):
xs, ys = [], []
for xi, yi in zip(x, y):
if xmin<=xi<=xmax and ymin<=yi<=ymax:
xs.append(xi)
ys.append(yi)
return array(xs), array(ys)
class optimbox(object):
"""optimbox is a class used for fitting curves and linked with the fmin decorator.
as input, it must contain a dictionary with the keys 'objective' and 'goal'.
it can contain optionally the keys 'xlim', 'ylim', 'weight', 'yscale'.
if yscale is set to 'lin' (default), the error calculation is done by weight*(objective-goal)
if yscale is set to 'log', the fit is done by weight*(objective-goal)/goal.
if weight is not defined, weight is calculated when yscale='lin' as mae(goal)
if weight is not defined, weight is set when yscale='log' as 1.0.
the optimbox's error is returned using the class function self.error().
self.error() is used in fmin.
"""
def mean(self, x):
return mae(x)
def __init__(self, kwargs):
self._error = 0.0
if 'objective' in kwargs and 'goal' in kwargs:
x1, y1 = kwargs['objective']
x2, y2 = kwargs['goal']
else:
raise Exception('instances for the optimbox are not correct')
yscale = kwargs.get('yscale', 'lin')
xmin, xmax = kwargs.get('xlim', (-inf, inf))
ymin, ymax = kwargs.get('ylim', (-inf, inf))
x1, y1 = box(x1, y1, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
x2, y2 = box(x2, y2, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
if yscale == 'lin':
weight = kwargs.get('weight', self.mean(y2))
if hasattr(weight, '__iter__'):
raise Exception('weight cannot be a list of values')
error = weight*(y1-y2)
if hasattr(error, '__iter__'):
self._error = self.mean(error)
else:
self._error = abs(error)
elif yscale == 'log':
weight = kwargs.get('weight', 1.0)
if hasattr(weight, '__iter__'):
raise Exception('weight cannot be a list of values')
try:
error = weight*(y1-y2)/y2
except ZeroDivisionError:
raise ZeroDivisionError('at least one point of the scatter data is zero')
if hasattr(error, '__iter__'):
self._error = self.mean(error)
else:
self._error = abs(error)
def error(self):
return self._error
class fmin(object):
x = OrderedDict() # ordered dictionary
bounds = OrderedDict() # ordered dictionary
def __init__(self, method='cma-es', **options):
"""fmin is a function decorator used for minimization of function.
options:
for method = 'cma-es'
variables = 'all'
sigma0 = 0.1
tolx = 1e-3
tolfun = 1e-5
seed = 1234
maxiter = '100 + 50*(N+3)**2 // popsize**0.5'
maxfevals = inf
popsize = '4 + int(3*log(N))'
verbose = -1
fmin.x <- dict
fmin.bounds <- dict
"""
self.method = method
self.options = options
def __call__(self, func):
if self.method == 'cma-es':
results = self._fmin_cma_es(func=func, **dict(self.options))
return results
def _fmin_cma_es(self, func, variables='all', sigma0=0.1, tolx=1e-3, seed=1234,
maxiter='100+50*(N+3)**2//popsize**0.5', verbose=-1,
maxfevals=float('inf'), popsize='4+int(3*log(N))', tolfun=1e-5 ):
now = time()
def tf(X, bounds):
Y = []
for x, (xmin, xmax) in zip(X, bounds):
slope = 1./(xmax-xmin)
intercept = 1.0-slope*xmax
y = slope*x + intercept
Y.append(y)
return Y
def tfinv(Y, bounds):
X = []
for y, (xmin, xmax) in zip(Y, bounds):
slope = xmax-xmin
intercept = xmax-slope
x = slope*y + intercept
X.append(x)
return X
def eval_error(output):
if isinstance(output, dict):
return optimbox(output).error()
elif isinstance(output, (float, int)):
return float(abs(output))
elif isinstance(output, tuple):
return average([ eval_error(elt) for elt in output ])
elif hasattr(output, '__iter__'):
return mae(output)
else:
raise Exception('output must be based on optimbox, float, tuple or list/array')
# init
if variables == 'all':
variables = fmin.x.keys()
x0 = [fmin.x[key] for key in variables]
bounds = [fmin.bounds[key] for key in variables]
options = { 'boundary_handling' : 'BoundTransform ',
'bounds' : [[0]*len(x0), [1]*len(x0)],
'seed' : seed,
'verb_time' : False,
'scaling_of_variables' : None,
'verb_disp' : 1,
'maxiter' : maxiter,
'maxfevals' : maxfevals,
'signals_filename' : 'cmaes_signals.par',
'tolx' : tolx,
'popsize' : popsize,
'verbose' : verbose,
'ftarget': 1e-12,
'tolfun' : 1e-5,
}
es = cma.CMAEvolutionStrategy(tf(x0, bounds), sigma0, options)
# initial error with the original set of variables values
error = eval_error( func(**fmin.x) )
best_objective = error
print 'Start CMA-ES Optimizer...'
print
print '{step:>6}{residual:>11}{x}'.format(step='step', x='{:>11}'*len(variables), residual='residual').format(*variables)
print '-'*(6+11+11*len(variables))
print '{step:>6}{residual:>11.3e}{x}'.format(step=0, x='{:>11.3e}'*len(x0), residual=error).format(*x0)
while not es.stop():
solutions = es.ask() # provide a set of variables values
objectives = [] # init
for i, x in enumerate(solutions):
xt = { k:v for k, v in zip(variables, tfinv(x, bounds)) }
# add other keyword arguments
for key in fmin.x.keys():
if not(key in variables):
xt[key] = fmin.x[key]
error = eval_error( func(**xt) )
objectives.append( error )
# if error is better then update fmin.x
if error < best_objective:
fmin.x.update(xt)
best_objective = error
es.tell(solutions, objectives)
#es.disp(1)
if es.countiter%10==0:
print
print '{step:>6}{residual:>11}{x}'.format(step='step', x='{:>11}'*len(variables), residual='residual').format(*variables)
print '-'*(6+11+11*len(variables))
indx = objectives.index(min(objectives))
x = tfinv(solutions[indx], bounds)
isbest = ''
if objectives[indx] == best_objective:
isbest = '*'
print '{step:>6}{residual:>11.3e}{x} {isbest}'.format(step=es.countiter, x='{:>11.3e}'*len(x), residual=objectives[indx], isbest=isbest).format(*x)
#es.result_pretty()
xbest, f_xbest, evaluations_xbest, evaluations, iterations, pheno_xmean, effective_stds = es.result()
stop = es.stop()
print '-----------------'
print 'termination on %s=%.2e'%(stop.keys()[0], stop.values()[0])
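A minimal usage sketch of the fmin decorator and optimbox above (the model, data, and bounds are made up):
fmin.x = OrderedDict([('a', 1.0), ('b', 0.0)])
fmin.bounds = OrderedDict([('a', (0.1, 10.0)), ('b', (-5.0, 5.0))])

@fmin(method='cma-es', sigma0=0.2, tolx=1e-4)
def fit(a, b):
    x = array([0.0, 1.0, 2.0, 3.0])
    goal = 2.0 * x + 1.0              # synthetic measurement
    model = a * x + b                 # candidate model for the trial parameters
    return {'objective': (x, model), 'goal': (x, goal)}

# after the decorator has run, fmin.x holds the best parameter set found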
Freso/listenbrainz-server | messybrainz/db/__init__.py | Python | gpl-2.0 | 1,338 | 0.003737
from __future__ import print_function
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import sqlalchemy
import sys
# This value must be incremented after schema changes on replicated tables!
SCHEMA_VERSION = 1
engine = None
def init_db_engine(connect_str):
global engine
engine = create_engine(connect_str, poolclass=NullPool)
def run_sql_script(sql_file_path):
with open(sql_file_path) as sql:
connection = engine.connect()
connection.execute(sql.read())
connection.close()
def run_sql_script_without_transaction(sql_file_path):
with open(sql_file_path) as sql:
connection = engine.connect()
connection.connection.set_isolation_level(0)
lines = sql.read().splitlines()
try:
for line in lines:
# TODO: Not a great way of removing comments. The alternative is to catch
# the exception sqlalchemy.exc.ProgrammingError "can't execute an empty query"
if line and not line.startswith("--"):
connection.execute(line)
except sqlalchemy.exc.ProgrammingError as e:
print("Error: {}".format(e))
return False
finally:
connection.connection.set_isolation_level(1)
connection.close()
return True
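A minimal usage sketch of this module (the connection URI and the script path are hypothetical):
init_db_engine("postgresql://messybrainz:messybrainz@localhost:5432/messybrainz")
run_sql_script("admin/sql/create_tables.sql")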
joaoperfig/mikezart | source/markovzart2.py | Python | mit | 8,058 | 0.018367
import random
import musictheory
import filezart
import math
from pydub import AudioSegment
from pydub.playback import play
class Part:
def __init__(self, typ=None, intensity=0, size=0, gen=0, cho=0):
self._type = typ #"n1", "n2", "bg", "ch", "ge"
if intensity<0 or gen<0 or cho<0 or size<0 or intensity>1 or size>1 or gen>1 or cho>1:
raise ValueError ("Invalid Values for Structure Part")
self._intensity = intensity # [0-1]
self._size = size # [0-1]
self._genover = gen # [0-1] overlay of general type lines
self._chover = cho # [0-1] overlay of chorus type lines
def __repr__(self):
return "[" + self._type + "-" + str(self._intensity) + "-" + str(self._size) + "-" + str(self._genover) + "-" + str(self._chover) + "]"
@classmethod
def fromString(cls, string): # [n1-0.123-1-0.321-0.2] type, intensity, size, genoverlay, chooverlay
while string[0] == " ":
string = string[1:]
while string[0] == "\n":
string = string[1:]
while string[-1] == " ":
string = string[:-1]
while string[-1] == "\0":
string = string[:-1]
while string[-1] == "\n":
string = string[:-1]
if len(string)<8:
raise ValueError("Invalid Part string: "+string)
if string[0] == "[" and string[-1] == "]":
string = string[1:-1]
else:
raise ValueError("Invalid Part string: "+string)
typ = string[:2]
string = string[3:]
if not typ in ("n1", "n2", "bg", "ch", "ge"):
raise ValueError("Invalid Part Type string: "+typ)
valstrings = str.split(string, "-")
inten = eval(valstrings[0])
size = eval(valstrings[1])
gen = eval(valstrings[2])
cho = eval(valstrings[3])
return cls(typ, inten, size, gen, cho)
def getTheme(self, pal):
if self._type == "n1":
return pal._n1
if self._type == "n2":
return pal._n2
if self._type == "bg":
return pal._bg
if self._type == "ch":
return pal._ch
if self._type == "ge":
return pal._ge
def getAudio(self, pal, bpm):
base = self.baseDur(pal, bpm)
total = base + 3000 #extra time for last note to play
nvoic = math.ceil(self._intensity * self.getTheme(pal).countVoices())
try:
ngeno = math.ceil(self._genover * pal._ge.countVoices())
except:
ngeno = 0
try:
nchoo = math.ceil(self._chover * pal._ch.countVoices())
except:
nchoo = 0
sound = AudioSegment.silent(total)
them = self.getTheme(pal)
for i in range(nvoic):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
them = pal._ge
for i in range(ngeno):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
them = pal._ch
for i in range(nchoo):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
return sound
def baseDur(self, pal, bpm): #get the base duration of this part of the song
return self.getTheme(pal).baseDurForStruct(self._size, bpm)
class Structure:
def __init__(self):
self._parts = ()
def add(self, part):
self._parts = self._parts+(part,)
def __repr__(self):
return "@STRUCTURE:" + str(self._parts)
def baseDur(self, pal, bpm=None):
if bpm == None:
bpm = pal._bpm
curTime = 0
for p in self._parts:
curTime = curTime + p.baseDur(pal, bpm)
return curTime
def songAudio(self, pal, bpm=None):
if bpm == None:
bpm = pal._bpm
total = self.baseDur(pal, bpm) + 3000 # 3 seconds for last note to play
sound = AudioSegment.silent(total)
curTime = 0
for p in self._parts:
paudio = p.getAudio(pal, bpm)
sound = sound.overlay(paudio, curTime)
curTime = curTime + p.baseDur(pal, bpm)
print("curTime:",curTime)
return sound
# wselect WeightedSelect returns element of dictionary based on dict weights {element:weight}
def wselect(dicti):
total=0
for i in list(dicti):
total = total + dicti[i]
indice = total*random.random()
for i in list(dicti):
if dicti[i]>=indice:
return i
indice = indice - dicti[i]
raise ValueError ("something went wrong")
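# Illustrative sketch (hypothetical weights): wselect({"a": 1, "b": 3}) returns "b" roughly
# three times as often as "a", since each element is drawn in proportion to its weight.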
# rselect RandomSelect returns random element of list
def rselect(lista):
return random.choice(lista)
def lenweights():
return {3:1, 4:1, 5:2, 6:3, 7:4, 8:3, 9:2, 10:1, 11:1}
def stweights():
return {"n1":5, "n2":4, "ch":2, "bg":1}
def n1weights():
return {"n1":4, "n2":2, "ch":3, "bg":1}
def n2weights():
return {"n1":2, "n2":3, "ch":4, "bg":2}
def chweights():
return {"n1":2, "n2":1, "ch":4, "bg":1}
def bgweights():
return {"n1":1, "n2":1, "ch":20, "bg":8}
def typeSequence(size):
last = wselect(stweights())
sequence=(last,)
while len(sequence)<size:
if last == "n1":
last = wselect(n1weights())
elif last == "n2":
last = wselect(n2weights())
elif last == "ch":
last = wselect(chweights())
elif last == "bg":
last = wselect(bgweights())
sequence = sequence + (last,)
return sequence
def siweights():
return {0.1:1, 0.2:2, 0.3:4, 0.4:5, 0.5:5, 0.6:4, 0.7:3, 0.8:2, 0.9:1}
def deltaweights():
return {-0.3:1, -0.2:1, -0.1:1, 0:5, 0.1:3, 0.2:2, 0.3:2}
def intensitySequence(size):
val = wselect(siweights())
sequence = (val,)
while len(sequence)<size:
val = val + wselect(deltaweights())
if val<0.1:
val = 0.1
if val>1:
val = 1
sequence = sequence + (val,)
return sequence
def soweights():
return {0:6, 0.1:2, 0.2:1}
def deltoweights():
return {-0.2:1, -0.1:1, 0:8, 0.1:2, 0.2:2}
def overlaySequence(size):
val = wselect(soweights())
sequence = (val,)
while len(sequence)<size:
val = val + wselect(deltoweights())
if val<0.1:
val = 0.1
if val>1:
val = 1
sequence = sequence + (val,)
return sequence
def ssweights():
return {0.2:1, 0.4:1, 0.6:1, 0.8:1, 1:16}
def sizeSequence(size):
sequence = ()
while len(sequence)<size:
sequence = sequence + (wselect(ssweights()),)
return sequence
def makeStruct(size = None):
if size == None:
size = wselect(lenweights())
    types = typeSequence(size)
    inten = intensitySequence(size)
    sizes = sizeSequence(size)
overl = overlaySequence(size)
return joinSeqs(types, inten, sizes, overl)
def joinSeqs(types, inten, sizes, overl):
struct = Structure()
for i in range(len(types)):
if types[i]=="bg":
            string = "["+types[i]+"-"+str(inten[i])+"-"+str(sizes[i])+"-"+"0"+"-"+str(overl[i])+"]" # If it's a bridge it has chorus overlay
pt = Part.fromString(string)
struct.add(pt)
else:
string = "["+types[i]+"-"+str(inten[i])+"-"+str(sizes[i])+"-"+str(overl[i])+"-"+"0"+"]" # Else it has gen overlay
pt = Part.fromString(string)
struct.add(pt)
return struct
def pooptest():
for i in range(30):
print(makeStruct())
|
gaowhen/summer
|
summer/db/connect.py
|
Python
|
mit
| 485
| 0
|
# -*- coding: utf-8 -*-
import sqlite3
from flask import g, current_app
def connect_db():
db = sqlite3.connect(current_app.config['DATABASE_URI'])
    db.row_factory = sqlite3.Row
return db
# http://flask.pocoo.org/docs/0.10/appcontext/
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_db()
return db
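# A minimal usage sketch (assumed, not part of this module): inside a request context a view
# could run
#
#     db = get_db()
#     rows = db.execute('SELECT * FROM entries').fetchall()
#
# where the table name is a placeholder; sqlite3.Row (set in connect_db) lets rows be read
# by column name.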
|
RedFoxPi/Playground
|
threadtest.py
|
Python
|
gpl-2.0
| 929
| 0.01507
|
import threading
import time
class Status:
lock = None
statusno =0
def __init__(self):
self.lock = threading.Lock()
def update(self, add):
self.lock.acquire()
self.statusno = self.statusno + add
self.lock.release()
def get(self):
self.lock.acquire()
n = self.statusno
        self.lock.release()
return n
def md5calc(status, args):
for i in args:
time.sleep (1)
#print i
status.update(1)
def show_status(status):
while threading.active_count() > 2:
time.sleep(1)
print status.get()
status = Status()
slaves = []
for i in range(5):
t = threading.Thread(target=md5calc, args=(status, [1,2,5]))
t.start()
slaves.append(t)
m = threading.Thread(target=show_status, args=(status,))
m.start()
m.join()
for t in slaves:
t.join()
|
usc-isi-i2/WEDC
|
spark_dependencies/python_lib/nose2/tests/functional/test_coverage.py
|
Python
|
apache-2.0
| 878
| 0.004556
|
import os.path
import platform
from nose2.compat import unittest
from nose2.tests._common import FunctionalTestCase
class TestCoverage(FunctionalTestCase):
@unittest.skipIf(
platform.python_version_tuple()[:2] == ('3', '2'),
'coverage package does not support python 3.2')
def test_run(self):
proc = self.runIn(
'scenario/test_with_module',
'-v',
'--with-coverage',
'--coverage=lib/'
)
STATS = '\s+8\s+5\s+38%'
expected = os.path.join('lib', 'mod1(.py)?')
expected = expected.replace('\\', r'\\')
expected = expected + STATS
stdout, stderr = proc.communicate()
self.assertTestRunOutputMatches(
proc,
stderr=expected)
self.assertTestRunOutputMatches(
proc,
stderr='TOTAL\s+' + STATS)
|
t3hi3x/p-k.co
|
shorturls/admin.py
|
Python
|
mit
| 1,450
| 0.008276
|
__author__ = 'Alex Breshears'
__license__ = '''
Copyright (C) 2012 Alex Breshears
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without
restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.contrib.admin import site
from django.contrib import admin
from shorturls.models import *
class LinkClickInline(admin.TabularInline):
model = LinkClick
    extra = 0
class LinkAdmin(admin.ModelAdmin):
inlines = [LinkClickInline]
def save_model(self, request, obj, form, change):
obj.save()
site.register(Link, LinkAdmin)
|
danalec/dotfiles
|
sublime/.config/sublime-text-3/Packages/anaconda_go/plugin/handlers_go/commands/package_symbols.py
|
Python
|
mit
| 5,136
| 0
|
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from collections import defaultdict
from ..anagonda.context import guru
from commands.base import Command
class PackageSymbols(Command):
"""Run guru to get a detailed list of the package symbols
"""
def __init__(self, callback, uid, vid, scope, code, path, buf, go_env):
self.vid = vid
self.scope = scope
self.code = code
self.path = path
self.buf = buf
self.go_env = go_env
super(PackageSymbols, self).__init__(callback, uid)
def run(self):
"""Run the command
"""
try:
offset = getattr(self, 'offset', None)
if offset is None:
offset = self.code.find('package ') + len('package ') + 1
with guru.Guru(
self.scope, 'describe', self.path,
offset, self.buf, self.go_env) as desc:
symbols = []
for symbol in self._sort(desc):
path, line, col = symbol['pos'].split(':')
symbols.append({
'filename': path,
'line': int(line),
'col': int(col),
'ident': symbol['name'],
'full': symbol['type'],
'keyword': symbol['kind'],
'show_filename': True
})
self.callback({
'success': True,
'result': symbols,
'uid': self.uid,
'vid': self.vid
})
except Exception as error:
logging.error(error)
logging.debug(traceback.format_exc())
self.callback({
'success': False,
'error': str(error),
'uid': self.uid,
'vid': self.vid
})
def _sort(self, desc):
"""Sort the output by File -> Vars -> Type -> Funcs
"""
symbols = []
aggregated_data = defaultdict(lambda: [])
for elem in desc.get('package', {}).get('members', []):
filename = elem['pos'].split(':')[0]
aggregated_data[filename].append(elem)
for filename, elems in aggregated_data.items():
symbols += sorted(
[e for e in elems if e['kind'] in ['var', 'const']],
key=lambda x: x['pos']
)
symbols += sorted(
[e for e in elems if e['kind'] == 'type'],
key=lambda x: x['pos']
)
symbols += sorted(
[e for e in elems if e['kind'] == 'func'],
key=lambda x: x['pos']
)
for e in elems:
if e['kind'] == 'type':
methods = []
for method in e.get('methods', []):
new_elem = method
new_elem['kind'] = 'func'
new_elem['type'] = method['name']
methods.append(new_elem)
symbols += sorted(methods, key=lambda x: x['pos'])
return symbols
class PackageSymbolsCursor(PackageSymbols):
"""Run guru to get detailed information about the symbol under cursor
"""
def __init__(self, cb, uid, vid, scope, code, path, buf, off, go_env):
self.offset = off
super(PackageSymbolsCursor, self).__init__(
cb, uid, vid, scope, code, path, buf, go_env
)
def _sort(self, desc):
"""Sort the output by File -> Vars -> Type -> Funcs
"""
if desc.get('package') is not None:
return super(PackageSymbolsCursor, self)._sort(desc)
symbols = []
aggregated_data = defaultdict(lambda: [])
detail_field = desc.get('detail')
if detail_field is None:
return symbols
details = desc.get(detail_field)
if details is None:
return symbols
if detail_field == 'type':
filename = details.get('namepos', desc['pos']).split(':')[0]
            details['pos'] = details.get('namepos', desc['pos'])
details['name'] = desc['desc']
details['kind'] = details['type']
aggregated_data[filename].append(details)
for elem in details.get('methods', []):
filename = elem['pos'].split(':')[0]
elem['type'] = elem['name']
elem['kind'] = elem['type']
aggregated_data[filename].append(elem)
else:
            filename = details['objpos'].split(':')[0]
details['pos'] = details['objpos']
details['name'] = details['type']
details['kind'] = details['type']
aggregated_data[filename].append(details)
for filename, elems in aggregated_data.items():
symbols += sorted(elems, key=lambda x: x['pos'])
return symbols
|
DimiterM/santander
|
PeriodicValidation.py
|
Python
|
mit
| 2,287
| 0.00962
|
import time
import numpy as np
import keras
import tensorflow as tf
import keras.backend as K
from keras import optimizers
from keras.models import load_model
from keras.callbacks import Callback
from functions import calculate_top_k_new_only
"""
PeriodicValidation - Keras callback - checks val_loss periodically instead of on every epoch as Model.fit() would
"""
class PeriodicValidation(Callback):
def __init__(self, val_data, batch_size, filepath):
super(PeriodicValidation, self).__init__()
self.val_data = val_data
self.batch_size = batch_size
self.filepath = filepath
self.min_val_loss = np.Inf
    def on_epoch_end(self, epoch, logs={}):
if epoch % 5 == 4 or epoch % 5 == 2:
if self.filepath:
self.model.save(self.filepath+".ep_"+str(epoch)+".h5", overwrite=True)
if self.val_data is None:
return
h = self.model.evaluate(self.val_data[0], self.val_data[1], batch_size=self.batch_size, verbose=0)
print("validating on " + str(self.val_data[1].shape[0]) + " samples on epoch " +
str(epoch) + ": ", h)
y_top_k_new_only = calculate_top_k_new_only(self.model,
self.val_data[0][0], self.val_data[0][1], self.val_data[1], self.batch_size,
(not self.val_data[0][1].shape[2] == self.val_data[1].shape[1]))
print("testing MAP@K for NEW products: ", y_top_k_new_only)
if h[0] < self.min_val_loss:
if self.filepath:
self.model.save(self.filepath, overwrite=True)
print("val_loss improved from "+str(self.min_val_loss)+" to "+str(h[0])+", saving model to "+self.filepath)
else:
print("val_loss improved from "+str(self.min_val_loss)+" to "+str(h[0]))
self.min_val_loss = h[0]
def on_train_end(self, logs=None): # also log training metrics with higher decimal precision
print("epoch", [m for m in self.model.history.params['metrics']])
for epoch in self.model.history.epoch:
print(epoch, [self.model.history.history[m][epoch] for m in self.model.history.params['metrics']])
#
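# A minimal usage sketch (assumed; array names, shapes and the file path are placeholders):
#
#     cb = PeriodicValidation(val_data=([x1_val, x2_val], y_val), batch_size=256,
#                             filepath="model.h5")
#     model.fit(x_train, y_train, epochs=50, batch_size=256, callbacks=[cb])
#
# so evaluation, MAP@K reporting and checkpointing only run on the epochs selected in
# on_epoch_end.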
|
nosix/PyCraft
|
src/pycraft/service/whole/handler/__init__.py
|
Python
|
lgpl-3.0
| 171
| 0.005848
|
# -*- coding: utf8 -*-
from .task import TaskID
from .core import Handler
from .queue import EventQueue
__all__ = [
'TaskID',
'Handler',
    'EventQueue',
]
|
samdmarshall/xcparse
|
xcparse/Xcode/PBX/PBX_Base.py
|
Python
|
bsd-3-clause
| 2,017
| 0.020327
|
from .PBXResolver import *
from .PBX_Constants import *
class PBX_Base(object):
def __init__(self, lookup_func, dictionary, project, identifier):
# default 'name' property of a PBX object is the type
self.name = self.__class__.__name__;
# this is the identifier for this object
self.identifier = str(identifier);
# set of any referenced identifiers on this object
self.referencedIdentifiers = set();
def __attrs(self):
return (self.identifier);
def __repr__(self):
return '(%s : %s : %s)' % (type(self), self.name, self.identifier);
def __eq__(self, other):
return isinstance(other, type(self)) and self.identifier == other.identifier;
def __hash__(self):
return hash(self.__attrs());
def resolve(self, type, item_list):
        return filter(lambda item: isinstance(item, type), item_list);
    def fetchObjectFromProject(self, lookup_func, identifier, project):
find_object = project.objectForIdentifier(identifier);
if find_object == None:
result = lookup_func(project.contents[kPBX_objects][identifier]);
if result[0] == True:
find_object = result[1](lookup_func, project.contents[kPBX_objects][identifier], project, identifier);
project.objects.add(find_object);
return find_object;
def parseProperty(self, prop_name, lookup_func, dictionary, project, is_array):
dict_item = dictionary[prop_name];
if is_array == True:
property_list = [];
for item in dict_item:
self.referencedIdentifiers.add(item);
find_object = self.fetchObjectFromProject(lookup_func, item, project);
property_list.append(find_object);
return property_list;
else:
self.referencedIdentifiers.add(dict_item);
return self.fetchObjectFromProject(lookup_func, dict_item, project);
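# Illustrative sketch (hypothetical identifiers): with dictionary = {'files': ['AAA1', 'AAA2']},
# parseProperty('files', lookup_func, dictionary, project, is_array=True) records both
# identifiers in referencedIdentifiers and returns the resolved PBX objects in the same order.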
|
chippey/gaffer
|
python/GafferSceneTest/PrimitiveVariablesTest.py
|
Python
|
bsd-3-clause
| 2,924
| 0.027018
|
##########################################################################
#
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class PrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
s = GafferScene.Sphere()
p = GafferScene.PrimitiveVariables()
p["in"].setInput( s["out"] )
self.assertScenesEqual( s["out"], p["out"] )
self.assertSceneHashesEqual( s["out"], p["out"] )
p["primitiveVariables"].addMember( "a", IECore.IntData( 10 ) )
self.assertScenesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
self.assertSceneHashesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
self.assertNotEqual( s["out"].objectHash( "/sphere" ), p["out"].objectHash( "/sphere" ) )
self.assertNotEqual( s["out"].object( "/sphere" ), p["out"].object( "/sphere" ) )
o1 = s["out"].object( "/sphere" )
o2 = p["out"].object( "/sphere" )
self.assertEqual( set( o1.keys() + [ "a" ] ), set( o2.keys() ) )
self.assertEqual( o2["a"], IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.IntData( 10 ) ) )
del o2["a"]
self.assertEqual( o1, o2 )
if __name__ == "__main__":
unittest.main()
|
IBM-Security/ibmsecurity
|
ibmsecurity/isds/available_updates.py
|
Python
|
apache-2.0
| 6,037
| 0.002319
|
import logging
logger = logging.getLogger(__name__)
def get(isdsAppliance, check_mode=False, force=False):
"""
Retrieve available updates
"""
return isdsAppliance.invoke_get("Retrieving available updates",
"/updates/available.json")
def discover(isdsAppliance, check_mode=False, force=False):
"""
Discover available updates
"""
return isdsAppliance.invoke_get("Discover available updates",
"/updates/available/discover")
def upload(isdsAppliance, file, check_mode=False, force=False):
"""
Upload Available Update
"""
if force is True or _check_file(isdsAppliance, file) is False:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post_files(
"Upload Available Update",
"/updates/available",
[{
'file_formfield': 'uploadedfile',
'filename': file,
'mimetype': 'application/octet-stream'
}],
{}, json_response=False)
return isdsAppliance.create_return_object()
def _check_file(isdsAppliance, file):
"""
Parse the file name to see if it is already uploaded - use version and release date from pkg file name
Also check to see if the firmware level is already uploaded
    Note: A lot depends on the name of the file.
:param isdsAppliance:
:param file:
:return:
"""
import os.path
# If there is an exception then simply return False
# Sample filename - 8.0.1.9-ISS-ISDS_20181207-0045.pkg
logger.debug("Checking provided file is ready to upload: {0}".format(file))
try:
# Extract file name from path
f = os.path.basename(file)
fn = os.path.splitext(f)
logger.debug("File name without path: {0}".format(fn[0]))
# Split of file by '-' hyphen and '_' under score
import re
fp = re.split('-|_', fn[0])
firm_file_version = fp[0]
firm_file_product = fp[2]
firm_file_date = fp[3]
logger.debug("PKG file details: {0}: version: {1} date: {2}".format(firm_file_product, firm_file_version, firm_file_date))
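        # For illustration (hypothetical value): re.split('-|_', '8.0.1.9-ISS-ISDS_20181207-0045')
        # gives ['8.0.1.9', 'ISS', 'ISDS', '20181207', '0045'], so fp[0] is the version, fp[2]
        # the product and fp[3] the release date compared below.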
# Check if firmware level already contains the update to be uploaded or greater, check Active partition
# firmware "name" of format - 8.0.1.9-ISS-ISDS_20181207-0045
import ibmsecurity.isds.firmware
ret_obj = ibmsecurity.isds.firmware.get(isdsAppliance)
        for firm in ret_obj['data']:
# Split of file by '-' hyphen and '_' under score
fp = re.split('-|_', firm['name'])
firm_appl_version = fp[0]
firm_appl_product = fp[2]
            firm_appl_date = fp[3]
logger.debug("Partition details ({0}): {1}: version: {2} date: {3}".format(firm['partition'], firm_appl_product, firm_appl_version, firm_appl_date))
if firm['active'] is True:
from ibmsecurity.utilities import tools
if tools.version_compare(firm_appl_version, firm_file_version) >= 0:
logger.info(
"Active partition has version {0} which is greater or equals than install package at version {1}.".format(
firm_appl_version, firm_file_version))
return True
else:
logger.info(
"Active partition has version {0} which is smaller than install package at version {1}.".format(
firm_appl_version, firm_file_version))
# Check if update uploaded - will not show up if installed though
ret_obj = get(isdsAppliance)
for upd in ret_obj['data']:
rd = upd['release_date']
rd = rd.replace('-', '') # turn release date into 20161102 format from 2016-11-02
if upd['version'] == fp[0] and rd == fp[3]: # Version of format 8.0.1.9
return True
except Exception as e:
logger.debug("Exception occured: {0}".format(e))
pass
return False
def install(isdsAppliance, type, version, release_date, name, check_mode=False, force=False):
"""
Install Available Update
"""
if force is True or _check(isdsAppliance, type, version, release_date, name) is True:
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
ret_obj = isdsAppliance.invoke_post("Install Available Update",
"/updates/available/install",
{"updates": [
{
"type": type,
"version": version,
"release_date": release_date,
"name": name
}
]
})
isdsAppliance.facts['version'] = version
return ret_obj
return isdsAppliance.create_return_object()
def _check(isdsAppliance, type, version, release_date, name):
ret_obj = get(isdsAppliance)
for upd in ret_obj['data']:
# If there is an installation in progress then abort
if upd['state'] == 'Installing':
logger.debug("Detecting a state of installing...")
return False
if upd['type'] == type and upd['version'] == version and upd['release_date'] == release_date and upd[
'name'] == name:
logger.debug("Requested firmware ready for install...")
return True
logger.debug("Requested firmware not available for install...")
return False
|
NYU-Molecular-Pathology/snsxt
|
snsxt/sns_tasks/DemoQsubAnalysisTask.py
|
Python
|
gpl-3.0
| 2,720
| 0.015074
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import task_classes
from task_classes import QsubAnalysisTask
class DemoQsubAnalysisTask(QsubAnalysisTask):
"""
Demo task that will submit a single qsub job for the analysis
"""
def __init__(self, analysis, taskname = 'DemoQsubAnalysisTask', config_file = 'DemoQsubAnalysisTask.yml', extra_handlers = None):
"""
Parameters
----------
analysis: SnsWESAnalysisOutput
the `sns` pipeline output object to run the task on. If ``None`` is passed, ``self.analysis`` is retrieved instead.
extra_handlers: list
a list of extra Filehandlers to use for logging
"""
QsubAnalysisTask.__init__(self, taskname = taskname, config_file = config_file, analysis = analysis, extra_handlers = extra_handlers)
def main(self, analysis):
"""
        Main function for performing the analysis task on the entire analysis
Put your code for performing the analysis task on the entire analysis here
Parameters
----------
        analysis: SnsWESAnalysisOutput
the `sns` pipeline output object to run the task on. If ``None`` is passed, ``self.analysis`` is retrieved instead.
Returns
-------
qsub.Job
a single qsub job object
"""
self.logger.debug('Put your code for doing the analysis task in this function')
self.logger.debug('The global configs for all tasks will be in this dict: {0}'.format(self.main_configs))
self.logger.debug('The configs loaded from the task YAML file will be in this dict: {0}'.format(self.task_configs))
self.logger.debug('Analysis is: {0}'.format(analysis.id))
# output file
output_foo = self.get_analysis_file_outpath(file_basename = 'foo.txt')
output_bar = self.get_analysis_file_outpath(file_basename = 'bar.txt')
self.logger.debug('output_foo is: {0}'.format(output_foo))
self.logger.debug('output_bar is: {0}'.format(output_bar))
# get the dir for the qsub logs
qsub_log_dir = analysis.list_none(analysis.get_dirs('logs-qsub'))
self.logger.debug('qsub_log_dir is {0}:'.format(qsub_log_dir))
# make the shell command to run
command = 'touch "{0}"; touch "{1}"; sleep 10'.format(output_foo, output_bar)
self.logger.debug('command will be:\n{0}'.format(command))
# submit the command as a qsub job on the HPC
job = self.qsub.submit(command = command, name = self.taskname + '.' + analysis.id, stdout_log_dir = qsub_log_dir, stderr_log_dir = qsub_log_dir, verbose = True, sleeps = 1)
return(job)
|
Chavjoh/LinuxAuthenticationTester
|
LinuxAuthenticationTesterShadow.py
|
Python
|
apache-2.0
| 5,520
| 0.025915
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------#
# Security - Linux Authentication Tester with /etc/shadow #
# ============================================================================ #
# Note: To be used for test purpose only #
# Developer: Chavaillaz Johan #
# Filename: LinuxAuthenticationTesterShadow.py #
# Version: 1.0 #
# #
# Licensed to the Apache Software Foundation (ASF) under one #
# or more contributor license agreements. See the NOTICE file #
# distributed with this work for additional information #
# regarding copyright ownership. The ASF licenses this file #
# to you under the Apache License, Version 2.0 (the #
# "License"); you may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, #
# software distributed under the License is distributed on an #
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, either express or implied. See the License for the #
# specific language governing permissions and limitations #
# under the License. #
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# #
# LIBRARIES IMPORT #
# #
#------------------------------------------------------------------------------#
import sys
import crypt
import spwd
import argparse
#------------------------------------------------------------------------------#
# #
# UTILITIES FUNCTIONS #
# #
#------------------------------------------------------------------------------#
def checkAuthentication(shadowPwdDb, password):
"""
Test authentication in linux
:param shadowPwdDb: Shadow password database entry for the user
:type shadowPwdDb: spwd
:param password: Account password to test
:type password: str
"""
if crypt.crypt(password, shadowPwdDb) == shadowPwdDb:
return True
else:
return False
def bruteForce(username, dictionary):
"""
Authentication test for each password in the dictionary
with the given user name on the current computer
:param username: Username used to test each password in given dictionary
:type username: str
:param dictionary: Dictionary file path that contains all password
    :type dictionary: str
    """
    # Return the shadow password database entry for the given user name
    shadowPwdDb = spwd.getspnam(username)[1]
# Open dictionary file
with open(dictionary) as file:
# Read each line : One line = One password
for line in file:
# Delete new line character
password = line.rstrip('\n')
# Check authentication
if checkAuthentication(shadowPwdDb, password):
return password
return False
#------------------------------------------------------------------------------#
# #
# "MAIN" FUNCTION #
# #
#------------------------------------------------------------------------------#
# If this is the main module, run this
if __name__ == '__main__':
argsCount = len(sys.argv)
# Create argument parser to help user
parser = argparse.ArgumentParser(
description='Test user authentication with a given dictionary.'
)
parser.add_argument(
'username',
type=str,
help='Username used to test each password in given dictionary file.'
)
parser.add_argument(
'dictionary',
type=str,
help='Dictionary file path that contains all password to test.'
)
# Show help if one of the arguments is missing
if argsCount != 3:
parser.print_help()
sys.exit()
# User and dictionary file in scripts arguments
username = sys.argv[1]
dictionary = sys.argv[2]
# Launch script
try:
password = bruteForce(username, dictionary)
if not password:
print("Password not found in dictionary")
else:
print("Password found : " + password)
except (OSError, IOError) as e:
print("Dictionary not found")
except KeyError:
print("User '%s' not found" % username)
|
google/grr
|
grr/server/grr_response_server/flows/general/registry_test.py
|
Python
|
apache-2.0
| 12,992
| 0.002617
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the registry flows."""
import os
from absl import app
from grr_response_client.client_actions import file_fingerprint
from grr_response_client.client_actions import searching
from grr_response_client.client_actions import standard
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import artifact
from grr_response_server import data_store
from grr_response_server.flows.general import registry
from grr_response_server.flows.general import transfer
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class RegistryFlowTest(flow_test_lib.FlowTestsBaseclass):
def setUp(self):
super().setUp()
vfs_overrider = vfs_test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.REGISTRY,
        vfs_test_lib.FakeRegistryVFSHandler)
vfs_overrider.Start()
self.addCleanup(vfs_overrider.Stop)
class TestFakeRegistryFinderFlow(RegistryFlowTest):
"""Tests for the RegistryFinder flow."""
  runkey = "HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/CurrentVersion/Run/*"
def RunFlow(self, client_id, keys_paths=None, conditions=None):
if keys_paths is None:
keys_paths = [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
]
if conditions is None:
conditions = []
client_mock = action_mocks.ActionMock(
searching.Find,
searching.Grep,
)
session_id = flow_test_lib.TestFlowHelper(
registry.RegistryFinder.__name__,
client_mock,
client_id=client_id,
keys_paths=keys_paths,
conditions=conditions,
creator=self.test_username)
return session_id
def testFindsNothingIfNothingMatchesTheGlob(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/NonMatch*"
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeysWithSingleGlobWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeysWithTwoGlobsWithoutConditions(self):
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Side*",
"HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Mct*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 2)
# We expect Sidebar and MctAdmin keys here (see
# test_data/client_fixture.py).
basenames = [os.path.basename(r.stat_entry.pathspec.path) for r in results]
self.assertCountEqual(basenames, ["Sidebar", "MctAdmin"])
def testFindsKeyWithInterpolatedGlobWithoutConditions(self):
user = rdf_client.User(sid="S-1-5-20")
client_id = self.SetupClient(0, users=[user])
session_id = self.RunFlow(client_id, [
"HKEY_USERS/%%users.sid%%/Software/Microsoft/Windows/"
"CurrentVersion/*"
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
key = ("/HKEY_USERS/S-1-5-20/"
"Software/Microsoft/Windows/CurrentVersion/Run")
self.assertEqual(results[0].stat_entry.pathspec.CollapsePath(), key)
self.assertEqual(results[0].stat_entry.pathspec.path, key)
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfNothingMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10, bytes_after=10, literal=b"CanNotFindMe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeyIfItMatchesLiteralMatchCondition(self):
vlm = rdf_file_finder.FileFinderContentsLiteralMatchCondition(
bytes_before=10,
bytes_after=10,
literal=b"Windows Sidebar\\Sidebar.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_LITERAL_MATCH,
value_literal_match=vlm)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/"
"Windows/CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfRegexMatchesNothing(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b".*CanNotFindMe.*")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
self.assertFalse(flow_test_lib.GetFlowResults(client_id, session_id))
def testFindsKeyIfItMatchesRegexMatchCondition(self):
value_regex_match = rdf_file_finder.FileFinderContentsRegexMatchCondition(
bytes_before=10, bytes_after=10, regex=b"Windows.+\\.exe")
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.VALUE_REGEX_MATCH,
value_regex_match=value_regex_match)
])
results = flow_test_lib.GetFlowResults(client_id, session_id)
self.assertLen(results, 1)
self.assertLen(results[0].matches, 1)
self.assertEqual(results[0].matches[0].offset, 15)
self.assertEqual(results[0].matches[0].data,
b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun")
self.assertEqual(
results[0].stat_entry.pathspec.CollapsePath(),
"/HKEY_USERS/S-1-5-20/Software/Microsoft/Windows/"
"CurrentVersion/Run/Sidebar")
self.assertEqual(results[0].stat_entry.pathspec.pathtype,
rdf_paths.PathSpec.PathType.REGISTRY)
def testFindsNothingIfModiciationTimeConditionMatchesNothing(self):
modification_time = rdf_file_finder.FileFinderModificationTimeCondition(
min_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
max_last_modified_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
client_id = self.SetupClient(0)
session_id = self.RunFlow(client_id, [self.runkey], [
registry.RegistryFinderCondition(
condition_type=registry.RegistryFinderCondition.Type
.MODI
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/encodings/sprite.py
|
Python
|
unlicense
| 2,500
| 0.002
|
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from .encoding_output import EncodingOutput
class Sprite(AbstractNameDescriptionResource, AbstractModel, Serializable):
def __init__(self, height, width, sprite_name, vtt_name, outputs, distance=None, id_=None, custom_data=None,
name=None, description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._outputs = None
self.height = height
self.width = width
self.distance = distance
self.spriteName = sprite_name
self.vttName = vtt_name
if outputs is not None and not isinstance(outputs, list):
raise InvalidTypeError('outputs must be a list')
self.outputs = outputs
@classmethod
    def parse_from_json_object(cls, json_object):
id_ = json_object['id']
custom_data = json_object.get('customData')
width = json_object.get('width')
height = json_object.get('height')
distance = json_object.get('distance')
sprite_name = json_object.get('spriteName')
vtt_name = json_object.get('vttName')
outputs = json_object.get('outputs')
name = json_object.get('name')
description = json_object.get('description')
sprite = Sprite(id_=id_, custom_data=custom_data, outputs=outputs, name=name, description=description,
height=height, width=width, sprite_name=sprite_name, vtt_name=vtt_name, distance=distance)
return sprite
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, new_outputs):
if new_outputs is None:
return
if not isinstance(new_outputs, list):
raise InvalidTypeError('new_outputs has to be a list of EncodingOutput objects')
if all(isinstance(output, EncodingOutput) for output in new_outputs):
self._outputs = new_outputs
else:
outputs = []
for json_object in new_outputs:
output = EncodingOutput.parse_from_json_object(json_object)
outputs.append(output)
self._outputs = outputs
def serialize(self):
serialized = super().serialize()
serialized['outputs'] = self.outputs
return serialized
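# A minimal usage sketch (field values are placeholders), mirroring the JSON layout consumed
# by parse_from_json_object:
#
#     sprite = Sprite.parse_from_json_object({
#         'id': 'sprite-id', 'width': 320, 'height': 180,
#         'spriteName': 'sprite.jpg', 'vttName': 'sprite.vtt', 'outputs': []
#     })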
|
icereval/osf.io
|
osf_tests/test_elastic_search.py
|
Python
|
apache-2.0
| 47,974
| 0.001772
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import unittest
import logging
import functools
from nose.tools import * # flake8: noqa (PEP8 asserts)
import mock
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate, migrate_collected_metadata
from osf.models import (
Retraction,
NodeLicense,
Tag,
QuickFilesNode,
CollectedGuidMetadata,
)
from addons.osfstorage.models import OsfStorageFile
from scripts.populate_institutions import main as populate_institutions
from osf_tests import factories
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.utils import mock_archive, run_celery_tasks
TEST_INDEX = 'test'
def query(term, raw=False):
results = search.search(build_query(term), index=elastic_search.INDEX, raw=raw)
return results
def query_collections(name):
term = 'category:collectionSubmission AND "{}"'.format(name)
return query(term, raw=True)
def query_user(name):
term = 'category:user AND "{}"'.format(name)
return query(term)
def query_file(name):
term = 'category:file AND "{}"'.format(name)
return query(term)
def query_tag_file(name):
term = 'category:file AND (tags:u"{}")'.format(name)
return query(term)
def retry_assertion(interval=0.3, retries=3):
def test_wrapper(func):
t_interval = interval
t_retries = retries
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError as e:
if retries:
time.sleep(t_interval)
retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs)
else:
raise e
return wrapped
return test_wrapper
class TestCollectionsSearch(OsfTestCase):
def setUp(self):
super(TestCollectionsSearch, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='Salif Keita')
self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False)
self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True)
self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True)
self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True)
self.provider = factories.CollectionProviderFactory()
self.collection_one = factories.CollectionFactory(title='Life of Salif Keita', creator=self.user, is_public=True, provider=self.provider)
self.collection_public = factories.CollectionFactory(title='Best of Salif Keita', creator=self.user, is_public=True, provider=self.provider)
self.collection_private = factories.CollectionFactory(title='Commentary: Best of Salif Keita', creator=self.user, is_public = False, provider=self.provider)
def test_only_public_collections_submissions_are_searchable(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
assert_false(self.node_one.is_collected)
assert_false(self.node_public.is_collected)
self.collection_one.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_public.is_collected)
self.collection_one.save()
self.collection_public.save()
assert_true(self.node_one.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
self.collection_private.collect_object(self.node_two, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
def test_index_on_submission_privacy_changes(self):
# test_submissions_turned_private_are_deleted_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_one.collect_object(self.node_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
with run_celery_tasks():
self.node_one.is_public = False
self.node_one.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_turned_public_are_added_to_index
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.node_private.is_public = True
self.node_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
def test_index_on_collection_privacy_changes(self):
# test_submissions_of_collection_turned_private_are_removed_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
with run_celery_tasks():
self.collection_public.is_public = False
self.collection_public.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_of_collection_turned_public_are_added_to_index
self.collection_private.collect_object(self.node_one, self.user)
self.collection_private.collect_object(self.node_two, self.user)
self.collection_private.collect_object(self.node_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_two.is_collected)
assert_true(self.node_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.collection_private.is_public = True
self.collection_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
def test_collection_submissions_are_removed_from_index_on_delete(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
self.collection_public.delete()
assert_true(self.collection_public.deleted)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_removed_submission_are_removed_from_index(self):
self.collection_public.collect_object(self.node_one, self.user)
assert_true(self.node_one.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
self.collection_public.remove_object(self.node_one)
assert_false(self.node_one.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(do
|
jeffery9/mixprint_addons
|
ineco_thai_account/report/jy_serv.py
|
Python
|
agpl-3.0
| 1,612
| 0.031638
|
#!/usr/bin/env jython
import sys
#sys.path.append("/usr/share/java/itextpdf-5.4.1.jar")
sys.path.append("itextpdf-5.4.1.jar")
#sys.path.append("/usr/share/java/itext-2.0.7.jar")
#sys.path.append("/usr/share/java/xercesImpl.jar")
#sys.path.append("/usr/share/java/xml-apis.jar")
from java.io import FileOutputStream
from com.itextpdf.text.pdf import PdfReader,PdfStamper,BaseFont
#from com.lowagie.text.pdf import PdfReader,PdfStamper,BaseFont
#import re
import time
#import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def pdf_fill(orig_pdf,new_pdf,vals):
#print "pdf_fill",orig_pdf,new_pdf,vals
t0=time.time()
#print orig_pdf
rd=PdfReader(orig_pdf)
#print new_pdf
#print t0
st=PdfStamper(rd,FileOutputStream(new_pdf))
font=BaseFont.createFont("/usr/share/fonts/truetype/thai/Garuda.ttf",BaseFont.IDENTITY_H,BaseFont.EMBEDDED)
form=st.getAcroFields()
for k,v in vals.items():
try:
form.setFieldProperty(k,"textfont",font,None)
form.setField(k,v.decode('utf-8'))
        except Exception,e:
raise Exception("Field %s: %s"%(k,str(e)))
st.setFormFlattening(True)
st.close()
t1=time.time()
#print "finished in %.2fs"%(t1-t0)
return True
def pdf_merge(pdf1,pdf2):
#print "pdf_merge",orig_pdf,vals
t0=time.time()
pdf=pdf1
t1=time.time()
    #print "finished in %.2fs"%(t1-t0)
return pdf
serv=SimpleXMLRPCServer(("localhost",9999))
serv.register_function(pdf_fill,"pdf_fill")
serv.register_function(pdf_merge,"pdf_merge")
print "waiting for requests..."
serv.serve_forever()
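# A hypothetical client-side sketch (not part of this script): from CPython 2 the service
# could be called with
#
#     import xmlrpclib
#     proxy = xmlrpclib.ServerProxy("http://localhost:9999")
#     proxy.pdf_fill("template.pdf", "filled.pdf", {"field_name": "value"})
#
# where the PDF names and field mapping are placeholders.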
|
trivedi/sentapy
|
NaiveBayes.py
|
Python
|
mit
| 7,810
| 0.009091
|
from __future__ import print_function
import math, nltk
from termcolor import colored
from analyze import generate_stopwords, sanitize
from vector import Vector
class NaiveBayesClassifier():
def __init__(self):
"""
Creates:
"""
self.c = {"+" : Vector(), "-" : Vector()}
for vector in self.c.values():
vector.default = 1
self.classes = ["+", "-"]
self.prior = {"+" : 0.55, "-" : 0.45}
self.stopwords = generate_stopwords()
self.features = set()
f = open("data/features.txt", "r")
for line in f:
self.features.add(line.strip().lower())
f.close()
# Begin training
f_pos = open("data/train_pos.txt", "r")
f_neg = open("data/train_neg.txt", "r")
self.train("+", f_pos)
self.train("-", f_neg)
f_pos.close()
f_neg.close()
def train(self, sentiment, tweets):
"""
@param {string} sentiment = "+" || "-"
{iterable} tweets = file_with_tagged_tweets
@return None
"""
freq = self.c[sentiment]
total = 0.0
for tweet in tweets:
total += 1
words = sanitize(tweet, self.stopwords)
for word in words:
if word in self.features: # word in our pre-made features list
freq[word] += 100
for word in freq:
freq[word] = freq[word] / total
freq.default = 1/total
def posterior(self, sentiment, sanitized_tweet):
"""
Computes the posterior (Bayesian Probability term) of a sanitized tweet
Probability model for a classifier is a conditional model
p(C, F1,...,Fn) = ( p(c)p(F1,...,Fn|C) ) / p(F1,...,Fn)
...
In English, using Bayesian Probability terminology, the equation can be written as
prior * likelihood
posterior = --------------------
evidence
in our case, we have:
p(sentiment, sanitized_tweet)
@param {string} sentiment = "+" or "-"
{set} sanitized_tweet = set of sanitized words in tweet
@return {float}
"""
#print "sanitized tweet = %s" % sanitized_tweet
#print math.log(self.prior[sentiment])
#print "self.prior[sentiment] = %s" % self.prior[sentiment]
p = math.log(self.prior[sentiment])
values = self.c[sentiment]
#print "%s : original p: %f" % (sentiment, p)
for word in sanitized_tweet:
if word in self.features: # word is in the features list, so apply the score for the feature based on the sentiment
p += math.log(values[word])
# print "%s : %f" % (word, math.log(values[word]))
else:
p += math.log(.1 - values[word])
# print "%s : %f" % (word, math.log(.1 - values[word]))
#print p
return p
'''
for feature in self.features:
#print "c[%s] = %s" % (feature, c[feature])
if feature in sanitized_tweet:
p += math.log(1 - c[feature]) # add feature's score per the sentiment
else:
p += math.log(1 - c[feature])
return p
'''
def classify(self, tweet, verbose=False, eval=False):
"""
Classifies a text's sentiment given the posterior of of its class
Picks the largest posterior between that of "+" and "-"
        However, if there is not enough confidence (i.e. if posterior(c1|tweet) < 2*posterior(c2|tweet)),
then we classify as neutral ("~") because we don't have conclusive evidence
@param {string} tweet
@return {string} sentiment = "+" || "-" || "~"
"""
sanitized = sanitize(tweet, self.stopwords)
# print sanitized
sentiment = {}
bigrams = nltk.bigrams(sanitized)
trigrams = nltk.trigrams(sanitized)
if len(sanitized) <= 22:
for s in self.classes:
sentiment[s] = self.posterior(s, sanitized) # Calculate posterior for positive and negative sentiment
if verbose: print(s, sanitized, self.posterior(s, sanitized))
elif len(sanitized) == 23:
for s in self.classes:
for pair in bigrams:
sentiment[s] = self.posterior(s, pair)
if verbose: print (s, pair, self.posterior(s, pair))
else:
# use trigram model
for s in self.classes:
for tri in trigrams:
sentiment[s] = self.posterior(s, tri)
if verbose: print (s, tri, self.posterior(s, tri))
positive = sentiment["+"] # Get calculated posterior of positive sentiment
        negative = sentiment["-"] # Get calculated posterior of negative sentiment
#print "positive: %s negative: %s" % (positive, negative)
if "not" in sanitized or "despite" in sanitized:
if positive > + math.log(1.3) + negative:
negative = abs(negative)
            elif negative > math.log(9) + positive:
positive = abs(positive)
if verbose: print("positive: %f negative: %f" % (positive, negative))
if positive > + math.log(1.3) + negative:
if eval: return "+"
else: print(colored('+', 'green'))
elif negative > math.log(.9)+positive:
if eval: return "-"
else: print(colored('-', 'red'))
else:
if eval: return "~"
else: print(colored('~', 'white'))
def evaluate(self):
totalp = totaln = 0
t = w = 0 # total = 0, wrong = 0
fp = fn = 0 # false positive = 0, false negative = 0
for tweet in open("data/verify_pos.txt"):
t += 1.0
totalp += 1.0
e = self.classify(tweet, False, eval=True)
if e != "+":
if e == "-": fn += 1
w += 1.0
tp = t - w # true positive
print(colored('Positive', 'green'), end="")
print(" - accuracy: %.2f%%" % self.accuracy(w, t)) # make function that displays values correctly
t = w = 0
for tweet in open("data/verify_neg.txt"):
t += 1.0
totaln += 1.0
e = self.classify(tweet, False, eval=True)
if e != "-":
if e == "+": fp += 1
w += 1.0
tn = t - w # true negative
print(colored('Negative', 'red'), end="")
print(" - accuracy: %.2f%%" % self.accuracy(w, t))
w = t = 0
for tweet in open("data/verify_neutral.txt"):
t += 1.0
if "~" != self.classify(tweet, verbose=False, eval=True):
w += 1.0
# print "Neutral - accuracy: %s" % self.accuracy(w, t)
# Precision
# = TP / (TP + FP)
precision = (tp / (tp + fp))
print(colored("\nPrecision: ", "magenta") + "%.2f" % precision)
# Recall
# = TP / (TP + FN)
recall = (tp / (tp + fn))
print(colored("Recall: ", "magenta") + "%.2f" % recall)
# Accuracy
# = (TP + TN) / (P + N)
accuracy = (tp + tn) / (totalp + totaln) * 100
print(colored("Accuracy: ", "magenta") + "%.2f%%" % accuracy)
# F-score
# measure of test's accuracy - considers both the precision and recall
f_score = 2 * (precision*recall) / (precision+recall)
print(colored("\nF-Measure: ", "cyan") + "%.2f" % f_score)
def accuracy(self, w, t):
return (1 - (w/t)) * 100
def __repr__(self):
pass
c = NaiveBayesClassifier()
|
Eficent/odoo-operating-unit
|
account_invoice_merge_operating_unit/__init__.py
|
Python
|
agpl-3.0
| 202
| 0
|
# -*- coding: utf-8 -*-
# © 2015 Eficent Business and IT Consulting Services S.L. -
# Jordi Ballester Alomar
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import models
|
pysam-developers/pysam
|
tests/compile_test.py
|
Python
|
mit
| 1,181
| 0.001693
|
'''
compile_test.py - check pyximport functionality with pysam
==========================================================
test script for checking if compilation against
pysam and tabix works.
'''
# clean up previous compilation
import os
import unittest
import pysam
from TestUtils import make_data_files, BAM_DATADIR, TABIX_DATADIR
def setUpModule():
make_data_files(BAM_DATADIR)
make_data_files(TABIX_DATADIR)
try:
    os.unlink('tests/_compile_test.c')
os.unlink('tests/_compile_test.pyxbldc')
except OSError:
pass
import pyximport
pyximport.install(build_in_temp=False)
import _compile_test
class BAMTest(unittest.TestCase):
input_filename = os.path.join(BAM_DATADIR, "ex1.bam")
def testCount(self):
nread = _compile_test.testCountBAM(
pysam.Samfile(self.input_filename))
self.assertEqual(nread, 3270)
class GTFTest(unittest.TestCase):
input_filename = os.path.join(TABIX_DATADIR, "example.gtf.gz")
def testCount(self):
nread = _compile_test.testCountGTF(
pysam.Tabixfile(self.input_filename))
self.assertEqual(nread, 237)
if __name__ == "__main__":
unittest.main()
|
ricardoalejos/RalejosMsrElcDsn
|
SmdAngPtnSnt/pkg/ExpFlows/Ch3LpfPlotSaEvoVsRes.py
|
Python
|
mit
| 5,718
| 0.03148
|
""" Title: Ch3LpfPlotResponse - Chapter 3: Plot filter response
Author: Ricardo Alejos
Date: 2016-09-20
Description: Plots the micro-strip filter response against the specifications
Version: 1.0.0
Comments: -
"""
# Import Python's built-in modules
import csv as _csv
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import math as _math
# Add project root directory to sys.path so other modules can be imported
_projectRoot = _os.path.abspath(__file__ + "\\..\\..\\..")
if _projectRoot not in _sys.path:
_sys.path.insert(0, _projectRoot)
_strThisFileName = _os.path.splitext(_os.path.basename(__file__))[0]
import pkg.Algorithm.SimAnnMin as _sam
import pkg.ObjectiveFunctions.MsLpf as _lpf
import pkg.Helpers.MatlabFunctions as _mf
def _initLogger():
global logger
logger = _logging.getLogger(_strThisFileName)
logger.setLevel(_logging.DEBUG)
map(logger.removeHandler, logger.handlers[:])
ch = _logging.StreamHandler(_sys.stdout)
ch.setLevel(_logging.INFO)
fh = _logging.FileHandler(_strThisFileName + ".log")
fh.setLevel(_logging.DEBUG)
formatter = _logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
logger.debug("A new logger session has started.")
_initLogger()
cases = (
dict(
title = ("Filter response using","\\it{n}\\rm=2 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
n = 2,
w1 = 1.222,
l1 = 5.4050,
w2 = 2.5944,
filename = "ch3_fresp_n2_x0"
),
dict(
title = ("Filter response using","\\it{n}\\rm=4 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
n = 4,
w1 = 1.222,
l1 = 5.4050,
w2 = 2.5944,
filename = "ch3_fresp_n4_x0"
),
dict(
title = ("Filter response using","\\it{n}\\rm=8 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm_0"),
n = 8,
w1 = 1.222,
l1 = 5.4050,
w2 = 2.5944,
filename = "ch3_fresp_n8_x0"
),
dict(
title = ("Filter response using","\\it{n}\\rm=2 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
n = 2,
w1 = 1.5242,
l1 = 4.9000,
w2 = 2.4500,
filename = "ch3_fresp_n2_xopt"
),
dict(
title = ("Filter response using","\\it{n}\\rm=4 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
n = 4,
w1 = 1.4564,
l1 = 4.9000,
w2 = 2.4500,
        filename = "ch3_fresp_n4_xopt"
),
dict(
title = ("Filter response
|
using","\\it{n}\\rm=8 and \\bf\\it{x}\\rm=\\bf\\it{x}\\rm*"),
n = 8,
w1 = 1.3750,
l1 = 4.9000,
w2 = 3.0625,
filename = "ch3_fresp_n8_xopt"
),
)
def PlotResponse(w1, l1, w2, n, title, filename = None):
resp = _lpf.getRawResponseData([w1, l1, w2], n)
freq = resp["freq"]
s21m = [_math.sqrt(resp["s21r"][i]**2+resp["s21i"][i]**2) for i in range(len(freq))]
spec = (([0,0,6e9,6e9],[0.85,0.9,0.9,0.85]), ([8e9,8e9,10e9,10e9],[0.15,0.1,0.1,0.15]))
_mf.PlotVsSpecs(
freq,
s21m,
spec,
title,
"Frequency (Hz)",
"|S_{21}|",
filename
)
def main():
with open(_strThisFileName + "_" + _time.strftime('%Y%m%d%H%M%S') + ".csv", "wb") as fhReport:
lRptFld = [
"k",
"iter",
"ui",
"uo"
]
cwReport = _csv.DictWriter(fhReport, lRptFld)
cwReport.writeheader()
lstSaCfg = ["TTT"]
numItn = 50
dicTmeFun = dict(
T = _sam.TmeFns.typical(numItn),
F = _sam.TmeFns.fast(numItn),
S = _sam.TmeFns.slow(numItn)
)
dicSseFun = dict(
T = _sam.SseFns.typical,
F = _sam.SseFns.fast,
S = _sam.SseFns.slow
)
dicAceFun = dict(
T = _sam.AceFns.typical,
F = _sam.AceFns.fast,
S = _sam.AceFns.slow
)
lstK = [8] #[2,4,8]
for strSaCfg in lstSaCfg:
for k in lstK:
uk = _lpf.getInterfaceFunction(k)
logger.info("Running SAM using the %s configuration."%strSaCfg)
dReportRow = dict((key, None) for key in lRptFld)
dReportRow["k"] = k
SamObj = _sam.SimAnnMin()
SamObj.setObeFun(uk)
SamObj.setTmeLst(dicTmeFun[strSaCfg[0]])
SamObj.setSseFun(dicSseFun[strSaCfg[1]])
SamObj.setAceFun(dicAceFun[strSaCfg[2]])
SamObj.setX0([-0.7,0.5,0.1])
SamObj.runAll()
lstUi = SamObj._lUi
lstUo = SamObj._lUo
lstIter = range(len(lstUi))
_mf.Plot(lstIter,
lstUi,
"Evolution of \\it{u}\\rm_{%d} during SA optimiziation."%(k),
"Iteration",
"\\it{u}\\rm_{%d}"%(k),
"sa-evol_u%dall"%(k))
_mf.Plot(lstIter,
lstUo,
"Evolution of \\it{u}\\rm_{%d}* during SA optimization"%(k),
"Iteration",
"\\it{u}\\rm_{%d}"%(k),
"sa-evol_u%dopt"%(k))
for iter in lstIter:
dReportRow["iter"] = iter
dReportRow["ui"] = "%0.4f"%lstUi[iter]
dReportRow["uo"] = "%0.4f"%lstUo[iter]
logger.info("Finished processing of u%d"%k)
cwReport.writerow(dReportRow)
main()
|
hoechenberger/psychopy
|
psychopy/experiment/components/joystick/mouseJoystick.py
|
Python
|
gpl-3.0
| 1,348
| 0.009644
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
# Support for fake joystick/gamepad during development
# if no 'real' joystick/gamepad is available use keyboard emulation
# 'ctrl' + 'alt' + numberKey
from __future__ import absolute_import, division, print_function
from psychopy import event
class Joystick(object):
def __init__(self, device_number):
self.device_number = device_number
self.numberKeys = ['0','1','2','3','4','5','6','7','8','9']
self.modifierKeys = ['ctrl','alt']
self.mouse = event.Mouse()
def getNumButtons(self):
return(len(self.numberKeys))
def getAllButtons(self):
keys = event.getKeys(keyList=self.numberKeys, modifiers=True)
values = [key for key, modifiers in keys if all([modifiers[modKey] for modKey in self.modifierKeys])]
self.state = [key in values for key in self.numberKeys]
mouseButtons = self.mouse.getPressed()
self.state[:len(mouseButtons)] = [a or b != 0 for (a,b) in zip(self.state, mouseButtons)]
return(self.state)
def getX(self):
(x, y) = self.mouse.getPos()
return x
def getY(self):
(x, y) = self.mouse.getPos()
return y
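A minimal usage sketch of this keyboard/mouse stand-in (hypothetical polling loop; in a real experiment this would run once per frame inside a PsychoPy window loop):

joy = Joystick(0)
buttons = joy.getAllButtons()     # True where a mouse button or ctrl+alt+<digit> is down
x, y = joy.getX(), joy.getY()     # mouse position stands in for the stick axes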
|
MAECProject/pefile-to-maec
|
pefile_to_maec/mappings/file_object.py
|
Python
|
bsd-3-clause
| 586
| 0.001706
|
# -*- coding: Latin-1 -*-
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See License.txt for complete terms.
# file object -> CybOX File Object mappings
file_object_mappings = {'file_format': 'file_format',
'type': 'type',
'file_name': 'file_name',
'file_path': 'file_path',
'size': 'size_in_bytes',
'magic_number': 'magic_number',
'file_extension': 'file_extension',
                        'entropy': 'peak_entropy'}
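An illustrative use of this mapping (the pefile-derived dictionary below is hypothetical):

pefile_dict = {'file_name': 'sample.exe', 'size': 4096, 'entropy': 7.2}
cybox_fields = {file_object_mappings[k]: v
                for k, v in pefile_dict.items()
                if k in file_object_mappings}
# -> {'file_name': 'sample.exe', 'size_in_bytes': 4096, 'peak_entropy': 7.2}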
|
SqueezeStudioAnimation/dpAutoRigSystem
|
dpAutoRigSystem/Extras/sqStickyLipsSetup.py
|
Python
|
gpl-2.0
| 20,524
| 0.007698
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###################################################################
#
# Company: Squeeze Studio Animation
#
# Author: Danilo Pinheiro
# Date: 2014-02-10
# Updated: 2014-02-24
#
# sqStickyLipsSetup.py
#
# This script will create a Sticky Lips setup.
#
#######################################
# importing libraries:
import maya.cmds as cmds
import maya.mel as mel
from functools import partial
# global variables to this module:
CLASS_NAME = "StickyLips"
TITLE = "m061_stickyLips"
DESCRIPTION = "m062_stickyLipsDesc"
ICON = "/Icons/sq_stickyLips.png"
SQSL_VERSION = "1.0"
class StickyLips():
def __init__(self, dpUIinst, langDic, langName):
# redeclaring variables
self.dpUIinst = dpUIinst
self.langDic = langDic
self.langName = langName
# call main function
self.dpMain(self)
def dpMain(self, *args):
self.edgeList = []
self.baseCurve = []
self.baseCurveA = []
self.baseCurveB = []
self.mainCurveA = []
self.mainCurveB = []
self.curveLenght = 0
self.maxIter = 0
self.clusterList = []
self.receptList = []
self.optionCtrl = "Option_Ctrl"
self.wireNodeList = []
if cmds.window('sqStickyLipsWindow', query=True, exists=True):
cmds.deleteUI('sqStickyLipsWindow', window=True)
cmds.window('sqStickyLipsWindow', title='sqStickyLips - v'+str(SQSL_VERSION)+' - UI', widthHeight=(300, 200), menuBar=False, sizeable=False, minimizeButton=True, maximizeButton=False)
cmds.showWindow('sqStickyLipsWindow')
slLayoutColumn = cmds.columnLayout('slLayoutColumn', adjustableColumn=True)
cmds.text("Load meshes:", align="left", parent=slLayoutColumn)
slLayoutA = cmds.rowColumnLayout('slLayoutA', numberOfColumns=2, columnWidth=[(1, 100), (2, 160)], parent=slLayoutColumn)
cmds.button(label="Recept A >>", command=partial(self.sqSLLoad, "A"), parent=slLayoutA)
self.receptA_TF = cmds.textField(parent=slLayoutA)
cmds.button(label="Recept B >>", command=partial(self.sqSLLoad, "B"), parent=slLayoutA)
self.receptB_TF = cmds.textField(parent=slLayoutA)
cmds.text("Select a closed edgeLoop and press the run button", parent=slLayoutColumn)
cmds.button(label="RUN - Generate Sticky Lips", command=self.sqGenerateStickyLips, backgroundColor=[0.3, 1, 0.7], parent=slLayoutColumn)
def sqSLLoad(self, recept, *args):
if recept == "A":
cmds.textField(self.receptA_TF, edit=True, text=cmds.ls(selection=True)[0])
if recept == "B":
cmds.textField(self.receptB_TF, edit=True, text=cmds.ls(selection=True)[0])
def sqGetRecepts(self, receptA=None, receptB=None, *args):
self.receptList = []
self.receptList.append(receptA)
self.receptList.append(receptB)
if receptA == None:
receptAName = cmds.textField(self.receptA_TF, query=True, text=True)
if cmds.objExists(receptAName):
self.receptList[0] = receptAName
if receptB == None:
receptBName = cmds.textField(self.receptB_TF, query=True, text=True)
if cmds.objExists(receptBName):
self.receptList[1] = receptBName
def sqGenerateCurves(self, *args):
self.edgeList = cmds.ls(selection=True, flatten=True)
if not self.edgeList == None and not self.edgeList == [] and not self.edgeList == "":
self.baseCurve = cmds.polyToCurve(name="baseCurve", form=2, degree=1)[0]
cmds.select(self.baseCurve+".ep[*]")
            cmds.insertKnotCurve(cmds.ls(selection=True, flatten=True), constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
toDeleteList = []
p = 2
for k in range((sideA+2), (sideB-1)):
if p%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(k)+"]")
toDeleteList.append(self.baseCurve+".cv["+str(k+len(pointListA)-1)+"]")
p = p+1
q = 2
m = sideA-2
if m >= 0:
while m >= 0:
if not m == sideA and not m == sideB:
if q%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(m)+"]")
m = m-1
q = q+1
cmds.delete(toDeleteList)
cmds.insertKnotCurve([self.baseCurve+".u["+str(len(pointListA)-1)+"]", self.baseCurve+".ep["+str(len(pointListA)-1)+"]"], constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
posListA, posListB = [], []
for i in range(0, len(pointListA)-1):
posListA.append(cmds.xform(pointListA[i], query=True, worldSpace=True, translation=True))
posListB.append(cmds.xform(pointListB[i], query=True, worldSpace=True, translation=True))
self.mainCurveA = cmds.curve(name="StickyLips_Main_A_Crv", degree=1, point=posListA)
self.mainCurveB = cmds.curve(name="StickyLips_Main_B_Crv", degree=1, point=posListB)
cmds.rename(cmds.listRelatives(self.mainCurveA, children=True, shapes=True)[0], self.mainCurveA+"Shape")
cmds.rename(cmds.listRelatives(self.mainCurveB, children=True, shapes=True)[0], self.mainCurveB+"Shape")
cmds.select(self.mainCurveA+".cv[*]")
self.curveLenght = len(cmds.ls(selection=True, flatten=True))
cmds.select(clear=True)
self.sqCheckCurveDirection(self.mainCurveA)
self.sqCheckCurveDirection(self.mainCurveB)
self.baseCurveA = cmds.duplicate(self.mainCurveA, name=self.mainCurveA.replace("_Main_", "_Base_"))[0]
self.baseCurveB = cmds.duplicate(self.mainCurveB, name=self.mainCurveB.replace("_Main_", "_Base_"))[0]
cmds.delete(self.baseCurve)
self.maxIter = len(posListA)
cmds.group(self.mainCurveA, self.mainCurveB, self.baseCurveA, self.baseCurveB, name="StickyLips_StaticData_Grp")
else:
mel.eval("warning \"Please, select an closed edgeLoop.\";")
def sqCheckCurveDirection(self, thisCurve, *args):
posMinX = cmds.xform(thisCurve+".cv[0]", query=True, worldSpace=True, translation=True)[0]
posMaxX = cmds.xform(thisCurve+".cv["+str(self.curveLenght-1)+"]", query=True, worldSpace=True, translation=True)[0]
if posMinX > posMaxX:
cmds.reverseCurve(thisCurve, constructionHistory=False, replaceOriginal=True)
def sqGetPointLists(self, *args):
cmds.select(self.baseCurve+".cv[*]")
pointList = cmds.ls(selection=True, flatten=True)
minX = 0
maxX = 0
sideA = 0
sideB = 0
for i in range(0, len(pointList)):
pointPosX = cmds.xform(pointList[i], query=True, worldSpace=True, translation=True)[0]
if pointPosX < minX:
minX = pointPosX
sideA = i
elif pointPosX > maxX:
maxX = pointPosX
sideB = i
if sideA > sideB:
sideC = sideA
sideA = sideB
sideB = sideC
pointListA = pointList[sideA:(sideB+1)]
pointListB = pointList[sideB:]
for j in range(0, (sideA+1)):
pointListB.append(pointList[j])
return pointListA, pointListB, sideA, sideB
def sqCreateClusters(self, curveA, curveB, *args):
self.clusterList = []
for i in range(1, self.curveLenght-1):
self.clusterList.append(cmds.cluster([curveA+".cv["+str(i)+"]",
|
corradio/electricitymap
|
parsers/NL.py
|
Python
|
gpl-3.0
| 9,768
| 0.005119
|
#!/usr/bin/env python3
import arrow
import math
from . import statnett
from . import ENTSOE
from . import DK
import logging
import pandas as pd
import requests
def fetch_production(zone_key='NL', session=None, target_datetime=None,
logger=logging.getLogger(__name__), energieopwek_nl=True):
if target_datetime is None:
target_datetime = arrow.utcnow()
else:
target_datetime = arrow.get(target_datetime)
r = session or requests.session()
consumptions = ENTSOE.fetch_consumption(zone_key=zone_key,
session=r,
target_datetime=target_datetime,
logger=logger)
if not consumptions:
return
for c in consumptions:
del c['source']
df_consumptions = pd.DataFrame.from_dict(consumptions).set_index(
'datetime')
# NL has exchanges with BE, DE, NO, GB, DK-DK1
exchanges = []
for exchange_key in ['BE', 'DE', 'GB']:
zone_1, zone_2 = sorted([exchange_key, zone_key])
exchange = ENTSOE.fetch_exchange(zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger)
if not exchange:
return
exchanges.extend(exchange or [])
# add NO data, fetch once for every hour
# This introduces an error, because it doesn't use the average power flow
# during the hour, but rather only the value during the first minute of the
# hour!
zone_1, zone_2 = sorted(['NO', zone_key])
exchange_NO = [statnett.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=dt.datetime,
logger=logger)
for dt in arrow.Arrow.range(
'hour',
arrow.get(min([e['datetime']
for e in exchanges])).replace(minute=0),
arrow.get(max([e['datetime']
for e in exchanges])).replace(minute=0))]
exchanges.extend(exchange_NO)
# add DK1 data (only for dates after operation)
if target_datetime > arrow.get('2019-08-24', 'YYYY-MM-DD') :
zone_1, zone_2 = sorted(['DK-DK1', zone_key])
df_dk = pd.DataFrame(DK.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=target_datetime,
logger=logger))
# Because other exchanges and consumption data is only available per hour
        # we floor the timestamp to hour and group by hour with averaging of netFlow
df_dk['datetime'] = df_dk['datetime'].dt.floor('H')
exchange_DK = df_dk.groupby(['datetime']).aggregate({'netFlow' : 'mean',
'sortedZoneKeys': 'max', 'source' : 'max'}).reset_index()
# because averaging with high precision numbers leads to rounding errors
exchange_DK = exchange_DK.round({'netFlow': 3})
exchanges.extend(exchange_DK.to_dict(orient='records'))
# We want to know the net-imports into NL, so if NL is in zone_1 we need
# to flip the direction of the flow. E.g. 100MW for NL->DE means 100MW
# export to DE and needs to become -100MW for import to NL.
for e in exchanges:
if(e['sortedZoneKeys'].startswith('NL->')):
e['NL_import'] = -1 * e['netFlow']
else:
e['NL_import'] = e['netFlow']
del e['source']
del e['netFlow']
df_exchanges = pd.DataFrame.from_dict(exchanges).set_index('datetime')
# Sum all exchanges to NL imports
df_exchanges = df_exchanges.groupby('datetime').sum()
# Fill missing values by propagating the value forward
df_consumptions_with_exchanges = df_consumptions.join(df_exchanges).fillna(
method='ffill', limit=3) # Limit to 3 x 15min
# Load = Generation + netImports
# => Generation = Load - netImports
df_total_generations = (df_consumptions_with_exchanges['consumption']
- df_consumptions_with_exchanges['NL_import'])
# Fetch all production
# The energieopwek_nl parser is backwards compatible with ENTSOE parser.
# Because of data quality issues we switch to using energieopwek, but if
# data quality of ENTSOE improves we can switch back to using a single
# source.
productions_ENTSOE = ENTSOE.fetch_production(zone_key=zone_key, session=r,
target_datetime=target_datetime, logger=logger)
if energieopwek_nl:
productions_eopwek = fetch_production_energieopwek_nl(session=r,
target_datetime=target_datetime, logger=logger)
# For every production value we look up the corresponding ENTSOE
# values and copy the nuclear, gas, coal, biomass and unknown production.
productions = []
for p in productions_eopwek:
entsoe_value = next((pe for pe in productions_ENTSOE
if pe["datetime"] == p["datetime"]), None)
if entsoe_value:
p["production"]["nuclear"] = entsoe_value["production"]["nuclear"]
p["production"]["gas"] = entsoe_value["production"]["gas"]
p["production"]["coal"] = entsoe_value["production"]["coal"]
p["production"]["biomass"] = entsoe_value["production"]["biomass"]
p["production"]["unknown"] = entsoe_value["production"]["unknown"]
productions.append(p)
else:
productions = productions_ENTSOE
if not productions:
return
# Flatten production dictionaries (we ignore storage)
for p in productions:
        # if for some reason there's no unknown value
if not 'unknown' in p['production'] or p['production']['unknown'] == None:
p['production']['unknown'] = 0
Z = sum([x or 0 for x in p['production'].values()])
# Only calculate the difference if the datetime exists
# If total ENTSOE reported production (Z) is less than total generation
# (calculated from consumption and imports), then there must be some
# unknown production missing, so we add the difference.
# The difference can actually be negative, because consumption is based
# on TSO network load, but locally generated electricity may never leave
# the DSO network and be substantial (e.g. Solar).
if p['datetime'] in df_total_generations and Z < df_total_generations[p['datetime']]:
p['production']['unknown'] = round((
df_total_generations[p['datetime']] - Z + p['production']['unknown']), 3)
# Filter invalid
# We should probably add logging to this
return [p for p in productions if p['production']['unknown'] > 0]
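To make the sign convention for 'NL_import' above concrete, a small sketch with a made-up exchange record (the parsers return flows on the sorted zone pair, with a positive value meaning flow in the direction of the arrow):

exchange = {'sortedZoneKeys': 'NL->NO', 'netFlow': 100.0}   # 100 MW flowing NL -> NO
if exchange['sortedZoneKeys'].startswith('NL->'):
    nl_import = -exchange['netFlow']
else:
    nl_import = exchange['netFlow']
# nl_import == -100.0: an export from NL counts as a negative import into NL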
def fetch_production_energieopwek_nl(session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
if target_datetime is None:
target_datetime = arrow.utcnow()
# Get production values for target and target-1 day
df_current = get_production_data_energieopwek(
target_datetime, session=session)
df_previous = get_production_data_energieopwek(
target_datetime.shift(days=-1), session=session)
# Concat them, oldest first to keep chronological order intact
df = pd.concat([df_previous, df_current])
output = []
base_time = arrow.get(target_datetime.date(), 'Europe/Paris').shift(days=-1).to('utc')
for i, prod in enumerate(df.to_dict(orient='records')):
output.append(
{
'zoneKey': 'NL',
'datetime': base_time.shift(minutes=i*15).datetime,
'production': prod,
'source': 'energieopwek.nl, entsoe.eu'
}
)
return output
def get_production_data_energieopwek(date, session=None):
r = session or request
|
sergiooramas/tartarus
|
src/load.py
|
Python
|
mit
| 6,438
| 0.005747
|
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from sklearn.preprocessing import StandardScaler, normalize
import sys
import common
FACT = 'pmi' # nmf/pmi_wl/pmi_wp/pmi_wlp
DIM = 200
DATASET = 'MSDmm'
WINDOW = 1
NSAMPLES = 'all' #all
MAX_N_SCALER = 300000
N_PATCHES = 3
def scale(X, scaler=None, max_N=MAX_N_SCALER):
shape = X.shape
X.shape = (shape[0], shape[2] * shape[3])
if not scaler:
        scaler = StandardScaler()
N = pd.np.min([len(X), max_N]) # Limit the number of patches to fit
scaler.fit(X[:N])
X = scaler.transform(X)
X.shape = shape
return X, scaler
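A usage sketch of scale() (the patch shapes are illustrative; the real ones depend on the dataset and window size). Note that the function reshapes X in place and expects a (N, 1, H, W) array:

X_train = np.random.rand(100, 1, 96, 187)
X_train, scaler = scale(X_train)          # fit a fresh StandardScaler on the training patches
X_valid = np.random.rand(20, 1, 96, 187)
X_valid, _ = scale(X_valid, scaler)       # reuse the fitted scaler on held-out patches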
def load_X(args):
    data_path = '../data/patches_%s_%s/' % (DATASET, args.window)
progress_update = 1
data_files = glob.glob(os.path.join(data_path, "*.npy"))
#songs_in = set(open(common.DATASETS_DIR+'/trainset_%s.tsv' %
# (args.dataset)).read().splitlines())
if len(data_files) == 0:
raise ValueError("Error: Empty directory %s" % data_path)
index_factors = set(open(common.DATASETS_DIR+'/items_index_train_'+DATASET+'.tsv').read().splitlines())
data_files_in = []
for file in data_files:
filename = file[file.rfind('/')+1:-4]
item_id, npatch = filename.split('_')
if int(npatch) < args.npatches and item_id in index_factors:
data_files_in.append(file)
all_X = []
songs_dataset = []
X_mbatch = np.load(data_files_in[0])
X = np.zeros((len(data_files_in),1,X_mbatch.shape[0],X_mbatch.shape[1]))
for i, data_file in enumerate(data_files_in):
song_id = data_file[data_file.rfind('/')+1:data_file.rfind('_')]
X_mbatch = np.load(data_file)
X[i,0,:,:] = X_mbatch
#if len(all_Y) == 0:
# plt.imshow(X_mbatch,interpolation='nearest',aspect='equal')
# plt.show()
#all_X.append(X_mbatch.reshape(-1,X_mbatch.shape[0],X_mbatch.shape[1]))
songs_dataset.append(song_id)
if i % progress_update == 0:
sys.stdout.write("\rLoading Data: %.2f%%" % (100 * i / float(len(data_files_in))))
sys.stdout.flush()
sys.stdout.write("\rLoading Data: 100%")
sys.stdout.flush()
print "X data loaded"
output_suffix_X = '%s_%sx%s' % (args.dataset,args.npatches,args.window)
scaler_file=common.DATASETS_DIR+'/train_data/scaler_%s.pk' % output_suffix_X
X,scaler = scale(X)
pickle.dump(scaler,open(scaler_file,'wb'))
X_file = common.DATASETS_DIR+'/train_data/X_train_'+output_suffix_X
np.save(X_file,X)
fw=open(common.DATASETS_DIR+'/train_data/index_train_'+output_suffix_X+'.tsv','w')
fw.write("\n".join(songs_dataset))
def load_Y(args):
progress_update = 1
output_suffix_X = '%s_%sx%s' % (args.dataset,args.npatches,args.window)
index_X=open(common.DATASETS_DIR+'/train_data/index_train_'+output_suffix_X+'.tsv').read().splitlines()
song_factors=np.load(common.DATASETS_DIR+'/item_factors_%s_%s_%s.npy' % (args.fact,args.dim,args.dataset))
song_index=open(common.DATASETS_DIR+'/items_index_%s.tsv' % (args.dataset)).read().splitlines()
#print common.DATASETS_DIR+'/song_factors_%s_%s_%s.npy' % (args.fact,args.dim,args.dataset)
print len(song_index)
inv_song_index = dict()
for i,song_id in enumerate(song_index):
inv_song_index[song_id] = i
# Read all data into memory (this might need to change if data too large)
all_Y = []
songs_dataset = []
Y = np.zeros((len(index_X), int(args.dim)))
for i, song_id in enumerate(index_X):
# all_Y.append(song_factors[inv_song_index[song_id]])
Y[i, :] = song_factors[inv_song_index[song_id]]
if i % progress_update == 0:
sys.stdout.write("\rLoading Data: %.2f%%" %
(100 * i / float(len(index_X))))
sys.stdout.flush()
sys.stdout.write("\rLoading Data: 100%")
sys.stdout.flush()
print "Y data loaded"
output_suffix_Y = '%s_%s_%s_%sx%s' % (args.fact, args.dim, args.dataset,
args.npatches, args.window)
normalize(Y, copy=False)
Y_file = common.DATASETS_DIR+'/train_data/Y_train_'+output_suffix_Y
np.save(Y_file, Y)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Trains the model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d',
'--dataset',
dest="dataset",
type=str,
help='Dataset name',
default=DATASET)
parser.add_argument('-f',
'--fact',
dest="fact",
type=str,
help='Factorization method',
default=FACT)
parser.add_argument('-dim',
'--dim',
dest="dim",
type=str,
help='Factors dimensions',
default=DIM)
parser.add_argument('-w',
'--window',
dest="window",
type=str,
help='Patches window size in seconds',
default=WINDOW)
parser.add_argument('-np',
'--npatches',
dest="npatches",
type=str,
help='Number of patches',
default=N_PATCHES)
parser.add_argument('-x',
'--loadx',
dest="loadX",
help='Load X',
action='store_true',
default=False)
parser.add_argument('-y',
'--loady',
dest="loadY",
help='Load Y',
action='store_true',
default=False)
parser.add_argument('-all',
'--all',
dest="all_data",
help='All data, test and train set together',
action='store_true',
default=False)
args = parser.parse_args()
if args.loadX:
load_X(args)
if args.loadY:
load_Y(args)
|
bikoheke/hacktoberfest
|
scripts/hello_world_amlaanb.py
|
Python
|
gpl-3.0
| 34
| 0.029412
|
import sys
print("Hello, World!")
|
jcdoll/PiezoD
|
python/archive/lbfgs.py
|
Python
|
gpl-3.0
| 752
| 0.009309
|
from cantilever_divingboard import *
# We need to scale the parameters before applying the optimization algorithm
# Normally there are about 20 orders of magnitude between the dimensions and
# the doping concentration, so this is a critical step
# Run the script
freq_min = 1e3
freq_max = 1e5
omega_min = 100e3
initial_guess = (50e-6, 1e-6, 1e-6,
30e-6, 1e-6, 1e-6, 500e-9, 5., 1e15)
constraints = ((30e-6, 100e-6), (500e-9, 20e-6), (1e-6, 10e-6),
(2e-6, 100e-6), (500e-9, 5e-6), (500e-9, 20e-6), (30e-9, 10e-6),
(1., 10.), (1e15, 4e19))
x = optimize_cantilever(initial_guess, constraints, freq_min, freq_max, omega_min)
c = cantilever_divingboard(freq_min, freq_max, x)
c.print_performance()
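One common way to handle the scaling mentioned at the top of this script (a sketch only, not the normalization actually used inside cantilever_divingboard) is to map each parameter onto [0, 1] using its constraint interval and optimize in that space:

import numpy as np

lo = np.array([c[0] for c in constraints])
hi = np.array([c[1] for c in constraints])

def to_unit(x):
    return (np.asarray(x) - lo) / (hi - lo)     # every entry becomes O(1)

def from_unit(u):
    return lo + np.asarray(u) * (hi - lo)       # map back to physical units

x0_scaled = to_unit(initial_guess)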
|
apache/incubator-airflow
|
airflow/example_dags/example_complex.py
|
Python
|
apache-2.0
| 7,913
| 0.004929
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows the complex DAG structure.
"""
from datetime import datetime
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
with models.DAG(
dag_id="example_complex",
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example', 'example2', 'example3'],
) as dag:
# Create
create_entry_group = BashOperator(task_id="create_entry_group", bash_command="echo create_entry_group")
create_entry_group_result = BashOperator(
task_id="create_entry_group_result", bash_command="echo create_entry_group_result"
)
create_entry_group_result2 = BashOperator(
task_id="create_entry_group_result2", bash_command="echo create_entry_group_result2"
)
create_entry_gcs = BashOperator(task_id="create_entry_gcs", bash_command="echo create_entry_gcs")
create_entry_gcs_result = BashOperator(
task_id="create_entry_gcs_result", bash_command="echo create_entry_gcs_result"
)
create_entry_gcs_result2 = BashOperator(
task_id="create_entry_gcs_result2", bash_command="echo create_entry_gcs_result2"
)
create_tag = BashOperator(task_id="create_tag", bash_command="echo create_tag")
create_tag_result = BashOperator(task_id="create_tag_result", bash_command="echo create_tag_result")
create_tag_result2 = BashOperator(task_id="create_tag_result2", bash_command="echo create_tag_result2")
create_tag_template = BashOperator(task_id="create_tag_template", bash_command="echo create_tag_template")
create_tag_template_result = BashOperator(
task_id="create_tag_template_result", bash_command="echo create_tag_template_result"
)
create_tag_template_result2 = BashOperator(
task_id="create_tag_template_result2", bash_command="echo create_tag_template_result2"
)
create_tag_template_field = BashOperator(
task_id="create_tag_template_field", bash_command="echo create_tag_template_field"
)
create_tag_template_field_result = BashOperator(
task_id="create_tag_template_field_result", bash_command="echo create_tag_template_field_result"
)
create_tag_template_field_result2 = BashOperator(
task_id="create_tag_template_field_result2", bash_command="echo create_tag_template_field_result"
)
# Delete
delete_entry = BashOperator(task_id="delete_entry", bash_command="echo delete_entry")
create_entry_gcs >> delete_entry
delete_entry_group = BashOperator(task_id="delete_entry_group", bash_command="echo delete_entry_group")
    create_entry_group >> delete_entry_group
delete_tag = BashOperator(task_id="delete_tag", bash_command="echo delete_tag")
create_tag >> delete_tag
delete_tag_template_field = BashOperator(
task_id="delete_tag_template_field", bash_command="echo delete_tag_t
|
emplate_field"
)
delete_tag_template = BashOperator(task_id="delete_tag_template", bash_command="echo delete_tag_template")
# Get
get_entry_group = BashOperator(task_id="get_entry_group", bash_command="echo get_entry_group")
get_entry_group_result = BashOperator(
task_id="get_entry_group_result", bash_command="echo get_entry_group_result"
)
get_entry = BashOperator(task_id="get_entry", bash_command="echo get_entry")
get_entry_result = BashOperator(task_id="get_entry_result", bash_command="echo get_entry_result")
get_tag_template = BashOperator(task_id="get_tag_template", bash_command="echo get_tag_template")
get_tag_template_result = BashOperator(
task_id="get_tag_template_result", bash_command="echo get_tag_template_result"
)
# List
list_tags = BashOperator(task_id="list_tags", bash_command="echo list_tags")
list_tags_result = BashOperator(task_id="list_tags_result", bash_command="echo list_tags_result")
# Lookup
lookup_entry = BashOperator(task_id="lookup_entry", bash_command="echo lookup_entry")
lookup_entry_result = BashOperator(task_id="lookup_entry_result", bash_command="echo lookup_entry_result")
# Rename
rename_tag_template_field = BashOperator(
task_id="rename_tag_template_field", bash_command="echo rename_tag_template_field"
)
# Search
search_catalog = BashOperator(task_id="search_catalog", bash_command="echo search_catalog")
search_catalog_result = BashOperator(
task_id="search_catalog_result", bash_command="echo search_catalog_result"
)
# Update
update_entry = BashOperator(task_id="update_entry", bash_command="echo update_entry")
update_tag = BashOperator(task_id="update_tag", bash_command="echo update_tag")
update_tag_template = BashOperator(task_id="update_tag_template", bash_command="echo update_tag_template")
update_tag_template_field = BashOperator(
task_id="update_tag_template_field", bash_command="echo update_tag_template_field"
)
# Create
create_tasks = [
create_entry_group,
create_entry_gcs,
create_tag_template,
create_tag_template_field,
create_tag,
]
chain(*create_tasks)
create_entry_group >> delete_entry_group
create_entry_group >> create_entry_group_result
create_entry_group >> create_entry_group_result2
create_entry_gcs >> delete_entry
create_entry_gcs >> create_entry_gcs_result
create_entry_gcs >> create_entry_gcs_result2
create_tag_template >> delete_tag_template_field
create_tag_template >> create_tag_template_result
create_tag_template >> create_tag_template_result2
create_tag_template_field >> delete_tag_template_field
create_tag_template_field >> create_tag_template_field_result
create_tag_template_field >> create_tag_template_field_result2
create_tag >> delete_tag
create_tag >> create_tag_result
create_tag >> create_tag_result2
# Delete
delete_tasks = [
delete_tag,
delete_tag_template_field,
delete_tag_template,
delete_entry_group,
delete_entry,
]
chain(*delete_tasks)
# Get
create_tag_template >> get_tag_template >> delete_tag_template
get_tag_template >> get_tag_template_result
create_entry_gcs >> get_entry >> delete_entry
get_entry >> get_entry_result
create_entry_group >> get_entry_group >> delete_entry_group
get_entry_group >> get_entry_group_result
# List
create_tag >> list_tags >> delete_tag
list_tags >> list_tags_result
# Lookup
create_entry_gcs >> lookup_entry >> delete_entry
lookup_entry >> lookup_entry_result
# Rename
create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field
# Search
chain(create_tasks, search_catalog, delete_tasks)
search_catalog >> search_catalog_result
# Update
create_entry_gcs >> update_entry >> delete_entry
create_tag >> update_tag >> delete_tag
create_tag_template >> update_tag_template >> delete_tag_template
create_tag_template_field >> update_tag_template_field >> rename_tag_template_field
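As a reading aid, the chain() helper used above is shorthand for the >> operator; a rough, self-contained equivalence with illustrative task names:

with models.DAG(dag_id="chain_demo", schedule_interval=None,
                start_date=datetime(2021, 1, 1), catchup=False):
    t1 = BashOperator(task_id="t1", bash_command="echo 1")
    t2 = BashOperator(task_id="t2", bash_command="echo 2")
    t3 = BashOperator(task_id="t3", bash_command="echo 3")
    chain(t1, t2, t3)    # same dependencies as: t1 >> t2 >> t3
    # chain also accepts lists, e.g. chain([t1, t2], t3) behaves like [t1, t2] >> t3,
    # which is how chain(create_tasks, search_catalog, delete_tasks) fans out above.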
|
malinoff/amqproto
|
tests/conftest.py
|
Python
|
apache-2.0
| 288
| 0
|
def pytest_addoption(parser):
parser.addoption(
        '--integration',
action='store_true',
        help='run integration tests',
)
def pytest_ignore_collect(path, config):
if not config.getoption('integration') and 'integration' in str(path):
return True
|
4dn-dcic/tibanna
|
awsf3/log.py
|
Python
|
mit
| 1,003
| 0.004985
|
def read_logfile_by_line(logfile):
"""generator function that yields the log file content line by line"""
with open(logfile, 'r') as f:
for line in f:
yield line
yield None
def parse_commands(log_content):
"""
parse cwl commands from the line-by-line generator of log file content and
returns the commands as a list of command line lists, each corresponding to a step run.
"""
command_list = []
command = []
in_command = False
line = next(log_content)
while(line):
line = line.strip('\n')
if '[job' in line and line.endswith('docker \\'):
line = 'docker \\' # remove the other stuff
in_command = True
if in_command:
command.append(line.strip('\\').rstrip(' '))
if not line.endswith('\\'):
                    in_command = False
                    command_list.append(command)
command = []
line = next(log_content)
return(command_list)
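A small sketch of how parse_commands() groups a backslash-continued docker invocation (the log lines below are made up; real cwltool logs contain much more):

fake_log = [
    "[job step1] /tmp/xyz$ docker \\\n",
    "    run \\\n",
    "    some-image arg1 arg2\n",
]

def fake_content():
    for line in fake_log:
        yield line
    yield None

print(parse_commands(fake_content()))
# -> [['docker', '    run', '    some-image arg1 arg2']]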
|
diegodelemos/cap-reuse
|
step-broker/app.py
|
Python
|
gpl-3.0
| 3,081
| 0
|
import copy
import json
import logging
import threading
import uuid
from flask import Flask, abort, jsonify, request
import kubernetes
app = Flask(__name__)
app.secret_key = "mega secret key"
JOB_DB = {}
def get_config(experiment):
with open('config_template.json', 'r') as config:
return json.load(config)[experiment]
def filter_jobs(job_db):
job_db_copy = copy.deepcopy(job_db)
for job_name in job_db_copy:
del(job_db_copy[job_name]['obj'])
del(job_db_copy[job_name]['deleted'])
if job_db_copy[job_name].get('pod'):
del(job_db_copy[job_name]['pod'])
return job_db_copy
@app.route('/api/v1.0/jobs', methods=['GET'])
def get_jobs():
return jsonify({"jobs": filter_jobs(JOB_DB)}), 200
@app.route('/api/v1.0/k8sjobs', methods=['GET'])
def get_k8sjobs():
return jsonify({"jobs": kubernetes.get_jobs()}), 200
@app.route('/api/v1.0/jobs', methods=['POST'])
def create_job():
if not request.json \
or not ('experiment') in request.json\
or not ('docker-img' in request.json):
print(request.json)
abort(400)
cmd = request.json['cmd'] if 'cmd' in request.json else None
env_vars = (request.json['env-vars']
if 'env-vars' in request.json else {})
experiment_config = get_config(request.json['experiment'])
k8s_volume = experiment_config['k8s_volume']
job_id = str(uuid.uuid4())
job_obj = kubernetes.create_job(job_id,
request.json['docker-img'],
cmd,
[(k8s_volume, '/data')],
env_vars,
request.json['experiment'])
if job_obj:
job = copy.deepcopy(request.json)
job['job-id'] = job_id
job['status'] = 'started'
job['restart_count'] = 0
job['max_restart_count'] = 3
job['obj'] = job_obj
job['deleted'] = False
JOB_DB[job_id] = job
return jsonify({'job-id': job_id}), 201
else:
return jsonify({'job': 'Could not be allocated'}), 500
@app.route('/api/v1.0/jobs/<job_id>', methods=['GET'])
def get_job(job_id):
if job_id in JOB_DB:
job_copy = copy.deepcopy(JOB_DB[job_id])
del(job_copy['obj'])
del(job_copy['deleted'])
if job_copy.get('pod'):
del(job_copy['pod'])
return jsonify({'job': job_copy}), 200
else:
abort(404)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(threadName)s - %(levelname)s: %(message)s'
)
job_event_reader_thread = threading.Thread(target=kubernetes.watch_jobs,
args=(JOB_DB,))
job_event_reader_thread.start()
    pod_event_reader_thread = threading.Thread(target=kubernetes.watch_pods,
args=(JOB_DB,))
pod_event_reader_thread.start()
app.run(debug=True, port=5000,
host='0.0.0.0')
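A hypothetical client call against this service (the experiment name must exist in config_template.json; 'atlas' here is only an example):

import requests

resp = requests.post(
    'http://localhost:5000/api/v1.0/jobs',
    json={'experiment': 'atlas', 'docker-img': 'busybox', 'cmd': 'echo hello'},
)
print(resp.status_code, resp.json())    # 201 {'job-id': '<uuid>'} on success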
|
zyga/guacamole
|
guacamole/ingredients/test_cmdtree.py
|
Python
|
gpl-3.0
| 2,078
| 0
|
# encoding: utf-8
# This file is part of Guacamole.
#
# Copyright 2012-2015 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Guacamole is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3,
# as published by the Free Software Foundation.
#
# Guacamole is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Guacamole. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the cmdtree module."""
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from guacamole.core import Bowl
from guacamole.ingredients.cmdtree import CommandTreeBuilder
from guacamole.recipes.cmd import Command
class _sub(Command):
spices = ('mustard',)
class _cmd(Command):
spices = ('salt', 'pepper')
sub_commands = (('sub', _sub),)
class CommandTreeBuilderTests(unittest.TestCase):
"""Tests for the CommandTreeBuilder class."""
def setUp(self):
"""Common initialization method."""
self.bowl = Bowl([CommandTreeBuilder(_cmd())])
self.bowl.eat()
def test_build_command_tree(self):
"""check if a correct command tree is built."""
cmd_obj = self.bowl.context.cmd_tree[1]
        sub_obj = self.bowl.context.cmd_tree[2][0][1]
self.assertIsInstance(cmd_obj, _cmd)
self.assertIsInstance(sub_obj, _sub)
self.assertEqual(
self.bowl.context.cmd_tree,
(None, cmd_obj, (('sub', sub_obj, ()),)))
    def test_collect_spices(self):
"""check if spices are collected from top-level command only."""
self.assertTrue(self.bowl.has_spice('salt'))
self.assertTrue(self.bowl.has_spice('pepper'))
self.assertFalse(self.bowl.has_spice('mustard'))
|
belokop/indico_bare
|
indico/modules/attachments/controllers/compat.py
|
Python
|
gpl-3.0
| 3,649
| 0.001644
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import current_app, redirect, request
from werkzeug.exceptions import NotFound
from indico.modules.attachments.controllers.util import SpecificAttachmentMixin
from indico.modules.attachments.models.legacy_mapping import LegacyAttachmentFolderMapping, LegacyAttachmentMapping
from indico.modules.events import LegacyEventMapping
from indico.util.string import is_legacy_id
from indico.web.flask.util import url_for
from MaKaC.webinterface.rh.base import RHSimple, RH
def _clean_args(kwargs):
if 'event_id' not in kwargs:
raise NotFound
if is_legacy_id(kwargs['event_id']):
mapping = LegacyEventMapping.find(legacy_event_id=kwargs['event_id']).first_or_404()
kwargs['event_id'] = mapping.event_id
if 'contrib_id' in kwargs:
kwargs['contribution_id'] = kwargs.pop('contrib_id')
if 'subcontrib_id' in kwargs:
kwargs['subcontribution_id'] = kwargs.pop('subcontrib_id')
# extension is just to make the links prettier
kwargs.pop('ext', None)
# session id is only used for actual sessions, not for stuff inside them
if 'contribution_id' in kwargs:
kwargs.pop('session_id', None)
@RHSimple.wrap_function
def compat_folder(**kwargs):
_clean_args(kwargs)
folder = LegacyAttachmentFolderMapping.find(**kwargs).first_or_404().folder
if folder.is_deleted:
raise NotFound
return redirect(url_for('attachments.list_folder', folder), 302 if current_app.debug else 301)
def compat_folder_old():
mapping = {'confId': 'event_id',
'sessionId': 'session_id',
'contribId': 'contrib_id',
'subContId': 'subcontrib_id',
'materialId': 'material_id'}
kwargs = {mapping[k]: v for k, v in request.args.iteritems() if k in mapping}
return compat_folder(**kwargs)
def _redirect_to_note(**kwargs):
del kwargs['material_id']
del kwargs['resource_id']
kwargs['confId'] = kwargs.pop('event_id')
return redirect(url_for('event_notes.view', **kwargs), 302 if current_app.debug else 301)
@RHSimple.wrap_function
def compat_attachment(**kwargs):
_clean_args(kwargs)
mapping = LegacyAttachmentMapping.find_first(**kwargs)
if mapping is None:
if kwargs['material_id'] == 'minutes' and kwargs['resource_id'] == 'minutes':
return _redirect_to_note(**kwargs)
raise NotFound
attachment = mapping.attachment
if attachment.is_deleted or attachment.folder.is_deleted:
raise NotFound
return redirect(attachment.download_url, 302 if current_app.debug else 301)
class RHCompatAttachmentNew(SpecificAttachmentMixin, RH):
normalize_url_spec = dict(SpecificAttachmentMixin.normalize_url_spec,
endpoint='attachments.download')
def _process(self):
raise Exception('This RH should only perform URL normalization!')
|
alexander-matsievsky/HackerRank
|
All_Domains/Python/Sets/symmetric-difference.py
|
Python
|
mit
| 194
| 0
|
import sys
[_, ms, _, ns] = list(sys.stdin)
ms = set(int(m) for m in ms.split(' '))
ns = set(int(n) for n in ns.split(' '))
print(sep='\n', *sorted(ms.difference(ns).union(ns.difference(ms))))
|
andrzejgorski/whylog
|
whylog/log_reader/read_utils.py
|
Python
|
bsd-3-clause
| 2,366
| 0.000423
|
import os
from whylog.log_reader.exceptions import EmptyFile, OffsetBiggerThanFileSize
class ReadUtils(object):
STANDARD_BUFFER_SIZE = 512
@classmethod
def size_of_opened_file(cls, fh):
prev_position = fh.tell()
fh.seek(0, os.SEEK_END)
size = fh.tell()
fh.seek(prev_position)
return size
@classmethod
def _read_content(cls, fd, position, buf_size):
fd.seek(position)
return fd.read(buf_size)
@classmethod
def _read_split_lines(cls, fd, position, buf_size):
content = cls._read_content(fd, position, buf_size)
return content.split('\n')
@classmethod
def _join_results(cls, first_part, second_part):
if not first_part:
if not second_part:
return []
return second_part
if not second_part:
return first_part
return first_part[:-1] + ["".join((first_part[-1], second_part[0]))] + second_part[1:]
@classmethod
def _expand_after(cls, fd, position):
fd.seek(position)
line = fd.readline()
if not line:
raise OffsetBiggerThanFileSize(position)
return line.rstrip('\n')
@classmethod
def _expand_before(cls, fd, position, buf_size):
before = []
while len(before) < 2:
position -= buf_size
if position <= 0:
lines = cls._read_split_lines(fd, 0, position + buf_size)
before = cls._join_results(lines, before)
break
lines = cls._read_split_lines(fd, position, buf_size)
before = cls._join_results(lines, before)
if not before:
raise EmptyFile()
return before[-1]
@classmethod
def _read_entire_line(cls, fd, offset, buf_size):
after = cls._expand_after(fd, offset)
before = cls._expand_before(fd, offset, buf_size)
return before + after, offset - len(before), offset + len(after)
@classmethod
def get_line_containing_offset(cls, fd, offset, buf_size):
"""
        returns the line which contains the specified offset,
        together with the offsets of the first and the last character of this line.
        if there is a '\n' at the specified offset, the previous line is returned
"""
return cls._read_entire_line(fd, offset, buf_size)
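Minimal usage sketch (hypothetical log path): recover the full line that byte offset 1000 falls into, together with the offsets of its first and last character:

with open('/var/log/app.log', 'r') as fd:
    line, start, end = ReadUtils.get_line_containing_offset(
        fd, 1000, ReadUtils.STANDARD_BUFFER_SIZE)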
|
juhi24/ilmaruuvi
|
ilmaruuvi/systemd_service.py
|
Python
|
mit
| 455
| 0.006593
|
#!python
# -*- coding: utf-8 -*-
from os import path
import shutil
def install():
    filename = 'ilmaruuvi.service'
install_path = path.join('/etc/systemd/system', filename)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, filename), 'r') as f:
service = f.read()
service = service.format(working_dir=here, exec_start=shutil.which('ilmaruuvi'))
    with open(install_path, 'w') as f:
f.write(service)
|
stefanseefeld/numba
|
numba/cuda/stubs.py
|
Python
|
bsd-2-clause
| 9,284
| 0.002801
|
"""
This scripts specifies all PTX special objects.
"""
from __future__ import print_function, absolute_import, division
import operator
import numpy
import llvmlite.llvmpy.core as lc
from numba import types, ir, typing, macro
from .cudadrv import nvvm
class Stub(object):
'''A stub object to represent special objects which is meaningless
outside the context of CUDA-python.
'''
_description_ = '<ptx special value>'
__slots__ = () # don't allocate __dict__
def __new__(cls):
raise NotImplementedError("%s is not instantiable" % cls)
def __repr__(self):
return self._description_
#-------------------------------------------------------------------------------
# SREG
SREG_SIGNATURE = typing.signature(types.int32)
class threadIdx(Stub):
'''
The thread indices in the current thread block, accessed through the
attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
range from 0 inclusive to the corresponding value of the attribute in
:attr:`numba.cuda.blockDim` exclusive.
'''
_description_ = '<threadIdx.{x,y,z}>'
x = macro.Macro('tid.x', SREG_SIGNATURE)
y = macro.Macro('tid.y', SREG_SIGNATURE)
z = macro.Macro('tid.z', SREG_SIGNATURE)
class blockIdx(Stub):
'''
The block indices in the grid of thread blocks, accessed through the
    attributes ``x``, ``y``, and ``z``. Each index is an integer spanning the
range from 0 inclusive to the corresponding value of the attribute in
:attr:`numba.cuda.gridDim` exclusive.
'''
_description_ = '<blockIdx.{x,y,z}>'
x = macro.Macro('ctaid.x', SREG_SIGNATURE)
y = macro.Macro('ctaid.y', SREG_SIGNATURE)
z = macro.Macro('ctaid.z', SREG_SIGNATURE)
class blockDim(Stub):
'''
The shape of a block of threads, as declared when instantiating the
kernel. This value is the same for all threads in a given kernel, even
if they belong to different blocks (i.e. each block is "full").
'''
x = macro.Macro('ntid.x', SREG_SIGNATURE)
y = macro.Macro('ntid.y', SREG_SIGNATURE)
z = macro.Macro('ntid.z', SREG_SIGNATURE)
class gridDim(Stub):
'''
    The shape of the grid of blocks, accessed through the attributes ``x``,
``y``, and ``z``.
'''
_description_ = '<gridDim.{x,y,z}>'
x = macro.Macro('nctaid.x', SREG_SIGNATURE)
y = macro.Macro('nctaid.y', SREG_SIGNATURE)
z = macro.Macro('nctaid.z', SREG_SIGNATURE)
#-------------------------------------------------------------------------------
# Grid Macro
def _ptx_grid1d(): pass
def _ptx_grid2d(): pass
def grid_expand(ndim):
"""grid(ndim)
Return the absolute position of the current thread in the entire
grid of blocks. *ndim* should correspond to the number of dimensions
declared when instantiating the kernel. If *ndim* is 1, a single integer
is returned. If *ndim* is 2 or 3, a tuple of the given number of
integers is returned.
Computation of the first integer is as follows::
cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
and is similar for the other two indices, but using the ``y`` and ``z``
attributes.
"""
if ndim == 1:
fname = "ptx.grid.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.grid.2d"
restype = types.UniTuple(types.int32, 2)
elif ndim == 3:
fname = "ptx.grid.3d"
restype = types.UniTuple(types.int32, 3)
else:
raise ValueError('argument can only be 1, 2, 3')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
grid = macro.Macro('ptx.grid', grid_expand, callable=True)
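For illustration, a typical kernel written against the grid() macro described above (the kernel and array names are hypothetical):

from numba import cuda

@cuda.jit
def increment_by_one(an_array):
    pos = cuda.grid(1)          # threadIdx.x + blockIdx.x * blockDim.x
    if pos < an_array.size:     # guard against threads past the end of the array
        an_array[pos] += 1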
#-------------------------------------------------------------------------------
# Gridsize Macro
def gridsize_expand(ndim):
"""
Return the absolute size (or shape) in threads of the entire grid of
blocks. *ndim* should correspond to the number of dimensions declared when
instantiating the kernel.
Computation of the first integer is as follows::
cuda.blockDim.x * cuda.gridDim.x
and is similar for the other two indices, but using the ``y`` and ``z``
attributes.
"""
if ndim == 1:
fname = "ptx.gridsize.1d"
restype = types.int32
elif ndim == 2:
fname = "ptx.gridsize.2d"
restype = types.UniTuple(types.int32, 2)
elif ndim == 3:
fname = "ptx.gridsize.3d"
restype = types.UniTuple(types.int32, 3)
else:
raise ValueError('argument can only be 1, 2 or 3')
return ir.Intrinsic(fname, typing.signature(restype, types.intp),
args=[ndim])
gridsize = macro.Macro('ptx.gridsize', gridsize_expand, callable=True)
#-------------------------------------------------------------------------------
# syncthreads
class syncthreads(Stub):
'''
Synchronize all threads in the same thread block. This function implements
the same pattern as barriers in traditional multi-threaded programming: this
function waits until all threads in the block call it, at which point it
returns control to all its callers.
'''
_description_ = '<syncthread()>'
# -------------------------------------------------------------------------------
# memory fences
class threadfence_block(Stub):
'''
A memory fence at thread block level
'''
_description_ = '<threadfence_block()>'
class threadfence_system(Stub):
'''
A memory fence at system level: across devices
'''
_description_ = '<threadfence_system()>'
class threadfence(Stub):
'''
A memory fence at device level
'''
_description_ = '<threadfence()>'
# -------------------------------------------------------------------------------
# shared
def _legalize_shape(shape):
if isinstance(shape, tuple):
return shape
elif isinstance(shape, int):
return (shape,)
else:
raise TypeError("invalid type for shape; got {0}".format(type(shape)))
def shared_array(shape, dtype):
shape = _legalize_shape(shape)
ndim = len(shape)
fname = "ptx.smem.alloc"
restype = types.Array(dtype, ndim, 'C')
sig = typing.signature(restype, types.UniTuple(types.intp, ndim), types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class shared(Stub):
"""
Shared memory namespace.
"""
_description_ = '<shared>'
array = macro.Macro('shared.array', shared_array, callable=True,
argnames=['shape', 'dtype'])
'''
Allocate a shared array of the given *shape* and *type*. *shape* is either
an integer or a tuple of integers representing the array's dimensions.
*type* is a :ref:`Numba type <numba-types>` of the elements needing to be
stored in the array.
The returned array-like object can be read and written to like any normal
device array (e.g. through indexing).
'''
#-------------------------------------------------------------------------------
# local array
def local_array(shape, dtype):
shape = _legalize_shape(shape)
ndim = len(shape)
fname = "ptx.lmem.alloc"
restype = types.Array(dtype, ndim, 'C')
sig = typing.signature(restype, types.UniTuple(types.intp, ndim), types.Any)
return ir.Intrinsic(fname, sig, args=(shape, dtype))
class local(Stub):
'''
Local memory namespace.
'''
_description_ = '<local>'
array = macro.Macro('local.array', local_array, callable=True,
argnames=['shape', 'dtype'])
'''
Allocate a local array of the given *shape* and *type*. The array is private
to the current thread, and resides in global memory. An array-like object is
returned which can be read and written to like any standard array (e.g.
through indexing).
'''
#-------------------------------------------------------------------------------
# const array
def const_array_like(ndarray):
fname = "ptx.cmem.arylike"
from .descriptor import CUDATargetDesc
aryty = CUDATargetDesc.typingctx.resolve_argument_type(ndarray)
sig = typing.signature(aryty, aryty)
return ir.Intrinsic(fname, sig, args=[n
|
mcleonard/sampyl
|
sampyl/samplers/NUTS.py
|
Python
|
mit
| 5,706
| 0.002103
|
"""
sampyl.samplers.NUTS
~~~~~~~~~~~~~~~~~~~~
This module implements No-U-Turn Sampler (NUTS).
:copyright: (c) 2015 by Mat Leonard.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division
import collections
from ..core import np
from .base import Sampler
from .hamiltonian import energy, leapfrog, initial_momentum
class NUTS(Sampler):
""" No-U-Turn sampler (Hoffman & Gelman, 2014) for sampling from a
probability distribution defined by a log P(theta) function.
For technical details, see the paper:
http://www.stat.columbia.edu/~gelman/research/published/nuts.pdf
:param logp: log P(X) function for sampling distribution
:param start:
Dictionary of starting state for the sampler. Should have one
element for each argument of logp.
:param grad_logp: (optional)
Function or list of functions that calculate grad log P(theta).
Pass functions here if you don't want to use autograd for the
gradients. If logp has multiple parameters, grad_logp must be
a list of gradient functions w.r.t. each parameter in logp.
If you wish to use a logp function that returns both the logp
value and the gradient, set grad_logp = True.
:param scale: (optional)
Dictionary with same format as start. Scaling for initial
momentum in Hamiltonian step.
:param step_size: (optional) *float.*
Initial step size for the deterministic proposals.
:param adapt_steps: (optional) *int.*
Integer number of steps used for adapting the step size to
achieve a target acceptance rate.
:param Emax: (optional) *float.* Maximum energy.
:param target_accept: (optional) *float.* Target acceptance rate.
:param gamma: (optional) *float.*
:param k: (optional) *float.* Scales the speed of step size
adaptation.
:param t0: (optional) *float.* Slows initial step size adaptation.
Example ::
def logp(x, y):
...
start = {'x': x_start, 'y': y_start}
nuts = sampyl.NUTS(logp, start)
chain = nuts.sample(1000)
"""
def __init__(self, logp, start,
step_size=0.25,
adapt_steps=100,
Emax=1000.,
target_accept=0.65,
gamma=0.05,
k=0.75,
t0=10.,
**kwargs):
super(NUTS, self).__init__(logp, start, **kwargs)
self.step_size = step_size / len(self.state.tovector())**(1/4.)
self.adapt_steps = adapt_steps
self.Emax = Emax
self.target_accept = target_accept
self.gamma = gamma
        self.k = k
self.t0 = t0
self.Hbar = 0.
self.ebar = 1.
self.mu = np.log(self.step_size*10)
def step(self):
""" Perform one NUTS step."""
H = self.model.logp
dH = self.model.grad
x = self.state
        r0 = initial_momentum(x, self.scale)
u = np.random.uniform()
e = self.step_size
xn, xp, rn, rp, y = x, x, r0, r0, x
j, n, s = 0, 1, 1
while s == 1:
v = bern(0.5)*2 - 1
if v == -1:
xn, rn, _, _, x1, n1, s1, a, na = buildtree(xn, rn, u, v, j, e, x, r0,
H, dH, self.Emax)
else:
_, _, xp, rp, x1, n1, s1, a, na = buildtree(xp, rp, u, v, j, e, x, r0,
H, dH, self.Emax)
if s1 == 1 and bern(np.min(np.array([1, n1/n]))):
y = x1
dx = (xp - xn).tovector()
s = s1 * (np.dot(dx, rn.tovector()) >= 0) * \
(np.dot(dx, rp.tovector()) >= 0)
n = n + n1
j = j + 1
if self._sampled >= self.adapt_steps:
self.step_size = self.ebar
else:
# Adapt step size
m = self._sampled + 1
w = 1./(m + self.t0)
self.Hbar = (1 - w)*self.Hbar + w*(self.target_accept - a/na)
log_e = self.mu - (m**.5/self.gamma)*self.Hbar
self.step_size = np.exp(log_e)
z = m**(-self.k)
self.ebar = np.exp(z*log_e + (1 - z)*np.log(self.ebar))
self.state = y
self._sampled += 1
return y
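In formulas, the adaptation branch above is the dual-averaging scheme of Hoffman & Gelman (2014); transcribed directly from the code, with m the number of samples so far and \delta the target acceptance rate:

w_m = \frac{1}{m + t_0}, \qquad
\bar{H}_m = (1 - w_m)\,\bar{H}_{m-1} + w_m\left(\delta - \frac{a}{n_a}\right),

\log \varepsilon_m = \mu - \frac{\sqrt{m}}{\gamma}\,\bar{H}_m, \qquad
\log \bar{\varepsilon}_m = m^{-\kappa}\log \varepsilon_m + \left(1 - m^{-\kappa}\right)\log \bar{\varepsilon}_{m-1}.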
def bern(p):
return np.random.uniform() < p
def buildtree(x, r, u, v, j, e, x0, r0, H, dH, Emax):
if j == 0:
x1, r1 = leapfrog(x, r, v*e, dH)
E = energy(H, x1, r1)
E0 = energy(H, x0, r0)
dE = E - E0
n1 = (np.log(u) - dE <= 0)
s1 = (np.log(u) - dE < Emax)
return x1, r1, x1, r1, x1, n1, s1, np.min(np.array([1, np.exp(dE)])), 1
else:
xn, rn, xp, rp, x1, n1, s1, a1, na1 = \
buildtree(x, r, u, v, j-1, e, x0, r0, H, dH, Emax)
if s1 == 1:
if v == -1:
xn, rn, _, _, x2, n2, s2, a2, na2 = \
buildtree(xn, rn, u, v, j-1, e, x0, r0, H, dH, Emax)
else:
_, _, xp, rp, x2, n2, s2, a2, na2 = \
buildtree(xp, rp, u, v, j-1, e, x0, r0, H, dH, Emax)
if bern(n2/max(n1 + n2, 1.)):
x1 = x2
a1 = a1 + a2
na1 = na1 + na2
dx = (xp - xn).tovector()
s1 = s2 * (np.dot(dx, rn.tovector()) >= 0) * \
(np.dot(dx, rp.tovector()) >= 0)
n1 = n1 + n2
return xn, rn, xp, rp, x1, n1, s1, a1, na1
|
DataONEorg/d1_python
|
lib_common/src/d1_common/ext/mimeparser.py
|
Python
|
apache-2.0
| 6,325
| 0.003794
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2017 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIME-Type Parser.
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared
against a list of media-ranges.
    - quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
    - best_match(): Choose the mime-type with the highest quality ('q') from a
list of candidates.
"""
from functools import reduce
__version__ = "0.1.2"
__author__ = "Joe Gregorio"
__email__ = "joe@bitworking.org"
__credits__ = ""
# TODO: Can probably delete this module.
def parse_mime_type(mime_type):
"""Carves up a mime-type and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/xhtml;q=0.5' would get parsed into:
    ('application', 'xhtml', {'q': '0.5'})
"""
parts = mime_type.split(";")
params = dict([tuple([s.strip() for s in param.split("=")]) for param in parts[1:]])
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a single "*"
# Turn it into a legal wildcard.
if full_type == "*":
full_type = "*/*"
(type, subtype) = full_type.split("/")
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Carves up a media range and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/\*;q=0.5' would get parsed into:
    ('application', '\*', {'q': '0.5'})
In addition this function also guarantees that there
is a value for 'q' in the params dictionary, filling it
in with a proper default if necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if (
"q" not in params
or "q" not in params
or not float(params["q"])
or float(params["q"]) > 1
or float(params["q"]) < 0
):
params["q"] = "1"
return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a given mime-type against a list of media_ranges that
have already been parsed by parse_media_range().
Returns a tuple of the fitness value and the value of the 'q' quality parameter of
the best match, or (-1, 0) if no match was found. Just as for quality_parsed(),
'parsed_ranges' must be a list of parsed media ranges.
"""
best_fitness = -1
best_fit_q = 0
(target_type, target_subtype, target_params) = parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
if (type == target_type or type == "*" or target_type == "*") and (
subtype == target_subtype or subtype == "*" or target_subtype == "*"
):
param_matches = reduce(
lambda x, y: x + y,
[
1
for (key, value) in list(target_params.items())
if key != "q" and key in params and value == params[key]
],
0,
)
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params["q"]
return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a given mime-type against a list of media_ranges that
have already been parsed by parse_media_range().
Returns the 'q' quality parameter of the best match, 0 if no match was found. This
function behaves the same as quality() except that 'parsed_ranges' must be a list of
parsed media ranges.
"""
return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
def quality(mime_type, ranges):
"""Returns the quality 'q' of a mime-type when compared against the media- ranges in
ranges. For example:
>>> quality('text/html', 'text/*;q=0.3, text/html;q=0.7, text/html;level=1,
text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(",")]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Takes a list of supported mime-types and finds the best match for all the media-
ranges listed in header. The value of header must be a string that conforms to the
format of the HTTP Accept: header. The value of 'supported' is a list of mime-types.
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
parsed_header = [parse_media_range(r) for r in header.split(",")]
weighted_matches = [
(fitness_and_quality_parsed(mime_type, parsed_header), mime_type)
for mime_type in supported
]
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][1] or ""
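
# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Quick demonstration of the functions above against a typical Accept header.
# The header and mime-types below are example values only.
if __name__ == "__main__":
    accept = "text/*;q=0.3, text/html;q=0.7, */*;q=0.5"
    print(parse_mime_type("application/xhtml;q=0.5"))
    # -> ('application', 'xhtml', {'q': '0.5'})
    print(quality("text/html", accept))
    # -> 0.7 (best fit is the explicit text/html range)
    print(best_match(["application/json", "text/html"], accept))
    # -> 'text/html'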
|
SUSE/kiwi
|
kiwi/system/kernel.py
|
Python
|
gpl-3.0
| 5,997
| 0
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import re
import os
from typing import (
NamedTuple, Optional
)
# project
from kiwi.command import Command
from kiwi.exceptions import KiwiKernelLookupError
kernel_type = NamedTuple(
'kernel_type', [
('name', str),
('filename', str),
('version', str)
]
)
xen_hypervisor_type = NamedTuple(
'xen_hypervisor_type', [
('filename', str),
('name', str)
]
)
class Kernel:
"""
    **Implements kernel lookup and extraction from given root tree**
:param str root_dir: root directory path name
:param list kernel_names: list of kernel names to search for
functions.sh::suseStripKernel() provides a normalized
file so that we do not have to search for many different
names in this code
"""
def __init__(self, root_dir: str):
self.root_dir = root_dir
self.kernel_names = self._setup_kernel_names_for_lookup()
def get_kernel(
self, raise_on_not_found: bool = False
) -> Optional[kernel_type]:
"""
Lookup kernel files and provide filename and version
:param bool raise_on_not_found: sets the method to raise an exception
if the kernel is not found
:raises KiwiKernelLookupError: if raise_on_not_found flag is active
and kernel is not found
:return: tuple with filename, kernelname and version
:rtype: tuple|None
"""
for kernel_name in self.kernel_names:
kernel_file = os.sep.join(
[self.root_dir, 'boot', kernel_name]
)
if os.path.exists(kernel_file):
version_match = re.match(
'.*?-(.*)', os.path.basename(kernel_file)
)
if version_match:
version = version_match.group(1)
return kernel_type(
name=os.path.basename(os.path.realpath(kernel_file)),
filename=kernel_file,
version=version
)
if raise_on_not_found:
raise KiwiKernelLookupError(
'No kernel found in {0}, searched for {1}'.format(
os.sep.join([self.root_dir, 'boot']),
','.join(self.kernel_names)
)
)
return None
def get_xen_hypervisor(self) -> Optional[xen_hypervisor_type]:
"""
Lookup xen hypervisor and provide filename and hypervisor name
:return: tuple with filename and hypervisor name
:rtype: tuple|None
"""
xen_hypervisor = self.root_dir + '/boot/xen.gz'
if os.path.exists(xen_hypervisor):
return xen_hypervisor_type(
filename=xen_hypervisor,
name='xen.gz'
)
return None
def copy_kernel(self, target_dir: str, file_name: str = None) -> None:
"""
Copy kernel to specified target
If no file_name is given the target filename is set
as kernel-<kernel.version>.kernel
:param str target_dir: target path name
        :param str file_name: base filename in target
"""
kernel = self.get_kernel()
if kernel:
if not file_name:
file_name = 'kernel-' + kernel.version + '.kernel'
target_file = ''.join(
[target_dir, '/', file_name]
)
Command.run(['cp', kernel.filename, target_file])
def copy_xen_hypervisor(
self, target_dir: str, file_name: str = None
) -> None:
"""
Copy xen hypervisor to specified target
If no file_name is given the target filename is set
as hypervisor-<xen.name>
:param str target_dir: target path name
        :param str file_name: base filename in target
"""
xen = self.get_xen_hypervisor()
if xen:
if not file_name:
file_name = 'hypervisor-' + xen.name
target_file = ''.join(
[target_dir, '/', file_name]
)
Command.run(['cp', xen.filename, target_file])
def _setup_kernel_names_for_lookup(self):
"""
The kernel image name is different per arch and distribution
This method returns a list of possible kernel image names in
order to search and find one of them
:return: list of kernel image names
:rtype: list
"""
kernel_names = []
kernel_dirs = sorted(
os.listdir(''.join([self.root_dir, '/lib/modules']))
)
if kernel_dirs:
# append lookup for the real kernel image names
# depending on the arch and os they are different
# in their prefix
kernel_prefixes = [
'uImage', 'Image', 'zImage', 'vmlinuz', 'image', 'vmlinux'
]
kernel_name_pattern = '{prefix}-{name}'
for kernel_prefix in kernel_prefixes:
for kernel_dir in kernel_dirs:
kernel_names.append(
kernel_name_pattern.format(
prefix=kernel_prefix, name=kernel_dir
)
)
return kernel_names
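
# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Typical way the class above is driven. The root path is a made-up example;
# it must point at an unpacked image root tree that contains lib/modules and boot/.
if __name__ == '__main__':
    kernel = Kernel('/var/tmp/kiwi-root')
    kernel_info = kernel.get_kernel()
    if kernel_info:
        # kernel_type fields: name, filename, version
        print(kernel_info.name, kernel_info.version)
        kernel.copy_kernel('/var/tmp/staging')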
|
sol-ansano-kim/medic
|
plugins/Tester/faceAssigned.py
|
Python
|
mit
| 1,666
| 0.001801
|
import medic
from maya import OpenMaya
class FaceAssigned(medic.PyTester):
def __init__(self):
super(FaceAssigned, self).__init__()
def Name(self):
return "FaceAssigned"
def Description(self):
return "Face assigned mesh(s)"
    def Match(self, node):
        return node.object().hasFn(OpenMaya.MFn.kMesh) or node.object().hasFn(OpenMaya.MFn.kNurbsSurfaceGeom)
@staticmethod
def __TestObjGrp(node, parentPlug, childPlug):
dg = node.dg()
if not dg.hasAttribute(parentPlug) or not dg.hasAttribute(childPlug):
return False
io_plug = node.dg().findPlug(parentPlug)
        og_obj = node.dg().attribute(childPlug)
for i in range(io_plug.numElements()):
elm = io_plug.elementByPhysicalIndex(i)
og_plug = elm.child(og_obj)
if not og_plug.numConnectedElements():
continue
for j in range(og_plug.numElements()):
gelm = og_plug.elementByPhysicalIndex(j)
arr = OpenMaya.MPlugArray()
if not gelm.connectedTo(arr, False, True):
continue
for n in range(arr.length()):
if arr[n].node().hasFn(OpenMaya.MFn.kShadingEngine):
return True
return False
def test(self, node):
if FaceAssigned.__TestObjGrp(node, "compInstObjGroups", "compObjectGroups"):
return medic.PyReport(node)
if FaceAssigned.__TestObjGrp(node, "instObjGroups", "objectGroups"):
return medic.PyReport(node)
return None
def Create():
return FaceAssigned()
|
UbiCastTeam/candies
|
candies2/buttons.py
|
Python
|
lgpl-3.0
| 9,259
| 0.007884
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gobject
import clutter
from text import TextContainer
from roundrect import RoundRectangle, OutlinedRoundRectangle
from clutter import cogl
class ClassicButton(TextContainer):
__gtype_name__ = 'ClassicButton'
def __init__(self, label=' ', margin=0, padding=6, texture=None, rounded=True, crypted=False):
TextContainer.__init__(self, label, margin=margin, padding=padding, texture=texture, rounded=rounded, crypted=crypted)
self.set_reactive(True)
def set_lock(self, lock):
self.set_reactive(not lock)
self.set_opacity(128 if lock else 255)
class ImageButton(ClassicButton):
__gtype_name__ = 'ImageButton'
def __init__(self, label=' ', image_src=None, margin=0, padding=10, spacing=10, texture=None, has_text=True, expand=False):
ClassicButton.__init__(self, label, margin=margin, padding=padding, texture=texture)
self.spacing = spacing
self._has_text = has_text
self._expand = expand
self.image = clutter.Texture()
if image_src:
self.image.set_from_file(image_src)
self.image.set_parent(self)
self.set_font_name('16')
self.set_font_color('#000000ff')
self.set_inner_color('#aaaaaaff')
self.set_border_color('#888888ff')
def set_image_src(self, image_src):
self.image.set_from_file(image_src)
def do_allocate(self, box, flags):
btn_width = box.x2 - box.x1
btn_height = box.y2 - box.y1
inner_width = btn_width - 2*self._padding.x
inner_height = btn_height - 2*self._padding.y
# allocate background
self._allocate_rect(0, 0, btn_width, btn_height, flags)
# allocate image
if self._has_text:
label_height = ClassicButton.do_get_preferred_height(self, for_width=inner_width)[1]
remaining_height = btn_height - label_height - self.spacing
else:
label_height = 0
remaining_height = inner_height
image_preferred_size = self.image.get_preferred_size()
if image_preferred_size[3] > 0:
image_ratio = float(image_preferred_size[2]) / float(image_preferred_size[3])
if self._expand:
image_height = remaining_height
image_width = round(float(image_height) * float(image_ratio))
if image_width > inner_width:
image_width = inner_width
image_height = round(float(image_width) / float(image_ratio))
else:
image_height = image_preferred_size[3]
if remaining_height < image_height:
image_height = remaining_height
image_width = round(float(image_height) * float(image_ratio))
if image_width > inner_width:
image_width = inner_width
image_height = round(float(image_width) / float(image_ratio))
else:
image_width = 0
image_height = 0
x_padding = round((inner_width - image_width) / 2.0)
y_padding = round((remaining_height - image_height) / 2.0)
image_box = clutter.ActorBox()
image_box.x1 = self._padding.x + x_padding
image_box.y1 = self._padding.y + y_padding
image_box.x2 = image_box.x1 + image_width
image_box.y2 = image_box.y1 + image_height
self.image.allocate(image_box, flags)
# allocate label
if self._has_text:
base_y = image_height + self.spacing
label_height = btn_height - base_y
self._allocate_label(0, base_y, btn_width, label_height, flags)
clutter.Actor.do_allocate(self, box, flags)
def do_set_property(self, pspec, value):
return ClassicButton.do_set_property(self, pspec, value)
def do_get_property(self, pspec):
return ClassicButton.do_get_property(self, pspec)
def do_paint(self):
self.rect.paint()
self.image.paint()
if self._has_text:
self.label.paint()
def do_foreach(self, func, data=None):
ClassicButton.do_foreach(self, func, data)
func(self.image, data)
def do_destroy(self):
self.unparent()
if hasattr(self, 'image'):
if self.image:
self.image.unparent()
self.image.destroy()
try:
ClassicButton.do_destroy(self)
except:
pass
gobject.type_register(ImageButton)
if __name__ == '__main__':
from flowbox import FlowBox
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
#toto = cogl.Material()
texture_path = '/home/aviolo/sources/easycast/unstable/easycast/images/buttons/copy.png'
texture = clutter.cogl.texture_new_from_file(texture_path, clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
#toto.set_layer(0, texture)
#stage.add(toto)
t = ClassicButton('test efopkzekfopzf opfzeopfkz opfzegjzeh guzehiug ezhgiozeghizeogh eziogzeoighze oigzeiogzeig opg jzeopgjzepogzzeogjze zeigergre ergerg', texture = texture, rounded = True)
t.set_size(640, 480)
stage.add(t)
'''
# Main flowbox
box0 = FlowBox()
box0.set_size(640, 640)
# Invisible rectangle for top margin
r = clutter.Rectangle()
r.set_size(640, 1)
box0.add(r)
# Button at natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
b.set_size(*b.get_preferred_size()[2:])
box0.add(b)
# Button larger than natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
b.set_size(630, 50)
box0.add(b)
# Intermediate flowbox to force line wrapping
box1 = FlowBox()
box1.set_size(640, 50)
box0.add(box1)
# Button fitter than natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.')
b.set_size(420, 50)
box1.add(b)
# Button more fitter than natural size
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.')
b.set_size(210, 50)
box0.add(b)
# Intermediate flowbox to force line wrapping
box2 = FlowBox()
box2.set_size(640, 50)
box0.add(box2)
    # Button at minimal size (just suspension marks)
b = ClassicButton('Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.')
b.set_size(*b.get_preferred_size()[:2])
box2.add(b)
# Invisible rectangle for bottom margin
r = clutter.Rectangle()
r.set_size(640, 1)
box0.add(r)
# Testing buttons
b = ClassicButton('A')
b.set_size(15, 15)
b.set_position(5, 450)
stage.add(b)
b = ClassicButton('B')
b.set_size(25, 25)
    b.set_position(50, 425)
stage.add(b)
b = ClassicButton('C')
b.set_font_color('Yellow')
b.set_size(50, 50)
b.set_position(125, 375)
stage.add(b)
b = ClassicButton('D')
b.set_border_width(10)
b.set_border_color('Green')
b.set_size(100, 100)
b.set_position(250, 325)
stage.add(b)
b = ClassicButton('E', texture=texture)
b.set_inner_color('Pink')
b.set_size(170, 170)
b.set_position(425, 210)
stage.add(b)
stage.add(box0)
'''
test_memory_usage = False
if test_memory_usage:
import gc
gc.set_debug(gc.DEBUG_LEAK)
from pprint import pprint
max_count = 5000
#texture_path = '/home/sdiemer/sources/candies/main/candies2/effect_light.png'
texture = clutter.cogl.texture_new_from_file(texture_path, clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
texture = None
def create_test_object():
t = ClassicButton('test efopkzekfopzf o
|
angr/cle
|
cle/backends/elf/relocation/pcc64.py
|
Python
|
bsd-2-clause
| 4,448
| 0.004946
|
import logging
from . import generic
from .elfreloc import ELFReloc
l = logging.getLogger(name=__name__)
# http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.pdf
arch = 'PPC64'
class R_PPC64_JMP_SLOT(ELFReloc):
def relocate(self):
if self.owner.is_ppc64_abiv1:
# R_PPC64_JMP_SLOT
# http://osxr.org/glibc/source/sysdeps/powerpc/powerpc64/dl-machine.h?v=glibc-2.15#0405
# copy an entire function descriptor struct
addr = self.resolvedby.owner.memory.unpack_word(self.resolvedby.relative_addr)
toc = self.resolvedby.owner.memory.unpack_word(self.resolvedby.relative_addr + 8)
aux = self.resolvedby.owner.memory.unpack_word(self.resolvedby.relative_addr + 16)
self.owner.memory.pack_word(self.relative_addr, addr)
self.owner.memory.pack_word(self.relative_addr + 8, toc)
            self.owner.memory.pack_word(self.relative_addr + 16, aux)
else:
self.owner.memory.pack_word(self.relative_addr, self.resolvedby.rebased_addr)
return True
class R_PPC64_RELATIVE(generic.GenericRelativeReloc):
pass
class R_PPC64_IRELATIVE(generic.GenericIRelativeReloc):
pass
class R_PPC64_ADDR64(generic.GenericAbsoluteAddendReloc):
pass
class R_PPC64_GLOB_DAT(generic.GenericJumpslotReloc):
pass
class R_PPC64_DTPMOD64(generic.GenericTLSModIdReloc):
pass
class R_PPC64_DTPREL64(generic.GenericTLSDoffsetReloc):
pass
class R_PPC64_TPREL64(generic.GenericTLSOffsetReloc):
pass
class R_PPC64_REL24(ELFReloc):
"""
Relocation Type: 10
Calculation: (S + A - P) >> 2
Field: low24*
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
P = self.rebased_addr
return (S + A - P) >> 2
def relocate(self):
if not self.resolved:
return False
instr = self.owner.memory.unpack_word(self.relative_addr, size=4) & 0b11111100000000000000000000000011
imm = self.value & 0xFFFFFF
self.owner.memory.pack_word(self.relative_addr, instr | (imm << 2), size=4)
return True
class R_PPC64_TOC16_LO(ELFReloc):
"""
Relocation Type: 48
Calculation: #lo(S + A - .TOC.)
Field: half16
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return (S + A) & 0xFFFF
TOC = self.owner.ppc64_initial_rtoc
return (S + A - TOC) & 0xFFFF
def relocate(self):
if not self.resolved:
return False
self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
return True
class R_PPC64_TOC16_HI(ELFReloc):
"""
Relocation Type: 49
Calculation: #hi(S + A - .TOC.)
Field: half16
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return ((S + A) >> 16) & 0xFFFF
TOC = self.owner.ppc64_initial_rtoc
return ((S + A - TOC) >> 16) & 0xFFFF
def relocate(self):
if not self.resolved:
return False
self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
return True
class R_PPC64_TOC16_HA(ELFReloc):
"""
Relocation Type: 50
Calculation: #ha(S + A - .TOC.)
Field: half16
"""
@property
def value(self):
A = self.addend
S = self.resolvedby.rebased_addr
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return ((((S + A) >> 16) + (1 if ((S + A) & 0x8000) else 0)) & 0xFFFF)
TOC = self.owner.ppc64_initial_rtoc
return ((((S + A - TOC) >> 16) + (1 if ((S + A - TOC) & 0x8000) else 0)) & 0xFFFF)
def relocate(self):
if not self.resolved:
return False
self.owner.memory.pack_word(self.relative_addr, self.value, size=2)
return True
class R_PPC64_TOC(ELFReloc):
"""
Relocation Type: 51
Calculation: .TOC.
Field: doubleword64
"""
@property
def value(self):
if self.owner.ppc64_initial_rtoc is None:
l.warning(".TOC. value not found")
return 0
return self.owner.ppc64_initial_rtoc
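
# --- Illustrative sketch (editor addition, not part of the original module) ---
# The #lo/#hi/#ha half-word operators used by the TOC16 relocations above can
# be spelled out as small helpers; #ha carries a +1 when the low half word has
# its sign bit set, so the (high, low) pair reassembles the full offset once
# the low half is sign-extended by the consuming instruction.
def _lo(x):
    return x & 0xFFFF
def _hi(x):
    return (x >> 16) & 0xFFFF
def _ha(x):
    return ((x >> 16) + (1 if x & 0x8000 else 0)) & 0xFFFF
if __name__ == '__main__':
    offset = 0x12348765           # example S + A - .TOC. value
    assert _lo(offset) == 0x8765
    assert _hi(offset) == 0x1234
    assert _ha(offset) == 0x1235  # carry because 0x8765 has bit 15 set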
|
ai-se/Transfer-Learning
|
src/utils/misc_utils.py
|
Python
|
unlicense
| 344
| 0
|
def flatten(x):
"""
    Takes an N times nested list of lists like [[a,b],[c, [d, e]],[f]]
    and returns a single list [a,b,c,d,e,f]
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, str):
result.extend(flatten(el))
else:
result.append(el)
return result
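
# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# The behaviour described in the docstring above:
if __name__ == "__main__":
    assert flatten([["a", "b"], ["c", ["d", "e"]], ["f"]]) == ["a", "b", "c", "d", "e", "f"]
    # Strings are treated as atoms rather than iterables:
    assert flatten(["ab", ["cd"]]) == ["ab", "cd"]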
|
brentp/gemini
|
gemini/annotation_provenance/gene_table/combined_gene_table.py
|
Python
|
mit
| 9,693
| 0.018983
|
"""
For a detailed gene table and a summary gene table
"""
#!/usr/bin/env python
from collections import defaultdict
filename = 'detailed_gene_table_v75'
detailed_out = open(filename, 'w')
file = 'summary_gene_table_v75'
summary_out = open(file, 'w')
# write out files for detailed and summary gene table
detailed_out.write("\t".join(["Chromosome","Gene_name","Is_hgnc","Ensembl_gene_id","Ensembl_transcript_id","Biotype",
"Transcript_status","CCDS_id","HGNC_id","CDS_length","Protein_length",
"Transcript_start","Transcript_end","strand","Synonyms",
"Rvis_pct","entrez_gene_id","mammalian_phenotype_id"]))
detailed_out.write("\n")
summary_out.write("\t".join(["Chromosome","Gene_name","Is_hgnc","Ensembl_gene_id",
"HGNC_id","Synonyms", "Rvis_pct","Strand","Transcript_min_start","Transcript_max_end","Mammalian_phenotype_id"]))
summary_out.write("\n")
mouse_phenotype = defaultdict(list)
genic_intolerance = defaultdict(list)
keygene = []
list_hgnc = []
#initializing values for the summary gene table
transcript_min = defaultdict(list)
transcript_max = defaultdict(list)
lines_seen = set()
for line in open("genic_intolerance_dataset2", 'r'):
if line.startswith("#") is False:
field = line.strip().split("\t")
name = str(field[0])
score = str(field[1])
percentile = str(field[2])
(key,value) = (name, percentile)
genic_intolerance[name].append(percentile)
#Phenotype data from MGI - Jax
for row in open("HMD_HumanPhenotype", 'r'):
col = row.strip().split("\t")
#Remove leading white spaces in the column
entrez_id = str(col[1]).lstrip()
#Remove leading white spaces in the column & join MP terms with a comma
mph = str(col[5]).lstrip().replace(' ',',') if str(col[5]) != '' else None
(key,value) = (entrez_id, mph)
mouse_phenotype[entrez_id].append(mph)
# Dictionary for summary gene table to handle transcript min, max co-ordinates
for each in open("raw_gene_table", 'r'):
if each.startswith("Chromosome") is False:
k = each.strip().split("\t")
chr = "chr"+str((k[0]))
ens = str(k[2])
start = str(k[10])
end = str(k[11])
transcript_min[(chr,ens)].append(start)
transcript_max[(chr,ens)].append(end)
for each in open("raw_gene_table", 'r'):
if each.startswith("Chromosome") is False:
k = each.strip().split("\t")
chrom = "chr"+str((k[0]))
hgnc = str(k[1])
ens_geneid = str(k[2])
ens_transid = str(k[3])
trans_biotype = str(k[4])
status = str(k[5])
ccds_id = str(k[6]) #these id's are unique to transcripts
hgnc_id = str(k[7])
cds_len = str(k[8])
protein_len = str(k[9])
transcript_start = str(k[10])
transcript_end = str(k[11])
strand = str(k[12])
#remove space between names
previous = str(k[13]).replace(" ","")
synonyms = str(k[14]).replace(" ","")
entrez = str(k[15])
# sort all transcript start and end positions for a gene (use ens_geneid, since HGNC is not always true)
# Capture the first and the last position from the sorted list to give min, max
if (chrom,ens_geneid) in transcript_min:
minmum = sorted(transcript_min[(chrom,ens_geneid)])[0]
if (chrom,ens_geneid) in transcript_max:
maxmum = sorted(transcript_max[(chrom,ens_geneid)])[-1]
rvis = genic_intolerance[hgnc][0] if hgnc in genic_intolerance else None
pheno = mouse_phenotype[entrez] if entrez in mouse_phenotype else None
if pheno is not None and len(pheno) == 1:
phenotype = pheno[0]
elif pheno is None:
phenotype = "None"
else:
if len(pheno) > 1:
#convert the list to a string
string = ",".join(pheno)
# store a None for multiple Nones
if "None" in string and "MP:" not in string:
phenotype = None
#remove redundancy in MP terms
if "None" not in string and "MP:" in string:
phenotype = ",".join(set(string.split(",")))
#remove nones when MP terms are available
if "None" in string and "MP:" in string:
|
phen = string.split(",")
phenotype = ",".join([x for x in phen if x != "None"])
if hgnc != "None":
list_hgnc.append(hgnc)
        #we don't want string of Nones
if "None" in previous and "None" in synonyms and "None" in hgnc:
string = None
else:
# We would like all genes names to be put together
gene_string = hgnc+","+previous+","+synonyms
#get rid of Nones in gene strings
if gene_string.startswith("None"):
string = gene_string.replace("None,","")
else:
string = gene_string.replace(",None","")
#Nonetype object has no attribute split
if string is not None:
genes = set(string.split(","))
if len(genes) > 1:
# We would like to represent each member of the gene list as a key and the remainder as synonyms each time
for each in genes:
keygene = set([each])
synonym = genes.difference(keygene)
gene_name = ','.join(keygene)
other_names = ','.join(synonym)
hgnc_flag = "1" if gene_name in list_hgnc else "0"
# only when the gene is a HGNC name, it would have an hgnc id
is_hgnc_id = hgnc_id if gene_name in list_hgnc else "None"
# handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
# writing to outfile for the summary gene table
line = "\t".join([chrom,gene_name,hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# Writing to out for detailed gene table
detailed_out.write("\t".join([chrom,gene_name,hgnc_flag,ens_geneid,ens_transid,trans_biotype,
status,ccds_id,is_hgnc_id,cds_len,protein_len,transcript_start,
transcript_end,strand,other_names,str(rvis),entrez,str(phenotype)]))
detailed_out.write("\n")
# if there is one gene name in the list, we just want it to be the key
elif len(genes) == 1:
gene_name = ','.join(genes)
other_names = "None"
hgnc_flag = "1" if gene_name in list_hgnc else "0"
is_hgnc_id = hgnc_id if gene_name in list_hgnc else "None"
# handling duplicate lines (due to transcripts) in summary table (which we don't care for in this table)
# writing to outfile for the summary gene table
line = "\t".join([chrom,str(gene_name),hgnc_flag,ens_geneid,is_hgnc_id,
other_names,str(rvis),strand,minmum,maxmum,str(phenotype)])
if line not in lines_seen:
summary_out.write(line)
summary_out.write("\n")
lines_seen.add(line)
# write to out for detailed gene table
detailed_out.write("\t".join([chrom,str(gene_name),hgnc_flag,ens_geneid,ens_transid,trans_biotype,
status,
|
nicolas471/Lecole
|
main/migrations/0015_auto_20160404_1648.py
|
Python
|
gpl-3.0
| 433
| 0.002309
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('main', '0014_generalsetting_titulo'),
    ]
operations = [
migrations.AlterField(
model_name='imagen',
name='img',
            field=models.ImageField(upload_to=b'imgenEvento', verbose_name=b'Ruta'),
),
]
|
kmahyyg/learn_py3
|
modules/mymodule2/__init__.py
|
Python
|
agpl-3.0
| 107
| 0.018692
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def mymodules2():
print("test module2!")
mymodules2()
|
wdv4758h/ZipPy
|
lib-python/3/idlelib/PyShell.py
|
Python
|
bsd-3-clause
| 52,145
| 0.001285
|
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import traceback
import types
import linecache
from code import InteractiveInterpreter
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,
lineno, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
        # a temporary file save feature the save breaks functionality
        # needs to be re-verified, since the breaks at the time the
        # temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
|
transifex/hermes
|
test_hermes/test_client.py
|
Python
|
bsd-3-clause
| 17,562
| 0.000228
|
from __future__ import absolute_import
from Queue import Empty
from random import randint
from time import sleep
import os
from unittest import TestCase, skipUnless
from signal import SIGINT, SIGCHLD
from select import error as select_error
from os import getpid
from mock import MagicMock, patch, PropertyMock
from psycopg2 import OperationalError
from hermes.client import Client
from hermes.components import Component
from hermes.connectors import PostgresConnector
from hermes.exceptions import InvalidConfigurationException
from hermes.strategies import TERMINATE
_WATCH_PATH = '/tmp/hermes_test'
_FAILOVER_FILES = ('recovery.conf', 'recovery.done')
_POSTGRES_DSN = {
'database': 'test_hermes'
}
class RunningClientTestCase(TestCase):
def setUp(self):
# Create the folder
if not os.path.exists(_WATCH_PATH):
os.makedirs(_WATCH_PATH)
self.client = Client(_POSTGRES_DSN, _WATCH_PATH, _FAILOVER_FILES)
self.client.log = MagicMock()
def tearDown(self):
if self.client.is_alive():
self.client.terminate()
# Remove the folder
if not os.path.exists(_WATCH_PATH):
os.removedirs(_WATCH_PATH)
@skipUnless(os.environ.get('ALL_TESTS', False),
"Unittests only")
def test_client_directory_watcher_when_server_master(self):
# We have to monkey patch the 'is_server_master' function to ensure
# we can control the test path
old_func = PostgresConnector.is_server_master
PostgresConnector.is_server_master = MagicMock(return_value=True)
self.client._start_components = MagicMock(return_value=None)
# Start the client and allow to settle
self.client._start_observer()
sleep(3)
# Create a file and detect if the RiverClient has been informed
file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
with open(file_path, 'a'):
os.utime(file_path, None)
# Give the event time to emit
sleep(3)
self.assertTrue(self.client._start_components.called)
PostgresConnector.is_server_master = old_func
@skipUnless(os.environ.get('ALL_TESTS', False),
"Unittests only")
def test_client_directory_watcher_when_server_slave(self):
# We have to monkey patch the 'is_server_master' function to ensure
# we can control the test path
old_func = PostgresConnector.is_server_master
PostgresConnector.is_server_master = MagicMock(return_value=False)
# Start the observer and allow to settle
self.client.directory_observer.start()
sleep(3)
# Create a file and detect if the RiverClient has been informed
file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
with open(file_path, 'a'):
os.utime(file_path, None)
# Give the event time to emit
sleep(3)
self.assertFalse(self.client.is_alive())
PostgresConnector.is_server_master = old_func
def test_client_directory_watcher_when_file_incorrect(self):
# We have to monkey patch the 'is_server_master' function to ensure
# we can control the test path
old_func = PostgresConnector.is_server_master
PostgresConnector.is_server_master = MagicMock(return_value=True)
# Start the observer and allow to settle
self.client.directory_observer.start()
sleep(3)
# Create a file and detect if the RiverClient has been informed
file_path = '{}/{}'.format(_WATCH_PATH, 'random_file.rand')
with open(file_path, 'a'):
os.utime(file_path, None)
# Give the event time to emit
sleep(3)
self.assertFalse(PostgresConnector.is_server_master.called)
PostgresConnector.is_server_master = old_func
class ClientComponentTestCase(TestCase):
def test_add_listener_throws_on_non_component(self):
client = Client(MagicMock(), MagicMock())
self.assertRaises(InvalidConfigurationException,
client.add_listener,
3)
def test_add_processor_throws_on_non_component(self):
client = Client(MagicMock(), MagicMock())
self.assertRaises(InvalidConfigurationException,
client.add_processor,
3)
def test_add_listener_accepts_component(self):
client = Client(MagicMock())
client.add_listener(Component(MagicMock(),
MagicMock(),
MagicMock()))
self.assertIsInstance(client._listener, Component)
def test_add_processor_accepts_component(self):
client = Client(MagicMock(), MagicMock())
client.add_processor(Component(MagicMock(),
MagicMock(),
MagicMock()))
self.assertIsInstance(client._processor, Component)
class ValidateComponentsTestCase(TestCase):
def test_throws_on_non_listener(self):
client = Client(MagicMock())
client._processor = 3
self.assertRaises(InvalidConfigurationException,
client._validate_components)
def test_throws_on_non_processor(self):
client = Client(MagicMock())
client._listener = 3
self.assertRaises(InvalidConfigurationException,
client._validate_components)
def test_throws_on_different_queue(self):
client = Client(MagicMock())
client._listener = MagicMock()
client._processor = MagicMock()
client._listener.error_queue = MagicMock(
return_value=True
)
        client._processor.error_queue = MagicMock(
return_value=False
)
self.assertRaises(InvalidConfigurationException,
client._validate_components)
class WatchdogObserverTestCase(TestCase):
def setUp(self):
        self.client = Client(MagicMock())
        self.client.directory_observer = MagicMock()
def test_start_schedules_obeserver_if_watch_path(self):
self.client._watch_path = randint(50, 1000)
self.client._start_observer()
self.client.directory_observer.schedule.assert_called_once_with(
self.client, self.client._watch_path, recursive=False
)
        self.client.directory_observer.start.assert_called_once_with()
def test_start_not_schedule_observer_if_none_watch_path(self):
self.client._watch_path = None
self.client._start_observer()
self.assertEqual(self.client.directory_observer.schedule.call_count, 0)
self.assertEqual(self.client.directory_observer.start.call_count, 0)
def test_stop_stops_observer_if_watch_path_and_observer(self):
self.client.directory_observer.is_alive.return_value = True
self.client._watch_path = True
self.client._stop_observer()
self.client.directory_observer.stop.assert_called_once_with()
def test_stop_does_not_stop_observer_on_none(self):
self.client._watch_path = None
self.client._stop_observer()
self.assertEqual(self.client.directory_observer.stop.call_count, 0)
def test_stop_does_not_stop_on_dead(self):
self.client._watch_path = True
self.client.directory_observer.is_alive.return_value = False
self.client._stop_observer()
self.assertEqual(self.client.directory_observer.stop.call_count, 0)
class ClientStartupTestCase(TestCase):
def test_startup_functions_are_called(self):
with patch('multiprocessing.Process.start') as mock_process_start:
with patch('hermes.client.signal') as mock_signal:
client = Client(MagicMock())
client._validate_components = MagicMock()
client.start()
self.assertEqual(mock_signal.call_count, 2)
client._validate_components.assert_called_once_with()
mock_process_start.assert_called_once_with()
def test_initial_start_components(self):
client = Client(MagicMock())
client._processor = MagicMock()
client._processor.is_alive.return_value = False
|
wtolson/gnsq
|
gnsq/backofftimer.py
|
Python
|
bsd-3-clause
| 899
| 0
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import random
class BackoffTimer(object):
def __init__(self, ratio=1, max_interval=None, min_interval=None):
self.c = 0
self.ratio = ratio
self.max_interval = max_interval
self.min_interval = min_interval
def is_reset(self):
return self.c == 0
def reset(self):
self.c = 0
return self
def success(self):
self.c = max(self.c - 1, 0)
return self
def failure(self):
self.c += 1
return self
def get_interval(self):
k = pow(2, self.c) - 1
        interval = random.random() * k * self.ratio
if self.max_interval is not None:
interval = min(interval, self.max_interval)
        if self.min_interval is not None:
interval = max(interval, self.min_interval)
return interval
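
# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# get_interval() draws uniformly from [0, (2**c - 1) * ratio] before clamping,
# i.e. exponential backoff with full jitter. Parameter values below are
# examples only.
if __name__ == '__main__':
    timer = BackoffTimer(ratio=0.25, max_interval=30)
    timer.failure().failure().failure()  # three consecutive failures -> c == 3
    print(timer.get_interval())          # uniform in [0, 7 * 0.25] = [0, 1.75]
    timer.success()                      # one success -> c == 2
    print(timer.get_interval())          # uniform in [0, 3 * 0.25] = [0, 0.75]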
|
tesb/flask-crystal
|
venv/Lib/site-packages/pip/commands/wheel.py
|
Python
|
apache-2.0
| 7,402
| 0.003513
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.log import logger
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.util import normalize_path
from pip.wheel import WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
class WheelCommand(Command):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not recompiling your software during every install.
For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] <vcs project url> ...
%prog [options] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.")
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# confirm requirements
try:
import wheel.bdist_wheel
except ImportError:
raise CommandError("'pip wheel' requires the 'wheel' package. To fix this, run: pip install wheel")
try:
import pkg_resources
except ImportError:
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info support."
" To fix this, run: pip install --upgrade setuptools"
)
else:
if not hasattr(pkg_resources, 'DistInfoDistribution'):
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info "
"support. To fix this, run: pip install --upgrade "
"setuptools"
)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
if options.use_mirrors:
logger.deprecated("1.7",
"--use-mirrors has been deprecated and will be removed"
" in the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
if options.mirrors:
logger.deprecated("1.7",
"--mirrors has been deprecated and will be removed in "
" the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
index_urls += options.mirrors
session = self._build_session(options)
finder = PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_unverified=options.allow_all_unverified,
allow_all_prereleases=options.pre,
process_dependency_links=
options.process_dependency_links,
session=session,
)
options.build_dir = os.path.abspath(options.build_dir)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=None,
download_dir=None,
download_cache=options.download_cache,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
session=session,
wheel_download_dir=options.wheel_dir
)
# make the wheelhouse
if not os.path.exists(options.wheel_dir):
os.makedirs(options.wheel_dir)
#parse args and/or requirements files
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for filename in options.requirements:
for req in parse_requirements(
filename,
finder=finder,
options=options,
session=session):
if req.editable:
logger.notify("ignoring %s" % req.url)
continue
requirement_set.add_requirement(req)
#fail if no requirements
if not requirement_set.has_requirements:
opts = {'name': self.name}
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.error(msg)
return
try:
#build wheels
wb = WheelBuilder(
                requirement_set,
                finder,
options.wheel_dir,
build_options = options.build_options or [],
global_options = options.global_options or []
)
wb.build()
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
                requirement_set.cleanup_files()
|
holloway/docvert-python3
|
core/pipeline_type/generatepostconversioneditorfiles.py
|
Python
|
gpl-3.0
| 263
| 0.003802
|
# -*- coding: utf-8 -*-
import os
import lxml.etree
import io
from . import pipeline_item
import core.docvert_exception
class GeneratePostConversionEditorFiles(pipeline_item.pipeline_stage):
    def stage(self, pipeline_value):
return pipeline_value
|
k4cg/Rezeptionistin
|
plugins/flatter.py
|
Python
|
mit
| 687
| 0.016012
|
import random
from plugin import Plugin
class Flatter(Plugin):
def help_text(self, bot):
return bot.translate("flatter_help")
def on_msg(self, bot, user_nick, host, channel, message):
        if message.lower().startswith(bot.translate("flatter_cmd")):
if len(message.split()) >= 2:
if bot.getlanguage() == "de":
bot.send_message(channel, message.split()[1] + ", " + random.choice(list(open('lists/flattery.txt'))), user_nick)
elif bot.getlanguage() == "en":
                    # Source http://www.pickuplinesgalore.com/cheesy.html
                    bot.send_message(channel, message.split()[1] + ", " + random.choice(list(open('lists/flattery_en.txt'))), user_nick)
|
BorisJeremic/Real-ESSI-Examples
|
motion_one_component/Deconvolution_DRM_Propagation_Northridge/python_plot_parameteric_study.py
|
Python
|
cc0-1.0
| 5,870
| 0.019591
|
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
import matplotlib.lines as lines
import h5py
from matplotlib.font_manager import FontProperties
import matplotlib.ticker as ticker
from scipy.fftpack import fft
axial_label_font = FontProperties()
axial_label_font.set_family('sans-serif')
axial_label_font.set_style('normal')
axial_label_font.set_weight('bold')
# axial_label_font.set_size('x-large')
axial_label_font.set_size(20)
legend_label_font = FontProperties()
legend_label_font.set_family('sans-serif')
legend_label_font.set_style('normal')
legend_label_font.set_weight('normal')
# legend_label_font.set_size('large')
legend_label_font.set_size(16)
def node_response_extraction_sequential(node_ID, file_name, num_DOF):
h5_file = h5py.File(file_name, 'r');
Time = h5_file['time'][:];
displacement_index = int(h5_file['Model/Nodes/Index_to_Generalized_Displacements'][node_ID]);
displacement_component = h5_file['Model/Nodes/Generalized_Displacements'][int(displacement_index):int(displacement_index+num_DOF), :];
acceleration_component = h5_file['Model/Nodes/Generalized_Accelerations'][int(displacement_index):int(displacement_index+num_DOF), :];
for x1 in xrange(0,num_DOF):
displacement_component[x1,:] = displacement_component[x1,:]-displacement_component[x1,0]; ### in case self weight loading stage, get relative displacement
return Time, displacement_component, acceleration_component;
numbercol = 1;
surface_node_ID = 252; ## 252, 250, 249, 251
node_ID = [252, 212, 172, 132, 92, 52, 12]; ## node ID from surface to bottom
depth = [0, 2, 4, 6, 8, 10, 12];
bottom_node_ID = 6; ## node just beyond DRM layer
file_name = 'Motion1C_DRM_propagation.h5.feioutput' ##
parameteric_case = 'Motion1C_Northridge' ##
### ==========================================================================
postfix = '.feioutput';
middle_name_less_than_ten = '0';
num_DOF = 3;
Time, displacement_component_surface, acceleration_component_surface = node_response_extraction_sequential(surface_node_ID, file_name, num_DOF);
Time, displacement_component_bottom, acceleration_component_bottom = node_response_extraction_sequential(bottom_node_ID, file_name, num_DOF);
# surface_acc = np.loadtxt('Kobe_acc.txt');
# surface_disp = np.loadtxt('Kobe_disp.txt');
surface_acc = np.loadtxt('scaled_northridge_acc.dat');
surface_disp = np.loadtxt('scaled_northridge_dis.dat');
########################################################################################
#######===== Print acceleration of nodes ===== ######
########################################################################################
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(surface_acc[:, 0], surface_acc[:, 1], '-r', label='surface analytical', linewidth= 1.5);
ax.plot(Time[200:]-2.0, acceleration_component_surface[0, 200:], '-k', label='DRM propagation', linewidth= 0.5);
plt.gca().set_xlim([0,38]);
# plt.gca().set_ylim([-10,10]);
# plt.gca().get_xaxis().set_ticks(np.arange(0, 60.1, 10))
# plt.gca().get_yaxis().set_ticks(np.arange(-15, 3.1, 3))
plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
plt.xlabel('Time [s]', fontproperties=axial_label_font);
plt.ylabel('Acc. [$m/s^2$]', fontproperties=axial_label_font);
plt.grid(True);
plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
filename = 'acc_check_'+ parameteric_case + '.pdf'
plt.savefig(filename, bbox_inches='tight');
plt.show();
# # # ########################################################################################
# # # #######======================== Print Time series response along the depth ===== ######
# # # ########################################################################################
# print "Plot acceleration records along depth!";
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # scale_meter = 7;
# # plt.gca().text(32.7, 1.25, '$1g$', fontsize=20)
# # l1 = lines.Line2D([32, 32], [0.5, 0.5+10/scale_meter], color='k', linewidth=2.0)
# # l2 = lines.Line2D([31.7, 32.3], [0.5, 0.5], color='k', linewidth=0.5)
# # l3 = lines.Line2D([31.7, 32.3], [0.5+10/scale_meter, 0.5+10/scale_meter], color='k', linewidth=0.5)
# # plt.gca().add_line(l1);
# # plt.gca().add_line(l2);
# # plt.gca().add_line(l3);
# PGA_depth = sp.zeros(len(depth));
# for x in xrange(0,len(node_ID)):
# current_node = node_ID[x];
# current_depth = depth[x];
# Time, current_displacement_component, current_acceleration_component = node_response_extraction_sequential(current_node, file_name, num_DOF);
# plot_current_acceleration = current_depth + current_acceleration_component/15.0; ## scale acceleration
#     PGA_depth[x] = max(abs(current_acceleration_component[0, :]));
# ax.plot(Time, plot_current_acceleration[0, :], '-k', linewidth= 1);
# plt.gca().set_ylim([-1,13]);
# plt.gca().invert_yaxis()
# # plt.gca().get_xaxis().set_ticks(np.arange(0, 60.1, 10))
# # plt.gca().get_yaxis().set_ticks(np.arange(-15, 3.1, 3))
# plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
# plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
# plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
# plt.xlabel('Time [s]', fontproperties=axial_label_font);
# plt.ylabel('Depth. [m]', fontproperties=axial_label_font);
# plt.grid(True);
# plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
# filename = 'acc_depth_'+ parameteric_case + '.pdf'
# plt.savefig(filename, bbox_inches='tight');
# plt.show();
|
jocelynmass/nrf51
|
toolchain/arm_cm0/arm-none-eabi/lib/thumb/v7-m/libstdc++.a-gdb.py
|
Python
|
gpl-2.0
| 2,482
| 0.006446
|
# -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/build/work/GCC-7-build/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/work/GCC-7-build/install-native/arm-none-eabi/lib/thumb/v7-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
davidgardenier/frbpoppy
|
tests/monte_carlo/simulations.py
|
Python
|
mit
| 14,711
| 0
|
"""Run Monte Carlo simulations."""
from joblib import Parallel, delayed
from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint
from datetime import datetime
from copy import deepcopy
from glob import glob
import frbpoppy.paths
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import uuid
POP_SIZE = 5e7
class SimulationOverview:
"""Given values, return uid
Load from file, or make."""
def __init__(self, load_csv=True):
p = frbpoppy.paths.populations()
self.filename = f'{p}mc/simluation_overview.csv'
if load_csv and os.path.isfile(self.filename):
self.load()
else:
self.df = pd.DataFrame()
def load(self):
self.df = pd.read_csv(self.filename, index_col=0)
self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')]
def save(self):
self.df.to_csv(self.filename)
def append(self, df):
self.df = self.df.append(df, ignore_index=True)
def map_surveys(self, ix, names):
mapping = dict(zip(ix, names))
self.df.replace({"survey": mapping}, inplace=True)
class MonteCarlo:
def __init__(self, pop_size=1e2, load_csv=True):
self.survey_names = ['parkes-htru',
'chime-frb',
'askap-incoh',
'wsrt-apertif']
self.load_csv = load_csv
self.pop_size = pop_size
self.survey_ix = [i for i in range(len(self.survey_names))]
self.surveys = self.set_up_surveys()
self.so = SimulationOverview(load_csv=self.load_csv)
self.set_up_dirs()
def set_up_surveys(self):
"""Set up surveys."""
surveys = []
for name in self.survey_names:
survey = Survey(name=name)
survey.set_beam(model='airy', n_sidelobes=1)
if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
survey.set_beam(model=name)
surveys.append(survey)
return surveys
def set_up_dirs(self, run=np.nan):
"""Create subdirectory for saving populations.
Returns True if directory had to be set up."""
f = f'{frbpoppy.paths.populations()}mc/'
if not os.path.isdir(f):
os.mkdir(f)
return True
if not np.isnan(run):
f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
if not os.path.isdir(f):
os.mkdir(f)
return True
return False
def gen_par_set_1(self,
parallel=True,
lum_min=np.nan,
lum_max=np.nan,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=0):
alphas = np.linspace(-2.5, -1, 11)
sis = np.linspace(-2, 2, 11)
lis = np.linspace(-2, 0, 11)
# Put all options into a dataframe
if 'run' in self.so.df:
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
df['run'] = run
df['par_set'] = 1
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
def iter_alpha(i):
alpha = alphas[i]
pop = CosmicPopulation.complex(self.pop_size)
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
for si in sis:
pop.set_si(model='constant', value=si)
pop.gen_si()
for li in lis:
pop.set_lum(model='powerlaw',
low=1e40,
high=1e45, power=li)
if not np.isnan(lum_min):
pop.set_lum(model='powerlaw', low=lum_min,
high=lum_max, index=li)
pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 1)
mask &= (self.so.df.run == run)
mask &= (self.so.df.alpha == alpha)
mask &= (self.so.df.si == si)
mask &= (self.so.df.li == li)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
if parallel:
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
            r = range(len(alphas))
Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
else:
[iter_alpha(i) for i in tqdm(range(len(alphas)))]
def gen_par_set_2(self,
parallel=True,
alpha=-1.5,
si=0,
w_mean=np.nan,
                      w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
lis = np.linspace(-1.5, 0, 11)
lum_mins = 10**np.linspace(38, 46, 11)
lum_maxs = 10**np.linspace(38, 46, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
cols = ('li', 'lum_min', 'lum_max', 'survey')
df = pd.DataFrame(options, columns=cols)
df['par_set'] = 2
df['run'] = run
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
df = df[~(df.lum_max < df.lum_min)]
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
li, lum_min, lum_max = e
if lum_max < lum_min:
return
t_pop = deepcopy(pop)
t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
power=li)
t_pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 2)
mask &= (self.so.df.run == run)
|
YiqunPeng/Leetcode-pyq
|
solutions/661ImageSmoother.py
|
Python
|
gpl-3.0
| 684
| 0.010234
|
class Solution(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
        row, col = len(M), len(M[0])
        ans = [[0]*col for i in xrange(row)]
for i in xrange(row):
for j in xrange(col):
cnt = 0
val = 0
for p in xrange(-1, 2):
for q in xrange(-1, 2):
if ((i+p)<0) or ((i+p)>=row) or ((j+q)<0) or ((j+q)>=col):
continue
cnt += 1
val += M[i+p][j+q]
ans[i][j] = val / cnt
return ans
|
nbr23/nemubot
|
modules/rnd.py
|
Python
|
agpl-3.0
| 1,491
| 0.003353
|
"""Help to make choice"""
# PYTHON STUFFS #######################################################
import random
import shlex
from nemubot import context
from nemubot.exception import IMException
from nemubot.hooks import hook
from nemubot.module.more import Response
# MODULE INTERFACE ####################################################
@hook.command("choice")
def cmd_choice(msg):
if not len(msg.args):
raise IMException("indicate some terms to pick!")
return Response(random.choice(msg.args),
channel=msg.channel,
nick=msg.frm)
@hook.command("choicecmd")
def cmd_choicecmd(msg):
if not len(msg.args):
raise IMException("indicate some command to pick!")
    choice = shlex.split(random.choice(msg.args))
return [x for x in context.subtreat(context.subparse(msg, choice))]
@hook.command("choiceres")
def cmd_choiceres(msg):
if not len(msg.args):
raise IMException("indicate some command to pick a message from!")
rl = [x for x in context.subtreat(context.subparse(msg, " ".join(msg.args)))]
if len(rl) <= 0:
return rl
r = random.choice(rl)
if isinstance(r, Response):
for i in range(len(r.messages) - 1, -1, -1):
if isinstance(r.messages[i], list):
r.messages = [ random.choice(random.choice(r.messages)) ]
elif isinstance(r.messages[i], str):
r.messages = [ random.choice(r.messages) ]
return r
|
stopstop/duvet
|
duvet/objects.py
|
Python
|
gpl-3.0
| 1,485
| 0.003367
|
from datetime import datetime
import uuid
class Torrent(object):
def __init__(self):
self.tracker = None
self.url = None
self.title = None
self.magnet = None
self.seeders = None
self.leechers = None
self.size = None
self.date = None
self.details = None
self.uuid = uuid.uuid4().hex
self._remove = False
@property
def human_age(self):
if self.date:
age = datetime.now() - self.date
return "%s days" % (int(age.total_seconds()/(60*60*24)))
else:
return "Unknown"
@property
def human_size(self):
if self.size:
if self.size > 1000000000:
return "%.2f GB" % (self.size / 1000000000)
elif self.size > 1000000:
return "%.2f MB" % (self.size/1000000)
else:
return "%s KB" % (self.size/1000)
@property
def html_friendly_title(self):
return self.title.replace('.', '.​').replace('[', '​[').replace(']', ']​')
def __unicode__(self):
        return "%s Size: %s Seeders: %s Age: %s %s" % (self.title.ljust(60)[0:60], str(self.human_size).ljust(12),
                                                       str(self.seeders).ljust(6), self.human_age,
                                                       self.tracker)
def __str__(self):
return self.__unicode__()
|
Kromey/piroute
|
iptables/utils.py
|
Python
|
mit
| 808
| 0.001238
|
from . import services
def prep_rules(rules):
prepped = []
for rule in rules:
if rule['enabled']:
prepped.append(prep_rule(rule))
return prepped
def prep_rule(raw_rule):
rule = dict(raw_rule)
if rule['service'] != 'custom':
proto, port = services.decode_service(rule['service'])
|
if not (proto and port):
raise ValueError("Unknown service: {service}".format(
                service=rule['service']
))
rule['proto'] = proto
rule['port'] = port
if not rule['comment']:
rule['comment'] = "{service} service ({proto}:{port})".format(
service=rule['service'],
proto=proto,
port=port
)
return rule
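# Illustrative sketch (hypothetical values, not part of the original module):
# assuming services.decode_service('ssh') returned ('tcp', 22), then
# prep_rule({'enabled': True, 'service': 'ssh', 'comment': ''}) would fill in
# proto='tcp', port=22 and the default comment "ssh service (tcp:22)".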
|
alonho/pql
|
pql/matching.py
|
Python
|
bsd-3-clause
| 14,159
| 0.00678
|
"""
The parser:
1. gets an expression
2. parses it
3. handles all boolean logic
4. delegates operator and rvalue parsing to the OperatorMap
SchemaFreeOperatorMap
supports all mongo operators for all fields.
SchemaAwareOperatorMap
1. verifies fields exist.
2. verifies operators are applied to fields of correct type.
currently unsupported:
1. $where - kind of intentionally against injections
2. geospatial
"""
import ast
import bson
import datetime
import dateutil.parser
from calendar import timegm
def parse_date(node):
if hasattr(node, 'n'): # it's a number!
return datetime.datetime.fromtimestamp(node.n)
try:
return dateutil.parser.parse(node.s)
except Exception as e:
raise ParseError('Error parsing date: ' + str(e), col_offset=node.col_offset)
class AstHandler(object):
def get_options(self):
return [f.replace('handle_', '') for f in dir(self) if f.startswith('handle_')]
def resolve(self, thing):
thing_name = thing.__class__.__name__
try:
handler = getattr(self, 'handle_' + thing_name)
except AttributeError:
raise ParseError('Unsupported syntax ({0}).'.format(thing_name,
self.get_options()),
col_offset=thing.col_offset if hasattr(thing, 'col_offset') else None,
options=self.get_options())
return handler
def handle(self, thing):
return self.resolve(thing)(thing)
def parse(self, string):
ex = ast.parse(string, mode='eval')
return self.handle(ex.body)
class ParseError(Exception):
def __init__(self, message, col_offset, options=[]):
super(ParseError, self).__init__(message)
self.message = message
self.col_offset = col_offset
self.options = options
def __str__(self):
if self.options:
return '{0} options: {1}'.format(self.message, self.options)
return self.message
class Parser(AstHandler):
def __init__(self, operator_map):
self._operator_map = operator_map
def get_options(self):
return self._operator_map.get_options()
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_And(self, op):
'''and'''
return '$and'
def handle_Or(self, op):
'''or'''
return '$or'
def handle_UnaryOp(self, op):
operator = self.handle(op.operand)
field, value = list(operator.items())[0]
return {field: {self.handle(op.op): value}}
def handle_Not(self, not_node):
'''not'''
return '$not'
def handle_Compare(self, compare):
if len(compare.comparators) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(compare.comparators)),
col_offset=compare.comparators[1].col_offset)
return self._operator_map.handle(left=compare.left,
operator=compare.ops[0],
right=compare.comparators[0])
class SchemaFreeParser(Parser):
def __init__(self):
super(SchemaFreeParser, self).__init__(SchemaFreeOperatorMap())
class SchemaAwareParser(Parser):
    def __init__(self, *a, **k):
super(SchemaAwareParser, self).__init__(SchemaAwareOperatorMap(*a, **k))
class FieldName(AstHandler):
def handle_Str(self, node):
return node.s
def handle_Name(self, name):
return name.id
    def handle_Attribute(self, attr):
return '{0}.{1}'.format(self.handle(attr.value), attr.attr)
class OperatorMap(object):
def resolve_field(self, node):
return FieldName().handle(node)
def handle(self, operator, left, right):
field = self.resolve_field(left)
return {field: self.resolve_type(field).handle_operator_and_right(operator, right)}
class SchemaFreeOperatorMap(OperatorMap):
def get_options(self):
return None
def resolve_type(self, field):
return GenericField()
class SchemaAwareOperatorMap(OperatorMap):
def __init__(self, field_to_type):
self._field_to_type = field_to_type
def resolve_field(self, node):
field = super(SchemaAwareOperatorMap, self).resolve_field(node)
try:
self._field_to_type[field]
except KeyError:
raise ParseError('Field not found: {0}.'.format(field),
col_offset=node.col_offset,
options=self._field_to_type.keys())
return field
def resolve_type(self, field):
return self._field_to_type[field]
#---Function-Handlers---#
class Func(AstHandler):
@staticmethod
def get_arg(node, index):
if index > len(node.args) - 1:
raise ParseError('Missing argument in {0}.'.format(node.func.id),
col_offset=node.col_offset)
return node.args[index]
@staticmethod
def parse_arg(node, index, field):
return field.handle(Func.get_arg(node, index))
def handle(self, node):
try:
handler = getattr(self, 'handle_' + node.func.id)
except AttributeError:
raise ParseError('Unsupported function ({0}).'.format(node.func.id),
col_offset=node.col_offset,
options=self.get_options())
return handler(node)
def handle_exists(self, node):
return {'$exists': self.parse_arg(node, 0, BoolField())}
def handle_type(self, node):
return {'$type': self.parse_arg(node, 0, IntField())}
class StringFunc(Func):
def handle_regex(self, node):
result = {'$regex': self.parse_arg(node, 0, StringField())}
try:
result['$options'] = self.parse_arg(node, 1, StringField())
except ParseError:
pass
return result
class IntFunc(Func):
def handle_mod(self, node):
return {'$mod': [self.parse_arg(node, 0, IntField()),
self.parse_arg(node, 1, IntField())]}
class ListFunc(Func):
def handle_size(self, node):
return {'$size': self.parse_arg(node, 0, IntField())}
def handle_all(self, node):
return {'$all': self.parse_arg(node, 0, ListField())}
def handle_match(self, node):
return {'$elemMatch': self.parse_arg(node, 0, DictField())}
class DateTimeFunc(Func):
def handle_date(self, node):
return parse_date(self.get_arg(node, 0))
class IdFunc(Func):
def handle_id(self, node):
return self.parse_arg(node, 0, IdField())
class EpochFunc(Func):
def handle_epoch(self, node):
return self.parse_arg(node, 0, EpochField())
class EpochUTCFunc(Func):
def handle_epoch_utc(self, node):
return self.parse_arg(node, 0, EpochUTCField())
class GeoShapeFuncParser(Func):
def handle_Point(self, node):
return {'$geometry':
{'type': 'Point',
'coordinates': [self.parse_arg(node, 0, IntField()),
self.parse_arg(node, 1, IntField())]}}
def handle_LineString(self, node):
return {'$geometry':
{'type': 'LineString',
'coordinates': self.parse_arg(node, 0, ListField(ListField(IntField())))}}
def handle_Polygon(self, node):
return {'$geometry':
{'type': 'Polygon',
'coordinates': self.parse_arg(node, 0, ListField(ListField(ListField(IntField()))))}}
def handle_box(self, node):
return {'$box': self.parse_arg(node, 0, ListField(ListField(IntField())))}
def handle_polygon(self, node):
return {'$polygon': self.parse_arg(node, 0, ListField(ListField(IntField())))}
def _any_center(self, node, center_name):
return {center_name: [self.parse_arg(node, 0, ListField(IntField())),
self.parse_arg(node, 1, IntField())]}
def handle_center(self, node):
return self._any_center(node, '$center')
def handle_centerSphere(self, node):
|
mattdennewitz/python-acoustid-api
|
acoustid_api/consts.py
|
Python
|
mit
| 819
| 0
|
from __future__ import unicode_literals
from . import exceptions
DEFAULT_HOST = 'http://api.acoustid.org/'
FORMATS = ('json', 'jsonp', 'xml')
META = (
'recordings', 'recordingids', 'releases', 'releaseids',
'releasegroups', 'releasegroupids', 'tracks', 'compress',
'usermeta', 'sources'
)
ERRORS = {
1: exceptions.UnknownFormat,
2: exceptions.MissingParameter,
    3: exceptions.InvalidFingerprint,
4: exceptions.InvalidClientKey,
5: exceptions.InternalError,
6: exceptions.InvalidUserApiKey,
7: exceptions.InvalidUUID,
8: exceptions.InvalidDuration,
9: exceptions.InvalidBitrate,
10: exceptions.InvalidForeignID,
11: exceptions.InvalidMaxDurationDiff,
12: exceptions.NotAllowed,
13: exceptions.ServiceUnavailable,
14: exceptions.TooManyRequests,
}
|
ioanaantoche/muhaha
|
ioana/RecordAudio.py
|
Python
|
gpl-2.0
| 757
| 0.018494
|
import sys
import time
from naoqi import ALProxy
IP = "nao.local"
PORT = 9559
if (len(sys.argv) < 2):
print "Usage: 'python RecordAudio.py nume'"
sys.exit(1)
fileName = "/home/nao/" + sys.argv[1] + ".wav"
aur = ALProxy("ALAudioRecorder", IP, PORT)
channels = [0,0,1,0]
aur.startMicrophonesRecording(fileName, "wav", 160000, channels)
c=raw_input("Sfarsit?")
aur.stopMicrophonesRecording()
c=raw_input("play?")
aup = ALProxy("ALA
|
udioPlayer", IP, PORT)
#Launchs the playing of a file
aup.playFile(fileName,0.5,-1.0)
c=raw_input("gata?")
#Launchs the playing of a file
#aup.playFile("/usr/share/naoqi/wav/random.wav")
#Launchs the playing of a file on the left speaker to a volume of 50%
#aup.playFile("/usr/share/naoqi/wav/random.wav",0.5,-1.0)
|
nuigroup/pymt-widgets
|
pymt/tools/packaging/win32/build.py
|
Python
|
lgpl-3.0
| 6,313
| 0.010771
|
import os, sys, shutil
import zipfile
from zipfile import ZipFile
from urllib import urlretrieve
from subprocess import Popen, PIPE
from distutils.cmd import Command
def zip_directory(dir, zip_file):
zip = ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED)
root_len = len(os.path.abspath(dir))
for root, dirs, files in os.walk(dir):
archive_root = os.path.abspath(root)[root_len:]
for f in files:
fullpath = os.path.join(root, f)
archive_name = os.path.join(archive_root, f)
zip.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
zip.close()
class WindowsPortableBuild(Command):
description = "custom build command that builds portable win32 package"
user_options = [
('dist-dir=', None,
"path of dist directory to use for building portable pymt, the end result will be output to this driectory. default to cwd."),
('deps-url=', None,
"url of binary dependancies for portable pymt package default: http://pymt.googlecode.com/files/portable-deps-win32.zip"),
('no-cext', None,
"flag to disable building of c extensions"),
('no-mingw', None,
"flag to disable bundling of mingw compiler for compiling c/cython extensions")
]
def initialize_options(self):
self.dist_dir = None
self.deps_url = None
self.no_cext = None
self.no_mingw = None
def finalize_options(self):
if not self.deps_url:
self.deps_url = 'http://pymt.googlecode.com/files/portable-deps-win32.zip'
if not self.dist_dir:
self.dist_dir = os.getcwd()
self.src_dir = os.path.dirname(sys.modules['__main__'].__file__)
self.dist_name = self.distribution.get_fullname() # e.g. PyMT-0.5 (name and verison passed to setup())
self.build_dir = os.path.join(self.dist_dir, self.dist_name+'-w32')
def run(self):
print "---------------------------------"
print "Building PyMT Portable for Win 32"
print "---------------------------------"
print "\nPreparing Build..."
print "---------------------------------------"
if os.path.exists(self.build_dir):
print "*Cleaning old build dir"
shutil.rmtree(self.build_dir, ignore_errors=True)
print "*Creating build directory:"
print " "+self.build_dir
os.makedirs(self.build_dir)
print "\nGetting binary dependencies..."
print "---------------------------------------"
print "*Downloading:", self.deps_url
        #report_hook is called every time a piece of the file is downloaded to print progress
def report_hook(block_count, block_size, total_size):
p = block_count*block_size*100.0/total_size
print "\b\b\b\b\b\b\b\b\b", "%06.2f"%p +"%",
print " Progress: 000.00%",
        urlretrieve(self.deps_url, #location of binary dependencies needed for portable pymt
            os.path.join(self.build_dir,'deps.zip'), #tmp file to store the archive
reporthook=report_hook)
print " [Done]"
print "*Extracting binary dependencies..."
zf = ZipFile(os.path.join(self.build_dir,'deps.zip'))
zf.extractall(self.build_dir)
zf.close()
if self.no_mingw:
print "*Excluding MinGW from portable distribution (--no-mingw option is set)"
shutil.rmtree(os.path.join(self.build_dir, 'MinGW'), ignore_errors=True)
print "\nPutting pymt into portable environment"
print "---------------------------------------"
print "*Building pymt source distribution"
sdist_cmd = [sys.executable, #path to python.exe
os.path.join(self.src_dir,'setup.py'), #path to setup.py
'sdist', #make setup.py create a src distribution
'--dist-dir=%s'%self.build_dir] #put it into build folder
Popen(sdist_cmd, stdout=PIPE, stderr=PIPE).communicate()
print "*Placing pymt source distribution in portable context"
src_dist = os.path.join(self.build_dir,self.dist_name)
zf = ZipFile(src_dist+'.zip')
zf.extractall(self.build_dir)
zf.close()
if self.no_mingw or self.no_cext:
print "*Skipping C Extens
|
ion build (either --no_cext or --no_mingw option set)"
else:
print "*Compiling C Extensions inplace for portable distribution"
cext_cmd = [sys.executable, #path to python.exe
'setup.py',
                        'build_ext', #make setup.py build the C extensions
|
'--inplace'] #do it inplace
            #this time it runs the setup.py inside the source distribution
            #that has been generated inside the build dir (to generate ext
            #for the target, instead of the source we're building from)
Popen(cext_cmd, cwd=src_dist, stdout=PIPE, stderr=PIPE).communicate()
print "\nFinalizing pymt portable distribution..."
print "---------------------------------------"
print "*Copying scripts and resources"
#copy launcher script and readme to portable root dir/build dir
pymt_bat = os.path.join(src_dist,'pymt','tools','packaging','win32', 'pymt.bat')
shutil.copy(pymt_bat, os.path.join(self.build_dir, 'pymt.bat'))
readme = os.path.join(src_dist,'pymt','tools','packaging','win32', 'README.txt')
shutil.copy(readme, os.path.join(self.build_dir, 'README.txt'))
#rename pymt directory to "pymt"
os.rename(src_dist, os.path.join(self.build_dir,'pymt'))
print "*Removing intermediate file"
os.remove(os.path.join(self.build_dir,'deps.zip'))
os.remove(os.path.join(self.build_dir,src_dist+'.zip'))
print "*Compressing portable distribution target"
target = os.path.join(self.dist_dir, self.dist_name+"-w32.zip")
zip_directory(self.build_dir, target)
print "*Writing target:", target
print "*Removing build dir"
shutil.rmtree(self.build_dir, ignore_errors=True)
|
samsu/neutron
|
plugins/nuage/common/exceptions.py
|
Python
|
apache-2.0
| 919
| 0
|
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' Nuage specific exceptions '''
from neutron.common import exceptions as n_exc
class OperationNotSupported(n_exc.InvalidConfigurationOption):
message = _("Nuage Plugin does not support this operation: %(msg)s")
class NuageBadRequest(n_exc.BadRequest):
message = _("Bad reques
|
t: %(msg)s")
|
christianknu/eitu
|
eitu/migrations/0001_initial.py
|
Python
|
mit
| 662
| 0.001511
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-26 14:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Occupancy',
fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room_name', models.CharField(max_length=255)),
('occupancy', models.IntegerField()),
                ('timestamp', models.DateField()),
],
),
]
|
troeger/opensubmit
|
web/opensubmit/templatetags/projecttags.py
|
Python
|
agpl-3.0
| 1,869
| 0.000535
|
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
import os
register = template.Library()
@register.filter(name='basename')
@stringfilter
def basename(value):
return os.path.basename(value)
@register.filter(name='replace_macros')
@stringfilter
def replace_macros(value, user_dict):
return value.replace("#FIRSTNAME#", user_dict['first_name'].strip()) \
.replace("#LASTNAME#", user_dict['last_name'].strip())
@register.filter(name='state_label_css')
def state_label_css(subm):
green_label = "badge label label-success"
red_label = "badge label label-important"
grey_label = "badge label label-info"
# We expect a submission as input
if subm.is_closed() and subm.grading:
if subm.grading.means_passed:
return green_label
else:
return red_label
if subm.state in [subm.SUBMITTED_TESTED,
subm.SUBMITTED,
subm.TEST_FULL_PENDING,
subm.GRADED,
subm.TEST_FULL_FAILED]:
return green_label
if subm.state == subm.TEST_VALIDITY_FAILED:
return red_label
return grey_label
@register.assignment_tag
def setting(name):
return getattr(settings, name, "")
@register.inclusion_tag('inclusion_tags/details_table.html')
def details_table(submission):
return {'submission': submission}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline_timeout(assignment):
return {'assignment': assignment, 'show_timeout': True}
@register.inclusion_tag('inclusion_tags/deadline.html')
def deadline(assignment):
return {'assignment': assignment, 'show_timeout': False}
@register.inclusion_tag('inclusion_tags/grading.html')
def grading(submission):
return {'submission': submission}
|
capybaralet/Blocks_quickstart
|
basic_blocks_script.py
|
Python
|
mit
| 3,082
| 0.00292
|
"""
This script is a starting point for new Blocks users already familiar with
Machine Learning and Theano.
We demonstrate how to use blocks to train a generic set of parameters (theano
shared variables) that influence some arbitrary cost function (theano
symbolic variable), so you can start using blocks features (e.g. monitoring,
extensions, training algorithms) with your Theano code today.
To run an experiment, we simply construct a main_loop.MainLoop and call its
run() method. It suffices to pass the MainLoop a blocks.model.Model
(which needs the cost), a blocks.algorithms.TrainingAlgorithm (which needs the
cost and parameters), and a fuel.streams.DataStream*
As it is the script will run indefinitely, with no output. You can interrupt
training anytime with Ctrl+C, or termination conditions can be added
via extensions.
*The DataStream object is part of the partner library Fuel
(https://github.com/mila-udem/fuel).
"""
import numpy
np = numpy
import theano
import theano.tensor as T
# (Here we make a toy dataset of two 2D gaussians with different means.)
num_examples = 1000
batch_size = 100
means = np.array([[-1., -1.], [1, 1]])
std = 0.5
labels = np.random.randint(size=num_examples, low=0, high=2)  # high is exclusive, so this yields labels 0 and 1
features = means[labels, :] + std * np.random.normal(size=(num_examples, 2))
labels = labels.reshape((num_examples, 1)).astype(theano.config.floatX)
features = features.astype(theano.config.floatX)
# Define "data_stream"
from collections import OrderedDict
from fuel.datasets import IndexableDataset
# The names here (e.g. 'name1') need to match the names of the variables which
# are the roots of the computational graph for the cost.
dataset = IndexableDataset(
OrderedDict([('name1', features), ('name2', labels)]))
from fuel.streams import DataStream, ForceFloatX
from fuel.schemes import SequentialScheme
data_stream = ForceFloatX(DataStream(dataset,
iteration_scheme=SequentialScheme(
dataset.num_examples, batch_size)))
# Define "cost" and "parameters"
# (We use logistic regression to classify points by distribution)
inputs = T.matrix('name1')
targets = T.matrix('name2')
ninp, nout = 2, 1
W = theano.shared(.01*np.random.uniform(
size=((ninp, nout))).astype(theano.config.floatX))
b = theano.shared(np.zeros(nout).astype(theano.config.floatX))
output = T.nnet.sigmoid(T.dot(inputs, W) + b)
# a theano symbolic expression
cost = T.mean(T.nnet.binary_crossentropy(output, targets))
# a list of theano.shared variables
parameters = [W, b]
# wrap everything in Blocks objects and run!
from blocks.model import Model
model = Model([cost])
from blocks.algorithms import GradientDescent, Scale
algorithm = GradientDescent(cost=cost,
parameters=parameters,
step_rule=Scale(learning_rate=.01))
from blocks.main_loop import MainLoop
my_loop = MainLoop(model=model,
data_stream=data_stream,
algorithm=algorithm)
my_loop.run()
|
TonyWu386/redshift-game
|
main.py
|
Python
|
gpl-2.0
| 49,393
| 0.000121
|
# -----------------------------------------------------------------------------
# File name: main.py #
# Date created: 3/20/2014 #
# Date last modified: 1/18/2015 #
# #
# Author: Tony Wu (Xiangbo) #
# Email: xb.wu@mail.utoronto.ca #
# #
# Python version: developed under 3.4, additionally tested under 2.7 #
# Dependencies: Pygame 1.9.2, rsclasses.py #
# #
# License: GNU GPL v2.0 #
# #
# Copyright (c) 2014-2015 [Tony Wu], All Right Reserved #
# -----------------------------------------------------------------------------
if __name__ == "__main__":
import pygame
import sys
import time
import pygame.mixer
from math import *
from pygame.locals import *
pygame.init()
pygame.mixer.init(frequency=44100, size=-16, channels=2, buffer=4096)
    # rsclasses.py must be present
from rsclasses import *
# Constants - use default value unless debugging
HORI_RES = 800 # Horizontal Resolution
VERT_RES = 600 # Vertical Resolution
FONT = "timesnewroman" # Game font
FPS = 60 # Frames-per-second
# The following image asset files must present
bg = "background.jpg"
wstar = "whitestar.png"
rstar = "redstar.png"
ystar = "yellowstar.png"
bstar = "bluestar.png"
bkship1 = "minienemy1.png"
pship = "pship.png"
pshipfl = "pshipfirelaser.png"
pshipfly = "pshipfly.png"
pshipflyback = "pshipflyback.png"
pro1 = "projectile1.png"
pro1f = "projectile1flash.png"
las1 = "laser1.png"
lasr = "laserred.png"
em1 = "enemy1.png"
em2 = "enemy2.png"
em3 = "enemy3.png"
em3f = "enemy3fire.png"
em4 = "enemy4.png"
ex1 = "explosion.png"
bs1 = "boss1.png"
bs2 = "boss2.png"
bs2shoot = "boss2shoot.png"
bs3 = "boss3.png"
bs4 = "boss4.png"
bs4r = "boss4ram.png"
bf = "bossfinalyellow.png"
bfr = "bossfinal.png"
isplash = "introsplash.jpg"
isplash2 = "poweredbysource.jpg"
sscreen = "startscreen.jpg"
hscreen = "helpscreen.jpg"
b1w = "boss1red.png"
b2w = "boss2red.png"
b2sw = "boss2shootred.png"
b3w = "boss3red.png"
b4w = "boss4red.png"
hbar = "healthbar.png"
ebar = "energybar.png"
eunit = "energyunit.png"
eunitred = "energyunitred.png"
efire = "enginefire.png"
efireb = "enginefireblue.png"
menus = "menuselector.png"
menusf = "menuselectorflash.png"
creds = "creditscreen.jpg"
dscreen = "deathscreen.jpg"
efl = "enginefirelow.png"
wscrn = "winscreen.png"
# The following sound asset files must present
introsound = pygame.mixer.Sound("introlow.wav")
menutheme = pygame.mixer.Sound("menutheme.wav")
bossfight = pygame.mixer.Sound("bossfight.wav")
boss2fight = pygame.mixer.Sound("boss2theme.wav")
explosionS = pygame.mixer.Sound("explosion.wav")
laserFX = pygame.mixer.Sound("laserfx.wav")
leveltheme = pygame.mixer.Sound("leveltheme.wav")
boss3fight = pygame.mixer.Sound("boss3theme.wav")
boss4fight = pygame.mixer.Sound("boss4theme.wav")
bombFX = pygame.mixer.Sound("nuke.wav")
explosionS.set_volume(0.15)
laserFX.set_volume(1.0)
# Setting up game window
screen = pygame.display.set_mode((HORI_RES, VERT_RES), 0, 32)
# Setting up fonts
stdfont = pygame.font.SysFont(FONT, 24)
stdfont_bold = pygame.font.SysFont(FONT, 24)
stdfont_bold.set_bold(True)
# Generating pygame surfaces
# Stars
background = pygame.image.load(bg).convert()
whitestar = pygame.image.load(wstar).convert_alpha()
redstar = pygame.image.load(rstar).convert_alpha()
yellowstar = pygame.image.load(ystar).convert_alpha()
bluestar = pygame.image.load(bstar).convert_alpha()
# Ships and projectiles
backgroundship1 = pygame.image.load(bkship1).convert_alpha()
playership = pygame.image.load(pship).convert_alpha()
playershipfirelaser = pygame.image.load(pshipfl).convert_alpha()
playershipfly = pygame.image.load(pshipfly).convert_alpha()
playershipflyback = pygame.image.load(pshipflyback).convert_alpha()
rocket = pygame.image.load(pro1).convert_alpha()
rocketflash = pygame.image.load(pro1f).convert_alpha()
enemy1 = pygame.image.load(em1).convert_alpha()
enemy2 = pygame.image.load(em2).convert_alpha()
enemy3 = pygame.image.load(em3).convert_alpha()
enemy3fire = pygame.image.load(em3f).convert_alpha()
enemy4 = pygame.image.load(em4).convert_alpha()
explosion = pygame.image.load(ex1).convert_alpha()
boss1 = pygame.image.load(bs1).convert_alpha()
boss2 = pygame.image.load(bs2).convert_alpha()
boss2shoot = pygame.image.load(bs2shoot).convert_alpha()
boss3 = pygame.image.load(bs3).convert_alpha()
boss4 = pygame.image.load(bs4).convert_alpha()
boss4ram = pygame.image.load(bs4r).convert_alpha()
bossfinal = pygame.image.load(bf).convert_alpha()
    bossfinalred = pygame.image.load(bfr).convert_alpha()
introsplash = pygame.image.load(isplash).convert()
introsplash2 = pygame.image.load(isplash2).convert()
startscreen = pygame.image.load(sscreen).convert()
helpscreen = pygame.image.load(hscreen).convert()
boss1white = pygame.image.load(b1w).convert_alpha()
boss2white = pygame.image.load(b2w).convert_alpha()
boss2shootwhite = pygame.image.load(b2sw).convert_alpha()
boss3white = pygame.image.load(b3w).convert_alpha()
    boss4white = pygame.image.load(b4w).convert_alpha()
laser1 = pygame.image.load(las1).convert_alpha()
laserred = pygame.image.load(lasr).convert_alpha()
laserredver = pygame.transform.rotate(pygame.image.load(lasr).
convert_alpha(), 90)
enginefire = pygame.image.load(efire).convert_alpha()
enginefireblue = pygame.image.load(efireb).convert_alpha()
enginefirebig = pygame.transform.scale2x(enginefire).convert_alpha()
enginefirelow = pygame.image.load(efl).convert_alpha()
# In-game UI
ui_healthbar = pygame.image.load(hbar).convert_alpha()
ui_energybar = pygame.image.load(ebar).convert_alpha()
ui_energyunit = pygame.image.load(eunit).convert_alpha()
ui_energyunitred = pygame.image.load(eunitred).convert_alpha()
# Menu UI
ui_menuselector = pygame.image.load(menus).convert_alpha()
ui_menuselectorflash = pygame.image.load(menusf).convert_alpha()
creditscreen = pygame.image.load(creds).convert()
deathscreen = pygame.image.load(dscreen).convert()
winscreen = pygame.image.load(wscrn).convert()
clock = pygame.time.Clock()
pause = False
a = cstar(30, HORI_RES, VERT_RES)
laser = claser()
# For movement
wkey = False
akey = False
skey = False
dkey = False
win = False
# For missile weapon
isfire = False
# For laser weapon
islaser = False
timer = -400
(ex, ey) = (0, 0) # Used for temp store of explosion locations
score = 0 # Player's score
hitinframe = False # Used to trigger collision warning
collidelabeldelay = 0
stage = 0
# 1 -- FIRST WAVE
# 2 -- BOSS (#1)
# 3 -- SECOND WAVE
# 4 -- BOSS (#2)
# 5 -- THIRD WAVE
# 6 -- BOSS (#3)
# 7 -- FOURTH WAVE
# 8 -- BOSS (#4)
quota = 0
flash = 0
# introsplash
pygame.display.set_caption("REDSHIFT v1.1")
introsound.set_volume(1.0)
introsound.play()
for i in range(0, 5
|
jeremiedecock/snippets
|
python/datetime_snippets.py
|
Python
|
mit
| 605
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
# Datetime ############################
dt = datetime.datetime.now()
print(dt)
dt = datetime.datetime(year=2018, month=8, day=30, hour=13, minute=30)
print(dt)
print(dt.isoformat())
# Date ################################
d = datetime.date.today()
print(d)
d = datetime.datetime.now().date()
print(d)
d = datetime.date(year=2018, month=8, day=30)
print(d)
print(d.isoformat())
# Time ################################
t = datetime.datetime.now().time()
print(t)
t = datetime.time(hour=1, minute=30)
print(t)
print(t.isoformat())
|
azaghal/ansible
|
test/lib/ansible_test/_internal/cloud/aws.py
|
Python
|
gpl-3.0
| 3,937
| 0.002286
|
"""AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
        :type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if aci.available:
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
|
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.'
% target.name)
|
chryswoods/SireTests
|
unittests/SireMM/testgridff2.py
|
Python
|
gpl-2.0
| 3,699
| 0.011895
|
from Sire.IO import *
from Sire.MM import *
from Sire.System import *
from Sire.Mol import *
from Sire.Maths import *
from Sire.FF import *
from Sire.Move import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Qt import *
import os
coul_cutoff = 20 * angstrom
lj_cutoff = 10 * angstrom
amber = Amber()
(molecules, space) = amber.readCrdTop("test/io/waterbox.crd", "test/io/waterbox.top")
system = System()
swapwaters = MoleculeGroup("swapwaters")
waters = MoleculeGroup("waters")
molnums = molecules.molNums();
for molnum in molnums:
water = molecules[molnum].molecule()
if water.residue().number() == ResNum(2025):
center_water = water
swapwaters.add(center_water)
center_point = center_water.evaluate().center()
for molnum in molnums:
if molnum != center_water.number():
water = molecules[molnum].molecule()
if Vector.distance(center_point, water.evaluate().center()) < 7.5:
water = water.residue().edit().setProperty("PDB-residue-name", "SWP").commit()
swapwaters.add(water)
else:
waters.add(water)
system.add(swapwaters)
system.add(waters)
gridff = GridFF("gridff")
gridff.setCombiningRules("arithmetic")
print("Combining rules are %s" % gridff.combiningRules())
gridff.setBuffer(2 * angstrom)
gridff.setGridSpacing( 0.5 * angstrom )
gridff.setLJCutoff(lj_cutoff)
gridff.setCoulombCutoff(coul_cutoff)
gridff.setShiftElectrostatics(True)
#gridff.setUseAtomisticCutoff(True)
#gridff.setUseReactionField(True)
cljgridff = CLJGrid()
cljgridff.setCLJFunction( CLJShiftFunction(coul_cutoff,lj_cutoff) )
cljgridff.setFixedAtoms( CLJAtoms(waters.molecules()) )
cljatoms = CLJAtoms(swapwaters.molecules())
cljgridff.setGridDimensions( cljatoms, 0.5 * angstrom, 2 * angstrom )
print("Grid box equals %s" % cljgridff.grid())
cljboxes = CLJBoxes(cljatoms)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
cljgridff.setUseGrid(False)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
gridff.add(swapwaters, MGIdx(0))
gridff.add(waters, MGIdx(1))
gridff.setSpace( Cartesian() )
gridff2 = GridFF2("gridff2")
gridff2.setCombiningRules("arithmetic")
gridff2.setBuffer(2*angstrom)
gridff2.setGridSpacing( 0.5 * angstrom )
gridff2.setLJCutoff(lj_cutoff)
gridff2.setCoulombCutoff(coul_cutoff)
gridff2.setShiftElectrostatics(True)
#gridff2.setUseAtomisticCutoff(True)
#gridff2.setUseReactionField(True)
gridff2.add( swapwaters, MGIdx(0) )
gridff2.addFixedAtoms(waters.molecules())
gridff2.setSpace( Cartesian() )
testff = TestFF()
testff.add( swapwaters.molecules() )
testff.addFixedAtoms(waters.molecules())
testff.setCutoff(coul_cutoff, lj_cutoff)
cljff = InterGroupCLJFF("cljff")
cljff.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff.add(swapwaters, MGIdx(0))
cljff.add(waters, MGIdx(1))
cljff.setShiftElectrostatics(True)
#cljff.setUseAtomisticCutoff(True)
#cljff.setUseReactionField(True)
cljff.setSpace( Cartesian() )
cljff2 = InterCLJFF("cljff2")
cljff2.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff2.add(waters)
cljff2.setShiftElectrostatics(True)
cljff2.setSpace( Cartesian() )
print(gridff.energies())
print(gridff2.energies())
print("
|
\nEnergies")
print(gridff.energies())
print(gridff2.energies())
t = QTime()
t.start()
nrgs = cljff.energies()
ms = t.elapsed()
print(cljff.energies())
print("Took %d ms" % ms)
testff.calculateEnergy()
t.start()
nrgs = cljff2.energies()
ms = t.elapsed()
print("\nExact compare")
print(cljff2.energies())
print("Took %d ms" % ms)
|
nikesh-mahalka/cinder
|
cinder/volume/drivers/dothill/dothill_client.py
|
Python
|
apache-2.0
| 14,785
| 0
|
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import math
import time
from lxml import etree
from oslo_log import log as logging
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
self.ssl_verify = ssl_verify
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
Raises a DotHillRequestError error if the return code is not 0
or if the return code is None.
"""
# Get the return code for the operation, raising an exception
# if it is not present.
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if not return_code:
raise exception.DotHillRequestError(message="No status found")
# If no error occurred, just return.
if return_code == '0':
return
# Format a message for the status code.
msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"),
return_code)
raise exception.DotHillRequestError(message=msg)
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
LOG.debug("DotHill Request URL: %s", url)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers, verify=self.ssl_verify)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
return int(math.ceil(float(size) * 512 / (10 ** 9)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
|
for prop in obj.iter("PROPERTY")
|
if prop.get('name') in
|
tensorflow/probability
|
tensorflow_probability/python/distributions/moyal_test.py
|
Python
|
apache-2.0
| 10,137
| 0.002861
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Moyal."""
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
class _MoyalTest(object):
def make_tensor(self, x):
x = tf.cast(x, self.dtype)
return tf1.placeholder_with_default(
x, shape=x.shape if self.use_static_shape else None)
def testMoyalShape(self):
loc = np.array([3.0] * 5, dtype=self.dtype)
scale = np.array([3.0] * 5, dtype=self.dtype)
moyal = tfd.Moyal(loc=loc, scale=scale, validate_args=True)
self.assertEqual((5,), self.evaluate(moyal.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([5]), moyal.batch_shape)
self.assertAllEqual([], self.evaluate(moyal.event_shape_tensor()))
self.assertEqual(tf.TensorShape([]), moyal.event_shape)
def testInvalidScale(self):
scale = [-.01, 0., 2.]
with self.assertRaisesOpError('Argument `scale` must be positive.'):
moyal = tfd.Moyal(loc=0., scale=scale, validate_args=True)
self.evaluate(moyal.mean())
scale = tf.Variable([.01])
self.evaluate(scale.initializer)
moyal = tfd.Moyal(loc=0., scale=scale, validate_args=True)
self.assertIs(scale, moyal.scale)
self.evaluate(moyal.mean())
with tf.control_dependencies([scale.assign([-.01])]):
with self.assertRaisesOpError('Argument `scale` must be positive.'):
self.evaluate(moyal.mean())
def testMoyalLogPdf(self):
batch_size = 6
loc = np.array([0.] * batch_size, dtype=self.dtype)
scale = np.array([3.] * batch_size, dtype=self.dtype)
x = np.array([2., 3., 4., 5., 6., 7.], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_pdf = moyal.log_prob(self.make_tensor(x))
self.assertAllClose(
stats.moyal.logpdf(x, loc=loc, scale=scale),
self.evaluate(log_pdf))
pdf = moyal.prob(x)
self.assertAllClose(
stats.moyal.pdf(x, loc=loc, scale=scale), self.evaluate(pdf))
def testMoyalLogPdfMultidimensional(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=self.dtype).T
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_pdf = moyal.log_prob(self.make_tensor(x))
self.assertAllClose(
        self.evaluate(log_pdf), stats.moyal.logpdf(x, loc=loc, scale=scale))
    pdf = moyal.prob(self.make_tensor(x))
self.assertAllClose(
self.evaluate(pdf), stats.moyal.pdf(x, loc=loc, scale=scale))
def testMoyalCDF(self):
batch_size = 6
loc = np.array([0.] * batch_size, dtype=self.dtype)
scale = np.array([3.] * batch_size, dtype=self.dtype)
x = np.array([2., 3., 4., 5., 6., 7.], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_cdf = moyal.log_cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(log_cdf), stats.moyal.logcdf(x, loc=loc, scale=scale))
cdf = moyal.cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(cdf), stats.moyal.cdf(x, loc=loc, scale=scale))
def testMoyalCdfMultidimensional(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=self.dtype).T
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
log_cdf = moyal.log_cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(log_cdf),
stats.moyal.logcdf(x, loc=loc, scale=scale))
cdf = moyal.cdf(self.make_tensor(x))
self.assertAllClose(
self.evaluate(cdf),
stats.moyal.cdf(x, loc=loc, scale=scale))
def testMoyalMean(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.mean()),
stats.moyal.mean(loc=loc, scale=scale))
def testMoyalVariance(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.variance()),
stats.moyal.var(loc=loc, scale=scale))
def testMoyalStd(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.stddev()),
stats.moyal.std(loc=loc, scale=scale))
def testMoyalMode(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0], dtype=self.dtype)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
self.assertAllClose(self.evaluate(moyal.mode()), self.evaluate(moyal.loc))
def testMoyalSample(self):
loc = self.dtype(4.0)
scale = self.dtype(1.0)
n = int(3e5)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
samples = moyal.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
stats.moyal.mean(loc=loc, scale=scale),
sample_values.mean(), rtol=.01)
self.assertAllClose(
stats.moyal.var(loc=loc, scale=scale),
sample_values.var(), rtol=.01)
def testMoyalSampleMultidimensionalMean(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0, 0.8, 0.5], dtype=self.dtype)
n = int(2e5)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
samples = moyal.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
# TODO(b/157561663): Remove the masking once tf.math.special.erfcinv exists.
sample_values = np.ma.masked_invalid(sample_values)
self.assertAllClose(
stats.moyal.mean(loc=loc, scale=scale),
sample_values.mean(axis=0),
rtol=.03,
atol=0)
def testMoyalSampleMultidimensionalVar(self):
batch_size = 6
loc = np.array([[2.0, 4.0, 5.0]] * batch_size, dtype=self.dtype)
scale = np.array([1.0, 0.8, 0.5], dtype=self.dtype)
n = int(1e5)
moyal = tfd.Moyal(
loc=self.make_tensor(loc),
scale=self.make_tensor(scale),
validate_args=True)
samples = moyal.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
    # TODO(b/157561663): Remove the masking once tf.math.special.erfcinv exists.
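Editor's note: a standalone sketch of the agreement these tests exercise, namely that tfd.Moyal matches scipy.stats.moyal for a shared loc/scale. It assumes TensorFlow Probability and SciPy are installed; the values are illustrative only.

import numpy as np
from scipy import stats
import tensorflow_probability as tfp

tfd = tfp.distributions

loc, scale = 2.0, 1.5
dist = tfd.Moyal(loc=loc, scale=scale, validate_args=True)
x = np.linspace(-1.0, 8.0, 7).astype(np.float32)  # float32 to match the distribution dtype

# Density, CDF and mean should line up with SciPy's Moyal implementation.
np.testing.assert_allclose(dist.prob(x).numpy(),
                           stats.moyal.pdf(x, loc=loc, scale=scale), rtol=1e-4)
np.testing.assert_allclose(dist.cdf(x).numpy(),
                           stats.moyal.cdf(x, loc=loc, scale=scale), rtol=1e-4)
np.testing.assert_allclose(dist.mean().numpy(),
                           stats.moyal.mean(loc=loc, scale=scale), rtol=1e-4)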
martynbristow/gabbi-examples | test_server.py | Python | mit | 367 | 0.016349
""" Unit
Tests for the SimpleHTTPServer
"""
import mock
import unittest
class TestHTTPServerHandler(unittest.TestCase):
"""
"""
def setUp(self):
self.handler = mock.Mock()
def test_do_GET(self):
pass
def test_do_POST(self):
pass
def tearDown(self):
self.handler()
if __name__ == "__main__":
    unittest.main()
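Editor's sketch (not part of the repository): one way the empty test_do_GET stub above could be made concrete in Python 3, by serving the current directory on an ephemeral port and issuing a real GET. The class name and port handling are assumptions.

import threading
import unittest
import urllib.request
from http.server import HTTPServer, SimpleHTTPRequestHandler

class TestSimpleHTTPGet(unittest.TestCase):
    def setUp(self):
        # Port 0 asks the OS for any free port.
        self.server = HTTPServer(('127.0.0.1', 0), SimpleHTTPRequestHandler)
        self.thread = threading.Thread(target=self.server.serve_forever, daemon=True)
        self.thread.start()

    def test_do_GET(self):
        port = self.server.server_address[1]
        with urllib.request.urlopen('http://127.0.0.1:%d/' % port) as resp:
            self.assertEqual(resp.status, 200)

    def tearDown(self):
        self.server.shutdown()
        self.server.server_close()

if __name__ == '__main__':
    unittest.main()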
krishnakantkumar0/Simple-Python | 13.py | Python | gpl-3.0 | 147 | 0.040816
#a=[int(x) for x in input().split()]
#print (a)
x=5
y=10
b=[int(y) for y in input().split()]
#a=[int(x) for x in input().split()]
dir(__builtins__)
JaeGyu/PythonEx_1 | 20170106.py | Python | mit | 455 | 0.004728
a = "python"
print(a*2)
try:
    print(a[-10])
except IndexError as e:
    print("Index out of range.")
print(e)
print(a[0:4])
print(a[1:-2])
# %-10s left-aligns "hi" in a field 10 characters wide
print("%-10sjane." % "hi")
b = "Python is best choice."
print(b.find("b"))
print(b.find("B"))
try:
print(b.index("B"))
except ValueError as e:
print(e)
c = "hi"
print(c.upper())
a = " hi"
print("kk",a.lstrip())
a = " hi "
print(a.strip())
SmartElect/SmartElect | rollgen/generate_pdf.py | Python | apache-2.0 | 5,841 | 0.003938
# 3rd party imports
from reportlab.platypus import Image, Paragraph, PageBreak, Table, Spacer
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
# Django imports
from django.conf import settings
# Project imports
from .arabic_reshaper import reshape
from .pdf_canvas import NumberedCanvas, getArabicStyle, getHeaderStyle, getTableStyle, \
get_hnec_logo_fname, drawHnecLogo
from .strings import STRINGS
from .utils import chunker, format_name, CountingDocTemplate, build_copy_info, \
truncate_center_name, out_of_disk_space_handler_context
from libya_elections.constants import MALE, FEMALE
def generate_pdf(filename, center, voter_roll, gender, center_book=False):
# filename: the file to which the PDF will be written
# center: a data_pull.Center instance
# voter_roll: list of registration dicts --
# {national_id, first_name, father_name, grandfather_name, family_name, gender}
# gender: one of the MALE/FEMALE constants. UNISEX is not valid.
# center_book: ???
#
# separates by gender code using one of the constants in utils.Gender
# sorts by name fields in query
# assembles display string from parts
# writes to filename
#
# returns number of pages in the PDF
if gender not in (MALE, FEMALE):
raise ValueError("generate_pdf() gender must be MALE or FEMALE")
# set styles
styles = getArabicStyle()
# get strings
mf_string = STRINGS['female'] if (gender == FEMALE) else STRINGS['male']
cover_string = STRINGS['center_book_cover'] if center_book else STRINGS['center_list_cover']
header_string = STRINGS['center_book_header'] if center_book else STRINGS['center_list_header']
# cover page
center_name = reshape(center.name)
template = '%s: %s / %s'
subconstituency_name = reshape(center.subconstituency.name_arabic)
params = (STRINGS['subconstituency_name'], center.subconstituency.id, subconstituency_name)
subconstituency = template % params
center_info = {
'gender': '%s: %s' % (STRINGS['gender'], mf_string),
'number': '%s: %d' % (STRINGS['center_number'], center.center_id),
'name': '%s: %s' % (STRINGS['center_name'], center_name),
'name_trunc': '%s: %s' % (STRINGS['center_name'], truncate_center_name(center_name)),
'subconstituency': subconstituency,
'copy_info': build_copy_info(center),
}
# create document
doc = CountingDocTemplate(filename, pagesize=A4, topMargin=1 * cm, bottomMargin=1 * cm,
leftMargin=1.5 * cm, rightMargin=2.54 * cm)
# elements, cover page first
with open(get_hnec_logo_fname(), 'rb') as hnec_f:
elements = [
Image(hnec_f, width=10 * cm, height=2.55 * cm),
Spacer(48, 48),
Paragraph(cover_string, styles['Title']),
Spacer(18, 18),
Paragraph(center_info['gender'], styles['CoverInfo-Bold']),
Paragraph(center_info['number'], styles['CoverInfo']),
Paragraph(center_info['name'], styles['CoverInfo']),
Paragraph(center_info['copy_info'], styles['CoverInfo']),
Paragraph(center_info['subconstituency'], styles['CoverInfo']),
PageBreak(),
]
# Focus on one specific gender.
voter_roll = [voter for voter in voter_roll if voter.gender == gender]
    # We wrap the page header in a table because we want the header's gray background to extend
# margin-to-margin and that's easy to do with a table + background color. It's probably
# possible with Paragraphs alone, but I'm too lazy^w busy to figure out how.
# It's necessary to wrap the table cell text in Paragraphs to ensure the base text direction
# is RTL. See https://github.com/hnec-vr/libya-elections/issues/1197
para_prefix = Paragraph(STRINGS['center_header_prefix'], styles['InnerPageHeader'])
para_header = Paragraph(header_string, styles['InnerPageHeader'])
page_header = Table([[para_prefix], [para_header]], 15 * cm, [16, 24])
page_header.setStyle(getHeaderStyle())
n_pages = 0
for page in chunker(voter_roll, settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION):
n_pages += 1
elements.append(page_header)
elements += [Paragraph(center_info['gender'], styles['CenterInfo-Bold']),
Paragraph(center_info['number'], styles['CenterInfo']),
Paragraph(center_info['name_trunc'], styles['CenterInfo']),
]
elements.append(Spacer(10, 10))
# The contents of each table cell are wrapped in a Paragraph to set the base text
# direction.
# See https://github.com/hnec-vr/libya-elections/issues/1197
data = [[Paragraph(reshape(format_name(voter)), styles['TableCell'])] for voter in page]
# Insert header before the data.
data.insert(0, [Paragraph(STRINGS['the_names'], styles['TableCell'])])
table = Table(data, 15 * cm, 0.825 * cm)
table.setStyle(getTableStyle())
elements.append(table)
elements.append(Paragraph(mf_string, styles['PageBottom']))
elements.append(PageBreak())
if not n_pages:
# When there are no pages (==> no registrants for this gender), we need to emit a page
# that states that.
elements.append(page_header)
key = 'no_male_registrants' if gender == MALE else 'no_female_registrants'
elements.append(Paragraph(STRINGS[key], styles['BlankPageNotice']))
with out_of_disk_space_handler_context():
doc.build(elements, canvasmaker=NumberedCanvas, onLaterPages=drawHnecLogo)
return doc.n_pages
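Editor's note: the pagination loop above leans on a chunker() helper imported from .utils, whose body is not shown in this record. A generic equivalent with the observed call shape (sequence plus page size, yielding successive pages) might look like the following; this is an assumption, not the project's actual implementation.

def chunker(items, chunk_size):
    """Yield successive chunk_size-sized lists from items."""
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

# Example: 7 voters at 3 registrations per page -> pages of 3, 3 and 1.
pages = list(chunker(list(range(7)), 3))
assert pages == [[0, 1, 2], [3, 4, 5], [6]]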
Geosyntec/python-tidegates | tidegates/__init__.py | Python | bsd-3-clause | 67 | 0
from .analysis import *
from .toolbox import *
from . import utils
pantsbuild/pex | pex/vendor/_vendored/setuptools/setuptools/archive_util.py | Python | apache-2.0 | 6,730 | 0.000594
"""Utilities for extracting common archive formats"""
import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from distutils.errors import DistutilsError
if "__PEX_UNVENDORED__" in __import__("os").environ:
from pkg_resources import ensure_directory # vendor:skip
else:
from pex.third_party.pkg_resources import ensure_directory
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
class UnrecognizedFormat(DistutilsError):
"""Couldn't recognize the archive type"""
def default_filter(src, dst):
"""The default progress/filter callback; returns True for all files"""
return dst
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
drivers=None):
"""Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
`progress_filter` is a function taking two arguments: a source path
internal to the archive ('/'-separated), and a filesystem path where it
will be extracted. The callback must return the desired extract path
(which may be the same as the one passed in), or else ``None`` to skip
that file or directory. The callback can thus be used to report on the
progress of the extraction, as well as to filter the items extracted or
alter their extraction paths.
`drivers`, if supplied, must be a non-empty sequence of functions with the
same signature as this function (minus the `drivers` argument), that raise
``UnrecognizedFormat`` if they do not support extracting the designated
archive type. The `drivers` are tried in sequence until one is found that
does not raise an error, or until all are exhausted (in which case
``UnrecognizedFormat`` is raised). If you do not supply a sequence of
drivers, the module's ``extraction_drivers`` constant will be used, which
means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
order.
"""
for driver in drivers or extraction_drivers:
try:
driver(filename, extract_dir, progress_filter)
except UnrecognizedFormat:
continue
else:
return
else:
raise UnrecognizedFormat(
"Not a recognized archive type: %s" % filename
)
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
""""Unpack" a directory, using the same interface as for archives
Raises ``UnrecognizedFormat`` if `filename` is not a directory
"""
if not os.path.isdir(filename):
raise UnrecognizedFormat("%s is not a directory" % filename)
paths = {
filename: ('', extract_dir),
}
for base, dirs, files in os.walk(filename):
src, dst = paths[base]
for d in dirs:
paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
for f in files:
target = os.path.join(dst, f)
target = progress_filter(src + f, target)
if not target:
# skip non-files
continue
ensure_directory(target)
f = os.path.join(base, f)
shutil.copyfile(f, target)
shutil.copystat(f, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
if not zipfile.is_zipfile(filename):
raise UnrecognizedFormat("%s is not a zip file" % (filename,))
with zipfile.ZipFile(filename) as z:
for info in z.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name.split('/'):
continue
target = os.path.join(extract_dir, *name.split('/'))
target = progress_filter(name, target)
if not target:
continue
if name.endswith('/'):
# directory
ensure_directory(target)
else:
# file
ensure_directory(target)
                    data = z.read(info.filename)
with open(target, 'wb') as f:
f.write(data)
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack tar/tar.gz/tar.bz2 `filename`
    to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise UnrecognizedFormat(
"%s is not a compressed or uncompressed tar file" % (filename,)
)
with contextlib.closing(tarobj):
# don't do any chowning!
tarobj.chown = lambda *args: None
for member in tarobj:
name = member.name
# don't extract absolute paths or ones with .. in them
if not name.startswith('/') and '..' not in name.split('/'):
prelim_dst = os.path.join(extract_dir, *name.split('/'))
# resolve any links and to extract the link targets as normal
# files
while member is not None and (member.islnk() or member.issym()):
linkpath = member.linkname
if member.issym():
base = posixpath.dirname(member.name)
linkpath = posixpath.join(base, linkpath)
linkpath = posixpath.normpath(linkpath)
member = tarobj._getmember(linkpath)
if member is not None and (member.isfile() or member.isdir()):
final_dst = progress_filter(name, prelim_dst)
if final_dst:
if final_dst.endswith(os.sep):
final_dst = final_dst[:-1]
try:
# XXX Ugh
tarobj._extract_member(member, final_dst)
except tarfile.ExtractError:
# chown/chmod/mkfifo/mknode/makedev failed
pass
return True
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
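Editor's sketch of the progress_filter contract documented above: the callback receives the archive-internal path and the proposed destination, and returns the destination to extract (possibly altered) or None to skip that entry. The example builds a throwaway zip and imports unpack_archive from the regular, non-vendored setuptools, which is an assumption about the environment.

import os
import tempfile
import zipfile

from setuptools.archive_util import unpack_archive  # same module as vendored above

def skip_hidden(src, dst):
    """Return the extract path, or None to skip dotfiles."""
    return None if os.path.basename(src).startswith('.') else dst

with tempfile.TemporaryDirectory() as tmp:
    archive = os.path.join(tmp, 'demo.zip')
    with zipfile.ZipFile(archive, 'w') as zf:
        zf.writestr('pkg/data.txt', 'hello')
        zf.writestr('pkg/.secret', 'skip me')
    out = os.path.join(tmp, 'out')
    unpack_archive(archive, out, progress_filter=skip_hidden)
    assert os.path.exists(os.path.join(out, 'pkg', 'data.txt'))
    assert not os.path.exists(os.path.join(out, 'pkg', '.secret'))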
python-astrodynamics/astrodynamics | tests/test_constants.py | Python | mit | 2,079 | 0.000962
# coding: utf-8
# These tests are taken from astropy, as with the astrodynamics.constant.Constant
# class. It retains the original license (see licenses/ASTROPY_LICENSE.txt)
from __future__ import absolute_import, division, print_function
import copy
import astropy.units as u
from astropy.units import Quantity
import astrodynamics.constants as const
from astrodynamics.constants import J2, Constant
def test_units():
"""Confirm that none of the constants defined in astrodynamics have invalid
units.
"""
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
copied = copy.deepcopy(J2)
assert copied == J2
copied = copy.copy(J2)
assert copied == J2
def test_view():
"""Check that Constant and Quantity views can be taken."""
x = J2
x2 = x.view(Constant)
assert x2 == x
assert x2.value == x.value
# make sure it has the necessary attributes and they're not blank
assert x2.uncertainty
assert x2.name == x.name
assert x2.reference == x.reference
assert x2.unit == x.unit
q1 = x.view(Quantity)
assert q1 == x
assert q1.value == x.value
assert type(q1) is Quantity
assert not hasattr(q1, 'reference')
q2 = Quantity(x)
assert q2 == x
    assert q2.value == x.value
assert type(q2) is Quantity
assert not hasattr(q2, 'reference')
x3 = Quantity(x, subok=True)
assert x3 == x
    assert x3.value == x.value
# make sure it has the necessary attributes and they're not blank
assert x3.uncertainty
assert x3.name == x.name
assert x3.reference == x.reference
assert x3.unit == x.unit
x4 = Quantity(x, subok=True, copy=False)
assert x4 is x
def test_repr():
a = Constant('the name', value=1, unit='m2', uncertainty=0.1, reference='me')
s = ("Constant(name='the name', value=1, unit='m2', uncertainty=0.1, "
"reference='me')")
assert repr(a) == s
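Editor's sketch mirroring the constructor signature exercised by test_repr above. It assumes astrodynamics and astropy are installed; the numeric value is the standard-gravity figure and the reference string is made up.

import astropy.units as u
from astrodynamics.constants import Constant

g0 = Constant('standard gravity', value=9.80665, unit='m / s2',
              uncertainty=0.0, reference='illustrative entry')

# A Constant is a Quantity subclass, so normal unit arithmetic applies.
weight = 5 * u.kg * g0
print(weight.to(u.N))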
rsignell-usgs/notebook | CSW/data.ioos.us-pycsw.py | Python | mit | 2,570 | 0.01323
# coding: utf-8
# # Query `apiso:ServiceType`
# In[43]:
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import numpy as np
# The GetCaps request for these services looks like this:
# http://catalog.data.gov/csw-all/csw?SERVICE=CSW&VERSION=2.0.2&REQUEST=GetCapabilities
# In[56]:
endpoint = 'http://data.ioos.us/csw' # FAILS apiso:ServiceType
#endpoint = 'http://catalog.data.gov/csw-all'  # FAILS apiso:ServiceType
#endpoint = 'http://geoport.whoi.edu/csw' # SUCCEEDS apiso:ServiceType
csw = CatalogueServiceWeb(endpoint,timeout=60)
print csw.version
# In[57]:
csw.get_operation_by_name('GetRecords').constraints
# Search first for records containing the text "COAWST" and "experimental".
# In[45]:
val = 'coawst'
filter1 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [ filter1 ]
# In[46]:
val = 'experimental'
filter2 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2])]
# In[47]:
csw.getrecords2(constraints=filter_list,maxrecords=100,esn='full')
print len(csw.records.keys())
for rec in list(csw.records.keys()):
print csw.records[rec].title
# Now let's print out the references (service endpoints) to see what types of services are available
# In[48]:
choice=np.random.choice(list(csw.records.keys()))
print(csw.records[choice].title)
csw.records[choice].references
# In[49]:
csw.records[choice].xml
# We see that the `OPeNDAP` service is available, so let's see if we can add that to the query, returning only datasets that have text "COAWST" and "experimental" and that have an "opendap" service available.
#
# We should get the same number of records, as all COAWST records have OPeNDAP service endpoints. If we get no records, something is wrong with the CSW server.
# In[50]:
val = 'OPeNDAP'
filter3 = fes.PropertyIsLike(propertyname='apiso:ServiceType',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2, filter3])]
csw.getrecords2(constraints=filter_list, maxrecords=1000)
# In[51]:
print(len(csw.records.keys()))
for rec in list(csw.records.keys()):
print('title:'+csw.records[rec].title)
print('identifier:'+csw.records[rec].identifier)
print('modified:'+csw.records[rec].modified)
print(' ')
# In[53]:
print(csw.request)
# In[ ]:
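Editor's sketch (not from the notebook): the same filter-composition pattern with a bounding-box constraint added via fes.BBox. The endpoint and coordinates are illustrative, and running it needs network access to the CSW server.

from owslib.csw import CatalogueServiceWeb
from owslib import fes

endpoint = 'http://data.ioos.us/csw'
csw = CatalogueServiceWeb(endpoint, timeout=60)

kw = fes.PropertyIsLike(propertyname='apiso:AnyText', literal='*coawst*',
                        escapeChar='\\', wildCard='*', singleChar='?')
# [minx, miny, maxx, maxy], roughly the US east coast.
bbox = fes.BBox([-77.0, 34.0, -70.0, 42.0])

csw.getrecords2(constraints=[fes.And([kw, bbox])], maxrecords=20, esn='full')
for rec in csw.records.values():
    print(rec.title)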
chipx86/reviewboard | reviewboard/webapi/tests/test_remote_repository.py | Python | mit | 5,858 | 0
from __future__ import unicode_literals
import json
from django.utils import six
from kgb import SpyAgency
from reviewboard.hostingsvcs.github import GitHub
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.repository import RemoteRepository
from reviewboard.hostingsvcs.utils.paginator import APIPaginator
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
remote_repository_item_mimetype,
remote_repository_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_remote_repository_item_url,
get_remote_repository_list_url)
def _compare_item(self, item_rsp, remote_repository):
self.assertEqual(item_rsp['id'], remote_repository.id)
self.assertEqual(item_rsp['name'], remote_repository.name)
self.assertEqual(item_rsp['owner'], remote_repository.owner)
self.assertEqual(item_rsp['scm_type'], remote_repository.scm_type)
self.assertEqual(item_rsp['path'], remote_repository.path)
self.assertEqual(item_rsp['mirror_path'], remote_repository.mirror_path)
class RemoteRepositoryTestPaginator(APIPaginator):
def __init__(self, results):
self.results = results
super(RemoteRepositoryTestPaginator, self).__init__(client=None,
url='')
def fetch_url(self, url):
return {
'data': self.results,
}
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(SpyAgency, BaseWebAPITestCase):
"""Testing the RemoteRepositoryResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'hosting-service-accounts/<id>/remote-repositories/'
resource = resources.remote_repository
basic_get_use_admin = True
compare_item = _compare_item
def setup_http_not_allowed_list_test(self, user):
account = HostingServiceAccount.objects.create(service_name='github',
username='bob')
return get_remote_repository_list_url(account)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
account = HostingServiceAccount.objects.create(
service_name='github',
username='bob',
local_site=self.get_local_site_or_none(name=local_site_name),
data=json.dumps({
'authorization': {
'token': '123',
},
}))
service = account.service
remote_repositories = [
RemoteRepository(service,
repository_id='123',
name='repo1',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo1',
mirror_path='https://example.com/repo1'),
RemoteRepository(service,
repository_id='456',
name='repo2',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo2',
mirror_path='https://example.com/repo2'),
]
paginator = RemoteRepositoryTestPaginator(remote_repositories)
self.spy_on(GitHub.get_remote_repositories,
owner=GitHub,
call_fake=lambda *args, **kwargs: paginator)
return (get_remote_repository_list_url(account, local_site_name),
remote_repository_list_mimetype,
remote_repositories)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(SpyAgency, BaseWebAPITestCase):
"""Testing the RemoteRepositoryResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'hosting-service-accounts/<id>/remote-repositories/<id>/'
resource = resources.remote_repository
basic_get_use_admin = True
compare_item = _compare_item
def setup_http_not_allowed_item_test(self, user):
account = HostingServiceAccount.objects.create(service_name='github',
username='bob')
remote_repository = RemoteRepository(
account.service,
repository_id='123',
name='repo1',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo1')
return get_remote_repository_item_url(remote_repository)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
account = HostingServiceAccount.objects.create(
service_name='github',
username='bob',
            local_site=self.get_local_site_or_none(name=local_site_name),
data=json.dumps({
'authorization': {
'token': '123',
},
}))
remote_repository = RemoteRepository(
account.service,
repository_id='123',
name='repo1',
owner='bob',
scm_type='Git',
path='ssh://example.com/repo1',
mirror_path='https://example.com/repo1')
self.spy_on(GitHub.get_remote_repository,
owner=GitHub,
call_fake=lambda *args, **kwargs: remote_repository)
return (get_remote_repository_item_url(remote_repository,
local_site_name),
remote_repository_item_mimetype,
remote_repository)
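Editor's sketch: a self-contained illustration of the kgb call_fake pattern these tests use to stub out GitHub.get_remote_repositories. The Service class here is a made-up stand-in, not Review Board code.

import unittest

from kgb import SpyAgency


class Service(object):
    def fetch(self):
        return 'real data'


class FakeCallExample(SpyAgency, unittest.TestCase):
    def test_fetch_is_faked(self):
        # Replace the method for the duration of the test, as above.
        self.spy_on(Service.fetch, owner=Service,
                    call_fake=lambda *args, **kwargs: 'fake data')
        self.assertEqual(Service().fetch(), 'fake data')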
betrisey/home-assistant | homeassistant/components/media_player/sonos.py | Python | mit | 20,355 | 0
"""
Support to interface with Sonos players (via SoCo).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.sonos/
"""
import datetime
import logging
from os import path
import socket
import urllib
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, STATE_OFF,
ATTR_ENTITY_ID)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['SoCo==0.12']
_LOGGER = logging.getLogger(__name__)
# The soco library is excessively chatty when it comes to logging and
# causes a LOT of spam in the logs due to making a http connection to each
# speaker every 10 seconds. Quiet it down a bit to just actual problems.
_SOCO_LOGGER = logging.getLogger('soco')
_SOCO_LOGGER.setLevel(logging.ERROR)
_REQUESTS_LOGGER = logging.getLogger('requests')
_REQUESTS_LOGGER.setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA |\
SUPPORT_SEEK | SUPPORT_CLEAR_PLAYLIST | SUPPORT_SELECT_SOURCE
SERVICE_GROUP_PLAYERS = 'sonos_group_players'
SERVICE_UNJOIN = 'sonos_unjoin'
SERVICE_SNAPSHOT = 'sonos_snapshot'
SERVICE_RESTORE = 'sonos_restore'
SERVICE_SET_TIMER = 'sonos_set_sleep_timer'
SERVICE_CLEAR_TIMER = 'sonos_clear_sleep_timer'
SUPPORT_SOURCE_LINEIN = 'Line-in'
SUPPORT_SOURCE_TV = 'TV'
# Service call validation schemas
ATTR_SLEEP_TIME = 'sleep_time'
SONOS_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
SONOS_SET_TIMER_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_SLEEP_TIME): vol.All(vol.Coerce(int),
vol.Range(min=0, max=86399))
})
# List of devices that have been registered
DEVICES = []
# pylint: disable=unused-argument, too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Sonos platform."""
import soco
global DEVICES
if discovery_info:
player = soco.SoCo(discovery_info)
        # if device already exists by config
if player.uid in DEVICES:
return True
if player.is_visible:
device = SonosDevice(hass, player)
add_devices([device])
if not DEVICES:
register_services(hass)
DEVICES.append(device)
return True
return False
players = None
hosts = config.get('hosts', None)
if hosts:
        # Support backward compatibility with comma separated list of hosts
# from config
hosts = hosts.split(',') if isinstance(hosts, str) else hosts
players = []
for host in hosts:
players.append(soco.SoCo(socket.gethostbyname(host)))
if not players:
players = soco.discover(interface_addr=config.get('interface_addr',
None))
if not players:
_LOGGER.warning('No Sonos speakers found.')
return False
DEVICES = [SonosDevice(hass, p) for p in players]
add_devices(DEVICES)
register_services(hass)
_LOGGER.info('Added %s Sonos speakers', len(players))
return True
def register_services(hass):
"""Register all services for sonos devices."""
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_GROUP_PLAYERS,
_group_players_service,
descriptions.get(SERVICE_GROUP_PLAYERS),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_UNJOIN,
_unjoin_service,
descriptions.get(SERVICE_UNJOIN),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SNAPSHOT,
_snapshot_service,
descriptions.get(SERVICE_SNAPSHOT),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_RESTORE,
_restore_service,
descriptions.get(SERVICE_RESTORE),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_TIMER,
_set_sleep_timer_service,
descriptions.get(SERVICE_SET_TIMER),
schema=SONOS_SET_TIMER_SCHEMA)
hass.services.register(DOMAIN, SERVICE_CLEAR_TIMER,
_clear_sleep_timer_service,
descriptions.get(SERVICE_CLEAR_TIMER),
schema=SONOS_SCHEMA)
def _apply_service(service, service_func, *service_func_args):
"""Internal func for applying a service."""
entity_ids = service.data.get('entity_id')
if entity_ids:
_devices = [device for device in DEVICES
if device.entity_id in entity_ids]
else:
_devices = DEVICES
for device in _devices:
service_func(device, *service_func_args)
device.update_ha_state(True)
def _group_players_service(service):
"""Group media players, use player as coordinator."""
_apply_service(service, SonosDevice.group_players)
def _unjoin_service(service):
"""Unjoin the player from a group."""
_apply_service(service, SonosDevice.unjoin)
def _snapshot_service(service):
"""Take a snapshot."""
_apply_service(service, SonosDevice.snapshot)
def _restore_service(service):
"""Restore a snapshot."""
_apply_service(service, SonosDevice.restore)
def _set_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.set_sleep_timer,
service.data[ATTR_SLEEP_TIME])
def _clear_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.clear_sleep_timer)
def only_if_coordinator(func):
"""Decorator for coordinator.
    If used as decorator, avoid calling the decorated method if player is not
a coordinator. If not, a grouped speaker (not in coordinator role) will
throw soco.exceptions.SoCoSlaveException.
Also, partially catch exceptions like:
soco.exceptions.SoCoUPnPException: UPnP Error 701 received:
Transition not available from <player ip address>
"""
def wrapper(*args, **kwargs):
"""Decorator wrapper."""
if args[0].is_coordinator:
from soco.exceptions import SoCoUPnPException
try:
func(*args, **kwargs)
except SoCoUPnPException:
_LOGGER.error('command "%s" for Sonos device "%s" '
'not available in this mode',
func.__name__, args[0].name)
else:
_LOGGER.debug('Ignore command "%s" for Sonos device "%s" (%s)',
func.__name__, args[0].name, 'not coordinator')
return wrapper
# pylint: disable=too-many-instance-attributes, too-many-public-methods
# pylint: disable=abstract-method
class SonosDevice(MediaPlayerDevice):
"""Representation of a Sonos device."""
# pylint: disable=too-many-arguments
def __init__(self, hass, player):
"""Initialize the Sonos device."""
from soco.snapshot import Snapshot
self.hass = hass
self.volume_increment = 5
self._player = player
self._speaker_info = None
self._name = None
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
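Editor's sketch: a soco-free illustration of the guard implemented by only_if_coordinator above. The Player class is an assumption used only to show how calls on non-coordinators are skipped.

import functools

def only_if_coordinator(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.is_coordinator:
            return func(self, *args, **kwargs)
        print('Ignoring %s: %s is not the coordinator' % (func.__name__, self.name))
    return wrapper

class Player:
    def __init__(self, name, is_coordinator):
        self.name = name
        self.is_coordinator = is_coordinator

    @only_if_coordinator
    def media_pause(self):
        print('%s paused playback' % self.name)

Player('Kitchen', True).media_pause()    # runs
Player('Bedroom', False).media_pause()   # skipped with a log message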
thumbor/thumbor | tests/handlers/test_base_handler_with_auto_webp.py | Python | mit | 7,701 | 0.00013
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from shutil import which
from unittest.mock import patch
from urllib.parse import quote
from libthumbor import CryptoURL
from preggy import expect
from tornado.testing import gen_test
from tests.handlers.test_base_handler import BaseImagingTestCase
from thumbor.config import Config
from thumbor.context import Context, RequestParameters, ServerParameters
from thumbor.importer import Importer
# pylint: disable=broad-except,abstract-method,attribute-defined-outside-init,line-too-long,too-many-public-methods
# pylint: disable=too-many-lines
class ImageOperationsWithAutoWebPTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY="ACME-SEC")
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which("gifsicle")
return ctx
async def get_as_webp(self, url):
return await self.async_fetch(
url, headers={"Accept": "image/webp,*/*;q=0.8"}
)
@gen_test
async def test_can_auto_convert_jpeg(self):
response = await self.get_as_webp("/unsafe/image.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
    async def test_should_not_convert_animated_gifs_to_webp(self):
        response = await self.get_as_webp("/unsafe/animated.gif")
        expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_gif()
@gen_test
async def test_should_convert_image_with_small_width_and_no_height(self):
response = await self.get_as_webp("/unsafe/0x0:1681x596/1x/image.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_should_convert_monochromatic_jpeg(self):
response = await self.get_as_webp("/unsafe/grayscale.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_should_convert_cmyk_jpeg(self):
response = await self.get_as_webp("/unsafe/cmyk.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_shouldnt_convert_cmyk_jpeg_if_format_specified(self):
response = await self.get_as_webp(
"/unsafe/filters:format(png)/cmyk.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_png()
@gen_test
async def test_shouldnt_convert_cmyk_jpeg_if_gif(self):
response = await self.get_as_webp(
"/unsafe/filters:format(gif)/cmyk.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_gif()
@gen_test
async def test_shouldnt_convert_if_format_specified(self):
response = await self.get_as_webp(
"/unsafe/filters:format(gif)/image.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_gif()
@gen_test
async def test_shouldnt_add_vary_if_format_specified(self):
response = await self.get_as_webp(
"/unsafe/filters:format(webp)/image.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).not_to_include("Vary")
expect(response.body).to_be_webp()
@gen_test
async def test_should_add_vary_if_format_invalid(self):
response = await self.get_as_webp(
"/unsafe/filters:format(asdf)/image.jpg"
)
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@gen_test
async def test_converting_return_etags(self):
response = await self.get_as_webp("/unsafe/image.jpg")
expect(response.headers).to_include("Etag")
class ImageOperationsWithAutoWebPWithResultStorageTestCase(
BaseImagingTestCase
):
def get_request(self, *args, **kwargs):
return RequestParameters(*args, **kwargs)
def get_context(self):
cfg = Config(SECURITY_KEY="ACME-SEC")
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = "thumbor.result_storages.file_storage"
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(
8889, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
ctx = Context(server, cfg, importer)
ctx.request = self.get_request()
ctx.server.gifsicle_path = which("gifsicle")
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
async def get_as_webp(self, url):
return await self.async_fetch(
url, headers={"Accept": "image/webp,*/*;q=0.8"}
)
@patch("thumbor.handlers.Context")
@gen_test
async def test_can_auto_convert_jpeg_from_result_storage(
self, context_mock
): # NOQA
context_mock.return_value = self.context
crypto = CryptoURL("ACME-SEC")
url = crypto.generate(
image_url=quote("http://test.com/smart/image.jpg")
)
self.context.request = self.get_request(url=url, accepts_webp=True)
with open("./tests/fixtures/images/image.webp", "rb") as fixture:
await self.context.modules.result_storage.put(fixture.read())
response = await self.get_as_webp(url)
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
@patch("thumbor.handlers.Context")
@gen_test
async def test_can_auto_convert_unsafe_jpeg_from_result_storage(
self, context_mock
):
context_mock.return_value = self.context
self.context.request = self.get_request(accepts_webp=True)
response = await self.get_as_webp("/unsafe/image.jpg")
expect(response.code).to_equal(200)
expect(response.headers).to_include("Vary")
expect(response.headers["Vary"]).to_include("Accept")
expect(response.body).to_be_webp()
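Editor's note: the behaviour pinned down above is that with AUTO_WEBP enabled thumbor only converts when the request's Accept header allows image/webp, and it then adds "Vary: Accept" so caches keep the variants apart. A thumbor.conf fragment matching the settings these tests build programmatically might look like this (paths are placeholders):

SECURITY_KEY = 'ACME-SEC'
LOADER = 'thumbor.loaders.file_loader'
FILE_LOADER_ROOT_PATH = '/path/to/images'   # placeholder
STORAGE = 'thumbor.storages.no_storage'
AUTO_WEBP = True  # convert to WebP when the client's Accept header permits it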
giosalv/526-aladdin | MachSuite/script/llvm_compile.py | Python | apache-2.0 | 3,257 | 0.023641
#!/usr/bin/env python
import os
import sys
import string
import random
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
kernels = {
'aes-aes' : 'gf_alog,gf_log,gf_mulinv,rj_sbox,rj_xtime,aes_subBytes,aes_addRoundKey,aes_addRoundKey_cpy,aes_shiftRows,aes_mixColumns,aes_expandEncKey,aes256_encrypt_ecb',
'backprop-backprop':'sigmoid,update_layer,update,propagate_error_out,propagate_error_layer,update_weights,propagate_errors,comp_error,backprop',
'bfs-bulk' : 'bfs',
'bfs-queue' : 'bfs',
'kmp-kmp' : 'CPF,kmp',
'fft-strided' : 'fft',
'fft-transpose':'twiddles8,loadx8,loady8,fft1D_512',
'gemm-blocked': 'bbgemm',
'gemm-ncubed' : 'gemm',
'md-grid':'md',
'md-knn':'md_kernel',
'nw-nw' : 'needwun',
'sort-merge' : 'merge,mergesort',
'sort-radix' : 'local_scan,sum_scan,last_step_scan,init,hist,update,ss_sort',
'spmv-crs' : 'spmv',
'spmv-ellpack' : 'ellpack',
'stencil-stencil2d' : 'stencil',
'stencil-stencil3d' : 'stencil3d',
'viterbi-viterbi' : 'viterbi',
}
def main (directory, bench, source):
if not 'TRACER_HOME' in os.environ:
raise Exception('Set TRACER_HOME directory as an environment variable')
if not 'MACH_HOME' in os.environ:
raise Exception('Set MACH_HOME directory as an environment variable')
#id = id_generator()
os.chdir(directory)
obj = source + '.llvm'
opt_obj = source + '-opt.llvm'
executable = source + '-instrumented'
os.environ['WORKLOAD']=kernels[bench]
test = os.getenv('MACH_HOME')+'/common/harness.c'
test_obj = source + '_test.llvm'
source_file = source + '.c'
#for key in os.environ.keys():
# print "%30s %s" % (key,os.environ[key])
print directory
print '======================================================================'
command = 'clang -g -O1 -S -I' + os.environ['ALADDIN_HOME'] + \
' -fno-slp-vectorize -fno-vectorize -fno-unroll-loops ' + \
' -fno-inline -fno-builtin -emit-llvm -o ' + obj + ' ' + source_file
print command
os.system(command)
command = 'clang -g -O1 -S -I' + os.environ['ALADDIN_HOME'] + \
' -fno-slp-vectorize -fno-vectorize -fno-unroll-loops ' + \
' -fno-inline -fno-builtin -emit-llvm -o ' + test_obj + ' ' + test
print command
os.system(command)
command = 'opt -S -load=' + os.getenv('TRACER_HOME') + \
'/full-trace/full_trace.so -fulltrace ' + obj + ' -o ' + opt_obj
print command
os.system(command)
command = 'llvm-link -o full.llvm ' + opt_obj + ' ' + test_obj + ' ' + \
os.getenv('TRACER_HOME') + '/profile-func/trace_logger.llvm'
print command
os.system(command)
command = 'llc -O0 -disable-fp-elim -filetype=asm -o full.s full.llvm'
print command
os.system(command)
command = 'gcc -O0 -fno-inline -o ' + executable + ' full.s -lm -lz'
print command
os.system(command)
command = './' + executable + ' input.data check.data'
print command
os.system(command)
print '======================================================================'
if __name__ == '__main__':
directory = sys.argv[1]
bench = sys.argv[2]
source = sys.argv[3]
print directory, bench, source
main(directory, bench, source)
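Editor's sketch of how this script is typically driven, inferred from its argument handling and environment checks; the install paths, interpreter name and benchmark layout are assumptions, not part of the repository.

import os
import subprocess

env = dict(os.environ,
           TRACER_HOME='/opt/LLVM-Tracer',    # assumed install locations
           ALADDIN_HOME='/opt/aladdin',
           MACH_HOME='/opt/MachSuite')

# Equivalent to: python llvm_compile.py <directory> <benchmark-key> <source basename>
subprocess.call(['python2', 'llvm_compile.py',
                 '/opt/MachSuite/fft/strided', 'fft-strided', 'fft'],
                env=env)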
afaquejam/Linked-List-Problems | Others/FrontBackSplit.py | Python | mit | 1,330 | 0.003008
import Linked_List
import sys
import random
def split_list(lst, a, b):
if lst.length % 2 == 1:
first_length = (lst.length / 2) + 1
else:
first_length = lst.length / 2
list_iterator = lst.head
count = 0
while count < first_length:
a.append(list_iterator.data)
list_iterator = list_iterator.next
count += 1
while list_iterator != None:
b.append(list_iterator.data)
list_iterator = list_iterator.next
lst = Linked_List.LinkedList()
for iterator in range(0, int(sys.argv[1])):
lst.push(random.randint(1, 101))
print "\nOriginal List:"
lst.print_list()
a = Linked_List.LinkedList()
b = Linked_List.LinkedList()
split_list(lst, a, b)
print "\nSplitted List A:"
a.print_list()
print "\nSplitted List B:"
b.print_list()
# Performance
# ------------
#
# * Speed
# The algorithm traverses the original list once and constructs
# both the list. The list construction operation (append) can be implemented with
# O(1) complexity. In a nutshell, the time complexity of this algorithm is
# O(N).
#
# Ideal time complexity for this algorithm?
# O(1). It's all about changing the pointers. However, the limiting factor is
# traversing the list, which is a linear operation.
#
# * Memory
# 2N. Where N is the memory required to store the original list.
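Editor's sketch: the pointer-rewiring approach the performance note above alludes to, written against a tiny standalone Node class (an assumption, not the repo's Linked_List module). It uses the slow/fast runner technique in O(N) time and O(1) extra space; the front half gets the extra node for odd lengths, matching the split above.

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def front_back_split(head):
    """Split a singly linked list into (front, back) halves by rewiring pointers."""
    if head is None or head.next is None:
        return head, None
    slow, fast = head, head.next
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    back = slow.next
    slow.next = None          # terminate the front half
    return head, back

# 1 -> 2 -> 3 -> 4 -> 5 splits into [1, 2, 3] and [4, 5].
head = Node(1, Node(2, Node(3, Node(4, Node(5)))))
front, back = front_back_split(head)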
MeGotsThis/BotGotsThis | lib/helper/textformat.py | Python | gpl-3.0 | 11,881 | 0
import re
from typing import Callable, Dict, List # noqa: F401
FormatText = Callable[[str], str]
ascii: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
r'[\]^_`'
'abcdefghijklmnopqrstuvwxyz'
'{|}~')
upper: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
r'[\]^_`'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'{|}~')
lower: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'abcdefghijklmnopqrstuvwxyz'
r'[\]^_`'
'abcdefghijklmnopqrstuvwxyz'
'{|}~')
full: str = ('''ใ๏ผ๏ผ๏ผ๏ผ๏ผ
๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผใผ๏ผ๏ผ'''
'๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ'
'๏ผ๏ผใ๏ผใ๏ผ๏ผ '
'๏ผก๏ผข๏ผฃ๏ผค๏ผฅ๏ผฆ๏ผง๏ผจ๏ผฉ๏ผช๏ผซ๏ผฌ๏ผญ๏ผฎ๏ผฏ๏ผฐ๏ผฑ๏ผฒ๏ผณ๏ผด๏ผต๏ผถ๏ผท๏ผธ๏ผน๏ผบ'
'๏ผป๏ผผ๏ผฝ๏ผพ๏ผฟ๏ฝ'
             '๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ
๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ๏ฝ'
'๏ฝ๏ฝ๏ฝ๏ฝ')
parenthesized: str = (''' !"#$%&'()*+,-./'''
'0โดโตโถโทโธโนโบโปโผ'
':;<=>?@'
'โโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโต'
r'[\]^_`'
'โโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโต'
'{|}~')
circled: str = (''' !"#$%&'()*+,-./'''
'โชโ โกโขโฃโคโฅโฆโงโจ'
':;<=>?@'
                'โถโทโธโนโบโปโผโฝโพโฟโโโโโโ
โโโโโโโโโโ'
'[\\]^_`'
'โโโโโโโโโโโโโโโโโ โกโขโฃโคโฅโฆโงโจโฉ'
'{|}~')
smallcaps: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
                  'แดสแดแด
แด๊ฐษขสษชแดแดสแดษดแดแดฉQสsแดแดแด แดกxYแดข'
r'[\]^_`'
                  'แดสแดแด
แด๊ฐษขสษชแดแดสแดษดแดแดฉqสsแดแดแด แดกxyแดข'
'{|}~')
upsidedown: str = (''' ยก"#$%โ,()*+โ-./'''
'0123456789'
':;<=>ยฟ@'
'ษqษpวษฦษฅฤฑษพสืษฏuodbษนsสnสสxสz'
r'[\]^_`'
'ษqษpวษฦษฅฤฑษพสืษฏuodbษนsสnสสxสz'
'{|}~')
serifBold: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
                  '๐๐๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ'
'{|}~')
serifItalic: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
                    '๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐โ๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง'
'{|}~')
serifBoldItalic: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
'๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐'
r'[\]^_`'
                        '๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
'{|}~')
sanSerif: str = (''' !"#$%&'()*+,-./'''
'๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ'
':;<=>?@'
'๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น'
r'[\]^_`'
                 '๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
'{|}~')
sanSerifBold: str = (''' !"#$%&'()*+,-./'''
'๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต'
':;<=>?@'
'๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ'
r'[\]^_`'
                     '๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐'
'{|}~')
sanSerifItalic: str = (''' !"#$%&'()*+,-./'''
'๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ'
':;<=>?@'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก'
r'[\]^_`'
'๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป'
'{|}~')
sanSerifBoldItalic: str = (''' !"#$%&'()*+,-./'''
'๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต'
':;<=>?@'
                           '๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ'
'{|}~')
script: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
'๐โฌ๐๐โฐโฑ๐ขโโ๐ฅ๐ฆโโณ๐ฉ๐ช๐ซ๐ฌโ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต'
r'[\]^_`'
               '๐ถ๐ท๐ธ๐นโฏ๐ปโ๐ฝ๐พ๐ฟ๐๐๐๐โด๐
๐๐๐๐๐๐๐๐๐๐'
'{|}~')
scriptBold: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ'
r'[\]^_`'
'๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐'
'{|}~')
fraktur: str = (''' !"#$%&'()*+,-./'''
'0123456789'
':;<=>?@'
                '๐๐
โญ๐๐๐๐โโ๐๐๐๐๐๐๐๐โ๐๐๐๐๐๐๐โจ'
r'[\]^_`'
'๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท'
'{|}~')
frakturBold: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐๐'
':;<=>?@'
                    '๐ฌ๐ญ๐ฎ๐ฏ๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐'
'{|}~')
monospace: str = (''' !"#$%&'()*+,-./'''
'๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ'
':;<=>?@'
                  '๐ฐ๐ฑ๐ฒ๐ณ๐ด๐ต๐ถ๐ท๐ธ๐น๐บ๐ป๐ผ๐ฝ๐พ๐ฟ๐๐๐๐๐๐
๐๐๐๐'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ'
'{|}~')
doubleStruck: str = (''' !"#$%&'()*+,-./'''
'๐๐๐๐๐๐๐๐๐ ๐ก'
':;<=>?@'
'๐ธ๐นโ๐ป๐ผ๐ฝ๐พโ๐๐๐๐๐โ๐โโโ๐๐๐๐๐๐๐โค'
r'[\]^_`'
'๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐ ๐ก๐ข๐ฃ๐ค๐ฅ๐ฆ๐ง๐จ๐ฉ๐ช๐ซ'
'{|}~')
def _createAsciiTo(name: str,
toTable: str) -> FormatText:
table = str.maketrans(ascii, toTable)
def asciiTo(text: str) -> str:
return text.translate(table)
asciiTo.__name__ = name
return asciiTo
to_upper: FormatText = _createAsciiTo('to_upper', upper)
to_lower: FormatText = _createAsciiTo('to_lower', lower)
to_full_width: FormatText = _createAsciiTo('to_full_width', full)
to_parenthesized: FormatText = _createAsciiTo(
'to_parenthesized', parenthesized)
to_circled: FormatText = _createAsciiTo('to_circled', circled)
to_small_caps: FormatText = _createAsciiTo('to_small_caps', smallcaps)
_to_upside_down_reversed: FormatText = _createAsciiTo(
'to_upside_down', upsidedown)
def to_upside_down(text: str) -> str:
return _to_upside_down_reversed(text)[::-1]
to_serif_bold: FormatText = _createAsciiTo('to_serif_bold', serifBold)
to_serif_italic: FormatText = _createAsciiTo('to_serif_italic', serifItalic)
to_serif_bold_italic: FormatText = _createAsciiTo(
'to_serif_bold_italic', serifBoldItalic)
to_sanserif: FormatText = _createAsciiTo('to_sanserif', sanSerif)
to_sanserif_bold: FormatText = _createAsciiTo('to_sanserif_bold', sanSerifBold)
to_sanserif_italic: FormatText = _createAsciiTo(
'to_sanserif_italic', sanSerifItalic)
to_sanserif_bold_italic: FormatText = _createAsciiTo(
'to_sanserif_bold_italic', sanSerifBoldItalic)
to_script: FormatText = _createAsciiTo('to_script', script)
to_script_bold: FormatText = _createAsciiTo('to_script_bold', scriptBold)
to_fraktur: FormatText = _createAsciiTo('to_fraktur', fraktur)
to_fraktur_bold: FormatText = _createAsciiTo('to_fraktur_bold', frakturBold)
to_monospace: FormatText = _createAsciiTo('to_monospace', monospace)
to_double_struck: FormatText = _createAsciiTo('to_double_struck', doubleStruck)
def to_ascii(text: str) -> str:
fromTable: List[str]
fromTable = [full, parenthesized, circled, smallcaps, upsidedown,
serifBold, serifItalic, serifBoldItalic, sanSerif,
sanSerifBold, sanSerifItalic, sanSerifBoldItalic, script,
scriptBold, fraktur, frakturBold, monospace, doubleStruck,
ascii]
toTable: Dict[int, int] = {}
for table in fromTable:
toTable.update(str.maketrans(table, ascii))
return text.translate(toTable)
def format(string: str,
format_: str) -> str:
format_ = format_.lower()
strTable: Dict[str, FormatText] = {
'ascii': to_ascii,
'upper': to_upper,
'lower': to_lower,
'full': to_full_width,
'parenthesized': to_parenthesized,
'circled': to_circled,
'smallcaps': to_small_caps,
'upsidedown': to_upside_down,
'sanserif': to_sanserif,
        'script': to_script,