code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
"""Minimal viewer: show the christine music library tree in a scrollable window."""
from libchristine.Library import library
import gtk

# top-level window: quit the gtk main loop when it is destroyed
window = gtk.Window()
window.connect('destroy', gtk.main_quit)
window.set_size_request(640, 480)
window.set_position(gtk.WIN_POS_CENTER)

# load the last used source ('music') and grab its tree view widget
Library = library()
lastSourceUsed = 'music'
Library.loadLibrary(lastSourceUsed)
TreeView = Library.tv

# embed the tree view in a scrolled window and run the GUI
scroll = gtk.ScrolledWindow()
scroll.add(TreeView)
window.add(scroll)
window.show_all()
gtk.main()
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase.ToonBaseGlobal import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
import HouseGlobals
from toontown.catalog import CatalogItemList
from toontown.catalog import CatalogItem
from toontown.catalog import CatalogSurfaceItem
from toontown.catalog import CatalogWallpaperItem
from toontown.catalog import CatalogFlooringItem
from toontown.catalog import CatalogMouldingItem
from toontown.catalog import CatalogWainscotingItem
from toontown.dna.DNAParser import DNADoor
# Node-name search patterns within the loaded interior model.
# Plugs that fill the window holes, one pattern per window slot:
WindowPlugNames = ('**/windowcut_c*', '**/windowcut_e*')
# The two rooms of the house interior:
RoomNames = ('**/group2', '**/group1')
# Geometry groups, keyed by which catalog surface item decorates them:
WallNames = ('ceiling*', 'wall_side_middle*', 'wall_front_middle*', 'windowcut_*')
MouldingNames = ('wall_side_top*', 'wall_front_top*')
FloorNames = ('floor*',)
WainscotingNames = ('wall_side_bottom*', 'wall_front_bottom*')
BorderNames = ('wall_side_middle*_border', 'wall_front_middle*_border', 'windowcut_*_border')
# Indexed by surface-type number when applying wallpaper items:
WallpaperPieceNames = (WallNames, MouldingNames, FloorNames, WainscotingNames, BorderNames)
class DistributedHouseInterior(DistributedObject.DistributedObject):
    """
    Client-side representation of the inside of an estate house.

    Loads the interior model, decorates walls/floors with the owner's
    wallpaper catalog items and installs the purchased window models.
    """

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        self.houseId = 0
        self.houseIndex = 0
        # NodePath of the loaded interior model; None until setup() runs
        self.interior = None
        self.exteriorWindowsHidden = 0

    def generate(self):
        DistributedObject.DistributedObject.generate(self)

    def announceGenerate(self):
        DistributedObject.DistributedObject.announceGenerate(self)
        self.setup()

    def disable(self):
        # guard: disable() may arrive before setup() created the interior
        if self.interior is not None:
            self.interior.removeNode()
            self.interior = None
        DistributedObject.DistributedObject.disable(self)

    def delete(self):
        self.ignore(self.uniqueName('enterclosetSphere'))
        DistributedObject.DistributedObject.delete(self)

    def setup(self):
        """Load the interior model, build the front door and the window slots."""
        dnaStore = base.cr.playGame.dnaStore
        self.interior = loader.loadModel('phase_5.5/models/estate/tt_m_ara_int_estateHouseA')
        self.interior.reparentTo(render)
        # build the front door from the DNA storage
        doorModelName = 'door_double_round_ur'
        door = dnaStore.findNode(doorModelName)
        door_origin = self.interior.find('**/door_origin')
        door_origin.setHpr(180, 0, 0)
        door_origin.setScale(0.8, 0.8, 0.8)
        door_origin.setPos(door_origin, 0, -0.025, 0)
        doorNP = door.copyTo(door_origin)
        houseColor = HouseGlobals.atticWood
        color = Vec4(houseColor[0], houseColor[1], houseColor[2], 1)
        DNADoor.setupDoor(doorNP, door_origin, door_origin, dnaStore, str(self.houseId), color)
        doorFrame = doorNP.find('door_*_flat')
        doorFrame.setColor(color)
        self.interior.flattenMedium()
        # one (plug, viewBase) pair per potential window position;
        # (None, None) marks a slot with no matching geometry
        self.windowSlots = []
        for plugName in WindowPlugNames:
            plugNodes = self.interior.findAllMatches(plugName)
            if plugNodes.isEmpty():
                self.windowSlots.append((None, None))
            else:
                viewBase = plugNodes[0].getParent().attachNewNode('view')
                viewBase.setTransform(plugNodes[0].getTransform())
                plug = plugNodes[1].getParent().attachNewNode('plug')
                plugNodes.reparentTo(plug)
                plug.flattenLight()
                self.windowSlots.append((plug, viewBase))
        self.windowSlots[0][0].setPosHpr(16.0, -12.0, 5.51, -90, 0, 0)
        self.windowSlots[1][0].setPosHpr(-12.0, 26.0, 5.51, 0, 0, 0)
        self.__colorWalls()
        self.__setupWindows()
        messenger.send('houseInteriorLoaded-%d' % self.zoneId)

    def __colorWalls(self):
        """Apply wallpaper catalog items to every surface of each room."""
        if not self.wallpaper:
            self.notify.info('No wallpaper in interior; clearing.')
            # renamed loop var: the original shadowed the builtin 'str'
            for pattern in WallNames + WainscotingNames:
                nodes = self.interior.findAllMatches('**/%s' % pattern)
                for node in nodes:
                    node.setTextureOff(1)
            return
        numSurfaceTypes = CatalogSurfaceItem.NUM_ST_TYPES
        # '//': number of complete surface-item sets available per room
        numRooms = min(len(self.wallpaper) // numSurfaceTypes, len(RoomNames))
        for room in xrange(numRooms):
            roomName = RoomNames[room]
            roomNode = self.interior.find(roomName)
            if not roomNode.isEmpty():
                for surface in xrange(numSurfaceTypes):
                    slot = room * numSurfaceTypes + surface
                    wallpaper = self.wallpaper[slot]
                    color = wallpaper.getColor()
                    texture = wallpaper.loadTexture()
                    for pattern in WallpaperPieceNames[surface]:
                        nodes = roomNode.findAllMatches('**/%s' % pattern)
                        for node in nodes:
                            if pattern == 'ceiling*':
                                # ceilings get a darkened copy of the wall color
                                r, g, b, a = color
                                scale = 0.66
                                r *= scale
                                g *= scale
                                b *= scale
                                node.setColorScale(r, g, b, a)
                            else:
                                node.setColorScale(*color)
                            node.setTexture(texture, 1)
                        if wallpaper.getSurfaceType() == CatalogSurfaceItem.STWallpaper:
                            color2 = wallpaper.getBorderColor()
                            texture2 = wallpaper.loadBorderTexture()
                            nodes = roomNode.findAllMatches('**/%s_border' % pattern)
                            for node in nodes:
                                node.setColorScale(*color2)
                                node.setTexture(texture2, 1)
        nodes = self.interior.findAllMatches('**/arch*')
        for node in nodes:
            node.setColorScale(*(HouseGlobals.archWood + (1,)))

    def __setupWindows(self):
        """Place the purchased window models at the window slots."""
        if not self.windows:
            self.notify.info('No windows in interior; returning.')
            return
        # NOTE(review): every window item is instanced at every occupied
        # slot here, while hide/showExteriorWindows index slots by
        # item.placement -- confirm this asymmetry is intended.
        for plug, viewBase in self.windowSlots:
            for item in self.windows:
                if plug:
                    plug.hide()
                if viewBase:
                    model = item.loadModel()
                    model.setPos(plug.getPos())
                    model.setHpr(plug.getHpr())
                    model.reparentTo(self.interior)
                    if self.exteriorWindowsHidden:
                        model.findAllMatches('**/outside').stash()

    def hideExteriorWindows(self):
        self.exteriorWindowsHidden = 1
        for item in self.windows:
            plug, viewBase = self.windowSlots[item.placement]
            if viewBase:
                viewBase.findAllMatches('**/outside').stash()

    def showExteriorWindows(self):
        self.exteriorWindowsHidden = 0
        for item in self.windows:
            plug, viewBase = self.windowSlots[item.placement]
            if viewBase:
                viewBase.findAllMatches('**/outside;+s').unstash()

    def setHouseId(self, index):
        self.houseId = index

    def setHouseIndex(self, index):
        self.houseIndex = index

    def setWallpaper(self, items):
        self.wallpaper = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization)
        if self.interior:
            self.__colorWalls()

    def setWindows(self, items):
        self.windows = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization | CatalogItem.WindowPlacement)
        if self.interior:
            self.__setupWindows()

    def testWallpaperCombo(self, wallpaperType, wallpaperColorIndex, borderIndex, borderColorIndex, mouldingType, mouldingColorIndex, flooringType, flooringColorIndex, wainscotingType, wainscotingColorIndex):
        """Debug helper: apply one full wallpaper set to both rooms."""
        wallpaperItem = CatalogWallpaperItem.CatalogWallpaperItem(wallpaperType, wallpaperColorIndex, borderIndex, borderColorIndex)
        mouldingItem = CatalogMouldingItem.CatalogMouldingItem(mouldingType, mouldingColorIndex)
        flooringItem = CatalogFlooringItem.CatalogFlooringItem(flooringType, flooringColorIndex)
        wainscotingItem = CatalogWainscotingItem.CatalogWainscotingItem(wainscotingType, wainscotingColorIndex)
        # the same four items repeated: one set for each of the two rooms
        self.wallpaper = CatalogItemList.CatalogItemList([wallpaperItem,
         mouldingItem,
         flooringItem,
         wainscotingItem,
         wallpaperItem,
         mouldingItem,
         flooringItem,
         wainscotingItem], store=CatalogItem.Customization)
        if self.interior:
            self.__colorWalls()
| ToonTownInfiniteRepo/ToontownInfinite | toontown/estate/DistributedHouseInterior.py | Python | mit | 8,640 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
git vcs support.
refnames in git can be branchnames, hashes, partial hashes, tags. On
checkout, git will disambiguate by checking them in that order, taking
the first that applies
This class aims to provide git for linear centralized workflows. This
means we assume that the only relevant remote is the one named
"origin", and we assume that commits once on origin remain on origin.
A challenge with git is that it has strong reasonable conventions, but
is very permissive about breaking them. E.g. it is possible to name
remotes and branches with names like "refs/heads/master", give
branches and tags the same name, or a valid SHA-ID as name, etc.
Similarly git allows plenty of ways to reference any object, in case
of ambiguities, git attempts to take the most reasonable
disambiguation, and in some cases warns.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import gzip
import dateutil.parser # For parsing date strings
from distutils.version import LooseVersion
from vcstools.vcs_base import VcsClientBase, VcsError
from vcstools.common import sanitized, normalized_rel_path, run_shell_command
class GitError(Exception):
    """Raised when a git subcommand (e.g. fetch) fails unexpectedly."""
    pass
def _git_diff_path_submodule_change(diff, rel_path_prefix):
"""
Parses git diff result and changes the filename prefixes.
"""
if diff is None:
return None
INIT = 0
INDIFF = 1
# small state machine makes sure we never touch anything inside
# the actual diff
state = INIT
result = ""
s_list = [line for line in diff.split(os.linesep)]
subrel_path = rel_path_prefix
for line in s_list:
newline = line
if line.startswith("Entering '"):
state = INIT
submodulepath = line.rstrip("'")[len("Entering '"):]
subrel_path = os.path.join(rel_path_prefix, submodulepath)
continue
if line.startswith("diff --git "):
state = INIT
if state == INIT:
if line.startswith("@@"):
state = INDIFF
else:
if line.startswith("---") and not line.startswith("--- /dev/null"):
newline = "--- " + subrel_path + line[5:]
if line.startswith("+++") and not line.startswith("+++ /dev/null"):
newline = "+++ " + subrel_path + line[5:]
if line.startswith("diff --git"):
# first replacing b in case path starts with a/
newline = line.replace(" b/", " " + subrel_path + "/", 1)
newline = newline.replace(" a/", " " + subrel_path + "/", 1)
if newline != '':
result += newline + '\n'
return result
def _get_git_version():
    """Looks up git version by calling git --version.

    :returns: the version string, e.g. '1.7.10'
    :raises: VcsError if git is not installed or returns
    something unexpected"""
    try:
        exitcode, stdout, _ = run_shell_command('git --version', shell=True)
        if exitcode != 0:
            raise VcsError("git --version returned %s, maybe git is not installed" % (exitcode))
        prefix = 'git version '
        # expected output shape: 'git version <number>'
        if stdout is None or not stdout.startswith(prefix):
            raise VcsError("git --version returned invalid string: '%s'" % stdout)
        return stdout[len(prefix):].strip()
    except VcsError as exc:
        # both failure modes above are re-wrapped with a uniform message
        raise VcsError("Could not determine whether git is installed: %s" % exc)
class GitClient(VcsClientBase):
    def __init__(self, path):
        """
        Create a git client operating on the working copy at *path*.

        :param path: filesystem path of the local working copy
        :raises: VcsError if git not detected
        """
        VcsClientBase.__init__(self, 'git', path)
        # cache the installed git version; several methods branch on it
        self.gitversion = _get_git_version()
@staticmethod
def get_environment_metadata():
metadict = {}
try:
version = _get_git_version()
resetkeep = LooseVersion(version) >= LooseVersion('1.7.1')
submodules = LooseVersion(version) > LooseVersion('1.7')
metadict["features"] = "'reset --keep': %s, submodules: %s" % (resetkeep, submodules)
except VcsError:
version = "No git installed"
metadict["version"] = version
return metadict
def get_url(self):
"""
:returns: GIT URL of the directory path (output of git info command), or None if it cannot be determined
"""
if self.detect_presence():
cmd = "git config --get remote.origin.url"
_, output, _ = run_shell_command(cmd, shell=True, cwd=self._path)
return output.rstrip()
return None
@staticmethod
def static_detect_presence(path):
# There is a proposed implementation of detect_presence which might be
# more future proof, but would depend on parsing the output of git
# See: https://github.com/vcstools/vcstools/pull/10
return os.path.exists(os.path.join(path, '.git'))
    def checkout(self, url, version=None, verbose=False, shallow=False, timeout=None):
        """calls git clone and then, if version was given, update(version)

        :param url: git remote to clone from; must be a non-empty string
        :param version: optional refname (branch, tag, hash) to move to after cloning
        :param shallow: clone with --depth 1 (plus --no-single-branch on git >= 1.7.10)
        :param timeout: passed through to the shell command runner
        :returns: True on success, False if clone or the follow-up update failed
        :raises: ValueError on empty url
        """
        if url is None or url.strip() == '':
            raise ValueError('Invalid empty url : "%s"' % url)
        # since we cannot know whether version names a branch, clone master initially
        cmd = 'git clone'
        if shallow:
            cmd += ' --depth 1'
            if LooseVersion(self.gitversion) >= LooseVersion('1.7.10'):
                # keep the other branches available in the shallow clone
                # (option only exists since git 1.7.10)
                cmd += ' --no-single-branch'
        if version is None:
            # quicker than using _do_update, but undesired when switching branches next
            cmd += ' --recursive'
        cmd += ' %s %s' % (url, self._path)
        value, _, msg = run_shell_command(cmd,
                                          shell=True,
                                          no_filter=True,
                                          show_stdout=verbose,
                                          timeout=timeout,
                                          verbose=verbose)
        if value != 0:
            if msg:
                self.logger.error('%s' % msg)
            return False
        try:
            # update to make sure we are on the right branch. Do not
            # check for "master" here, as default branch could be anything
            if version is not None:
                return self._do_update(version,
                                       verbose=verbose,
                                       fast_foward=True,
                                       timeout=timeout,
                                       update_submodules=True)
            else:
                return True
        except GitError:
            return False
def update_submodules(self, verbose=False, timeout=None):
# update and or init submodules too
if LooseVersion(self.gitversion) > LooseVersion('1.7'):
cmd = "git submodule update --init --recursive"
value, _, _ = run_shell_command(cmd,
shell=True,
cwd=self._path,
show_stdout=True,
timeout=timeout,
verbose=verbose)
if value != 0:
return False
return True
def update(self, version=None, verbose=False, force_fetch=False, timeout=None):
"""
if version is None, attempts fast-forwarding current branch, if any.
Else interprets version as a local branch, remote branch, tagname,
hash, etc.
If it is a branch, attempts to move to it unless
already on it, and to fast-forward, unless not a tracking
branch. Else go untracked on tag or whatever version is. Does
not leave if current commit would become dangling.
:return: True if already up-to-date with remote or after successful fast_foward
"""
if not self.detect_presence():
return False
try:
# fetch in any case to get updated tags even if we don't need them
self._do_fetch()
return self._do_update(refname=version, verbose=verbose, timeout=timeout)
except GitError:
return False
def _do_update(self,
refname=None,
verbose=False,
fast_foward=True,
timeout=None,
update_submodules=True):
'''
updates without fetching, thus any necessary fetching must be done before
allows arguments to reduce unnecessary steps after checkout
:param fast_foward: if false, does not perform fast-forward
:param update_submodules: if false, does not attempt to update submodules
'''
# are we on any branch?
current_branch = self.get_branch()
branch_parent = None
if current_branch:
# local branch might be named differently from remote by user, we respect that
same_branch = (refname == current_branch)
if not same_branch:
(branch_parent, remote) = self.get_branch_parent(current_branch=current_branch)
if not refname:
# ! changing refname to cause fast-forward
refname = branch_parent
same_branch = True
else:
same_branch = (refname == branch_parent)
if same_branch and not branch_parent:
# avoid expensive checking branch parent again later
fast_foward = False
else:
same_branch = False
if not refname:
# we are neither tracking, nor did we get any refname to update to
return (not update_submodules) or self.update_submodules(verbose=verbose,
timeout=timeout)
if same_branch:
if fast_foward:
if not branch_parent and current_branch:
(branch_parent, remote) = self.get_branch_parent(current_branch=current_branch)
if remote != 'origin':
# if remote is not origin, must not fast-forward (because based on origin)
sys.stderr.write("vcstools only handles branches tracking remote 'origin'," +
" branch '%s' tracks remote '%s'\n" % (current_branch, remote))
branch_parent = None
# already on correct branch, fast-forward if there is a parent
if branch_parent:
if not self._do_fast_forward(branch_parent=branch_parent,
fetch=False,
verbose=verbose):
return False
else:
# refname can be a different branch or something else than a branch
refname_is_local_branch = self.is_local_branch(refname)
if refname_is_local_branch:
# might also be remote branch, but we treat it as local
refname_is_remote_branch = False
else:
refname_is_remote_branch = self.is_remote_branch(refname, fetch=False)
refname_is_branch = refname_is_remote_branch or refname_is_local_branch
current_version = None
# shortcut if version is the same as requested
if not refname_is_branch:
current_version = self.get_version()
if current_version == refname:
return (not update_submodules) or self.update_submodules(verbose=verbose,
timeout=timeout)
if current_branch is None:
if not current_version:
current_version = self.get_version()
# prevent commit from becoming dangling
if self.is_commit_in_orphaned_subtree(current_version, fetch=False):
# commit becomes dangling unless we move to one of its descendants
if not self.rev_list_contains(refname, current_version, fetch=False):
# TODO: should raise error instead of printing message
sys.stderr.write("vcstools refusing to move away from dangling commit, to protect your work.\n")
return False
# git checkout makes all the decisions for us
self._do_checkout(refname, verbose=verbose, fetch=False)
if refname_is_local_branch:
# if we just switched to a local tracking branch (not created one), we should also fast forward
(new_branch_parent, remote) = self.get_branch_parent(current_branch=refname)
if remote != 'origin':
# if remote is not origin, must not fast-forward (because based on origin)
sys.stderr.write("vcstools only handles branches tracking remote 'origin'," +
" branch '%s' tracks remote '%s'\n" % (current_branch, remote))
new_branch_parent = None
if new_branch_parent is not None:
if fast_foward:
if not self._do_fast_forward(branch_parent=new_branch_parent,
fetch=False,
verbose=verbose):
return False
return (not update_submodules) or self.update_submodules(verbose=verbose, timeout=timeout)
def get_current_version_label(self):
"""
For git we change the label to clarify when a different remote
is configured.
"""
branch = self.get_branch()
if branch is None:
return '<detached>'
result = branch
(remote_branch, remote) = self.get_branch_parent()
if remote_branch is not None:
# if not following 'origin/branch', display 'branch < tracked ref'
if (remote_branch != branch or remote != 'origin'):
result += ' < '
if remote != 'origin':
result += remote + '/'
result += remote_branch
return result
def get_remote_version(self, fetch=False):
# try tracked branch on origin (returns None if on other remote)
(parent_branch, remote) = self.get_branch_parent(fetch=fetch)
if parent_branch is not None:
return self.get_version(spec=remote+'/'+parent_branch)
    def get_version(self, spec=None):
        """
        :param spec: (optional) token to identify desired version. For
        git, this may be anything accepted by git log, e.g. a tagname,
        branchname, or sha-id.
        :returns: current SHA-ID of the repository. Or if spec is
        provided, the SHA-ID of a commit specified by some token if found, else None

        .. note:: when *spec* is given but not found locally, one
           ``git fetch`` is attempted automatically before giving up.
        """
        if self.detect_presence():
            command = "git log -1"
            if spec is not None:
                command += " %s" % sanitized(spec)
            command += " --format='%H'"
            _, output, _ = run_shell_command(command, shell=True,
                                             no_warn=True, cwd=self._path)
            if output.strip() != '':
                # On Windows the version can have single quotes around it
                version = output.strip().strip("'")
                return version  # found SHA-ID
            elif spec is None:
                # HEAD itself produced no output; nothing to retry
                return None
            # we try again after fetching if given spec had not been found
            try:
                self._do_fetch()
            except GitError:
                return None
            # we repeat the call once again after fetching
            _, output, _ = run_shell_command(command, shell=True,
                                             no_warn=True, cwd=self._path)
            if output.strip() == '':
                # even if after fetching, not found specified version
                return None
            version = output.strip().strip("'")
            return version
        return None
def get_diff(self, basepath=None):
response = ''
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = normalized_rel_path(self._path, basepath)
# git needs special treatment as it only works from inside
# use HEAD to also show staged changes. Maybe should be option?
# injection should be impossible using relpath, but to be sure, we check
cmd = "git diff HEAD --src-prefix=%s/ --dst-prefix=%s/ ." % \
(sanitized(rel_path), sanitized(rel_path))
_, response, _ = run_shell_command(cmd, shell=True, cwd=self._path)
if LooseVersion(self.gitversion) > LooseVersion('1.7'):
cmd = 'git submodule foreach --recursive git diff HEAD'
_, output, _ = run_shell_command(cmd, shell=True, cwd=self._path)
response += _git_diff_path_submodule_change(output, rel_path)
return response
def get_log(self, relpath=None, limit=None):
response = []
if relpath is None:
relpath = ''
if self.path_exists() and os.path.exists(os.path.join(self._path, relpath)):
# Get the log
limit_cmd = (("-n %d" % (int(limit))) if limit else "")
GIT_COMMIT_FIELDS = ['id', 'author', 'email', 'date', 'message']
GIT_LOG_FORMAT = '%x1f'.join(['%H', '%an', '%ae', '%ad', '%s']) + '%x1e'
command = "git --work-tree=%s log --format=\"%s\" %s %s " % (self._path, GIT_LOG_FORMAT,
limit_cmd, sanitized(relpath))
return_code, response_str, stderr = run_shell_command(command, shell=True, cwd=self._path)
if return_code == 0:
# Parse response
response = response_str.strip('\n\x1e').split("\x1e")
response = [row.strip().split("\x1f") for row in response]
response = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in response]
# Parse dates
for entry in response:
entry['date'] = dateutil.parser.parse(entry['date'])
return response
    def get_status(self, basepath=None, untracked=False):
        """
        Short-format status of the working copy (and of submodules for
        git > 1.7), with every filename prefixed by the repo path
        relative to *basepath*.

        :param basepath: base for the relative prefix; defaults to the repo path
        :param untracked: when True, also list untracked files
        :returns: processed 'git status -s' text, or None when the path is missing
        """
        response = None
        if basepath is None:
            basepath = self._path
        if self.path_exists():
            rel_path = normalized_rel_path(self._path, basepath)
            # git command only works inside repo
            # self._path is safe against command injection, as long as we check path.exists
            command = "git status -s "
            if not untracked:
                command += " -uno"
            _, response, _ = run_shell_command(command,
                                               shell=True,
                                               cwd=self._path)
            response_processed = ""
            for line in response.split('\n'):
                if len(line.strip()) > 0:
                    # prepend relative path: short format is 'XY filename',
                    # so line[0:3] is the status code plus separating space
                    response_processed += '%s%s/%s\n' % (line[0:3],
                                                         rel_path,
                                                         line[3:])
            if LooseVersion(self.gitversion) > LooseVersion('1.7'):
                command = "git submodule foreach --recursive git status -s"
                if not untracked:
                    command += " -uno"
                _, response2, _ = run_shell_command(command,
                                                    shell=True,
                                                    cwd=self._path)
                for line in response2.split('\n'):
                    # skip the "Entering '<submodule>'" banner lines
                    if line.startswith("Entering"):
                        continue
                    if len(line.strip()) > 0:
                        # prepend relative path
                        response_processed += line[0:3] + rel_path + '/' + line[3:] + '\n'
            response = response_processed
        return response
def is_remote_branch(self, branch_name, remote_name=None, fetch=True):
"""
checks list of remote branches for match. Set fetch to False if you just fetched already.
:returns: True if branch_name exists for remote <remote_name> (or 'origin' if None)
:raises: GitError when git fetch fails
"""
if remote_name is None:
remote_name = "origin" # default remote name is origin
if self.path_exists():
if fetch:
self._do_fetch()
_, output, _ = run_shell_command('git branch -r',
shell=True,
cwd=self._path)
for l in output.splitlines():
elem = l.split()[0]
rem_name = elem[:elem.find('/')]
br_name = elem[elem.find('/') + 1:]
if rem_name == remote_name and br_name == branch_name:
return True
return False
def is_local_branch(self, branch_name):
if self.path_exists():
_, output, _ = run_shell_command('git branch',
shell=True,
cwd=self._path)
for line in output.splitlines():
elems = line.split()
if len(elems) == 1:
if elems[0] == branch_name:
return True
elif len(elems) == 2:
if elems[0] == '*' and elems[1] == branch_name:
return True
return False
def get_branch(self):
if self.path_exists():
_, output, _ = run_shell_command('git branch',
shell=True,
cwd=self._path)
for line in output.splitlines():
elems = line.split()
if len(elems) == 2 and elems[0] == '*':
return elems[1]
return None
    def get_branch_parent(self, fetch=False, current_branch=None):
        """
        :param fetch: if true, performs git fetch first
        :param current_branch: if not None, this is used as current branch (else extra shell call)
        :returns: (branch, remote) the name of the branch this branch tracks and its remote,
            or (None, None) when there is no branch or no tracking information
        :raises: GitError if fetch fails
        """
        if not self.path_exists():
            return (None, None)
        # get name of configured merge ref.
        branchname = current_branch or self.get_branch()
        if branchname is None:
            return (None, None)
        cmd = 'git config --get %s' % sanitized('branch.%s.merge' % branchname)
        _, output, _ = run_shell_command(cmd,
                                         shell=True,
                                         cwd=self._path)
        if not output:
            # branch has no upstream configured
            return (None, None)
        lines = output.splitlines()
        if len(lines) > 1:
            sys.stderr.write("vcstools unable to handle multiple merge references for branch %s:\n%s\n"
                             % (branchname, output))
            return (None, None)
        # get name of configured remote
        cmd = 'git config --get "branch.%s.remote"' % branchname
        _, output2, _ = run_shell_command(cmd, shell=True, cwd=self._path)
        remote = output2 or 'origin'
        branch_reference = lines[0]
        # branch_reference is either refname, or /refs/heads/refname, or
        # heads/refname we would like to return refname however,
        # user could also have named any branch
        # "/refs/heads/refname", for some unholy reason check all
        # known branches on remote for refname, then for the odd
        # cases, as git seems to do
        candidate = branch_reference
        if candidate.startswith('refs/'):
            candidate = candidate[len('refs/'):]
        if candidate.startswith('heads/'):
            candidate = candidate[len('heads/'):]
        elif candidate.startswith('tags/'):
            candidate = candidate[len('tags/'):]
        elif candidate.startswith('remotes/'):
            candidate = candidate[len('remotes/'):]
        result = None
        # prefer the stripped name; fall back to the literal reference
        if self.is_remote_branch(candidate, remote_name=remote, fetch=fetch):
            result = candidate
        elif branch_reference != candidate and self.is_remote_branch(branch_reference, remote_name=remote, fetch=False):
            result = branch_reference
        if result is not None:
            return (result, remote)
        return None, None
def is_tag(self, tag_name, fetch=True):
"""
checks list of tags for match.
Set fetch to False if you just fetched already.
:returns: True if tag_name among known tags
:raises: GitError when call to git fetch fails
"""
if fetch:
self._do_fetch()
if not tag_name:
raise ValueError('is_tag requires tag_name, got: "%s"' % tag_name)
if self.path_exists():
cmd = 'git tag -l %s' % sanitized(tag_name)
_, output, _ = run_shell_command(cmd, shell=True, cwd=self._path)
lines = output.splitlines()
if len(lines) == 1:
return True
return False
def rev_list_contains(self, refname, version, fetch=True):
"""
calls git rev-list with refname and returns True if version
can be found in rev-list result
:param refname: a git refname
:param version: an SHA IDs (if partial, caller is responsible
for mismatch)
:returns: True if version is an ancestor commit from refname
:raises: GitError when call to git fetch fails
"""
# to avoid listing unnecessarily many rev-ids, we cut off all
# those we are definitely not interested in
# $ git rev-list foo bar ^baz ^bez
# means "list all the commits which are reachable from foo or
# bar, but not from baz or bez". We use --parents because
# ^baz also excludes baz itself. We could also use git
# show --format=%P to get all parents first and use that,
# not sure what's more performant
if fetch:
self._do_fetch()
if (refname is not None and refname != '' and
version is not None and version != ''):
cmd = 'git rev-list %s ^%s --parents' % (sanitized(refname), sanitized(version))
_, output, _ = run_shell_command(cmd, shell=True, cwd=self._path)
for line in output.splitlines():
# can have 1, 2 or 3 elements (commit, parent1, parent2)
for hashid in line.split(" "):
if hashid.startswith(version):
return True
return False
    def is_commit_in_orphaned_subtree(self, version, mask_self=False, fetch=True):
        """
        checks git log --all (the list of all commits reached by
        references, meaning branches or tags) for version. If it shows
        up, that means git garbage collection will not remove the
        commit. Else it would eventually be deleted.

        :param version: SHA IDs (if partial, caller is responsible for mismatch)
        :param mask_self: whether to consider direct references to this commit
        (rather than only references on descendants) as well
        :param fetch: whether fetch should be done first for remote refs
        :returns: True if version is not recursively referenced by a branch or tag
        :raises: GitError if git fetch fails
        """
        if fetch:
            self._do_fetch()
        if version is not None and version != '':
            # list all ref heads, then walk the log of each of them
            cmd = 'git show-ref -s'
            _, output, _ = run_shell_command(cmd, shell=True, cwd=self._path)
            refs = output.splitlines()
            # 2000 seems like a number the linux shell can cope with
            chunksize = 2000
            refchunks = [refs[x:x + chunksize] for x in range(0, len(refs), chunksize)]
            for refchunk in refchunks:
                # git log over all refs except HEAD
                cmd = 'git log ' + " ".join(refchunk)
                if mask_self:
                    # %P: parent hashes
                    cmd += " --pretty=format:%P"
                else:
                    # %H: commit hash
                    cmd += " --pretty=format:%H"
                _, output, _ = run_shell_command(cmd, shell=True, cwd=self._path)
                for line in output.splitlines():
                    # strip quotes some platforms add around log output
                    if line.strip("'").startswith(version):
                        return False
            return True
        return False
def export_repository(self, version, basepath):
# Use the git archive function
cmd = "git archive -o {0}.tar {1}".format(basepath, version)
result, _, _ = run_shell_command(cmd, shell=True, cwd=self._path)
if result:
return False
try:
# Gzip the tar file
with open(basepath + '.tar', 'rb') as tar_file:
gzip_file = gzip.open(basepath + '.tar.gz', 'wb')
try:
gzip_file.writelines(tar_file)
finally:
gzip_file.close()
finally:
# Clean up
os.remove(basepath + '.tar')
return True
def get_branches(self, local_only=False):
cmd = 'git branch --no-color'
if not local_only:
cmd += ' -a'
result, out, err = run_shell_command(cmd,
cwd=self._path,
shell=True,
show_stdout=False)
branches = []
for line in out.splitlines():
if 'HEAD -> ' in line:
continue
line = line.strip('* ')
branches.append(line)
return branches
def _do_fetch(self, timeout=None):
"""
calls git fetch
:raises: GitError when call fails
"""
cmd = "git fetch"
value1, _, _ = run_shell_command(cmd,
cwd=self._path,
shell=True,
no_filter=True,
timeout=timeout,
show_stdout=True)
# git fetch --tags ONLY fetches new tags and commits used, no other commits!
cmd = "git fetch --tags"
value2, _, _ = run_shell_command(cmd,
cwd=self._path,
shell=True,
no_filter=True,
timeout=timeout,
show_stdout=True)
if value1 != 0 or value2 != 0:
raise GitError('git fetch failed')
    def _do_fast_forward(self, branch_parent, fetch=True, verbose=False):
        """Execute git fetch if necessary, and if we can fast-forward,
        do so to the last fetched version using git rebase.

        :param branch_parent: name of branch we track
        :param fetch: whether fetch should be done first for remote refs
        :param verbose: when True, print progress and pass verbose flags to git
        :returns: True if up-to-date or after successful fast-forward
        :raises: GitError when git fetch fails
        """
        assert branch_parent is not None
        current_version = self.get_version()
        parent_version = self.get_version("remotes/origin/%s" % branch_parent)
        if current_version == parent_version:
            # already at the tracked branch's tip; nothing to do
            return True
        # check if we are true ancestor of tracked branch
        if not self.rev_list_contains(parent_version,
                                      current_version,
                                      fetch=fetch):
            # if not rev_list_contains this version, we are on same
            # commit (checked before), have advanced, or have diverged.
            # Now check whether tracked branch is a true ancestor of us
            if self.rev_list_contains(current_version,
                                      parent_version,
                                      fetch=False):
                # local is strictly ahead of remote; nothing to fast-forward
                return True
            print("Cannot fast-forward, local repository and remote '%s' have diverged." % branch_parent)
            return False
        if verbose:
            print("Rebasing repository")
        # Rebase, do not pull, because somebody could have
        # commited in the meantime.
        if LooseVersion(self.gitversion) >= LooseVersion('1.7.1'):
            # --keep allows to rebase even with local changes, as long as
            # local changes are not in files that change between versions
            cmd = "git reset --keep remotes/origin/%s" % branch_parent
            value, _, _ = run_shell_command(cmd,
                                            shell=True,
                                            cwd=self._path,
                                            show_stdout=True,
                                            verbose=verbose)
            if value == 0:
                return True
        else:
            verboseflag = ''
            if verbose:
                verboseflag = '-v'
            # prior to version 1.7.1, git does not know --keep
            # Do not merge, rebase does nothing when there are local changes
            cmd = "git rebase %s remotes/origin/%s" % (verboseflag, branch_parent)
            value, _, _ = run_shell_command(cmd,
                                            shell=True,
                                            cwd=self._path,
                                            show_stdout=True,
                                            verbose=verbose)
            if value == 0:
                return True
        return False
def _do_checkout(self, refname, fetch=True, verbose=False):
"""
meaning git checkout, not vcstools checkout. This works
for local branches, remote branches, tagnames, hashes, etc.
git will create local branch of same name when no such local
branch exists, and also setup tracking. Git decides with own
rules whether local changes would cause conflicts, and refuses
to checkout else.
:raises GitError: when checkout fails
"""
# since refname may relate to remote branch / tag we do not
# know about yet, do fetch if not already done
if fetch:
self._do_fetch()
cmd = "git checkout %s" % (refname)
value, _, _ = run_shell_command(cmd,
shell=True,
cwd=self._path,
show_stdout=verbose,
verbose=verbose)
if value != 0:
raise GitError('Git Checkout failed')
# Backwards compatibility: deprecated alias kept for old callers; use GitClient
GITClient = GitClient
| jpgr87/vcstools | src/vcstools/git.py | Python | bsd-3-clause | 37,164 |
import os
# Minimal Django settings for the bootstrap_paginator test project.
BASE_DIR = os.path.dirname(__file__)
# Debug output is fine here; these settings only drive the test suite.
DEBUG = True
INSTALLED_APPS = (
    'bootstrap_paginator',
    'testlist',
)
# Throwaway on-disk SQLite database for tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite3',
    },
}
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
# The paginator templates read the current request from the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
)
# Deliberately insecure: never reuse outside tests.
SECRET_KEY = 'notsecure'
ROOT_URLCONF = 'urls'
| defrex/django-bootstrap-paginator | tests/settings.py | Python | mit | 438 |
"""
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the metaclass for your Field subclass, implement
to_python() and the other necessary methods and everything will work
seamlessly.
"""
class SubfieldBase(type):
    """
    Metaclass for custom Field subclasses.

    Wraps the field's contribute_to_class() so that a Creator descriptor is
    installed on the model attribute, attaching the descriptor protocol.
    """
    def __new__(cls, name, bases, attrs):
        # Capture any contribute_to_class() the subclass declared itself;
        # make_contrib() will call it before installing the descriptor.
        existing_contribute = attrs.get('contribute_to_class')
        new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
        new_class.contribute_to_class = make_contrib(new_class, existing_contribute)
        return new_class
class Creator(object):
    """
    Descriptor installed on the model class for a field.

    Reading returns the stored value; assignment runs the value through the
    field's to_python() before storing it in the instance dict.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, obj, type=None):
        if obj is None:
            # Class-level access: there is no instance dict to read from.
            raise AttributeError('Can only be accessed via an instance.')
        return obj.__dict__[self.field.name]

    def __set__(self, obj, value):
        converted = self.field.to_python(value)
        obj.__dict__[self.field.name] = converted
def make_contrib(superclass, func=None):
    """
    Build a suitable contribute_to_class() method for a Field subclass.

    When *func* is given it is the subclass's own contribute_to_class() and
    is called first; it is assumed in that case to invoke all necessary
    superclass methods itself. Otherwise the superclass implementation is
    called directly. Either way a Creator descriptor is then installed on
    the model class.
    """
    def contribute_to_class(self, cls, name):
        if func is not None:
            func(self, cls, name)
        else:
            super(superclass, self).contribute_to_class(cls, name)
        setattr(cls, self.name, Creator(self))
    return contribute_to_class
| denisenkom/django | django/db/models/fields/subclassing.py | Python | bsd-3-clause | 1,818 |
"""
Test the autocomplete mode
"""
from pyqode.core.api import TextHelper
from pyqode.qt import QtCore
from pyqode.qt.QtTest import QTest
from pyqode.python import modes as pymodes
from ..helpers import editor_open
def get_mode(editor):
    """Return the PyAutoCompleteMode instance installed on *editor*."""
    return editor.modes.get(pymodes.PyAutoCompleteMode)
def test_enabled(editor):
    """The mode is enabled by default and survives being toggled off/on."""
    mode = get_mode(editor)
    assert mode.enabled
    mode.enabled = False
    mode.enabled = True
@editor_open(__file__)
def test_basic(editor):
    """Typing '(' must not crash, whether the editor has content or is empty."""
    QTest.keyPress(editor, '(')
    editor.clear()
    QTest.keyPress(editor, '(')
def test_autocomple_func_parens(editor):
    """Typing '(' after a function header auto-completes to '():'."""
    editor.clear()
    editor.setPlainText('def foo')
    # place the cursor right after 'foo' so the keypress lands there
    TextHelper(editor).goto_line(0, len('def foo'))
    QTest.keyPress(editor, '(')
    assert editor.toPlainText() == 'def foo():'
def test_autocomple_method_parens(editor):
    """Typing '(' after a method header auto-completes to '(self):'."""
    editor.clear()
    editor.setPlainText('class\n    def foo')
    # place the cursor right after 'foo' on the method line
    TextHelper(editor).goto_line(1, len('    def foo'))
    QTest.keyPress(editor, '(')
    assert editor.toPlainText() == 'class\n    def foo(self):'
| mmolero/pyqode.python | test/test_modes/test_autocomplete.py | Python | mit | 1,046 |
import itertools, json, math, os, re, sys, urllib.request, yaml
# Defaults used when the 'output' section is absent from the config file.
DEFAULT_OUTPUT_DIRECTORY = "outputs"
DEFAULT_OUTPUT_FILE = "acs_tract_data.json"
# Whether raw ACS variable columns are kept in the output JSON.
DEFAULT_OUTPUT_ACS_VARIABLES = False
def census_query(acs_variables, base_url, geo):
    """This function performs the Census API query. It returns hashes of the form
    {tract1 => {var1 => val1, var2 => val2, ...},
    {tract2 => {var1 => val1, var2 => val2, ...}, ... }

    Variables are requested in chunks of at most 50, the API's per-request
    limit.

    :param acs_variables: iterable of ACS variable names to fetch
    :param base_url: Census API base URL (year/period already embedded)
    :param geo: pre-built "for=...&in=..." geography query fragment
    """
    acs_variables = list(acs_variables)
    tracts = {}
    # range() stepping fixes the old `while start_index <= len(...)` loop,
    # which issued a request with an EMPTY variable list whenever the number
    # of variables was a multiple of 50 (including zero).
    for start_index in range(0, len(acs_variables), 50):
        v = acs_variables[start_index:(start_index + 50)]
        url = "&".join([base_url, "get=" + ",".join(v), geo])
        print("Fetching {} fields from ACS...".format(len(v)))
        print("URL: " + url)
        resp = json.loads(urllib.request.urlopen(url).read().decode("UTF-8"))
        colnames, *data = resp
        for row in data:
            r = dict(zip(colnames, row))
            # convert all values to ints except the geo identifiers
            # (bug fix: "country" was a typo for the Census "county" column,
            # which lost its leading zeros when int()-converted)
            for k in r.keys():
                if r[k] is not None and k not in ["state", "county", "tract"]:
                    r[k] = int(r[k])
            # tract ids in the neighborhood file include the state and county number
            tract_id = "11001" + r["tract"]
            if tract_id in tracts:
                tracts[tract_id].update(r)
            else:
                tracts[tract_id] = r
    return tracts
class ConfigurationException(Exception): pass
def main(args):
    """Read the YAML config, fetch ACS data, derive output fields, write JSON.

    :param args: command-line arguments; args[0] may name the config file
        (defaults to "config.yaml").
    :raises ConfigurationException: when a required 'api' key is missing.
    """
    config_file = args.pop(0) if args else "config.yaml"
    with open(config_file) as f:
        # safe_load: the config is plain data; never execute arbitrary YAML tags
        config = yaml.safe_load(f.read())
    if "api" not in config:
        raise ConfigurationException("No 'api' section in configuration")
    if "acs_year" not in config["api"]:
        raise ConfigurationException("No ACS year specified")
    if "acs_period" not in config["api"]:
        raise ConfigurationException("No ACS period specified")
    acs_year = config["api"]["acs_year"]
    acs_period = config["api"]["acs_period"]
    if "acs_geography" not in config["api"]:
        raise ConfigurationException("No ACS geography specified")
    # construct the base URL
    base_url = "http://api.census.gov/data/{}/acs{}?".format(acs_year, acs_period)
    print("Using base URL: " + base_url)
    # construct the geo parameter, e.g. "for=tract:*&in=state:11"
    geo_for_key, geo_for_value = list(config["api"]["acs_geography"]["for"].items())[0]
    if re.match("all", geo_for_value, re.IGNORECASE):
        geo_for_value = "*"
    geo_for = "{}:{}".format(geo_for_key, geo_for_value)
    ll = list(config["api"]["acs_geography"]["in"].items())[0]
    geo_in = ":".join(str(x) for x in ll)
    geo = "for={}&in={}".format(geo_for, geo_in)
    # get the remaining configuration variables; .get() keeps a partial or
    # missing 'output' section from raising KeyError
    output_section = config.get("output") or {}
    output_directory = output_section.get("directory") or DEFAULT_OUTPUT_DIRECTORY
    output_file = output_section.get("file") or DEFAULT_OUTPUT_FILE
    output_acs_variables = output_section.get("acs_variables") or DEFAULT_OUTPUT_ACS_VARIABLES
    output_file = os.path.join(output_directory, output_file)
    print("Using output file: " + output_file)
    for name in ["fields_rename", "fields_sum", "fields_sub", "fields_prod"]:
        if name not in config:
            print("WARNING: {} does not appear in configuration".format(name))
    # .get() (bug fix): the loop above only WARNS about missing sections, but
    # the old config["fields_*"] lookups then crashed with KeyError anyway
    fields_rename = config.get("fields_rename") or {}
    fields_sum = config.get("fields_sum") or {}
    fields_sub = config.get("fields_sub") or {}
    fields_prod = config.get("fields_prod") or {}
    # download all ACS variables and all corresponding margin-of-error variables
    # margin-of-error variables are obtained by replacing E by M in the ACS variable name
    all_acs_fields = set(fields_rename.values())
    all_acs_fields |= set(itertools.chain(*fields_sum.values()))
    all_acs_fields |= set(itertools.chain(*fields_sub.values()))
    all_acs_fields |= set(itertools.chain(*fields_prod.values()))
    all_acs_fields |= set(re.sub("E", "M", s) for s in all_acs_fields)
    tracts = census_query(all_acs_fields, base_url, geo)
    # compute named variables from ACS variables
    for tract_id, tract in tracts.items():
        for outname, acsname in fields_rename.items():
            errname = re.sub("E", "M", acsname)
            tract[outname] = tract[acsname]
            tract[outname + "_margin"] = tract[errname]
        # We found some Census documentation suggesting that we use the
        # sqrt(sum of squares) for the margin of error for aggregate estimates.
        # https://www.census.gov/acs/www/Downloads/data_documentation/Statistical_Testing/ACS_2008_Statistical_Testing.pdf
        for outname, acsfields in fields_sum.items():
            tract[outname] = 0
            tract[outname + "_margin"] = 0
            for acsname in acsfields:
                errname = re.sub("E", "M", acsname)
                tract[outname] += tract[acsname]
                tract[outname + "_margin"] += (tract[errname] / 1.645) ** 2
            tract[outname + "_margin"] = math.sqrt(tract[outname + "_margin"]) * 1.645
        for outname, acsfields in fields_sub.items():
            first, *rest = acsfields
            errname = re.sub("E", "M", first)
            tract[outname] = tract[first]
            tract[outname + "_margin"] = (tract[errname] / 1.645) ** 2
            for acsname in rest:
                errname = re.sub("E", "M", acsname)
                tract[outname] -= tract[acsname]
                tract[outname + "_margin"] += (tract[errname] / 1.645) ** 2
            tract[outname + "_margin"] = math.sqrt(tract[outname + "_margin"]) * 1.645
        # TODO: compute margin of error for product fields
        for outname, acsfields in fields_prod.items():
            tract[outname] = 1
            tract[outname + "_margin"] = 0
            for acsname in acsfields:
                tract[outname] *= tract[acsname]
    # remove ACS variables from output
    if not output_acs_variables:
        for tract in tracts.values():
            for f in all_acs_fields:
                del(tract[f])
    # write the result to a JSON file
    # (bug fix: previously wrote to `outname`, a leftover loop variable holding
    # the LAST derived field name, instead of the configured output file)
    os.makedirs(output_directory, exist_ok=True)
    with open(output_file, "w") as f:
        json.dump(tracts, f, indent=2)

if __name__ == "__main__":
    main(sys.argv[1:])
| gggodhwani/state_of_children_in_india | data/scripts/fetch_acs.py | Python | mit | 6,478 |
# (C) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import hashlib
import json
import os
import tarfile
import uuid
import time
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode, urlparse
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url, prepare_multipart
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash_s
try:
    from urllib.parse import urlparse
except ImportError:
    # Python 2
    from urlparse import urlparse
# NOTE(review): this py2/py3 fallback shadows the six.moves urlparse imported
# above — presumably intentional, but worth consolidating to a single import.
display = Display()
def g_connect(versions):
    """
    Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the
    endpoint.

    :param versions: A list of API versions that the function supports.
    :raises AnsibleError: when no API root can be found, or when none of the
        required *versions* is offered by the server.
    """
    def decorator(method):
        def wrapped(self, *args, **kwargs):
            if not self._available_api_versions:
                display.vvvv("Initial connection to galaxy_server: %s" % self.api_server)
                # Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer
                # auth for Automation Hub.
                n_url = self.api_server
                error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url)
                if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/':
                    n_url = 'https://galaxy.ansible.com/api/'
                try:
                    data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg)
                except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
                    # Either the URL doesnt exist, or other error. Or the URL exists, but isn't a galaxy API
                    # root (not JSON, no 'available_versions') so try appending '/api/'
                    if n_url.endswith('/api') or n_url.endswith('/api/'):
                        raise
                    # Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
                    n_url = _urljoin(n_url, '/api/')
                    try:
                        data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg)
                    except GalaxyError as new_err:
                        if new_err.http_code == 404:
                            raise err
                        raise
                if 'available_versions' not in data:
                    raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available "
                                       "on %s" % (n_url, self.api_server))
                # Update api_server to point to the "real" API root, which in this case could have been the configured
                # url + '/api/' appended.
                self.api_server = n_url
                # Default to only supporting v1, if only v1 is returned we also assume that v2 is available even though
                # it isn't returned in the available_versions dict.
                available_versions = data.get('available_versions', {u'v1': u'v1/'})
                if list(available_versions.keys()) == [u'v1']:
                    available_versions[u'v2'] = u'v2/'
                self._available_api_versions = available_versions
                display.vvvv("Found API version '%s' with Galaxy server %s (%s)"
                             % (', '.join(available_versions.keys()), self.name, self.api_server))
            # Verify that the API versions the function works with are available on the server specified.
            available_versions = set(self._available_api_versions.keys())
            common_versions = set(versions).intersection(available_versions)
            if not common_versions:
                raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s"
                                   % (method.__name__, ", ".join(versions), ", ".join(available_versions),
                                      self.name, self.api_server))
            return method(self, *args, **kwargs)
        return wrapped
    return decorator
def _urljoin(*args):
    """Join URL fragments with single slashes, skipping empty/None pieces."""
    cleaned = []
    # The appended '' mirrors the original expression's args + ('',); empty
    # pieces are filtered before stripping, so no trailing slash is produced.
    for piece in args + ('',):
        if piece:
            cleaned.append(to_native(piece, errors='surrogate_or_strict').strip('/'))
    return '/'.join(cleaned)
class GalaxyError(AnsibleError):
    """ Error for bad Galaxy server responses.

    Builds a human-readable message from the HTTP error body, whose JSON
    layout differs between the v1, v2 and v3 API endpoints.
    """
    def __init__(self, http_error, message):
        super(GalaxyError, self).__init__(message)
        self.http_code = http_error.code
        self.url = http_error.geturl()
        try:
            http_msg = to_text(http_error.read())
            err_info = json.loads(http_msg)
        except (AttributeError, ValueError):
            # body was unreadable or not JSON; fall back to HTTP reason below
            err_info = {}
        # crude detection of which API version produced this URL
        url_split = self.url.split('/')
        if 'v2' in url_split:
            galaxy_msg = err_info.get('message', http_error.reason)
            code = err_info.get('code', 'Unknown')
            full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code)
        elif 'v3' in url_split:
            # v3 returns a list of error objects
            errors = err_info.get('errors', [])
            if not errors:
                errors = [{}]  # Defaults are set below, we just need to make sure 1 error is present.
            message_lines = []
            for error in errors:
                error_msg = error.get('detail') or error.get('title') or http_error.reason
                error_code = error.get('code') or 'Unknown'
                message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
                message_lines.append(message_line)
            full_error_msg = "%s %s" % (message, ', '.join(message_lines))
        else:
            # v1 and unknown API endpoints
            galaxy_msg = err_info.get('default', http_error.reason)
            full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)
        self.message = to_native(full_error_msg)
class CollectionVersionMetadata:
    """
    Normalized metadata about one collection version on a Galaxy server.

    Defines a standard set of fields so callers do not have to care about
    API differences between Galaxy server versions.
    """

    def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies):
        """
        :param namespace: The namespace name.
        :param name: The collection name.
        :param version: The version that the metadata refers to.
        :param download_url: The URL to download the collection.
        :param artifact_sha256: The SHA256 of the collection artifact for later verification.
        :param dependencies: A dict of dependencies of the collection.
        """
        (self.namespace, self.name, self.version,
         self.download_url, self.artifact_sha256, self.dependencies) = (
            namespace, name, version, download_url, artifact_sha256, dependencies)
class GalaxyAPI:
    """ This class is meant to be used as a API client for an Ansible Galaxy server """
    def __init__(self, galaxy, name, url, username=None, password=None, token=None, validate_certs=True,
                 available_api_versions=None):
        """
        :param galaxy: The Galaxy context this client belongs to.
        :param name: Human-readable server name used in log/error messages.
        :param url: Base URL of the Galaxy server.
        :param username: Optional username credential.
        :param password: Optional password credential.
        :param token: Optional token object providing auth headers.
        :param validate_certs: Whether to verify TLS certificates.
        :param available_api_versions: Optional pre-discovered mapping of
            API version name -> path; normally populated lazily by g_connect.
        """
        self.galaxy = galaxy
        self.name = name
        self.username = username
        self.password = password
        self.token = token
        self.api_server = url
        self.validate_certs = validate_certs
        self._available_api_versions = available_api_versions or {}
        display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
    @property
    @g_connect(['v1', 'v2', 'v3'])
    def available_api_versions(self):
        """Mapping of API version name -> relative path offered by the server."""
        # Calling g_connect will populate self._available_api_versions
        return self._available_api_versions
    def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None):
        """Perform an HTTP request against the Galaxy server and return parsed JSON.

        :param url: full URL to request.
        :param args: optional request body for POST-style calls.
        :param headers: optional extra HTTP headers; auth headers are added.
        :param method: HTTP method; None lets open_url choose.
        :param auth_required: when True, fail early if no token is configured.
        :param error_context_msg: prefix for raised error messages.
        :raises GalaxyError: on an HTTP error response.
        :raises AnsibleError: on any other failure or a non-JSON response.
        """
        headers = headers or {}
        self._add_auth_token(headers, url, required=auth_required)
        try:
            display.vvvv("Calling Galaxy at %s" % url)
            resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers,
                            method=method, timeout=20, http_agent=user_agent(), follow_redirects='safe')
        except HTTPError as e:
            raise GalaxyError(e, error_context_msg)
        except Exception as e:
            raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)))
        resp_data = to_text(resp.read(), errors='surrogate_or_strict')
        try:
            data = json.loads(resp_data)
        except ValueError:
            raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
                               % (resp.url, to_native(resp_data)))
        return data
def _add_auth_token(self, headers, url, token_type=None, required=False):
# Don't add the auth token if one is already present
if 'Authorization' in headers:
return
if not self.token and required:
raise AnsibleError("No access token or username set. A token can be set with --api-key "
"or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH)))
if self.token:
headers.update(self.token.headers())
    @g_connect(['v1'])
    def authenticate(self, github_token):
        """
        Retrieve an authentication token

        Exchanges a GitHub personal access token for a Galaxy API token via
        the v1 tokens endpoint.
        :returns: the decoded JSON response containing the token.
        """
        url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/'
        args = urlencode({"github_token": github_token})
        resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent())
        data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
        return data
    @g_connect(['v1'])
    def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
        """
        Post an import request

        :param github_user: GitHub account owning the repository.
        :param github_repo: repository name to import.
        :param reference: optional git ref (branch/tag) to import.
        :param role_name: optional explicit role-name override.
        :returns: the server's 'results' list when present, else the raw response.
        """
        url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/'
        args = {
            "github_user": github_user,
            "github_repo": github_repo,
            "github_reference": reference if reference else ""
        }
        if role_name:
            args['alternate_role_name'] = role_name
        elif github_repo.startswith('ansible-role'):
            # derive the role name by dropping 'ansible-role' plus one
            # separator character (assumes names like 'ansible-role-foo')
            args['alternate_role_name'] = github_repo[len('ansible-role') + 1:]
        data = self._call_galaxy(url, args=urlencode(args), method="POST")
        if data.get('results', None):
            return data['results']
        return data
    @g_connect(['v1'])
    def get_import_task(self, task_id=None, github_user=None, github_repo=None):
        """
        Check the status of an import task.

        Accepts either a task id or a (github_user, github_repo) pair.
        :returns: the 'results' list from the v1 imports endpoint.
        :raises AnsibleError: when neither lookup key is supplied.
        """
        url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports")
        if task_id is not None:
            url = "%s?id=%d" % (url, task_id)
        elif github_user is not None and github_repo is not None:
            url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
        else:
            raise AnsibleError("Expected task_id or github_user and github_repo")
        data = self._call_galaxy(url)
        return data['results']
    @g_connect(['v1'])
    def lookup_role_by_name(self, role_name, notify=True):
        """
        Find a role by name.

        :param role_name: qualified name in "username.rolename" form.
        :param notify: when True, display a "downloading role" message.
        :returns: the first matching role dict, or None when nothing matches.
        """
        role_name = to_text(urlquote(to_bytes(role_name)))
        try:
            # everything before the final '.' is the owner; the rest is the role
            parts = role_name.split(".")
            user_name = ".".join(parts[0:-1])
            role_name = parts[-1]
            if notify:
                display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
        except Exception:
            raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
        url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles",
                       "?owner__username=%s&name=%s" % (user_name, role_name))
        data = self._call_galaxy(url)
        if len(data["results"]) != 0:
            return data["results"][0]
        return None
    @g_connect(['v1'])
    def fetch_role_related(self, related, role_id):
        """
        Fetch the list of related items for the given role.
        The url comes from the 'related' field of the role.

        Pagination is followed until 'next_link' is absent. Failures are
        logged as warnings and an empty (or partial) list is returned.
        """
        results = []
        try:
            url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related,
                           "?page_size=50")
            data = self._call_galaxy(url)
            results = data['results']
            done = (data.get('next_link', None) is None)
            # https://github.com/ansible/ansible/issues/64355
            # api_server contains part of the API path but next_link includes the /api part so strip it out.
            url_info = urlparse(self.api_server)
            base_url = "%s://%s/" % (url_info.scheme, url_info.netloc)
            while not done:
                url = _urljoin(base_url, data['next_link'])
                data = self._call_galaxy(url)
                results += data['results']
                done = (data.get('next_link', None) is None)
        except Exception as e:
            # deliberately non-fatal: related data is best-effort
            display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s"
                            % (role_id, related, to_text(e)))
        return results
    @g_connect(['v1'])
    def get_list(self, what):
        """
        Fetch the list of items specified.

        :param what: v1 resource name (e.g. "roles", "users").
        :returns: all results across pagination pages.
        :raises AnsibleError: when any request fails.
        """
        try:
            # NOTE(review): '?page_size' carries no value — presumably relies
            # on the server default; confirm this is intended.
            url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")
            data = self._call_galaxy(url)
            if "results" in data:
                results = data['results']
            else:
                results = data
            done = True
            if "next" in data:
                done = (data.get('next_link', None) is None)
            while not done:
                url = _urljoin(self.api_server, data['next_link'])
                data = self._call_galaxy(url)
                results += data['results']
                done = (data.get('next_link', None) is None)
            return results
        except Exception as error:
            raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error)))
    @g_connect(['v1'])
    def search_roles(self, search, **kwargs):
        """Search roles on the v1 API.

        :param search: free-text autocomplete term (may be falsy to skip).
        :param kwargs: optional filters: tags, platforms (comma-separated
            strings), page_size, author.
        :returns: the raw decoded JSON search response.
        """
        search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")
        if search:
            search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search)))
        tags = kwargs.get('tags', None)
        platforms = kwargs.get('platforms', None)
        page_size = kwargs.get('page_size', None)
        author = kwargs.get('author', None)
        if tags and isinstance(tags, string_types):
            tags = tags.split(',')
            search_url += '&tags_autocomplete=' + '+'.join(tags)
        if platforms and isinstance(platforms, string_types):
            platforms = platforms.split(',')
            search_url += '&platforms_autocomplete=' + '+'.join(platforms)
        if page_size:
            search_url += '&page_size=%s' % page_size
        if author:
            search_url += '&username_autocomplete=%s' % author
        data = self._call_galaxy(search_url)
        return data
@g_connect(['v1'])
def add_secret(self, source, github_user, github_repo, secret):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/'
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self._call_galaxy(url, args=args, method="POST")
return data
@g_connect(['v1'])
def list_secrets(self):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets")
data = self._call_galaxy(url, auth_required=True)
return data
@g_connect(['v1'])
def remove_secret(self, secret_id):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/'
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
@g_connect(['v1'])
def delete_role(self, github_user, github_repo):
url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole",
"?github_user=%s&github_repo=%s" % (github_user, github_repo))
data = self._call_galaxy(url, auth_required=True, method='DELETE')
return data
# Collection APIs #
    @g_connect(['v2', 'v3'])
    def publish_collection(self, collection_path):
        """
        Publishes a collection to a Galaxy server and returns the import task URI.

        :param collection_path: The path to the collection tarball to publish.
        :return: The import task URI that contains the import results.
        :raises AnsibleError: when the path is missing or not a tarball.
        """
        display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server))
        b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
        if not os.path.exists(b_collection_path):
            raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path))
        elif not tarfile.is_tarfile(b_collection_path):
            raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection "
                               "build' to create a proper release artifact." % to_native(collection_path))
        # checksum is sent alongside the upload so the server can verify it
        with open(b_collection_path, 'rb') as collection_tar:
            sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256)
        content_type, b_form_data = prepare_multipart(
            {
                'sha256': sha256,
                'file': {
                    'filename': b_collection_path,
                    'mime_type': 'application/octet-stream',
                },
            }
        )
        headers = {
            'Content-type': content_type,
            'Content-length': len(b_form_data),
        }
        # v3 (Automation Hub) and v2 expose different upload endpoints
        if 'v3' in self.available_api_versions:
            n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/'
        else:
            n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/'
        resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True,
                                 error_context_msg='Error when publishing collection to %s (%s)'
                                                   % (self.name, self.api_server))
        return resp['task']
    @g_connect(['v2', 'v3'])
    def wait_import_task(self, task_id, timeout=0):
        """
        Waits until the import process on the Galaxy server has completed or the timeout is reached.

        :param task_id: The id of the import task to wait for. This can be parsed out of the return
            value for GalaxyAPI.publish_collection.
        :param timeout: The timeout in seconds, 0 is no timeout.
        :raises AnsibleError: on timeout, or when the import ends in 'failed'.
        """
        state = 'waiting'
        data = None
        # Construct the appropriate URL per version
        if 'v3' in self.available_api_versions:
            full_url = _urljoin(self.api_server, self.available_api_versions['v3'],
                                'imports/collections', task_id, '/')
        else:
            full_url = _urljoin(self.api_server, self.available_api_versions['v2'],
                                'collection-imports', task_id, '/')
        display.display("Waiting until Galaxy import task %s has completed" % full_url)
        start = time.time()
        wait = 2
        while timeout == 0 or (time.time() - start) < timeout:
            try:
                data = self._call_galaxy(full_url, method='GET', auth_required=True,
                                         error_context_msg='Error when getting import task results at %s' % full_url)
            except GalaxyError as e:
                if e.http_code != 404:
                    raise
                # The import job may not have started, and as such, the task url may not yet exist
                display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait)
                time.sleep(wait)
                continue
            state = data.get('state', 'waiting')
            if data.get('finished_at', None):
                break
            display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again'
                        % (state, wait))
            time.sleep(wait)
            # poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
            wait = min(30, wait * 1.5)
        if state == 'waiting':
            raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
                               % to_native(full_url))
        # surface server-side import messages at an appropriate verbosity
        for message in data.get('messages', []):
            level = message['level']
            if level == 'error':
                display.error("Galaxy import error message: %s" % message['message'])
            elif level == 'warning':
                display.warning("Galaxy import warning message: %s" % message['message'])
            else:
                display.vvv("Galaxy import message: %s - %s" % (level, message['message']))
        if state == 'failed':
            code = to_native(data['error'].get('code', 'UNKNOWN'))
            description = to_native(
                data['error'].get('description', "Unknown error, see %s for more details" % full_url))
            raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
    @g_connect(['v2', 'v3'])
    def get_collection_version_metadata(self, namespace, name, version):
        """
        Gets the collection information from the Galaxy server about a specific Collection version.

        :param namespace: The collection namespace.
        :param name: The collection name.
        :param version: Version of the collection to get the information for.
        :return: CollectionVersionMetadata about the collection at the version requested.
        """
        # prefer the v3 endpoint when the server offers it
        api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
        url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
        n_collection_url = _urljoin(*url_paths)
        error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
                            % (namespace, name, version, self.name, self.api_server)
        data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg)
        return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
                                         data['download_url'], data['artifact']['sha256'],
                                         data['metadata']['dependencies'])
    @g_connect(['v2', 'v3'])
    def get_collection_versions(self, namespace, name):
        """
        Gets a list of available versions for a collection on a Galaxy server.

        :param namespace: The collection namespace.
        :param name: The collection name.
        :return: A list of versions that are available.
        """
        relative_link = False
        if 'v3' in self.available_api_versions:
            api_path = self.available_api_versions['v3']
            pagination_path = ['links', 'next']
            relative_link = True  # AH pagination results are relative an not an absolute URI.
        else:
            api_path = self.available_api_versions['v2']
            pagination_path = ['next']
        n_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/')
        error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
                            % (namespace, name, self.name, self.api_server)
        data = self._call_galaxy(n_url, error_context_msg=error_context_msg)
        if 'data' in data:
            # v3 automation-hub is the only known API that uses `data`
            # since v3 pulp_ansible does not, we cannot rely on version
            # to indicate which key to use
            results_key = 'data'
        else:
            results_key = 'results'
        versions = []
        # follow pagination until the server reports no next link
        while True:
            versions += [v['version'] for v in data[results_key]]
            next_link = data
            for path in pagination_path:
                next_link = next_link.get(path, {})
            if not next_link:
                break
            elif relative_link:
                # TODO: This assumes the pagination result is relative to the root server. Will need to be verified
                # with someone who knows the AH API.
                next_link = n_url.replace(urlparse(n_url).path, next_link)
            data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
                                     error_context_msg=error_context_msg)
        return versions
| BondAnthony/ansible | lib/ansible/galaxy/api.py | Python | gpl-3.0 | 26,029 |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# pyre-strict
import asyncio
import getpass
import os
import pathlib
import sys
from typing import List, Optional
from . import daemon_util
from .config import EdenInstance
from .logfile import forward_log_file
from .systemd import (
EdenFSSystemdServiceConfig,
SystemdConnectionRefusedError,
SystemdFileNotFoundError,
SystemdServiceFailedToStartError,
SystemdUserBus,
edenfs_systemd_service_name,
print_service_status_using_systemctl_for_diagnostics_async,
)
from .util import print_stderr
async def start_systemd_service(
    instance: EdenInstance,
    daemon_binary: Optional[str] = None,
    edenfs_args: Optional[List[str]] = None,
) -> int:
    """Start EdenFS as a systemd user service and wait for it to come up.

    Writes the per-instance systemd service configuration, then asks the
    systemd user manager (over D-Bus) to start the unit, forwarding the
    daemon's startup log to stderr while waiting.

    Returns a process exit code: 0 on success, 1 on any failure (binary not
    found, service already running, user manager unreachable, or the unit
    failing to start).
    """
    try:
        daemon_binary = daemon_util.find_daemon_binary(daemon_binary)
    except daemon_util.DaemonBinaryNotFound as e:
        print_stderr(f"error: {e}")
        return 1
    service_config = EdenFSSystemdServiceConfig(
        eden_dir=instance.state_dir,
        edenfs_executable_path=pathlib.Path(daemon_binary),
        extra_edenfs_arguments=edenfs_args or [],
    )
    # The config file must exist before systemd tries to launch the unit.
    service_config.write_config_file()
    service_name: str = edenfs_systemd_service_name(instance.state_dir)
    xdg_runtime_dir: str = _get_systemd_xdg_runtime_dir(config=instance)
    # Truncate (or create) the startup log so we only forward output
    # produced by this start attempt.
    startup_log_path = service_config.startup_log_file_path
    startup_log_path.write_bytes(b"")
    with forward_log_file(startup_log_path, sys.stderr.buffer) as log_forwarder:
        loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()

        async def start_service_async() -> int:
            # Talk to the per-user systemd manager over its D-Bus socket.
            async with SystemdUserBus(xdg_runtime_dir=xdg_runtime_dir) as systemd:
                service_name_bytes = service_name.encode()
                active_state = await systemd.get_unit_active_state_async(
                    service_name_bytes
                )
                if active_state == b"active":
                    print_stderr("error: EdenFS systemd service is already running")
                    # Show `systemctl status`-style output to help diagnose.
                    await print_service_status_using_systemctl_for_diagnostics_async(
                        service_name=service_name, xdg_runtime_dir=xdg_runtime_dir
                    )
                    return 1
                await systemd.start_service_and_wait_async(service_name_bytes)
                return 0

        try:
            # Stream the daemon's startup log to stderr while we wait.
            loop.create_task(log_forwarder.poll_forever_async())
            return await start_service_async()
        except (SystemdConnectionRefusedError, SystemdFileNotFoundError):
            print_stderr(
                f"error: The systemd user manager is not running. Run the "
                f"following command to\n"
                f"start it, then try again:\n"
                f"\n"
                f"  sudo systemctl start user@{getpass.getuser()}.service"
            )
            return 1
        except SystemdServiceFailedToStartError as e:
            print_stderr(f"error: {e}")
            return 1
        finally:
            # Flush any remaining log output before the forwarder is closed.
            log_forwarder.poll()
def _get_systemd_xdg_runtime_dir(config: EdenInstance) -> str:
    """Pick the XDG_RUNTIME_DIR used to reach the systemd user bus.

    The caller's environment wins; when XDG_RUNTIME_DIR is unset we fall
    back to the instance-configured default and print a warning.
    """
    xdg_runtime_dir = os.getenv("XDG_RUNTIME_DIR")
    if xdg_runtime_dir is not None:
        return xdg_runtime_dir
    xdg_runtime_dir = config.get_fallback_systemd_xdg_runtime_dir()
    print_stderr(
        f"warning: The XDG_RUNTIME_DIR environment variable is not set; "
        f"using fallback: {xdg_runtime_dir!r}"
    )
    return xdg_runtime_dir
| facebookexperimental/eden | eden/fs/cli/systemd_service.py | Python | gpl-2.0 | 3,563 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# Add-on metadata read by Blender's add-on manager (shown in the
# User Preferences > Add-ons panel).
bl_info = {
    "name": "Wavefront OBJ format",
    "author": "Campbell Barton",
    "blender": (2, 58, 0),
    "location": "File > Import-Export",
    "description": "Import-Export OBJ, Import OBJ mesh, UV's, "
                   "materials and textures",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
                "Scripts/Import-Export/Wavefront_OBJ",
    "tracker_url": "",
    "support": 'OFFICIAL',
    "category": "Import-Export"}

# Support Blender's "Reload Scripts": when this package is re-executed,
# the submodules may already be in the namespace, so reload them to pick
# up any edits.
if "bpy" in locals():
    import imp
    if "import_obj" in locals():
        imp.reload(import_obj)
    if "export_obj" in locals():
        imp.reload(export_obj)
import bpy
from bpy.props import (BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
path_reference_mode,
axis_conversion,
)
class ImportOBJ(bpy.types.Operator, ImportHelper):
    """Load a Wavefront OBJ File"""
    bl_idname = "import_scene.obj"
    bl_label = "Import OBJ"
    bl_options = {'PRESET', 'UNDO'}

    filename_ext = ".obj"
    # Restrict the file browser to OBJ/MTL files.
    filter_glob = StringProperty(
            default="*.obj;*.mtl",
            options={'HIDDEN'},
            )

    # --- Geometry options ---
    use_ngons = BoolProperty(
            name="NGons",
            description="Import faces with more than 4 verts as ngons",
            default=True,
            )
    use_edges = BoolProperty(
            name="Lines",
            description="Import lines and faces with 2 verts as edge",
            default=True,
            )
    use_smooth_groups = BoolProperty(
            name="Smooth Groups",
            description="Surround smooth groups by sharp edges",
            default=True,
            )

    # --- How OBJ objects/groups map to Blender objects / vertex groups ---
    use_split_objects = BoolProperty(
            name="Object",
            description="Import OBJ Objects into Blender Objects",
            default=True,
            )
    use_split_groups = BoolProperty(
            name="Group",
            description="Import OBJ Groups into Blender Objects",
            default=True,
            )
    use_groups_as_vgroups = BoolProperty(
            name="Poly Groups",
            description="Import OBJ groups as vertex groups",
            default=False,
            )
    use_image_search = BoolProperty(
            name="Image Search",
            description="Search subdirs for any associated images "
                        "(Warning, may be slow)",
            default=True,
            )
    split_mode = EnumProperty(
            name="Split",
            items=(('ON', "Split", "Split geometry, omits unused verts"),
                   ('OFF', "Keep Vert Order", "Keep vertex order from file"),
                   ),
            )
    global_clamp_size = FloatProperty(
            name="Clamp Size",
            description="Clamp bounds under this value (zero to disable)",
            min=0.0, max=1000.0,
            soft_min=0.0, soft_max=1000.0,
            default=0.0,
            )

    # --- Axis conversion from the OBJ file's coordinate convention ---
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='-Z',
            )
    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Y',
            )

    def execute(self, context):
        """Run the import using the operator's current settings."""
        # print("Selected: " + context.active_object.name)
        from . import import_obj

        # 'Keep Vert Order' mode is mutually exclusive with splitting, and
        # splitting is mutually exclusive with poly-groups-as-vgroups.
        if self.split_mode == 'OFF':
            self.use_split_objects = False
            self.use_split_groups = False
        else:
            self.use_groups_as_vgroups = False

        keywords = self.as_keywords(ignore=("axis_forward",
                                            "axis_up",
                                            "filter_glob",
                                            "split_mode",
                                            ))

        # Build the 4x4 matrix converting from the file's axes to Blender's.
        global_matrix = axis_conversion(from_forward=self.axis_forward,
                                        from_up=self.axis_up,
                                        ).to_4x4()
        keywords["global_matrix"] = global_matrix

        if bpy.data.is_saved and context.user_preferences.filepaths.use_relative_paths:
            import os
            # NOTE(review): resolves the blend file's directory (as bytes) so
            # texture paths can be made relative — confirm bytes are intended.
            keywords["relpath"] = os.path.dirname((bpy.data.path_resolve("filepath", False).as_bytes()))

        return import_obj.load(self, context, **keywords)

    def draw(self, context):
        """Lay out the operator's options in the file-browser sidebar."""
        layout = self.layout

        row = layout.row(align=True)
        row.prop(self, "use_ngons")
        row.prop(self, "use_edges")
        layout.prop(self, "use_smooth_groups")

        box = layout.box()
        row = box.row()
        row.prop(self, "split_mode", expand=True)
        row = box.row()
        # Show only the options relevant to the chosen split mode.
        if self.split_mode == 'ON':
            row.label(text="Split by:")
            row.prop(self, "use_split_objects")
            row.prop(self, "use_split_groups")
        else:
            row.prop(self, "use_groups_as_vgroups")

        row = layout.split(percentage=0.67)
        row.prop(self, "global_clamp_size")
        layout.prop(self, "axis_forward")
        layout.prop(self, "axis_up")
        layout.prop(self, "use_image_search")
class ExportOBJ(bpy.types.Operator, ExportHelper):
    """Save a Wavefront OBJ File"""
    bl_idname = "export_scene.obj"
    bl_label = 'Export OBJ'
    bl_options = {'PRESET'}

    filename_ext = ".obj"
    # Restrict the file browser to OBJ/MTL files.
    filter_glob = StringProperty(
            default="*.obj;*.mtl",
            options={'HIDDEN'},
            )

    # context group
    use_selection = BoolProperty(
            name="Selection Only",
            description="Export selected objects only",
            default=False,
            )
    use_animation = BoolProperty(
            name="Animation",
            description="Write out an OBJ for each frame",
            default=False,
            )

    # object group
    use_mesh_modifiers = BoolProperty(
            name="Apply Modifiers",
            description="Apply modifiers (preview resolution)",
            default=True,
            )

    # extra data group
    use_edges = BoolProperty(
            name="Include Edges",
            description="",
            default=True,
            )
    use_smooth_groups = BoolProperty(
            name="Smooth Groups",
            description="Write sharp edges as smooth groups",
            default=False,
            )
    use_smooth_groups_bitflags = BoolProperty(
            name="Bitflag Smooth Groups",
            description="Same as 'Smooth Groups', but generate smooth groups IDs as bitflags "
                        "(produces at most 32 different smooth groups, usually much less)",
            default=False,
            )
    use_normals = BoolProperty(
            name="Include Normals",
            description="",
            default=False,
            )
    use_uvs = BoolProperty(
            name="Include UVs",
            description="Write out the active UV coordinates",
            default=True,
            )
    use_materials = BoolProperty(
            name="Write Materials",
            description="Write out the MTL file",
            default=True,
            )
    use_triangles = BoolProperty(
            name="Triangulate Faces",
            description="Convert all faces to triangles",
            default=False,
            )
    use_nurbs = BoolProperty(
            name="Write Nurbs",
            description="Write nurbs curves as OBJ nurbs rather than "
                        "converting to geometry",
            default=False,
            )
    use_vertex_groups = BoolProperty(
            name="Polygroups",
            description="",
            default=False,
            )

    # grouping group
    use_blen_objects = BoolProperty(
            name="Objects as OBJ Objects",
            description="",
            default=True,
            )
    group_by_object = BoolProperty(
            name="Objects as OBJ Groups ",
            description="",
            default=False,
            )
    group_by_material = BoolProperty(
            name="Material Groups",
            description="",
            default=False,
            )
    keep_vertex_order = BoolProperty(
            name="Keep Vertex Order",
            description="",
            default=False,
            )

    # --- Axis conversion into the OBJ file's coordinate convention ---
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='-Z',
            )
    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Y',
            )
    global_scale = FloatProperty(
            name="Scale",
            min=0.01, max=1000.0,
            default=1.0,
            )

    # Standard path-handling enum shared by Blender exporters.
    path_mode = path_reference_mode

    check_extension = True

    def execute(self, context):
        """Run the export using the operator's current settings."""
        from . import export_obj
        from mathutils import Matrix

        keywords = self.as_keywords(ignore=("axis_forward",
                                            "axis_up",
                                            "global_scale",
                                            "check_existing",
                                            "filter_glob",
                                            ))

        # Combine the uniform scale with the axis conversion into one matrix.
        global_matrix = (Matrix.Scale(self.global_scale, 4) *
                         axis_conversion(to_forward=self.axis_forward,
                                         to_up=self.axis_up,
                                         ).to_4x4())
        keywords["global_matrix"] = global_matrix
        return export_obj.save(self, context, **keywords)
def menu_func_import(self, context):
    # File > Import menu entry that invokes the OBJ importer.
    self.layout.operator(ImportOBJ.bl_idname, text="Wavefront (.obj)")


def menu_func_export(self, context):
    # File > Export menu entry that invokes the OBJ exporter.
    self.layout.operator(ExportOBJ.bl_idname, text="Wavefront (.obj)")
def register():
    """Register this module's operator classes and add the menu entries."""
    bpy.utils.register_module(__name__)

    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    """Unregister the operator classes and remove the menu entries."""
    bpy.utils.unregister_module(__name__)

    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)


# Allow running the file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/io_scene_obj/__init__.py | Python | gpl-3.0 | 12,097 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rss_sources, feedparser
import requests, io, os, json, shutil
from pprint import pprint
from pyteaser import SummarizeUrl
from shutil import copyfile
import time, requests, urllib2
def getNews(rssDict, service, searchedImages):
    """Fetch, summarize and persist news posts for one service.

    For every feed in ``rssDict`` (category name -> RSS URL) this parses the
    feed, resolves a thumbnail image via the Google image-search API (with an
    in-memory cache in ``searchedImages``), summarizes each article with
    pyteaser, writes the results to a temporary ``<category>-write.json``
    file and finally copies it over ``<category>.json`` so readers never see
    a half-written file.

    :param rssDict: mapping of category name -> RSS feed URL
    :param service: service identifier ('bbc', 'cbc', 'reuters'); also the
        output sub-directory under ./data/
    :param searchedImages: cache mapping article URL -> image URL; mutated
        in place so lookups persist across calls
    """
    try:
        startTime = time.time()
        directory = "./data/" + service + '/'
        # create directory for service if doesnt yet exist
        if not os.path.exists(directory):
            os.makedirs(directory)
            print('created directory: %s' % directory)
        # iterate each feed in service -> fetch data -> write to temp file -> copy to actual file
        for key, value in rssDict.items():
            fileName = directory + key + "-write.json"
            # delete {category}-write.json file if one already exists
            if os.path.exists(fileName):
                os.remove(fileName)
                print('deleted existing file: %s' % fileName)
            feed = feedparser.parse(value)  # parse feed to get all the posts
            feedDict = {}
            feedCounter = 0
            # loop through posts in category
            for post in feed.entries[:20]:  # limit to 20 entries per feed
                imgUrl = "none"
                # caching enabled. this prevents asking google for images every-time
                if post.link in searchedImages:
                    imgUrl = searchedImages[post.link]
                    print('found image in cache for %s. done!' % post.link)
                else:
                    query = '+'.join(post.title.split())
                    if service == 'reuters':
                        imgSearch = ("https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + query)
                    else:
                        imgSearch = ("https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" +
                                     service + "+" + query)
                    # SECURITY NOTE: verify=False disables TLS certificate
                    # checking; kept for compatibility but worth revisiting.
                    imgSearchRequest = requests.get(imgSearch, verify=False)
                    if imgSearchRequest.status_code == 200:  # on get success
                        imgSearchData = imgSearchRequest.json()
                        try:
                            getNextImg = 1
                            imgUrl = imgSearchData['responseData']['results'][0]['url']
                            if service == 'reuters':
                                imgUrl = urllib2.unquote(imgUrl)
                            # BBC sometimes returns a generic "breaking news"
                            # placeholder; fall through to the next result.
                            badBbcUrl = 'http://ichef.bbci.co.uk/news/660/media/images/80201000/png/_80201000_breaking_image_large-3.png'
                            if service == 'bbc' and imgUrl == badBbcUrl:
                                imgUrl = imgSearchData['responseData']['results'][1]['url']
                                getNextImg = 2
                            # check if selected url is actually an image
                            # if not, choose the next url
                            if 'image' not in requests.get(imgUrl, verify=False).headers['content-type']:
                                print("MISSED FIRST IMG URL = BAD CONTENT. SECOND FETCH!")
                                imgUrl = imgSearchData['responseData']['results'][getNextImg]['url']
                            searchedImages[post.link] = imgUrl  # add to image cache if img found
                            print('image not in cache but new one fetched for %s. done!' % post.link)
                        except (TypeError, IndexError, requests.exceptions.MissingSchema):
                            print('DENIAL FROM GOOGLE for %s. failed!' % post.link)
                            imgUrl = "200F"
                    else:
                        imgUrl = "404"
                        print('image not in cache. also couldnt fetch new one for %s. failed!' % post.link)
                summary = SummarizeUrl(post.link)  # summarize text from article
                feedDict[feedCounter] = [post.title, post.link, summary, imgUrl]
                feedCounter += 1
            # write the collected data to {category}-write.json in json format
            with open(fileName, 'w') as fp:
                json.dump(feedDict, fp)
            print('wrote file: %s' % fileName)
        # iterate through all categories and copy temp files to the actual files
        for key, value in rssDict.items():
            source = directory + key + "-write.json"
            destination = directory + key + ".json"
            if os.path.exists(source):
                copyfile(source, destination)
                print('copied file: %s' % destination)
            else:
                print('cannot copy file: source %s not found' % source)
        print("--- %s seconds ---\n" % (time.time() - startTime))  # iteration runtime
    except Exception as exc:
        # Deliberate best-effort: keep the caller's loop alive, but report
        # the actual error instead of swallowing it silently.
        print("EXCEPTION ERROR EXCEPTION ERROR!! %s" % exc)
# Shared image-URL cache: survives across iterations so Google is only asked
# once per article link.
searchedImages = {} # variable for caching google image urls
counter = 1 # count the iterations of while loop
bbcRssDict = rss_sources.getBbcRss()
cbcRssDict = rss_sources.getCbcRss()
reutersRssDict = rss_sources.getReutersRss()
# Main loop: refresh each service in turn, pausing between services to stay
# under Google's rate limits, then sleep an hour before the next full pass.
while True:
    getNews(bbcRssDict, 'bbc', searchedImages)
    print("SERVICE BBC COMPLETE!")
    time.sleep(300)
    print("sleeping for 5 minutes to calm down Google")
    getNews(cbcRssDict, 'cbc', searchedImages)
    print("SERVICE CBC COMPLETE!")
    time.sleep(300)
    print("sleeping for 5 minutes to calm down Google")
    getNews(reutersRssDict, 'reuters', searchedImages)
    print("SERVICE REUTERS COMPLETE!")
    print("Iteration # %d complete.\nSleeping for 1 hour\n" % counter)
    time.sleep(3600)
    counter += 1
| amtux/quick-news | services/news_fetcher.py | Python | gpl-2.0 | 4,581 |
"""
Examples used in docstrings.
"""
# facetcolumns
from petl import facetcolumns
table = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, True],
['b', 3]]
fc = facetcolumns(table, 'foo')
fc['a']
fc['a']['foo']
fc['a']['bar']
fc['a']['baz']
fc['b']
fc['b']['foo']
fc['b']['bar']
fc['b']['baz']
fc['c']
# rename
table1 = [['sex', 'age'],
['m', 12],
['f', 34],
['-', 56]]
from petl import look, rename
look(table1)
# rename a single field
table2 = rename(table1, 'sex', 'gender')
look(table2)
# rename multiple fields by passing a dictionary as the second argument
table3 = rename(table1, {'sex': 'gender', 'age': 'age_years'})
look(table3)
# the returned table object can also be used to modify the field mapping using the suffix notation
table4 = rename(table1)
table4['sex'] = 'gender'
table4['age'] = 'age_years'
look(table4)
# cut
table1 = [['foo', 'bar', 'baz'],
['A', 1, 2.7],
['B', 2, 3.4],
['B', 3, 7.8],
['D', 42, 9.0],
['E', 12]]
from petl import look, cut
look(table1)
table2 = cut(table1, 'foo', 'baz')
look(table2)
# fields can also be specified by index, starting from zero
table3 = cut(table1, 0, 2)
look(table3)
# field names and indices can be mixed
table4 = cut(table1, 'bar', 0)
look(table4)
# select a range of fields
table5 = cut(table1, *list(range(0, 2)))
look(table5)
# cutout
table1 = [['foo', 'bar', 'baz'],
['A', 1, 2.7],
['B', 2, 3.4],
['B', 3, 7.8],
['D', 42, 9.0],
['E', 12]]
from petl import cutout, look
look(table1)
table2 = cutout(table1, 'bar')
look(table2)
# cat
table1 = [['foo', 'bar'],
[1, 'A'],
[2, 'B']]
table2 = [['bar', 'baz'],
['C', True],
['D', False]]
table4 = [['foo', 'bar', 'baz'],
['A', 1, 2],
['B', '2', '3.4'],
['B', '3', '7.8', True],
['D', 'xyz', 9.0],
['E', None]]
table5 = [['bar', 'foo'],
['A', 1],
['B', 2]]
table7 = [['bar', 'foo'],
['A', 1],
['B', 2]]
table8 = [['bar', 'baz'],
['C', True],
['D', False]]
from petl import look, cat
look(table1)
look(table2)
table3 = cat(table1, table2)
look(table3)
# can also be used to square up a single table with uneven rows
look(table4)
look(cat(table4))
# use the header keyword argument to specify a fixed set of fields
look(table5)
table6 = cat(table5, header=['A', 'foo', 'B', 'bar', 'C'])
look(table6)
# using the header keyword argument with two input tables
look(table7)
look(table8)
table9 = cat(table7, table8, header=['A', 'foo', 'B', 'bar', 'C'])
look(table9)
# convert
table1 = [['foo', 'bar', 'baz'],
['A', '2.4', 12],
['B', '5.7', 34],
['C', '1.2', 56]]
from petl import convert, look
look(table1)
# using the built-in float function:
table2 = convert(table1, 'bar', float)
look(table2)
# using a lambda function::
table3 = convert(table1, 'baz', lambda v: v*2)
look(table3)
# a method of the data value can also be invoked by passing the method name
table4 = convert(table1, 'foo', 'lower')
look(table4)
# arguments to the method invocation can also be given
table5 = convert(table1, 'foo', 'replace', 'A', 'AA')
look(table5)
# values can also be translated via a dictionary
table7 = convert(table1, 'foo', {'A': 'Z', 'B': 'Y'})
look(table7)
# the same conversion can be applied to multiple fields
table8 = convert(table1, ('foo', 'bar', 'baz'), str)
look(table8)
# multiple conversions can be specified at the same time
table9 = convert(table1, {'foo': 'lower', 'bar': float, 'baz': lambda v: v*2})
look(table9)
# ...or alternatively via a list
table10 = convert(table1, ['lower', float, lambda v: v*2])
look(table10)
# ...or alternatively via suffix notation on the returned table object
table11 = convert(table1)
table11['foo'] = 'lower'
table11['bar'] = float
table11['baz'] = lambda v: v*2
look(table11)
# convertnumbers
table1 = [['foo', 'bar', 'baz', 'quux'],
['1', '3.0', '9+3j', 'aaa'],
['2', '1.3', '7+2j', None]]
from petl import convertnumbers, look
look(table1)
table2 = convertnumbers(table1)
look(table2)
# addfield
table1 = [['foo', 'bar'],
['M', 12],
['F', 34],
['-', 56]]
from petl import addfield, look
look(table1)
# using a fixed value
table2 = addfield(table1, 'baz', 42)
look(table2)
# calculating the value
table2 = addfield(table1, 'baz', lambda rec: rec['bar'] * 2)
look(table2)
# an expression string can also be used via expr
from petl import expr
table3 = addfield(table1, 'baz', expr('{bar} * 2'))
look(table3)
# rowslice
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 5],
['d', 7],
['f', 42]]
from petl import rowslice, look
look(table1)
table2 = rowslice(table1, 2)
look(table2)
table3 = rowslice(table1, 1, 4)
look(table3)
table4 = rowslice(table1, 0, 5, 2)
look(table4)
# head
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 5],
['d', 7],
['f', 42],
['f', 3],
['h', 90]]
from petl import head, look
look(table1)
table2 = head(table1, 4)
look(table2)
# tail
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 5],
['d', 7],
['f', 42],
['f', 3],
['h', 90],
['k', 12],
['l', 77],
['q', 2]]
from petl import tail, look
look(table1)
table2 = tail(table1, 4)
look(table2)
# sort
table1 = [['foo', 'bar'],
['C', 2],
['A', 9],
['A', 6],
['F', 1],
['D', 10]]
from petl import sort, look
look(table1)
table2 = sort(table1, 'foo')
look(table2)
# sorting by compound key is supported
table3 = sort(table1, key=['foo', 'bar'])
look(table3)
# if no key is specified, the default is a lexical sort
table4 = sort(table1)
look(table4)
# melt
table1 = [['id', 'gender', 'age'],
[1, 'F', 12],
[2, 'M', 17],
[3, 'M', 16]]
table3 = [['id', 'time', 'height', 'weight'],
[1, 11, 66.4, 12.2],
[2, 16, 53.2, 17.3],
[3, 12, 34.5, 9.4]]
from petl import melt, look
look(table1)
table2 = melt(table1, 'id')
look(table2)
# compound keys are supported
look(table3)
table4 = melt(table3, key=['id', 'time'])
look(table4)
# a subset of variable fields can be selected
table5 = melt(table3, key=['id', 'time'], variables=['height'])
look(table5)
# recast
table1 = [['id', 'variable', 'value'],
[3, 'age', 16],
[1, 'gender', 'F'],
[2, 'gender', 'M'],
[2, 'age', 17],
[1, 'age', 12],
[3, 'gender', 'M']]
table3 = [['id', 'vars', 'vals'],
[3, 'age', 16],
[1, 'gender', 'F'],
[2, 'gender', 'M'],
[2, 'age', 17],
[1, 'age', 12],
[3, 'gender', 'M']]
table6 = [['id', 'time', 'variable', 'value'],
[1, 11, 'weight', 66.4],
[1, 14, 'weight', 55.2],
[2, 12, 'weight', 53.2],
[2, 16, 'weight', 43.3],
[3, 12, 'weight', 34.5],
[3, 17, 'weight', 49.4]]
table9 = [['id', 'variable', 'value'],
[1, 'gender', 'F'],
[2, 'age', 17],
[1, 'age', 12],
[3, 'gender', 'M']]
from petl import recast, look
look(table1)
table2 = recast(table1)
look(table2)
# specifying variable and value fields
look(table3)
table4 = recast(table3, variablefield='vars', valuefield='vals')
look(table4)
# if there are multiple values for each key/variable pair, and no reducers
# function is provided, then all values will be listed
look(table6)
table7 = recast(table6, key='id')
look(table7)
# multiple values can be reduced via an aggregation function
def mean(values):
    # Arithmetic mean of a sequence, always returned as a float.
    total = 0
    for v in values:
        total += v
    return total / float(len(values))
table8 = recast(table6, key='id', reducers={'weight': mean})
look(table8)
# missing values are padded with whatever is provided via the missing
# keyword argument (None by default)
look(table9)
table10 = recast(table9, key='id')
look(table10)
# duplicates
table1 = [['foo', 'bar', 'baz'],
['A', 1, 2.0],
['B', 2, 3.4],
['D', 6, 9.3],
['B', 3, 7.8],
['B', 2, 12.3],
['E', None, 1.3],
['D', 4, 14.5]]
from petl import duplicates, look
look(table1)
table2 = duplicates(table1, 'foo')
look(table2)
# compound keys are supported
table3 = duplicates(table1, key=['foo', 'bar'])
look(table3)
# conflicts
table1 = [['foo', 'bar', 'baz'],
['A', 1, 2.7],
['B', 2, None],
['D', 3, 9.4],
['B', None, 7.8],
['E', None],
['D', 3, 12.3],
['A', 2, None]]
from petl import conflicts, look
look(table1)
table2 = conflicts(table1, 'foo')
look(table2)
# complement
a = [['foo', 'bar', 'baz'],
['A', 1, True],
['C', 7, False],
['B', 2, False],
['C', 9, True]]
b = [['x', 'y', 'z'],
['B', 2, False],
['A', 9, False],
['B', 3, True],
['C', 9, True]]
from petl import complement, look
look(a)
look(b)
aminusb = complement(a, b)
look(aminusb)
bminusa = complement(b, a)
look(bminusa)
# recordcomplement
a = (('foo', 'bar', 'baz'),
('A', 1, True),
('C', 7, False),
('B', 2, False),
('C', 9, True))
b = (('bar', 'foo', 'baz'),
(2, 'B', False),
(9, 'A', False),
(3, 'B', True),
(9, 'C', True))
from petl import recordcomplement, look
look(a)
look(b)
aminusb = recordcomplement(a, b)
look(aminusb)
bminusa = recordcomplement(b, a)
look(bminusa)
# diff
a = [['foo', 'bar', 'baz'],
['A', 1, True],
['C', 7, False],
['B', 2, False],
['C', 9, True]]
b = [['x', 'y', 'z'],
['B', 2, False],
['A', 9, False],
['B', 3, True],
['C', 9, True]]
from petl import diff, look
look(a)
look(b)
added, subtracted = diff(a, b)
# rows in b not in a
look(added)
# rows in a not in b
look(subtracted)
# recorddiff
a = (('foo', 'bar', 'baz'),
('A', 1, True),
('C', 7, False),
('B', 2, False),
('C', 9, True))
b = (('bar', 'foo', 'baz'),
(2, 'B', False),
(9, 'A', False),
(3, 'B', True),
(9, 'C', True))
from petl import recorddiff, look
look(a)
look(b)
added, subtracted = recorddiff(a, b)
look(added)
look(subtracted)
# capture
table1 = [['id', 'variable', 'value'],
['1', 'A1', '12'],
['2', 'A2', '15'],
['3', 'B1', '18'],
['4', 'C12', '19']]
from petl import capture, look
look(table1)
table2 = capture(table1, 'variable', '(\\w)(\\d+)', ['treat', 'time'])
look(table2)
# using the include_original argument
table3 = capture(table1, 'variable', '(\\w)(\\d+)', ['treat', 'time'], include_original=True)
look(table3)
# split
table1 = [['id', 'variable', 'value'],
['1', 'parad1', '12'],
['2', 'parad2', '15'],
['3', 'tempd1', '18'],
['4', 'tempd2', '19']]
from petl import split, look
look(table1)
table2 = split(table1, 'variable', 'd', ['variable', 'day'])
look(table2)
# select
table1 = [['foo', 'bar', 'baz'],
['a', 4, 9.3],
['a', 2, 88.2],
['b', 1, 23.3],
['c', 8, 42.0],
['d', 7, 100.9],
['c', 2]]
from petl import select, look
look(table1)
# the second positional argument can be a function accepting a record (i.e., a
# dictionary representation of a row).
table2 = select(table1, lambda rec: rec['foo'] == 'a' and rec['baz'] > 88.1)
look(table2)
# the second positional argument can also be an expression string, which
# will be converted to a function using expr()
table3 = select(table1, "{foo} == 'a' and {baz} > 88.1")
look(table3)
# the condition can also be applied to a single field
table4 = select(table1, 'foo', lambda v: v == 'a')
look(table4)
# fieldmap
table1 = [['id', 'sex', 'age', 'height', 'weight'],
[1, 'male', 16, 1.45, 62.0],
[2, 'female', 19, 1.34, 55.4],
[3, 'female', 17, 1.78, 74.4],
[4, 'male', 21, 1.33, 45.2],
[5, '-', 25, 1.65, 51.9]]
from petl import fieldmap, look
look(table1)
from collections import OrderedDict
mappings = OrderedDict()
# rename a field
mappings['subject_id'] = 'id'
# translate a field
mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
# apply a calculation to a field
mappings['age_months'] = 'age', lambda v: v * 12
# apply a calculation to a combination of fields
mappings['bmi'] = lambda rec: rec['weight'] / rec['height']**2
# transform and inspect the output
table2 = fieldmap(table1, mappings)
look(table2)
# field mappings can also be added and/or updated after the table is created
# via the suffix notation
table3 = fieldmap(table1)
table3['subject_id'] = 'id'
table3['gender'] = 'sex', {'male': 'M', 'female': 'F'}
table3['age_months'] = 'age', lambda v: v * 12
# use an expression string this time
table3['bmi'] = '{weight} / {height}**2'
look(table3)
# facet
table1 = [['foo', 'bar', 'baz'],
['a', 4, 9.3],
['a', 2, 88.2],
['b', 1, 23.3],
['c', 8, 42.0],
['d', 7, 100.9],
['c', 2]]
from petl import facet, look
look(table1)
foo = facet(table1, 'foo')
list(foo.keys())
look(foo['a'])
look(foo['c'])
# selectre
table1 = (('foo', 'bar', 'baz'),
('aa', 4, 9.3),
('aaa', 2, 88.2),
('b', 1, 23.3),
('ccc', 8, 42.0),
('bb', 7, 100.9),
('c', 2))
from petl import selectre, look
look(table1)
table2 = selectre(table1, 'foo', '[ab]{2}')
look(table2)
# rowreduce
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4]]
from petl import rowreduce, look
look(table1)
def sumbar(key, rows):
    # Reduce a group of rows to [key, total of the second column].
    total = 0
    for row in rows:
        total += row[1]
    return [key, total]
table2 = rowreduce(table1, key='foo', reducer=sumbar, fields=['foo', 'barsum'])
look(table2)
# recordreduce
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4]]
from petl import recordreduce, look
look(table1)
def sumbar(key, records):
    # Reduce a group of records to [key, total of their 'bar' values].
    bar_values = [rec['bar'] for rec in records]
    return [key, sum(bar_values)]
table2 = recordreduce(table1, key='foo', reducer=sumbar, fields=['foo', 'barsum'])
look(table2)
# mergeduplicates
table1 = [['foo', 'bar', 'baz'],
['A', 1, 2.7],
['B', 2, None],
['D', 3, 9.4],
['B', None, 7.8],
['E', None, 42.],
['D', 3, 12.3],
['A', 2, None]]
from petl import mergeduplicates, look
look(table1)
table2 = mergeduplicates(table1, 'foo')
look(table2)
# merge
table1 = [['foo', 'bar', 'baz'],
[1, 'A', True],
[2, 'B', None],
[4, 'C', True]]
table2 = [['bar', 'baz', 'quux'],
['A', True, 42.0],
['B', False, 79.3],
['C', False, 12.4]]
from petl import look, merge
look(table1)
look(table2)
table3 = merge(table1, table2, key='bar')
look(table3)
# aggregate
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4],
['d', 3],
['d'],
['e']]
from petl import aggregate, look
look(table1)
from collections import OrderedDict
aggregators = OrderedDict()
aggregators['minbar'] = 'bar', min
aggregators['maxbar'] = 'bar', max
aggregators['sumbar'] = 'bar', sum
aggregators['listbar'] = 'bar', list
table2 = aggregate(table1, 'foo', aggregators)
look(table2)
# aggregation functions can also be added and/or updated using the suffix
# notation on the returned table object, e.g.::
table3 = aggregate(table1, 'foo')
table3['minbar'] = 'bar', min
table3['maxbar'] = 'bar', max
table3['sumbar'] = 'bar', sum
table3['listbar'] = 'bar' # default aggregation is list
look(table3)
# rangerowreduce
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4]]
from petl import rangerowreduce, look
look(table1)
def reducer(key, rows):
    # key is a (from, to) range pair; concatenate the first field of each row.
    lo, hi = key[0], key[1]
    joined = ''.join([row[0] for row in rows])
    return [lo, hi, joined]
table2 = rangerowreduce(table1, 'bar', 2, reducer=reducer, fields=['frombar', 'tobar', 'foos'])
look(table2)
# rangerecordreduce
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4]]
from petl import rangerecordreduce, look
look(table1)
def redu(minv, maxunpack, recs):
    # Concatenate the 'foo' value of every record in the [minv, maxunpack) bin.
    foos = ''.join(rec['foo'] for rec in recs)
    return [minv, maxunpack, foos]
table2 = rangerecordreduce(table1, 'bar', 2, reducer=redu, fields=['frombar', 'tobar', 'foos'])
look(table2)
# rangecounts
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4],
['d', 3]]
from petl import rangecounts, look
look(table1)
table2 = rangecounts(table1, 'bar', width=2)
look(table2)
# rangeaggregate
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4],
['d', 3]]
from petl import rangeaggregate, look, strjoin
look(table1)
# aggregate whole rows
table2 = rangeaggregate(table1, 'bar', 2, len)
look(table2)
# aggregate single field
table3 = rangeaggregate(table1, 'bar', 2, list, 'foo')
look(table3)
# aggregate single field - alternative signature using keyword args
table4 = rangeaggregate(table1, key='bar', width=2, aggregation=list, value='foo')
look(table4)
# aggregate multiple fields
from collections import OrderedDict
aggregation = OrderedDict()
aggregation['foocount'] = len
aggregation['foojoin'] = 'foo', strjoin('')
aggregation['foolist'] = 'foo' # default is list
table5 = rangeaggregate(table1, 'bar', 2, aggregation)
look(table5)
# rowmap
table1 = [['id', 'sex', 'age', 'height', 'weight'],
[1, 'male', 16, 1.45, 62.0],
[2, 'female', 19, 1.34, 55.4],
[3, 'female', 17, 1.78, 74.4],
[4, 'male', 21, 1.33, 45.2],
[5, '-', 25, 1.65, 51.9]]
from petl import rowmap, look
look(table1)
def rowmapper(row):
    # Map [id, sex, age, height, weight] -> [id, gender code, age in months, BMI].
    sex_codes = {'male': 'M', 'female': 'F'}
    gender = sex_codes.get(row[1], row[1])
    return [row[0], gender, 12 * row[2], row[4] / row[3] ** 2]
table2 = rowmap(table1, rowmapper, fields=['subject_id', 'gender', 'age_months', 'bmi'])
look(table2)
# recordmap
table1 = [['id', 'sex', 'age', 'height', 'weight'],
[1, 'male', 16, 1.45, 62.0],
[2, 'female', 19, 1.34, 55.4],
[3, 'female', 17, 1.78, 74.4],
[4, 'male', 21, 1.33, 45.2],
[5, '-', 25, 1.65, 51.9]]
from petl import recordmap, look
look(table1)
def recmapper(rec):
    """Map a raw record (dict-like) to [id, abbreviated sex, age in months, BMI]."""
    abbreviations = {'male': 'M', 'female': 'F'}
    gender = abbreviations.get(rec['sex'], rec['sex'])  # pass unknown values through
    bmi = rec['weight'] / rec['height'] ** 2
    return [rec['id'], gender, rec['age'] * 12, bmi]
table2 = recordmap(table1, recmapper, fields=['subject_id', 'gender', 'age_months', 'bmi'])
look(table2)
# rowmapmany
table1 = [['id', 'sex', 'age', 'height', 'weight'],
[1, 'male', 16, 1.45, 62.0],
[2, 'female', 19, 1.34, 55.4],
[3, '-', 17, 1.78, 74.4],
[4, 'male', 21, 1.33]]
from petl import rowmapmany, look
look(table1)
def rowgenerator(row):
    """Yield one [id, variable, value] triple per derived measurement of *row*."""
    abbreviations = {'male': 'M', 'female': 'F'}
    subject_id = row[0]
    yield [subject_id, 'gender', abbreviations.get(row[1], row[1])]
    yield [subject_id, 'age_months', row[2] * 12]
    yield [subject_id, 'bmi', row[4] / row[3] ** 2]
table2 = rowmapmany(table1, rowgenerator, fields=['subject_id', 'variable', 'value'])
look(table2)
# recordmapmany
table1 = [['id', 'sex', 'age', 'height', 'weight'],
[1, 'male', 16, 1.45, 62.0],
[2, 'female', 19, 1.34, 55.4],
[3, '-', 17, 1.78, 74.4],
[4, 'male', 21, 1.33]]
from petl import recordmapmany, look
look(table1)
def rowgenerator(rec):
    """Yield one [id, variable, value] triple per derived field of record *rec*."""
    abbreviations = {'male': 'M', 'female': 'F'}
    subject_id = rec['id']
    yield [subject_id, 'gender', abbreviations.get(rec['sex'], rec['sex'])]
    yield [subject_id, 'age_months', rec['age'] * 12]
    yield [subject_id, 'bmi', rec['weight'] / rec['height'] ** 2]
table2 = recordmapmany(table1, rowgenerator, fields=['subject_id', 'variable', 'value'])
look(table2)
# setheader
table1 = [['foo', 'bar'],
['a', 1],
['b', 2]]
from petl import setheader, look
look(table1)
table2 = setheader(table1, ['foofoo', 'barbar'])
look(table2)
# extendheader
table1 = [['foo'],
['a', 1, True],
['b', 2, False]]
from petl import extendheader, look
look(table1)
table2 = extendheader(table1, ['bar', 'baz'])
look(table2)
# pushheader
table1 = [['a', 1],
['b', 2]]
from petl import pushheader, look
look(table1)
table2 = pushheader(table1, ['foo', 'bar'])
look(table2)
# skip
table1 = [['#aaa', 'bbb', 'ccc'],
['#mmm'],
['foo', 'bar'],
['a', 1],
['b', 2]]
from petl import skip, look
look(table1)
table2 = skip(table1, 2)
look(table2)
# skipcomments
table1 = [['##aaa', 'bbb', 'ccc'],
['##mmm',],
['#foo', 'bar'],
['##nnn', 1],
['a', 1],
['b', 2]]
from petl import skipcomments, look
look(table1)
table2 = skipcomments(table1, '##')
look(table2)
# unpack
table1 = [['foo', 'bar'],
[1, ['a', 'b']],
[2, ['c', 'd']],
[3, ['e', 'f']]]
from petl import unpack, look
look(table1)
table2 = unpack(table1, 'bar', ['baz', 'quux'])
look(table2)
# join
table1 = [['id', 'colour'],
[1, 'blue'],
[2, 'red'],
[3, 'purple']]
table2 = [['id', 'shape'],
[1, 'circle'],
[3, 'square'],
[4, 'ellipse']]
table5 = [['id', 'colour'],
[1, 'blue'],
[1, 'red'],
[2, 'purple']]
table6 = [['id', 'shape'],
[1, 'circle'],
[1, 'square'],
[2, 'ellipse']]
table8 = [['id', 'time', 'height'],
[1, 1, 12.3],
[1, 2, 34.5],
[2, 1, 56.7]]
table9 = [['id', 'time', 'weight'],
[1, 2, 4.5],
[2, 1, 6.7],
[2, 2, 8.9]]
from petl import join, look
look(table1)
look(table2)
table3 = join(table1, table2, key='id')
look(table3)
# if no key is given, a natural join is tried
table4 = join(table1, table2)
look(table4)
# note behaviour if the key is not unique in either or both tables
look(table5)
look(table6)
table7 = join(table5, table6, key='id')
look(table7)
# compound keys are supported
look(table8)
look(table9)
table10 = join(table8, table9, key=['id', 'time'])
look(table10)
# leftjoin
table1 = [['id', 'colour'],
[1, 'blue'],
[2, 'red'],
[3, 'purple']]
table2 = [['id', 'shape'],
[1, 'circle'],
[3, 'square'],
[4, 'ellipse']]
from petl import leftjoin, look
look(table1)
look(table2)
table3 = leftjoin(table1, table2, key='id')
look(table3)
# rightjoin
table1 = [['id', 'colour'],
[1, 'blue'],
[2, 'red'],
[3, 'purple']]
table2 = [['id', 'shape'],
[1, 'circle'],
[3, 'square'],
[4, 'ellipse']]
from petl import rightjoin, look
look(table1)
look(table2)
table3 = rightjoin(table1, table2, key='id')
look(table3)
# outerjoin
table1 = [['id', 'colour'],
[1, 'blue'],
[2, 'red'],
[3, 'purple']]
table2 = [['id', 'shape'],
[1, 'circle'],
[3, 'square'],
[4, 'ellipse']]
from petl import outerjoin, look
look(table1)
look(table2)
table3 = outerjoin(table1, table2, key='id')
look(table3)
# crossjoin
table1 = [['id', 'colour'],
[1, 'blue'],
[2, 'red']]
table2 = [['id', 'shape'],
[1, 'circle'],
[3, 'square']]
from petl import crossjoin, look
look(table1)
look(table2)
table3 = crossjoin(table1, table2)
look(table3)
# antijoin
table1 = [['id', 'colour'],
[0, 'black'],
[1, 'blue'],
[2, 'red'],
[4, 'yellow'],
[5, 'white']]
table2 = [['id', 'shape'],
[1, 'circle'],
[3, 'square']]
from petl import antijoin, look
look(table1)
look(table2)
table3 = antijoin(table1, table2, key='id')
look(table3)
# rangefacet
table1 = [['foo', 'bar'],
['a', 3],
['a', 7],
['b', 2],
['b', 1],
['b', 9],
['c', 4],
['d', 3]]
from petl import rangefacet, look
look(table1)
rf = rangefacet(table1, 'bar', 2)
list(rf.keys())
look(rf[(1, 3)])
look(rf[(7, 9)])
# transpose
table1 = (('id', 'colour'),
(1, 'blue'),
(2, 'red'),
(3, 'purple'),
(5, 'yellow'),
(7, 'orange'))
from petl import transpose, look
look(table1)
table2 = transpose(table1)
look(table2)
# intersection
table1 = (('foo', 'bar', 'baz'),
('A', 1, True),
('C', 7, False),
('B', 2, False),
('C', 9, True))
table2 = (('x', 'y', 'z'),
('B', 2, False),
('A', 9, False),
('B', 3, True),
('C', 9, True))
from petl import intersection, look
look(table1)
look(table2)
table3 = intersection(table1, table2)
look(table3)
# pivot
table1 = (('region', 'gender', 'style', 'units'),
('east', 'boy', 'tee', 12),
('east', 'boy', 'golf', 14),
('east', 'boy', 'fancy', 7),
('east', 'girl', 'tee', 3),
('east', 'girl', 'golf', 8),
('east', 'girl', 'fancy', 18),
('west', 'boy', 'tee', 12),
('west', 'boy', 'golf', 15),
('west', 'boy', 'fancy', 8),
('west', 'girl', 'tee', 6),
('west', 'girl', 'golf', 16),
('west', 'girl', 'fancy', 1))
from petl import pivot, look
look(table1)
table2 = pivot(table1, 'region', 'gender', 'units', sum)
look(table2)
table3 = pivot(table1, 'region', 'style', 'units', sum)
look(table3)
table4 = pivot(table1, 'gender', 'style', 'units', sum)
look(table4)
# flatten
table1 = [['foo', 'bar', 'baz'],
['A', 1, True],
['C', 7, False],
['B', 2, False],
['C', 9, True]]
from petl import flatten, look
look(table1)
list(flatten(table1))
# unflatten
table1 = [['lines',],
['A',],
[1,],
[True,],
['C',],
[7,],
[False,],
['B',],
[2,],
[False,],
['C'],
[9,]]
from petl import unflatten, look
input = ['A', 1, True, 'C', 7, False, 'B', 2, False, 'C', 9]
table = unflatten(input, 3)
look(table)
# a table and field name can also be provided as arguments
look(table1)
table2 = unflatten(table1, 'lines', 3)
look(table2)
# tocsv
table = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 2]]
from petl import tocsv, look
look(table)
tocsv(table, 'test.csv', delimiter='\t')
# look what it did
from petl import fromcsv
look(fromcsv('test.csv', delimiter='\t'))
# appendcsv
table = [['foo', 'bar'],
['d', 7],
['e', 42],
['f', 12]]
# look at an existing CSV file
from petl import look, fromcsv
testcsv = fromcsv('test.csv', delimiter='\t')
look(testcsv)
# append some data
look(table)
from petl import appendcsv
appendcsv(table, 'test.csv', delimiter='\t')
# look what it did
look(testcsv)
# topickle
table = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 2]]
from petl import topickle, look
look(table)
topickle(table, 'test.dat')
# look what it did
from petl import frompickle
look(frompickle('test.dat'))
# appendpickle
table = [['foo', 'bar'],
['d', 7],
['e', 42],
['f', 12]]
from petl import look, frompickle
# inspect an existing pickle file
testdat = frompickle('test.dat')
look(testdat)
# append some data
from petl import appendpickle
look(table)
appendpickle(table, 'test.dat')
# look what it did
look(testdat)
# tosqlite3
table = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 2]]
from petl import tosqlite3, look
look(table)
# by default, if the table does not already exist, it will be created
tosqlite3(table, 'test.db', 'foobar')
# look what it did
from petl import fromsqlite3
look(fromsqlite3('test.db', 'select * from foobar'))
# appendsqlite3
moredata = [['foo', 'bar'],
['d', 7],
['e', 9],
['f', 1]]
from petl import appendsqlite3, look
look(moredata)
appendsqlite3(moredata, 'test.db', 'foobar')
# look what it did
from petl import look, fromsqlite3
look(fromsqlite3('test.db', 'select * from foobar'))
# tojson
table = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 2]]
from petl import tojson, look
look(table)
tojson(table, 'example.json')
# check what it did
with open('example.json') as f:
print(f.read())
# tojsonarrays
table = [['foo', 'bar'],
['a', 1],
['b', 2],
['c', 2]]
from petl import tojsonarrays, look
look(table)
tojsonarrays(table, 'example.json')
# check what it did
with open('example.json') as f:
print(f.read())
# mergesort
table1 = (('foo', 'bar'),
('A', 9),
('C', 2),
('D', 10),
('A', 6),
('F', 1))
table2 = (('foo', 'bar'),
('B', 3),
('D', 10),
('A', 10),
('F', 4))
from petl import mergesort, look
look(table1)
look(table2)
table3 = mergesort(table1, table2, key='foo')
look(table3)
# mergesort - heterogeneous tables
table4 = (('foo', 'bar'),
('A', 9),
('C', 2),
('D', 10),
('A', 6),
('F', 1))
table5 = (('foo', 'baz'),
('B', 3),
('D', 10),
('A', 10),
('F', 4))
from petl import mergesort, look
table6 = mergesort(table4, table5, key='foo')
look(table6)
# mergesort - heterogeneous tables, reverse sorting
table1 = (('foo', 'bar'),
('A', 9),
('C', 2),
('D', 10),
('A', 6),
('F', 1))
table2 = (('foo', 'baz'),
('B', 3),
('D', 10),
('A', 10),
('F', 4))
from petl import mergesort, sort, cat, look
expect = sort(cat(table1, table2), key='foo', reverse=True)
look(expect)
actual = mergesort(table1, table2, key='foo', reverse=True)
look(actual)
# BUG FIX: the original line passed the positional argument sort(table2, ...)
# AFTER keyword arguments (a SyntaxError); the stray reverse=True belongs
# inside the first sort() call so both inputs are presorted descending.
actual = mergesort(sort(table1, key='foo', reverse=True), sort(table2, key='foo', reverse=True), key='foo', reverse=True, presorted=True)
look(actual)
# annex
table1 = (('foo', 'bar'),
('A', 9),
('C', 2),
('F', 1))
table2 = (('foo', 'baz'),
('B', 3),
('D', 10))
from petl import annex, look
look(table1)
look(table2)
table3 = annex(table1, table2)
look(table3)
# progress
from petl import dummytable, progress, tocsv
d = dummytable(100500)
p = progress(d, 10000)
tocsv(p, 'output.csv')
# clock
from petl import dummytable, clock, convert, progress, tocsv
t1 = dummytable(100000)
c1 = clock(t1)
t2 = convert(c1, 'foo', lambda v: v**2)
c2 = clock(t2)
p = progress(c2, 10000)
tocsv(p, 'dummy.csv')
# time consumed retrieving rows from t1
c1.time
# time consumed retrieving rows from t2
c2.time
# actual time consumed by the convert step
c2.time - c1.time
# unpackdict
table1 = (('foo', 'bar'),
(1, {'baz': 'a', 'quux': 'b'}),
(2, {'baz': 'c', 'quux': 'd'}),
(3, {'baz': 'e', 'quux': 'f'}))
from petl import unpackdict, look
look(table1)
table2 = unpackdict(table1, 'bar')
look(table2)
# unique
table1 = (('foo', 'bar', 'baz'),
('A', 1, 2),
('B', '2', '3.4'),
('D', 'xyz', 9.0),
('B', '3', '7.8'),
('B', '2', 42),
('E', None, None),
('D', 4, 12.3),
('F', 7, 2.3))
from petl import unique, look
look(table1)
table2 = unique(table1, 'foo')
look(table2)
# isordered
table = (('foo', 'bar', 'baz'),
('a', 1, True),
('b', 3, True),
('b', 2))
from petl import isordered, look
look(table)
isordered(table, key='foo')
isordered(table, key='foo', strict=True)
isordered(table, key='foo', reverse=True)
# rowgroupby
table = (('foo', 'bar', 'baz'),
('a', 1, True),
('b', 3, True),
('b', 2))
from petl import rowgroupby, look
look(table)
# group entire rows
for key, group in rowgroupby(table, 'foo'):
print(key, list(group))
# group specific values
for key, group in rowgroupby(table, 'foo', 'bar'):
print(key, list(group))
# fold
table1 = (('id', 'count'), (1, 3), (1, 5), (2, 4), (2, 8))
from petl import fold, look
look(table1)
import operator
table2 = fold(table1, 'id', operator.add, 'count', presorted=True)
look(table2)
# aggregate
table1 = (('foo', 'bar', 'baz'),
('a', 3, True),
('a', 7, False),
('b', 2, True),
('b', 2, False),
('b', 9, False),
('c', 4, True))
from petl import aggregate, look
look(table1)
# aggregate whole rows
table2 = aggregate(table1, 'foo', len)
look(table2)
# aggregate single field
table3 = aggregate(table1, 'foo', sum, 'bar')
look(table3)
# alternative signature for single field aggregation
table4 = aggregate(table1, key=('foo', 'bar'), aggregation=list, value=('bar', 'baz'))
look(table4)
# aggregate multiple fields
from collections import OrderedDict
from petl import strjoin
aggregation = OrderedDict()
aggregation['count'] = len
aggregation['minbar'] = 'bar', min
aggregation['maxbar'] = 'bar', max
aggregation['sumbar'] = 'bar', sum
aggregation['listbar'] = 'bar' # default aggregation function is list
aggregation['bars'] = 'bar', strjoin(', ')
table5 = aggregate(table1, 'foo', aggregation)
look(table5)
# can also use list or tuple to specify multiple field aggregation
aggregation = [('count', len),
('minbar', 'bar', min),
('maxbar', 'bar', max),
('sumbar', 'bar', sum),
('listbar', 'bar'), # default aggregation function is list
('bars', 'bar', strjoin(', '))]
table6 = aggregate(table1, 'foo', aggregation)
look(table6)
# can also use suffix notation
table7 = aggregate(table1, 'foo')
table7['count'] = len
table7['minbar'] = 'bar', min
table7['maxbar'] = 'bar', max
table7['sumbar'] = 'bar', sum
table7['listbar'] = 'bar' # default aggregation function is list
table7['bars'] = 'bar', strjoin(', ')
look(table7)
# addrownumbers
table1 = (('foo', 'bar'),
('A', 9),
('C', 2),
('F', 1))
from petl import addrownumbers, look
look(table1)
table2 = addrownumbers(table1)
look(table2)
# nthword
from petl import nthword
s = 'foo bar'
f = nthword(0)
f(s)
g = nthword(1)
g(s)
# search
table1 = (('foo', 'bar', 'baz'),
('orange', 12, 'oranges are nice fruit'),
('mango', 42, 'I like them'),
('banana', 74, 'lovely too'),
('cucumber', 41, 'better than mango'))
from petl import search, look
look(table1)
# search any field
table2 = search(table1, '.g.')
look(table2)
# search a specific field
table3 = search(table1, 'foo', '.g.')
look(table3)
# addcolumn
table1 = (('foo', 'bar'),
('A', 1),
('B', 2))
from petl import addcolumn, look
look(table1)
col = [True, False]
table2 = addcolumn(table1, col, 'baz')
look(table2)
# lookupjoin
table1 = (('id', 'color', 'cost'),
(1, 'blue', 12),
(2, 'red', 8),
(3, 'purple', 4))
table2 = (('id', 'shape', 'size'),
(1, 'circle', 'big'),
(1, 'circle', 'small'),
(2, 'square', 'tiny'),
(2, 'square', 'big'),
(3, 'ellipse', 'small'),
(3, 'ellipse', 'tiny'))
from petl import lookupjoin, look
look(table1)
look(table2)
table3 = lookupjoin(table1, table2, key='id')
look(table3)
# filldown
table1 = (('foo', 'bar', 'baz'),
(1, 'a', None),
(1, None, .23),
(1, 'b', None),
(2, None, None),
(2, None, .56),
(2, 'c', None),
(None, 'c', .72))
from petl import filldown, look
look(table1)
table2 = filldown(table1)
look(table2)
table3 = filldown(table1, 'bar')
look(table3)
table4 = filldown(table1, 'bar', 'baz')
look(table4)
# fillright
table1 = (('foo', 'bar', 'baz'),
(1, 'a', None),
(1, None, .23),
(1, 'b', None),
(2, None, None),
(2, None, .56),
(2, 'c', None),
(None, 'c', .72))
from petl import fillright, look
look(table1)
table2 = fillright(table1)
look(table2)
# fillleft
table1 = (('foo', 'bar', 'baz'),
(1, 'a', None),
(1, None, .23),
(1, 'b', None),
(2, None, None),
(None, None, .56),
(2, 'c', None),
(None, 'c', .72))
from petl import fillleft, look
look(table1)
table2 = fillleft(table1)
look(table2)
# multirangeaggregate
table1 = (('x', 'y', 'z'),
(1, 3, 9),
(2, 3, 12),
(4, 2, 17),
(2, 7, 3),
(1, 6, 1))
from petl import look, multirangeaggregate
look(table1)
table2 = multirangeaggregate(table1, keys=('x', 'y'), widths=(2, 2), aggregation=sum, mins=(0, 0), maxs=(4, 4), value='z')
look(table2)
# unjoin
table1 = (('foo', 'bar', 'baz'),
('A', 1, 'apple'),
('B', 1, 'apple'),
('C', 2, 'orange'))
table4 = (('foo', 'bar'),
('A', 'apple'),
('B', 'apple'),
('C', 'orange'))
from petl import look, unjoin
look(table1)
table2, table3 = unjoin(table1, 'baz', key='bar')
look(table2)
look(table3)
look(table4)
table5, table6 = unjoin(table4, 'bar')
look(table5)
look(table6)
| obsoleter/petl | docs/examples.py | Python | mit | 39,606 |
from enum import Enum
class MenuKey(Enum):
    """Identifiers for selectable menu actions."""
    # primary menu actions
    buy_beverage = 1
    open_fridge = 2
    manage_beverages = 3
    # the 10x values presumably belong to a secondary/management menu --
    # TODO(review): confirm against the menu-rendering callers
    quit = 101
    add = 102
    delete = 103
class MenuEntry:
    """A single entry in a menu.

    Attributes:
        key: identifier used to select the entry (typically a MenuKey member).
        title: human-readable label for the entry.
        item: optional payload attached to the entry (default None).
    """

    def __init__(self, key, title, item=None):  # PEP 8: no spaces around '=' in defaults
        self.key = key
        self.title = title
        self.item = item

    def __repr__(self):
        # added for debuggability; callers' behavior is unaffected
        return '{}(key={!r}, title={!r}, item={!r})'.format(
            type(self).__name__, self.key, self.title, self.item)
| omgwtflaserguns/matomatpy | matomat/models/menu.py | Python | mit | 303 |
"""
Django settings for arda_db project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wngfh!vr%wi8o5q7fej7ehl)lpjqtxe(+3_!f#@1r0ubve-z)n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# all the necessary production values are set at the end of the file
# Application definition
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'dev_static')]
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'browser',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
ROOT_URLCONF = 'arda_db.urls'
WSGI_APPLICATION = 'arda_db.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
if DEBUG == False:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'arda_db_deployed.sqlite3'),
'USER': 'arda-admin',
'PASSWORD': 'ICannotRememberTheLastTimeI8',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'arda_db.sqlite3'),
#~ 'USER': 'arda-admin',
#~ 'PASSWORD': 'ICannotRememberTheLastTimeI8',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Anchorage'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
if DEBUG == True:
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_SUBJECT_PREFIX = "[ARDA]"
ADMINS = (('ross', 'rwspicer@alaska.edu'),)
| rwspicer/ARDA | arda_db/arda_db/settings.py | Python | mit | 2,996 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information.

    Returns a dict with 'refnames', 'full' and 'date' entries.  In a
    git-archive tarball the $Format$ placeholders below will have been
    expanded by git; otherwise they remain literal (detected later by
    git_versions_from_keywords).
    """
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # instances carry VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source and verbose attributes, assigned in get_config()
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"  # output style; see render()
    cfg.tag_prefix = "v"  # release tags look like v1.2.3
    cfg.parentdir_prefix = ""
    cfg.versionfile_source = "oceans/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# not referenced in the code visible here; presumably kept for versioneer
# tooling compatibility -- TODO(review) confirm
LONG_VERSION_PY = {}
# mapping: vcs name -> {method name -> handler}; filled by register_vcs_handler
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records a function as HANDLERS[vcs][method]."""
    def decorate(f):
        """Store *f* in the handler table and hand it back unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Try each candidate executable name in *commands* in turn (e.g. "git"
    vs. "git.cmd" on Windows) with the argument list *args*.  Returns a
    (stdout, returncode) pair on success, (None, None) when no candidate
    could be launched, or (None, returncode) when the process exited
    non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen(
                [c] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found under this name -- try the next candidate
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: the loop exhausted every candidate without a break
        if verbose:
            print(f"unable to find command, tried {commands}")
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # Popen pipes yield bytes; decode to str on Python 3
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Starting at *root*, walk
    up to two additional directory levels looking for a component named
    ``<parentdir_prefix><version>``; raise NotThisMethod if none matches.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # step up one level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix),
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs)
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return the local-version separator: '.' once a '+' is present, else '+'."""
    has_plus = "+" in pieces.get("closest-tag", "")
    return "." if has_plus else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] .  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # separator is '.' if the tag itself already carries a '+'
        # (inlined from plus_or_dot)
        version += "." if "+" in tag else "+"
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty tree
    will appear "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        # separator is '.' if the tag itself already carries a '+'
        # (inlined from plus_or_dot)
        version += "." if "+" in tag else "+"
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]  # exception #1: bare hex, no 'g' prefix
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]  # exception #1: bare hex, no 'g' prefix
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    # An upstream extraction error short-circuits rendering entirely.
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    # Map style names onto renderer functions; empty/"default" means pep440.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    key = "pep440" if (not style or style == "default") else style
    try:
        renderer = renderers[key]
    except KeyError:
        raise ValueError("unknown style '%s'" % style)

    return {
        "version": renderer(pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # This file lives at ROOT/VERSIONFILE_SOURCE.  When __file__ exists we
    # can walk back up from it to the project root.  Frozen interpreters
    # (py2exe/bbfreeze/non-CPython) may not define __file__, in which case
    # only the expanded-keywords strategy can succeed.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded VCS keywords embedded in this file.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Locate the source-tree root by stripping one path component per level
    # of versionfile_source (the relative path from the root to this file).
    try:
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }

    # Strategy 2: interrogate git itself.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
| pyoceans/python-oceans | oceans/_version.py | Python | bsd-3-clause | 18,426 |
# Bzrflag
# Copyright 2008-2011 Brigham Young University
#
# This file is part of Bzrflag.
#
# Bzrflag is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Bzrflag is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Bzrflag. If not, see <http://www.gnu.org/licenses/>.
#
# Inquiries regarding any further use of Bzrflag, please contact the Copyright
# Licensing Office, Brigham Young University, 3760 HBLL, Provo, UT 84602,
# (801) 422-9339 or 422-3821, e-mail copyright@byu.edu.
"""Bzrflag game server.
The Server object listens on a port for incoming connections. When a client
connects, the Server dispatches its connection to a new Handler.
"""
__author__ = "BYU AML Lab <kseppi@byu.edu>"
__copyright__ = "Copyright 2008-2011 Brigham Young University"
__license__ = "GNU GPL"
import sys
import asynchat
import asyncore
import math
import socket
import time
import random
import logging
import constants
logger = logging.getLogger('server')
class Server(asyncore.dispatcher):
    """Server that listens on the BZRC port and dispatches connections.
    Each team has its own server which dispatches sessions to the Handler.
    Only one connection is allowed at a time. Any subsequent connections will
    be rejected until the active connection closes.
    """
    def __init__(self, addr, team, game, config, sock=None, asyncore_map=None):
        # addr: (host, port) pair to bind; port 0 lets the OS pick (see
        # get_port).  A pre-built sock may be injected for testing.
        self.config = config
        self.team = team
        self.game = game
        # True while a Handler session is active; enforces the
        # one-connection-at-a-time policy in handle_accept.
        self.in_use = False
        if sock is None:
            sock = socket.socket()
            # Disable Nagle's algorithm because this is a latency-sensitive
            # low-bandwidth application.  NOTE(review): TCP_NODELAY is set on
            # the *listening* socket here; whether accepted sockets inherit
            # the option is platform-dependent -- confirm on target platforms.
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.asyncore_map = asyncore_map
        asyncore.dispatcher.__init__(self, sock, self.asyncore_map)
        self.sock = sock
        self.bind(addr)
        self.listen(constants.BACKLOG)
    def handle_accept(self):
        # Accept a pending connection; refuse it if a session is active.
        sock, addr = self.accept()
        if self.in_use:
            sock.close()
        else:
            self.in_use = True
            Handler(sock, self.team, self.game, self.handle_closed_handler,
                    self.config, self.asyncore_map)
            # NOTE(review): this rebinds self.sock from the listening socket
            # to the accepted client socket, so __del__ below closes the
            # client socket rather than the listener -- confirm intended.
            self.sock = sock
    def get_port(self):
        # Return the actual bound port (useful when addr requested port 0).
        return self.socket.getsockname()[1]
    def handle_closed_handler(self):
        # Callback invoked by the Handler when its client session ends,
        # allowing the next connection to be accepted.
        self.in_use = False
    def __del__(self):
        if self.sock:
            self.sock.close()
class Handler(asynchat.async_chat):
    """Handler which implements the BZRC protocol with one client.
    Methods whose names start with "bzrc_" are automagically interpreted as
    bzrc commands. To create the command "xyz", just create a method called
    "bzrc_xyz", and the Handler will automatically call it when the client
    sends an "xyz" request. You don't have to add it to a table or anything.
    """
    def __init__(self, sock, team, game, closed_callback, config, asyncore_map):
        asynchat.async_chat.__init__(self, sock, asyncore_map)
        self.config = config
        self.team = team
        self.game = game
        self.closed_callback = closed_callback
        self.set_terminator('\n')
        self.input_buffer = ''
        self.push('bzrobots 1\n')
        self.init_timestamp = time.time()
        # No bzrc_* command is honored until the "agent 1" handshake arrives.
        self.established = False
    def handle_close(self):
        self.close()
    def handle_error(self):
        # Surface asyncore/asynchat errors instead of swallowing them.
        sys.excepthook(*sys.exc_info())
    def collect_incoming_data(self, chunk):
        if self.input_buffer:
            self.input_buffer += chunk
        else:
            self.input_buffer = chunk
    def push(self, text):
        """Send text to the client, mirroring it to the console and logs."""
        asynchat.async_chat.push(self, text)
        if self.config['telnet_console']:
            message = (self.team.color + ' > ' + text)
            self.game.game_loop.write_message(message)
        logger.debug(self.team.color + ' > ' + text)
        if text.startswith('fail '):
            logger.error(self.team.color + ' > ' + text)
    def found_terminator(self):
        """Called when Asynchat finds an end-of-line.
        Note that Asynchat ensures that our input buffer contains everything
        up to but not including the newline character.
        """
        if self.config['telnet_console']:
            message = (self.team.color + ' : ' + self.input_buffer + '\n')
            self.game.game_loop.display.console.write(message)
        logger.debug(self.team.color + ' : ' + self.input_buffer + '\n')
        args = self.input_buffer.split()
        self.input_buffer = ''
        if args:
            if self.established:
                try:
                    command = getattr(self, 'bzrc_%s' % args[0])
                except AttributeError:
                    self.push('fail invalid command\n')
                    return
                # Report command failures to the client rather than letting
                # one bad request kill the whole session.
                try:
                    command(args)
                except Exception as e:
                    color = self.team.color
                    logger.error(color + ' : ERROR : %s : %s\n' % (args, e))
                    message = (color + ' : ERROR : %s : %s : %s\n' %
                               (args, e.__class__.__name__, e))
                    self.game.write_msg(message)
                    self.push('fail %s\n' % e)
                    import traceback
                    traceback.print_exc(file=sys.stdout)
                    return
            elif args == ['agent', '1']:
                self.established = True
            else:
                self.bad_handshake()
    def bad_handshake(self):
        """Called when the client gives an invalid handshake message."""
        self.push('fail Unrecognized handshake\n')
        self.close()
    def close(self):
        # Notify the Server first so it can accept the next connection.
        self.closed_callback()
        asynchat.async_chat.close(self)
    def invalid_args(self, args):
        # Ack the request (protocol requirement) and then report failure.
        self.ack(*args)
        self.push('fail Invalid parameter(s)\n')
    def ack(self, *args):
        # Echo the command back with a session-relative timestamp.
        timestamp = time.time() - self.init_timestamp
        arg_string = ' '.join(str(arg) for arg in args)
        self.push('ack %s %s\n' % (timestamp, arg_string))
    def bzrc_taunt(self, args):
        # intentionally undocumented: bzrc_help only lists methods that have
        # a docstring, so this command stays hidden from "help" output.
        try:
            command = args[0]
            msg = args[1:]
            #if not len(msg) or msg[0] != 'please' or msg[-1] != 'thanks':
            #raise ValueError
        # BUGFIX: "except ValueError, IndexError:" caught only ValueError and
        # rebound the exception to the *name* IndexError (shadowing the
        # builtin).  A parenthesized tuple catches both, as intended.  The
        # same fix is applied to every "except ValueError, TypeError" below.
        except (ValueError, IndexError):
            self.push('fail invalid command\n')
            return
        self.ack(*args)
        taunt_msg = ' '.join(msg[:])
        if self.team.taunt(taunt_msg):
            self.push('ok\n')
        else:
            self.push('fail\n')
    def bzrc_help(self, args):
        """help [command]
        If no command is given, list the commands. Otherwise, return specific
        help for a command.
        """
        if len(args) == 1:
            # List the first docstring line of every bzrc_* method.
            help_lines = []
            for name in dir(self):
                if name.startswith('bzrc_'):
                    attr = getattr(self, name)
                    if attr.__doc__:
                        doc = ':%s\n' % attr.__doc__.split('\n')[0]
                        help_lines.append(doc)
            self.push(''.join(help_lines))
        else:
            name = args[1]
            func = getattr(self, 'bzrc_' + name, None)
            if func:
                doc = ':%s\n' % func.__doc__.strip()
                self.push(doc)
            else:
                self.push('fail invalid command "%s"\n' % name)
    def bzrc_shoot(self, args):
        """shoot [tankid]
        Request the tank indexed by the given parameter to fire a shot.
        Returns either:
        ok [comment]
        or:
        fail [comment]
        where the comment is optional.
        """
        try:
            command, tankid = args
            tankid = int(tankid)
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command, tankid)
        result = self.team.shoot(tankid)
        if result:
            self.push('ok\n')
        else:
            self.push('fail\n')
    def bzrc_speed(self, args):
        """speed [tankid] [speed]
        Request the tank to accelerate as quickly as possible to the
        specified speed.
        The speed is given as a multiple of maximum possible speed (1 is full
        speed). A negative parameter will cause the tank to go in reverse.
        Returns a boolean ("ok" or "fail" as described under shoot).
        """
        # NOTE: a broken, never-runnable doctest was removed from this
        # docstring; it leaked into the client-visible "help speed" output.
        try:
            command, tankid, value = args
            tankid = int(tankid)
            value = float(value)
        except (ValueError, TypeError):
            self.invalid_args(args)
            self.push('fail\n')
            return
        self.ack(command, tankid, value)
        self.team.speed(tankid, value)
        self.push('ok\n')
    def bzrc_angvel(self, args):
        """angvel [tankid] [angular_velocity]
        Sets the angular velocity of the tank.
        The parameter is given as a multiple of maximum possible angular
        velocity (1 is full speed), where positive values indicate counter-
        clockwise motion, and negative values indicate clockwise motion. The
        sign is consistent with the convention use in angles in the circle.
        Returns a boolean ("ok" or "fail" as described under shoot).
        """
        try:
            command, tankid, value = args
            tankid = int(tankid)
            value = float(value)
        except (ValueError, TypeError):
            self.invalid_args(args)
            self.push('fail\n')
            return
        self.ack(command, tankid, value)
        self.team.angvel(tankid, value)
        self.push('ok\n')
    def bzrc_teams(self, args):
        """teams
        Request a list of teams.
        The response will be a list, whose elements are of the form:
        team [color] [playercount]
        Color is the identifying team color/team name. Playercount is the
        number of tanks on the team.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        for color, team in self.game.teams.items():
            response.append('team %s %d\n' % (color, len(team.tanks)))
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_obstacles(self, args):
        """obstacles
        Request a list of obstacles.
        The response is a list, whose elements are of the form:
        obstacle [x1] [y1] [x2] [y2] ...
        where (x1, y1), (x2, y2), etc. are the corners of the obstacle in
        counter-clockwise order.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        if self.config['no_report_obstacles']:
            self.push('fail\n')
            return
        response = ['begin\n']
        for obstacle in self.game.obstacles:
            response.append('obstacle')
            for x, y in obstacle.shape:
                # Corner positions are reported with per-team gaussian noise.
                x = random.gauss(x, self.team.posnoise)
                y = random.gauss(y, self.team.posnoise)
                response.append(' %s %s' % (x, y))
            response.append('\n')
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_occgrid(self, args):
        """occgrid [tankid]
        Request an occupancy grid.
        Looks like:
        100,430|20,20|####
        #### = encoded 01 string
        """
        try:
            command, tankid = args
            tank = self.team.tank(int(tankid))
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        if self.game.occgrid is None:
            raise Exception('occgrid not currently compatible with rotated '
                            'obstacles')
        if tank.status == constants.TANKDEAD:
            self.push('fail\n')
            return
        self.ack(command)
        # Convert world coordinates (origin at center) to grid coordinates
        # (origin at the corner), clamping the window to the world bounds.
        offset_x = int(self.config.world.width/2)
        offset_y = int(self.config.world.height/2)
        width = self.config['occgrid_width']
        world_spos = [int(tank.pos[0]-width/2), int(tank.pos[1]-width/2)]
        world_spos[0] = max(-offset_x, world_spos[0])
        world_spos[1] = max(-offset_y, world_spos[1])
        spos = [int(tank.pos[0]+offset_x-width/2),
                int(tank.pos[1]+offset_y-width/2)]
        epos = [spos[0]+width, spos[1]+width]
        spos[0] = max(0, spos[0])
        spos[1] = max(0, spos[1])
        epos[0] = min(self.config.world.width, epos[0])
        epos[1] = min(self.config.world.height, epos[1])
        width = epos[0]-spos[0]
        height = epos[1]-spos[1]
        # Slice out the window (outer slice copies the row list, so the
        # game's master grid is never mutated).
        true_grid = self.game.occgrid[spos[0]:epos[0]]
        for i in range(len(true_grid)):
            true_grid[i] = true_grid[i][spos[1]:epos[1]]
        true_positive = self.config['%s_true_positive' % self.team.color]
        if true_positive is None:
            true_positive = self.config['default_true_positive']
        true_negative = self.config['%s_true_negative' % self.team.color]
        if true_negative is None:
            true_negative = self.config['default_true_negative']
        # Flip each cell according to the sensor-noise model: an occupied
        # cell reads 1 with probability true_positive, an empty cell reads 0
        # with probability true_negative.
        randomized_grid = [[0 for i in range(height)] for j in range(width)]
        r_array = [[random.uniform(0, 1) for i in range(height)]
                   for j in range(width)]
        for x in xrange(width):
            for y in xrange(height):
                occ = true_grid[x][y]
                r = r_array[x][y]
                if int(occ):
                    randomized_grid[x][y] = int(r < true_positive)
                else:
                    randomized_grid[x][y] = int(r > true_negative)
        response = ['begin\n']
        response.append('at %d,%d\n' % tuple(world_spos))
        response.append('size %dx%d\n' % (width, height))
        for row in randomized_grid:
            response.append(''.join([str(int(col)) for col in row]))
            response.append('\n')
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_bases(self, args):
        """bases
        Request a list of bases.
        The response is a list, whose elements are of the form:
        base [team color] [x1] [y1] [x2] [y2] ...
        where (x1, y1), (x2, y2), etc. are the corners of the base in counter-
        clockwise order and team color is the name of the owning team.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        for color, base in self.game.bases.items():
            response.append('base %s' % color)
            for point in base.shape:
                response.append(' %s %s' % tuple(point))
            response.append('\n')
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_flags(self, args):
        """flags
        Request a list of visible flags.
        The response is a list of flag elements:
        flag [team color] [possessing team color] [x] [y]
        The team color is the color of the owning team, and the possessing
        team color is the color of the team holding the flag. If no tanks are
        carrying the flag, the possessing team is "none". The coordinate
        (x, y) is the current position of the flag. Note that the list may be
        incomplete if visibility is limited.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        for color, team in self.game.teams.items():
            possess = "none"
            flag = team.flag
            if flag.tank is not None:
                possess = flag.tank.team.color
            x, y = flag.pos
            x = random.gauss(x, self.team.posnoise)
            y = random.gauss(y, self.team.posnoise)
            response.append('flag %s %s %s %s\n' % (color, possess, x, y))
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_shots(self, args):
        """shots
        Reports a list of shots.
        The response is a list of shot lines:
        shot [x] [y] [vx] [vy]
        where (x, y) is the current position of the shot and (vx, vy) is the
        current velocity.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        for shot in self.game.shots():
            x, y = shot.pos
            vx, vy = shot.vel
            response.append('shot %s %s %s %s\n' % (x, y, vx, vy))
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_mytanks(self, args):
        """mytanks
        Request the status of the tanks controlled by this connection.
        The response is a list of tanks:
        mytank [index] [callsign] [status] [shots available]
        [time to reload] [flag] [x] [y] [angle] [vx] [vy] [angvel]
        Index is the 0 based index identifying this tank. This index is used
        for instructions. The callsign is the tank's unique identifier within
        the game. The status is a string like "alive," "dead," etc. Shots
        available is the number of shots remaining before a reload delay. Flag
        is the color/name of the flag being held, or "-" if none is held. The
        coordinate (x, y) is the current position. Angle is the direction the
        tank is pointed, between negative pi and pi. The vector (vx, vy) is
        the current velocity of the tank, and angvel is the current angular
        velocity of the tank (in radians per second).
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        # BUGFIX: the old template used a backslash continuation *inside* the
        # string literal, which embedded the source indentation (a run of
        # spaces) into every mytank line on the wire.  Adjacent literals keep
        # the fields single-space separated, as documented above.
        entry_template = ('mytank %(id)s %(callsign)s %(status)s'
                          ' %(shots_avail)s %(reload)s %(flag)s'
                          ' %(x)s %(y)s %(angle)s'
                          ' %(vx)s %(vy)s %(angvel)s\n')
        for i, tank in enumerate(self.team.tanks):
            data = {}
            data['id'] = i
            data['callsign'] = tank.callsign
            data['status'] = tank.status
            data['shots_avail'] = constants.MAXSHOTS-len(tank.shots)
            data['reload'] = tank.reloadtimer
            data['flag'] = tank.flag and tank.flag.team.color or '-'
            data['x'] = int(tank.pos[0])
            data['y'] = int(tank.pos[1])
            data['angle'] = self.normalize_angle(tank.rot)
            data['vx'], data['vy'] = tank.velocity()
            data['angvel'] = tank.angvel
            response.append(entry_template % data)
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_othertanks(self, args):
        """othertanks
        Request the status of other tanks in the game (those not
        controlled by this connection.
        The response is a list of tanks:
        othertank [callsign] [color] [status] [flag] [x] [y] [angle]
        where callsign, status, flag, x, y, and angle are as described under
        mytanks and color is the name of the team color.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        entry_template = ('othertank %(callsign)s %(color)s %(status)s'
                          ' %(flag)s %(x)s %(y)s %(angle)s\n')
        for color, team in self.game.teams.items():
            if team == self.team:
                continue
            for tank in team.tanks:
                data = {}
                data['color'] = color
                data['callsign'] = tank.callsign
                data['status'] = tank.status
                data['shots_avail'] = constants.MAXSHOTS-len(tank.shots)
                data['reload'] = tank.reloadtimer
                data['flag'] = tank.flag and tank.flag.team.color or '-'
                # Other teams' tanks are reported with per-team noise.
                x, y = tank.pos
                data['x'] = random.gauss(x, self.team.posnoise)
                data['y'] = random.gauss(y, self.team.posnoise)
                angle = random.gauss(tank.rot, self.team.angnoise)
                data['angle'] = self.normalize_angle(angle)
                vx, vy = tank.velocity()
                data['vx'] = random.gauss(vx, self.team.velnoise)
                data['vy'] = random.gauss(vy, self.team.velnoise)
                data['angvel'] = tank.angvel
                response.append(entry_template % data)
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_constants(self, args):
        """constants
        Request a list of constants.
        These constants define the rules of the game and the behavior of the
        world. The response is a list:
        constant [name] [value]
        Name is a string. Value may be a number or a string. Boolean values
        are 0 or 1.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        true_positive = self.config['%s_true_positive' % self.team.color]
        if true_positive is None:
            true_positive = self.config['default_true_positive']
        true_negative = self.config['%s_true_negative' % self.team.color]
        if true_negative is None:
            true_negative = self.config['default_true_negative']
        self.ack(command)
        # TODO: is it possible to simply iterate through all constants without
        # specifically referencing each one?
        response = ['begin\n',
                    'constant team %s\n' % (self.team.color),
                    'constant worldsize %s\n' % (self.config['world_size']),
                    'constant tankangvel %s\n' % (constants.TANKANGVEL),
                    'constant tanklength %s\n' % (constants.TANKLENGTH),
                    'constant tankradius %s\n' % (constants.TANKRADIUS),
                    'constant tankspeed %s\n' % (constants.TANKSPEED),
                    'constant tankalive %s\n' % (constants.TANKALIVE),
                    'constant tankdead %s\n' % (constants.TANKDEAD),
                    'constant linearaccel %s\n' % (constants.LINEARACCEL),
                    'constant angularaccel %s\n' % (constants.ANGULARACCEL),
                    'constant tankwidth %s\n' % (constants.TANKWIDTH),
                    'constant shotradius %s\n' % (constants.SHOTRADIUS),
                    'constant shotrange %s\n' % (constants.SHOTRANGE),
                    'constant shotspeed %s\n' % (constants.SHOTSPEED),
                    'constant flagradius %s\n' % (constants.FLAGRADIUS),
                    'constant explodetime %s\n' % (constants.EXPLODETIME),
                    'constant truepositive %s\n' % (true_positive),
                    'constant truenegative %s\n' % (true_negative),
                    'end\n']
        self.push(''.join(response))
    def bzrc_scores(self, args):
        """scores
        Request the scores of all teams. The response is a list of scores,
        one for each team pair:
        score [team_i] [team_j] [score]
        Notice that a team generates no score when compared against itself.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        response = ['begin\n']
        for team1 in self.game.teams:
            for team2 in self.game.teams:
                if team1 != team2:
                    t1_score = self.game.teams[team1].score.total()
                    t2_score = self.game.teams[team2].score.total()
                    score = round(t1_score - t2_score, 2)
                    response.append('score %s %s %s' % (team1, team2, score))
                    response.append('\n')
        response.append('end\n')
        self.push(''.join(response))
    def bzrc_timer(self, args):
        """timer
        Requests how much time has passed and what time limit exists.
        timer [time elapsed] [time limit]
        Time elapsed is the number of seconds that the server has been alive,
        while time limit is the given limit. Once the limit is reached, the
        server will stop updating the game.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        timespent = self.game.timespent
        timelimit = self.game.timelimit
        self.push('timer %s %s\n' % (timespent, timelimit))
    def bzrc_quit(self, args):
        """quit
        Disconnects the session.
        This is technically an extension to the BZRC protocol. We should
        really backport this to BZFlag.
        """
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        self.push('ok\n')
        self.close()
    def bzrc_endgame(self, args):
        ## purposely undocumented (no docstring, so hidden from "help")
        try:
            command, = args
        except (ValueError, TypeError):
            self.invalid_args(args)
            return
        self.ack(command)
        self.push('ok\n')
        sys.exit(0)
    @staticmethod
    def normalize_angle(angle):
        """Normalize angles to be in the interval (-pi, pi].
        The protocol specification guarantees that angles are in this range,
        so all angles should be passed through this method before being sent
        across the wire.
        """
        angle %= 2 * math.pi
        if angle > math.pi:
            angle -= math.pi*2
        return angle
if __name__ == "__main__":
    # Running this module directly executes its embedded doctests.
    import doctest
    doctest.testmod()

# vim: et sw=4 sts=4
| sm-github/bzrflag | bzrflag/server.py | Python | gpl-3.0 | 26,751 |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from .views import (
TicketDeleteView,
TicketEditView,
)
# URL routes for single-ticket pages.  Both views require an authenticated
# user; ``ticket_id`` captures the numeric primary key of the ticket.
urlpatterns = [
    url(r'^(?P<ticket_id>[0-9]+)/$',
        login_required(TicketEditView.as_view()), name='ticket_edit'),
    url(r'^(?P<ticket_id>[0-9]+)/delete/$',
        login_required(TicketDeleteView.as_view()), name='ticket_delete'),
]
| prontotools/zendesk-tickets-machine | zendesk_tickets_machine/tickets/urls.py | Python | mit | 404 |
from .base import BaseTestCase
class BoundaryTestCase(BaseTestCase):
    """Exercise the single-boundary district API endpoint."""
    # Request parameters consumed by BaseTestCase to build the URL.
    abbr = 'ma'
    url_tmpl = '/api/v1/districts/boundary/{boundary_id}/'
    url_args = dict(
        abbr=abbr,
        boundary_id="ocd-division/country:us/state:oh/sldl:3"
    )

    def test_boundary(self):
        """The serialized boundary must expose exactly the documented keys."""
        expected = {
            u'name', u'region', u'chamber', u'shape', u'division_id',
            u'abbr', u'boundary_id', u'num_seats', u'id', u'bbox',
        }
        self.assertEquals(set(self.json), expected)
| mileswwatkins/billy | billy/web/api/tests/test_boundaries.py | Python | bsd-3-clause | 510 |
"""Filter an aggregated coverage file, keeping only the specified targets."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from .... import types as t
from . import (
CoverageAnalyzeTargetsConfig,
expand_indexes,
generate_indexes,
make_report,
read_report,
write_report,
)
if t.TYPE_CHECKING:
from . import (
NamedPoints,
TargetIndexes,
)
def command_coverage_analyze_targets_filter(args):  # type: (CoverageAnalyzeTargetsFilterConfig) -> None
    """Filter target names in an aggregated coverage file."""
    covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)

    # Expand target indexes back into names so filtering can work on names.
    arcs_by_name = expand_indexes(covered_path_arcs, covered_targets, lambda v: v)
    lines_by_name = expand_indexes(covered_path_lines, covered_targets, lambda v: v)

    include_targets = set(args.include_targets) if args.include_targets else None
    exclude_targets = set(args.exclude_targets) if args.exclude_targets else None

    include_path = re.compile(args.include_path) if args.include_path else None
    exclude_path = re.compile(args.exclude_path) if args.exclude_path else None

    def path_filter_func(path):
        """Return True when the path survives the include/exclude regexes."""
        if include_path and not include_path.search(path):
            return False
        return not (exclude_path and exclude_path.search(path))

    def target_filter_func(targets):
        """Apply the include/exclude target-name sets to a target set."""
        if include_targets:
            targets &= include_targets
        if exclude_targets:
            targets -= exclude_targets
        return targets

    arcs_by_name = filter_data(arcs_by_name, path_filter_func, target_filter_func)
    lines_by_name = filter_data(lines_by_name, path_filter_func, target_filter_func)

    # Re-index the surviving names (arcs first, then lines, matching the
    # original index-assignment order) and write the filtered report.
    target_indexes = {}  # type: TargetIndexes
    report = make_report(
        target_indexes,
        generate_indexes(target_indexes, arcs_by_name),
        generate_indexes(target_indexes, lines_by_name),
    )
    write_report(args, report, args.output_file)
def filter_data(
        data,  # type: NamedPoints
        path_filter_func,  # type: t.Callable[[str], bool]
        target_filter_func,  # type: t.Callable[[t.Set[str]], t.Set[str]]
):  # type: (...) -> NamedPoints
    """Filter the data set using the specified filter function."""
    filtered = {}  # type: NamedPoints

    for path, points in data.items():
        # Drop whole paths rejected by the path filter.
        if not path_filter_func(path):
            continue

        kept_points = {}

        for point, targets in points.items():
            kept_targets = target_filter_func(targets)

            # A point with no surviving targets is omitted entirely.
            if kept_targets:
                kept_points[point] = kept_targets

        # A path with no surviving points is omitted entirely.
        if kept_points:
            filtered[path] = kept_points

    return filtered
class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig):
    """Configuration for the `coverage analyze targets filter` command.

    Copies the filter-specific command-line options off the parsed args:
    input/output report paths, optional include/exclude target-name lists,
    and optional include/exclude path regex strings (compiled by the
    command itself, not here).
    """
    def __init__(self, args):  # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsFilterConfig, self).__init__(args)
        self.input_file = args.input_file  # type: str
        self.output_file = args.output_file  # type: str
        self.include_targets = args.include_targets  # type: t.List[str]
        self.exclude_targets = args.exclude_targets  # type: t.List[str]
        self.include_path = args.include_path  # type: t.Optional[str]
        self.exclude_path = args.exclude_path  # type: t.Optional[str]
| emonty/ansible | test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py | Python | gpl-3.0 | 3,581 |
"""\
Neurospin functions and classes for nipy.
(c) Copyright CEA-INRIA-INSERM, 2003-2009.
Distributed under the terms of the BSD License.
http://www.lnao.fr
functions for fMRI
This module contains several objects and functions for fMRI processing.
"""
from numpy.testing import Tester
import image_registration
import statistical_mapping
"""
import bindings
import glm
import register
import utils
"""
# Expose numpy.testing entry points so callers can run the package's test
# suite and benchmarks directly (e.g. ``nipy.neurospin.test()``).
test = Tester().test
bench = Tester().bench
| yarikoptic/NiPy-OLD | nipy/neurospin/__init__.py | Python | bsd-3-clause | 459 |
import sys
from ctypes import *
lib = cdll.LoadLibrary('./lib/libproxy_genesis.so')
class genesis(object):
def __init__(self):
self.obj = lib.proxy_genesis_new()
def printf(self, format, *args):
sys.stdout.write(format % args)
def details(self):
lib.proxy_genesis_details(self.obj)
def compute_geodist(self, lat1, lon1, lat2, lon2, unit):
lib.proxy_genesis_compute_geodist(self.obj, c_double(lat1), c_double(lon1),
c_double(lat2), c_double(lon2), c_char_p(unit))
def print_geodist(self):
d_val = c_double(lib.proxy_genesis_print_geodist(self.obj))
self.printf("distance = %f\n", d_val.value)
def set_value(self, s1):
lib.proxy_genesis_set_value(self.obj, c_char_p(s1))
def print_value(self):
c_str = c_char_p(lib.proxy_genesis_print_value(self.obj))
print c_str.value
def add_usage(self, desc):
lib.proxy_genesis_add_usage(self.obj, desc)
def print_usage(self):
lib.proxy_genesis_print_usage(self.obj)
#
# Main
#
g = genesis()
# Show details build.
g.details()
# Compute geometry distance.
g.compute_geodist(-23.32, -46.38, -22.54, -43.12, "K")
g.print_geodist()
# Test message.
g.set_value("TEST")
g.print_value()
# Add usage command line.
g.add_usage( "Description:" );
g.add_usage( " This application xxx kkk oppop popopo klklkl..\n" );
g.add_usage( "Usage: " );
g.add_usage( " [<OPTIONS>] [<OPT1>] [<OPT21>] [<OPT3>] [<OPT4>]\n" );
g.add_usage( "Options:" );
g.add_usage( " --version | -v Display version information." );
g.add_usage( " --help | -h Prints this usage message." );
g.print_usage()
| ederbsd/genesis | wrapper/python/genesis.py | Python | gpl-3.0 | 1,713 |
"""
Each ElkM1 area will be created as a separate alarm_control_panel in HASS.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.elkm1/
"""
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
ATTR_CODE, ATTR_ENTITY_ID, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_ARMING, STATE_ALARM_DISARMED,
STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components.elkm1 import (
DOMAIN as ELK_DOMAIN, create_elk_entities, ElkEntity)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
DEPENDENCIES = [ELK_DOMAIN]

# Dispatcher signal prefixes; the entity_id is appended so each area entity
# only receives service calls targeted at it.
SIGNAL_ARM_ENTITY = 'elkm1_arm'
SIGNAL_DISPLAY_MESSAGE = 'elkm1_display_message'

# Schema for the custom arm services: target entities plus a required
# numeric user code (up to six digits).
ELK_ALARM_SERVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
    vol.Required(ATTR_CODE): vol.All(vol.Coerce(int), vol.Range(0, 999999)),
})

# Schema for the keypad display-message service.  NOTE(review): 'clear'
# presumably selects a clear mode (0/1/2) defined by the ElkM1 protocol --
# confirm against the elkm1_lib display_message documentation.
DISPLAY_MESSAGE_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID, default=[]): cv.entity_ids,
    vol.Optional('clear', default=2): vol.In([0, 1, 2]),
    vol.Optional('beep', default=False): cv.boolean,
    vol.Optional('timeout', default=0): vol.Range(min=0, max=65535),
    vol.Optional('line1', default=''): cv.string,
    vol.Optional('line2', default=''): cv.string,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the ElkM1 alarm platform."""
    # Only set up via discovery from the elkm1 component.
    if discovery_info is None:
        return
    elk = hass.data[ELK_DOMAIN]['elk']
    async_add_entities(
        create_elk_entities(hass, elk.areas, 'area', ElkArea, []), True)

    def _dispatch(signal, entity_ids, *args):
        """Relay a service call to each targeted entity via the dispatcher."""
        for entity_id in entity_ids:
            async_dispatcher_send(
                hass, '{}_{}'.format(signal, entity_id), *args)

    def _arm_service(service):
        """Forward a custom arm service call to its target entities."""
        targets = service.data.get(ATTR_ENTITY_ID, [])
        level = _arm_services().get(service.service)
        _dispatch(SIGNAL_ARM_ENTITY, targets,
                  level, service.data.get(ATTR_CODE))

    for name in _arm_services():
        hass.services.async_register(
            alarm.DOMAIN, name, _arm_service, ELK_ALARM_SERVICE_SCHEMA)

    def _display_message_service(service):
        """Forward a display-message service call to its target entities."""
        data = service.data
        _dispatch(SIGNAL_DISPLAY_MESSAGE, data.get(ATTR_ENTITY_ID, []),
                  data['clear'], data['beep'], data['timeout'],
                  data['line1'], data['line2'])

    hass.services.async_register(
        alarm.DOMAIN, 'elkm1_alarm_display_message',
        _display_message_service, DISPLAY_MESSAGE_SERVICE_SCHEMA)
def _arm_services():
    """Map the custom arm service names onto ElkM1 arm-level values."""
    # Imported lazily so the module loads without elkm1_lib installed.
    from elkm1_lib.const import ArmLevel

    return dict(
        elkm1_alarm_arm_vacation=ArmLevel.ARMED_VACATION.value,
        elkm1_alarm_arm_home_instant=ArmLevel.ARMED_STAY_INSTANT.value,
        elkm1_alarm_arm_night_instant=ArmLevel.ARMED_NIGHT_INSTANT.value,
    )
class ElkArea(ElkEntity, alarm.AlarmControlPanel):
    """Representation of an Area / Partition within the ElkM1 alarm panel."""
    def __init__(self, element, elk, elk_data):
        """Initialize Area as Alarm Control Panel."""
        super().__init__(element, elk, elk_data)
        # entity_id of the keypad whose user last changed this area's state.
        self._changed_by_entity_id = ''
        self._state = None
    async def async_added_to_hass(self):
        """Register callback for ElkM1 changes."""
        await super().async_added_to_hass()
        # Watch every keypad; _watch_keypad filters to this area itself.
        for keypad in self._elk.keypads:
            keypad.add_callback(self._watch_keypad)
        # Subscribe to the per-entity signals emitted by the custom arm and
        # display-message services registered in async_setup_platform.
        async_dispatcher_connect(
            self.hass, '{}_{}'.format(SIGNAL_ARM_ENTITY, self.entity_id),
            self._arm_service)
        async_dispatcher_connect(
            self.hass, '{}_{}'.format(SIGNAL_DISPLAY_MESSAGE, self.entity_id),
            self._display_message)
    def _watch_keypad(self, keypad, changeset):
        # Ignore keypads assigned to other areas.
        if keypad.area != self._element.index:
            return
        if changeset.get('last_user') is not None:
            # Remember which keypad entity triggered the change, then
            # refresh HA state.
            self._changed_by_entity_id = self.hass.data[
                ELK_DOMAIN]['keypads'].get(keypad.index, '')
            self.async_schedule_update_ha_state(True)
    @property
    def code_format(self):
        """Return the alarm code format."""
        # Codes are 4 digits, optionally extended to 6.
        return '^[0-9]{4}([0-9]{2})?$'
    @property
    def state(self):
        """Return the state of the element."""
        return self._state
    @property
    def device_state_attributes(self):
        """Attributes of the area."""
        from elkm1_lib.const import AlarmState, ArmedStatus, ArmUpState
        attrs = self.initial_attrs()
        elmt = self._element
        attrs['is_exit'] = elmt.is_exit
        attrs['timer1'] = elmt.timer1
        attrs['timer2'] = elmt.timer2
        # Expose the raw panel enums by their lowercase names when known.
        if elmt.armed_status is not None:
            attrs['armed_status'] = \
                ArmedStatus(elmt.armed_status).name.lower()
        if elmt.arm_up_state is not None:
            attrs['arm_up_state'] = ArmUpState(elmt.arm_up_state).name.lower()
        if elmt.alarm_state is not None:
            attrs['alarm_state'] = AlarmState(elmt.alarm_state).name.lower()
        attrs['changed_by_entity_id'] = self._changed_by_entity_id
        return attrs
    def _element_changed(self, element, changeset):
        from elkm1_lib.const import ArmedStatus
        # Translation of panel arming levels to Home Assistant alarm states.
        elk_state_to_hass_state = {
            ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
            ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
            ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
            ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
            ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
            ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
            ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
        }
        if self._element.alarm_state is None:
            self._state = None
        elif self._area_is_in_alarm_state():
            self._state = STATE_ALARM_TRIGGERED
        elif self._entry_exit_timer_is_running():
            # Exit delay running -> arming; entry delay -> pending.
            self._state = STATE_ALARM_ARMING \
                if self._element.is_exit else STATE_ALARM_PENDING
        else:
            self._state = elk_state_to_hass_state[self._element.armed_status]
    def _entry_exit_timer_is_running(self):
        # Either timer non-zero means an entry/exit countdown is active.
        return self._element.timer1 > 0 or self._element.timer2 > 0
    def _area_is_in_alarm_state(self):
        from elkm1_lib.const import AlarmState
        # Any state at or beyond FIRE_ALARM counts as an active alarm.
        return self._element.alarm_state >= AlarmState.FIRE_ALARM.value
    async def async_alarm_disarm(self, code=None):
        """Send disarm command."""
        self._element.disarm(int(code))
    async def async_alarm_arm_home(self, code=None):
        """Send arm home command."""
        from elkm1_lib.const import ArmLevel
        self._element.arm(ArmLevel.ARMED_STAY.value, int(code))
    async def async_alarm_arm_away(self, code=None):
        """Send arm away command."""
        from elkm1_lib.const import ArmLevel
        self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))
    async def async_alarm_arm_night(self, code=None):
        """Send arm night command."""
        from elkm1_lib.const import ArmLevel
        self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))
    async def _arm_service(self, arm_level, code):
        # Handler for the custom arm services (level supplied by caller).
        self._element.arm(arm_level, code)
    async def _display_message(self, clear, beep, timeout, line1, line2):
        """Display a message on all keypads for the area."""
        self._element.display_message(clear, beep, timeout, line1, line2)
| tinloaf/home-assistant | homeassistant/components/alarm_control_panel/elkm1.py | Python | apache-2.0 | 7,820 |
import urllib2
import locale
import logging
log = logging.getLogger(__name__)
from django.core.exceptions import ImproperlyConfigured
from django.http import QueryDict
from django.utils.http import urlencode
from mezzanine.conf import settings
from cartridge.shop.checkout import CheckoutError
# PayPal NVP API endpoints; the sandbox host is selected when DEBUG is on
# (see process() below).
PAYPAL_NVP_API_ENDPOINT_SANDBOX = 'https://api-3t.sandbox.paypal.com/nvp'
PAYPAL_NVP_API_ENDPOINT = 'https://api-3t.paypal.com/nvp'
# API credentials must come from the Django settings module; fail fast at
# import time with a helpful message if any of them is missing.
try:
    PAYPAL_USER = settings.PAYPAL_USER
    PAYPAL_PASSWORD = settings.PAYPAL_PASSWORD
    PAYPAL_SIGNATURE = settings.PAYPAL_SIGNATURE
except AttributeError:
    raise ImproperlyConfigured("You need to define PAYPAL_USER, "
                               "PAYPAL_PASSWORD and PAYPAL_SIGNATURE "
                               "in your settings module to use the "
                               "paypal payment processor.")
def process(request, order_form, order):
    """
    Paypal direct payment processor.
    PayPal is picky.
    - https://cms.paypal.com/us/cgi-bin/?cmd=_render-content
      &content_ID=developer/e_howto_api_nvp_r_DoDirectPayment
    - https://cms.paypal.com/us/cgi-bin/?cmd=_render-content
      &content_ID=developer/e_howto_api_nvp_errorcodes
    Paypal requires the countrycode, and that it be specified in 2 single-
    byte characters. Import the COUNTRIES tuple-of-tuples, included below,
    and subclass OrderForm in my app, e.g.:
    from cartridge.shop.payment.paypal import COUNTRIES
    class OrderForm(OrderForm):
        def __init__(self,*args,**kwrds):
            super(OrderForm, self).__init__(*args, **kwrds)
            billing_country = forms.Select(choices=COUNTRIES)
            shipping_country = forms.Select(choices=COUNTRIES)
            self.fields['billing_detail_country'].widget = billing_country
            self.fields['shipping_detail_country'].widget = shipping_country
    Raise cartridge.shop.checkout.CheckoutError("error message") if
    payment is unsuccessful.
    """
    trans = {}
    amount = order.total
    trans['amount'] = amount
    # The shop locale determines the ISO-4217 currency code sent to PayPal.
    locale.setlocale(locale.LC_ALL, settings.SHOP_CURRENCY_LOCALE)
    currency = locale.localeconv()
    # Client IP: prefer the proxy-forwarded address, falling back to the
    # direct peer.  (Previously a bare ``except:`` which also swallowed a
    # missing REMOTE_ADDR and any other error; a dict .get() keeps real
    # failures visible.)
    ipaddress = request.META.get('HTTP_X_FORWARDED_FOR',
                                 request.META['REMOTE_ADDR'])
    if settings.DEBUG:
        trans['connection'] = PAYPAL_NVP_API_ENDPOINT_SANDBOX
    else:
        trans['connection'] = PAYPAL_NVP_API_ENDPOINT
    # Static API call parameters and credentials.
    trans['configuration'] = {
        'USER': PAYPAL_USER,
        'PWD': PAYPAL_PASSWORD,
        'SIGNATURE': PAYPAL_SIGNATURE,
        'VERSION': '53.0',
        'METHOD': 'DoDirectPayment',
        'PAYMENTACTION': 'Sale',
        'RETURNFMFDETAILS': 0,
        'CURRENCYCODE': currency['int_curr_symbol'][0:3],
        'IPADDRESS': ipaddress,
    }
    data = order_form.cleaned_data
    # Billing contact details (NVP field names fixed by PayPal).
    trans['custBillData'] = {
        'FIRSTNAME': data['billing_detail_first_name'],
        'LASTNAME': data['billing_detail_last_name'],
        'STREET': data['billing_detail_street'],
        'CITY': data['billing_detail_city'],
        'STATE': data['billing_detail_state'],
        'ZIP': data['billing_detail_postcode'],
        'COUNTRYCODE': data['billing_detail_country'],
        # optional below
        'SHIPTOPHONENUM': data['billing_detail_phone'],
        'EMAIL': data['billing_detail_email'],
    }
    # Shipping destination.
    trans['custShipData'] = {
        'SHIPTONAME': (data['shipping_detail_first_name'] + ' ' +
                       data['shipping_detail_last_name']),
        'SHIPTOSTREET': data['shipping_detail_street'],
        'SHIPTOCITY': data['shipping_detail_city'],
        'SHIPTOSTATE': data['shipping_detail_state'],
        'SHIPTOZIP': data['shipping_detail_postcode'],
        'SHIPTOCOUNTRY': data['shipping_detail_country'],
    }
    # Card details and amount; INVNUM ties the PayPal transaction back to
    # the local order id.
    trans['transactionData'] = {
        'CREDITCARDTYPE': data['card_type'].upper(),
        'ACCT': data['card_number'].replace(' ', ''),
        'EXPDATE': (data['card_expiry_month'] + data['card_expiry_year']),
        'CVV2': data['card_ccv'],
        'AMT': trans['amount'],
        'INVNUM': str(order.id)
    }
    part1 = urlencode(trans['configuration']) + "&"
    part2 = "&" + urlencode(trans['custBillData'])
    part3 = "&" + urlencode(trans['custShipData'])
    trans['postString'] = (part1 + urlencode(trans['transactionData']) +
                           part2 + part3)
    conn = urllib2.Request(url=trans['connection'], data=trans['postString'])
    # useful for debugging transactions
    # print trans['postString']
    log.debug(trans['postString'])
    try:
        f = urllib2.urlopen(conn)
        all_results = f.read()
    except urllib2.URLError:
        raise CheckoutError("Could not talk to PayPal payment gateway")
    # Response is an NVP (query-string) body; QueryDict parses it.
    parsed_results = QueryDict(all_results)
    log.debug(parsed_results)
    state = parsed_results['ACK']
    # Anything other than a success ACK carries an error description in
    # L_LONGMESSAGE0.
    if state not in ("Success", "SuccessWithWarning"):
        raise CheckoutError(parsed_results['L_LONGMESSAGE0'])
    return parsed_results['TRANSACTIONID']
# ISO 3166-1 alpha-2 country codes accepted by PayPal's COUNTRYCODE field.
# PayPal requires each code to be exactly two single-byte characters (see
# the process() docstring above), so every first element must be length 2.
COUNTRIES = (
    ("CA", "CANADA"),
    ("US", "UNITED STATES"),
    ("GB", "UNITED KINGDOM"),
    ("AF", "AFGHANISTAN"),
    ("AX", "ALAND ISLANDS"),
    ("AL", "ALBANIA"),
    ("DZ", "ALGERIA"),
    ("AS", "AMERICAN SAMOA"),
    ("AD", "ANDORRA"),
    ("AO", "ANGOLA"),
    ("AI", "ANGUILLA"),
    ("AQ", "ANTARCTICA"),
    ("AG", "ANTIGUA AND BARBUDA"),
    ("AR", "ARGENTINA"),
    ("AM", "ARMENIA"),
    ("AW", "ARUBA"),
    ("AU", "AUSTRALIA"),
    ("AT", "AUSTRIA"),
    ("AZ", "AZERBAIJAN"),
    ("BS", "BAHAMAS"),
    ("BH", "BAHRAIN"),
    ("BD", "BANGLADESH"),
    ("BB", "BARBADOS"),
    ("BY", "BELARUS"),
    ("BE", "BELGIUM"),
    ("BZ", "BELIZE"),
    ("BJ", "BENIN"),
    ("BM", "BERMUDA"),
    ("BT", "BHUTAN"),
    ("BO", "BOLIVIA, PLURINATIONAL STATE OF"),
    ("BA", "BOSNIA AND HERZEGOVINA"),
    ("BW", "BOTSWANA"),
    ("BV", "BOUVET ISLAND"),
    ("BR", "BRAZIL"),
    ("IO", "BRITISH INDIAN OCEAN TERRITORY"),
    ("BN", "BRUNEI DARUSSALAM"),
    ("BG", "BULGARIA"),
    ("BF", "BURKINA FASO"),
    ("BI", "BURUNDI"),
    ("KH", "CAMBODIA"),
    ("CM", "CAMEROON"),
    ("CV", "CAPE VERDE"),
    ("KY", "CAYMAN ISLANDS"),
    ("CF", "CENTRAL AFRICAN REPUBLIC"),
    ("TD", "CHAD"),
    ("CL", "CHILE"),
    ("CN", "CHINA"),
    ("CX", "CHRISTMAS ISLAND"),
    ("CC", "COCOS (KEELING) ISLANDS"),
    ("CO", "COLOMBIA"),
    ("KM", "COMOROS"),
    ("CG", "CONGO"),
    ("CD", "CONGO, THE DEMOCRATIC REPUBLIC OF THE"),
    ("CK", "COOK ISLANDS"),
    ("CR", "COSTA RICA"),
    ("CI", "COTE D'IVOIRE"),
    ("HR", "CROATIA"),
    ("CU", "CUBA"),
    ("CY", "CYPRUS"),
    ("CZ", "CZECH REPUBLIC"),
    ("DK", "DENMARK"),
    ("DJ", "DJIBOUTI"),
    ("DM", "DOMINICA"),
    ("DO", "DOMINICAN REPUBLIC"),
    ("EC", "ECUADOR"),
    ("EG", "EGYPT"),
    ("SV", "EL SALVADOR"),
    ("GQ", "EQUATORIAL GUINEA"),
    ("ER", "ERITREA"),
    ("EE", "ESTONIA"),
    ("ET", "ETHIOPIA"),
    ("FK", "FALKLAND ISLANDS (MALVINAS)"),
    ("FO", "FAROE ISLANDS"),
    ("FJ", "FIJI"),
    ("FI", "FINLAND"),
    ("FR", "FRANCE"),
    ("GF", "FRENCH GUIANA"),
    ("PF", "FRENCH POLYNESIA"),
    ("TF", "FRENCH SOUTHERN TERRITORIES"),
    ("GA", "GABON"),
    ("GM", "GAMBIA"),
    ("GE", "GEORGIA"),
    ("DE", "GERMANY"),
    ("GH", "GHANA"),
    ("GI", "GIBRALTAR"),
    ("GR", "GREECE"),
    ("GL", "GREENLAND"),
    ("GD", "GRENADA"),
    ("GP", "GUADELOUPE"),
    ("GU", "GUAM"),
    ("GT", "GUATEMALA"),
    ("GG", "GUERNSEY"),
    ("GN", "GUINEA"),
    ("GW", "GUINEA-BISSAU"),
    ("GY", "GUYANA"),
    ("HT", "HAITI"),
    ("HM", "HEARD ISLAND AND MCDONALD ISLANDS"),
    ("VA", "HOLY SEE (VATICAN CITY STATE)"),
    ("HN", "HONDURAS"),
    ("HK", "HONG KONG"),
    ("HU", "HUNGARY"),
    ("IS", "ICELAND"),
    ("IN", "INDIA"),
    ("ID", "INDONESIA"),
    ("IR", "IRAN, ISLAMIC REPUBLIC OF"),
    ("IQ", "IRAQ"),
    ("IE", "IRELAND"),
    ("IM", "ISLE OF MAN"),
    ("IL", "ISRAEL"),
    ("IT", "ITALY"),
    ("JM", "JAMAICA"),
    ("JP", "JAPAN"),
    ("JE", "JERSEY"),
    ("JO", "JORDAN"),
    ("KZ", "KAZAKHSTAN"),
    ("KE", "KENYA"),
    ("KI", "KIRIBATI"),
    ("KP", "KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF"),
    ("KR", "KOREA, REPUBLIC OF"),
    ("KW", "KUWAIT"),
    ("KG", "KYRGYZSTAN"),
    ("LA", "LAO PEOPLE'S DEMOCRATIC REPUBLIC"),
    ("LV", "LATVIA"),
    ("LB", "LEBANON"),
    ("LS", "LESOTHO"),
    ("LR", "LIBERIA"),
    ("LY", "LIBYAN ARAB JAMAHIRIYA"),
    ("LI", "LIECHTENSTEIN"),
    ("LT", "LITHUANIA"),
    ("LU", "LUXEMBOURG"),
    ("MO", "MACAO"),
    ("MK", "MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF"),
    ("MG", "MADAGASCAR"),
    ("MW", "MALAWI"),
    ("MY", "MALAYSIA"),
    ("MV", "MALDIVES"),
    ("ML", "MALI"),
    ("MT", "MALTA"),
    ("MH", "MARSHALL ISLANDS"),
    ("MQ", "MARTINIQUE"),
    ("MR", "MAURITANIA"),
    ("MU", "MAURITIUS"),
    ("YT", "MAYOTTE"),
    ("MX", "MEXICO"),
    ("FM", "MICRONESIA, FEDERATED STATES OF"),
    ("MD", "MOLDOVA, REPUBLIC OF"),
    ("MC", "MONACO"),
    ("MN", "MONGOLIA"),
    ("ME", "MONTENEGRO"),
    ("MS", "MONTSERRAT"),
    ("MA", "MOROCCO"),
    ("MZ", "MOZAMBIQUE"),
    ("MM", "MYANMAR"),
    ("NA", "NAMIBIA"),
    ("NR", "NAURU"),
    ("NP", "NEPAL"),
    ("NL", "NETHERLANDS"),
    ("AN", "NETHERLANDS ANTILLES"),
    ("NC", "NEW CALEDONIA"),
    ("NZ", "NEW ZEALAND"),
    ("NI", "NICARAGUA"),
    ("NE", "NIGER"),
    ("NG", "NIGERIA"),
    ("NU", "NIUE"),
    ("NF", "NORFOLK ISLAND"),
    ("MP", "NORTHERN MARIANA ISLANDS"),
    ("NO", "NORWAY"),
    ("OM", "OMAN"),
    ("PK", "PAKISTAN"),
    ("PW", "PALAU"),
    ("PS", "PALESTINIAN TERRITORY, OCCUPIED"),
    ("PA", "PANAMA"),
    ("PG", "PAPUA NEW GUINEA"),
    ("PY", "PARAGUAY"),
    ("PE", "PERU"),
    ("PH", "PHILIPPINES"),
    ("PN", "PITCAIRN"),
    ("PL", "POLAND"),
    ("PT", "PORTUGAL"),
    ("PR", "PUERTO RICO"),
    ("QA", "QATAR"),
    ("RE", "REUNION"),
    ("RO", "ROMANIA"),
    ("RU", "RUSSIAN FEDERATION"),
    ("RW", "RWANDA"),
    ("BL", "SAINT BARTHELEMY"),
    ("SH", "SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA"),
    ("KN", "SAINT KITTS AND NEVIS"),
    ("LC", "SAINT LUCIA"),
    ("MF", "SAINT MARTIN"),
    ("PM", "SAINT PIERRE AND MIQUELON"),
    ("VC", "SAINT VINCENT AND THE GRENADINES"),
    ("WS", "SAMOA"),
    ("SM", "SAN MARINO"),
    ("ST", "SAO TOME AND PRINCIPE"),
    ("SA", "SAUDI ARABIA"),
    ("SN", "SENEGAL"),
    ("RS", "SERBIA"),
    ("SC", "SEYCHELLES"),
    ("SL", "SIERRA LEONE"),
    ("SG", "SINGAPORE"),
    ("SK", "SLOVAKIA"),
    ("SI", "SLOVENIA"),
    ("SB", "SOLOMON ISLANDS"),
    ("SO", "SOMALIA"),
    ("ZA", "SOUTH AFRICA"),
    ("GS", "SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS"),
    ("ES", "SPAIN"),
    ("LK", "SRI LANKA"),
    ("SD", "SUDAN"),
    ("SR", "SURINAME"),
    ("SJ", "SVALBARD AND JAN MAYEN"),
    ("SZ", "SWAZILAND"),
    ("SE", "SWEDEN"),
    ("CH", "SWITZERLAND"),
    ("SY", "SYRIAN ARAB REPUBLIC"),
    ("TW", "TAIWAN, PROVINCE OF CHINA"),
    ("TJ", "TAJIKISTAN"),
    ("TZ", "TANZANIA, UNITED REPUBLIC OF"),
    ("TH", "THAILAND"),
    ("TL", "TIMOR-LESTE"),
    ("TG", "TOGO"),
    ("TK", "TOKELAU"),
    ("TO", "TONGA"),
    ("TT", "TRINIDAD AND TOBAGO"),
    ("TN", "TUNISIA"),
    ("TR", "TURKEY"),
    ("TM", "TURKMENISTAN"),
    ("TC", "TURKS AND CAICOS ISLANDS"),
    ("TV", "TUVALU"),
    ("UG", "UGANDA"),
    ("UA", "UKRAINE"),
    ("AE", "UNITED ARAB EMIRATES"),
    ("UM", "UNITED STATES MINOR OUTLYING ISLANDS"),
    ("UY", "URUGUAY"),
    ("UZ", "UZBEKISTAN"),
    ("VU", "VANUATU"),
    ("VE", "VENEZUELA, BOLIVARIAN REPUBLIC OF"),
    ("VN", "VIET NAM"),
    ("VG", "VIRGIN ISLANDS, BRITISH"),
    ("VI", "VIRGIN ISLANDS, U.S."),
    ("WF", "WALLIS AND FUTUNA"),
    ("EH", "WESTERN SAHARA"),
    ("YE", "YEMEN"),
    ("ZM", "ZAMBIA"),
    # Fixed: was ("ZW ", ...) — the trailing space made the code 3 bytes,
    # violating PayPal's 2-character COUNTRYCODE requirement.
    ("ZW", "ZIMBABWE")
)
| orlenko/bccf | src/cartridge/shop/payment/paypal.py | Python | unlicense | 11,727 |
from __future__ import absolute_import, division, print_function
from keyword import iskeyword
import re
import datashape
from datashape import dshape, DataShape, Record, Var, Mono, Fixed
from datashape.predicates import isscalar, iscollection, isboolean, isrecord
import numpy as np
from odo.utils import copydoc
import toolz
from toolz import concat, memoize, partial, first
from toolz.curried import map, filter
from ..compatibility import _strtypes, builtins, boundmethod, PY2
from .core import Node, subs, common_subexpression, path
from .method_dispatch import select_functions
from ..dispatch import dispatch
from .utils import hashable_index, replace_slices
__all__ = ['Expr', 'ElemWise', 'Field', 'Symbol', 'discover', 'Projection',
'projection', 'Selection', 'selection', 'Label', 'label', 'Map',
'ReLabel', 'relabel', 'Apply', 'apply', 'Slice', 'shape', 'ndim',
'label', 'symbol', 'Coerce', 'coerce']
_attr_cache = dict()
def isvalid_identifier(s):
    """Check whether a string is a valid Python identifier

    Examples
    --------
    >>> isvalid_identifier('Hello')
    True
    >>> isvalid_identifier('Hello world')
    False
    >>> isvalid_identifier('Helloworld!')
    False
    >>> isvalid_identifier('1a')
    False
    >>> isvalid_identifier('a1')
    True
    >>> isvalid_identifier('for')
    False
    >>> isvalid_identifier(None)
    False
    """
    # ``re`` caches compiled patterns internally, so matching inline is cheap.
    if s is None or iskeyword(s):
        return False
    return re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None
def valid_identifier(s):
    """Rewrite a string to be a valid identifier if it contains
    >>> valid_identifier('hello')
    'hello'
    >>> valid_identifier('hello world')
    'hello_world'
    >>> valid_identifier('hello.world')
    'hello_world'
    >>> valid_identifier('hello-world')
    'hello_world'
    >>> valid_identifier(None)
    >>> valid_identifier('1a')
    >>> valid_identifier('')
    """
    if isinstance(s, _strtypes):
        # Fixed: an empty string used to raise IndexError on ``s[0]``;
        # treat it like any other name that cannot become an identifier.
        if not s or s[0].isdigit():
            return
        # Replace the separators blaze tolerates in field names.
        return s.replace(' ', '_').replace('.', '_').replace('-', '_')
    # Non-strings (e.g. None) pass through unchanged.
    return s
class Expr(Node):
    """
    Symbolic expression of a computation
    All Blaze expressions (Join, By, Sort, ...) descend from this class. It
    contains shared logic and syntax. It in turn inherits from ``Node`` which
    holds all tree traversal logic
    """
    def _get_field(self, fieldname):
        # Field access on a non-record expression only makes sense when the
        # requested name is the expression's own name.
        if not isinstance(self.dshape.measure, Record):
            if fieldname == self._name:
                return self
            raise ValueError(
                "Can not get field '%s' of non-record expression %s" %
                (fieldname, self))
        return Field(self, fieldname)
    def __getitem__(self, key):
        # Dispatch on the key type: field name, boolean predicate,
        # projection list, or positional slice/fancy index.
        if isinstance(key, _strtypes) and key in self.fields:
            return self._get_field(key)
        elif isinstance(key, Expr) and iscollection(key.dshape):
            return selection(self, key)
        elif (isinstance(key, list)
              and builtins.all(isinstance(k, _strtypes) for k in key)):
            if set(key).issubset(self.fields):
                return self._project(key)
            else:
                raise ValueError('Names %s not consistent with known names %s'
                                 % (key, self.fields))
        elif (isinstance(key, tuple) and
              all(isinstance(k, (int, slice, type(None), list, np.ndarray))
                  for k in key)):
            return sliceit(self, key)
        elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
            # Single positional index: normalize to a 1-tuple.
            return sliceit(self, (key,))
        raise ValueError("Not understood %s[%s]" % (self, key))
    def map(self, func, schema=None, name=None):
        # Elementwise application of an arbitrary Python function.
        return Map(self, func, schema, name)
    def _project(self, key):
        return projection(self, key)
    @property
    def schema(self):
        # The measure (row type) of this expression, as a DataShape.
        return datashape.dshape(self.dshape.measure)
    @property
    def fields(self):
        # Record measures expose their field names; scalar expressions
        # expose their own name (if any) as the single field.
        if isinstance(self.dshape.measure, Record):
            return self.dshape.measure.names
        name = getattr(self, '_name', None)
        if name is not None:
            return [self._name]
        return []
    def _len(self):
        try:
            return int(self.dshape[0])
        except TypeError:
            # Leading dimension is not Fixed (e.g. ``var``).
            raise ValueError('Can not determine length of table with the '
                             'following datashape: %s' % self.dshape)
    def __len__(self):  # pragma: no cover
        return self._len()
    def __iter__(self):
        raise NotImplementedError(
            'Iteration over expressions is not supported.\n'
            'Iterate over computed result instead, e.g. \n'
            "\titer(expr) # don't do this\n"
            "\titer(compute(expr)) # do this instead")
    def __dir__(self):
        # Augment the class attributes with field names and the methods
        # registered for this expression's dshape/schema.
        result = dir(type(self))
        if isrecord(self.dshape.measure) and self.fields:
            result.extend(list(map(valid_identifier, self.fields)))
        d = toolz.merge(schema_methods(self.dshape.measure),
                        dshape_methods(self.dshape))
        result.extend(list(d))
        return sorted(set(filter(isvalid_identifier, result)))
    def __getattr__(self, key):
        # Guard: hashing machinery probes for ``_hash``; going through the
        # cache below would hash ``self`` and recurse.
        if key == '_hash':
            raise AttributeError()
        try:
            return _attr_cache[(self, key)]
        # Fixed: was a bare ``except:``, which silently swallowed every
        # error (including KeyboardInterrupt).  Only a cache miss is
        # expected here; an unhashable ``self`` would fail at the cache
        # store below in the original code as well.
        except KeyError:
            pass
        try:
            result = object.__getattribute__(self, key)
        except AttributeError:
            # Attribute is not a real attribute: try field access first,
            # then the dshape/schema-registered methods.
            fields = dict(zip(map(valid_identifier, self.fields),
                              self.fields))
            if self.fields and key in fields:
                if isscalar(self.dshape.measure):  # t.foo.foo is t.foo
                    result = self
                else:
                    result = self[fields[key]]
            else:
                d = toolz.merge(schema_methods(self.dshape.measure),
                                dshape_methods(self.dshape))
                if key in d:
                    func = d[key]
                    if func in method_properties:
                        # Registered as a property: call immediately.
                        result = func(self)
                    else:
                        result = boundmethod(func, self)
                else:
                    raise
        _attr_cache[(self, key)] = result
        return result
    @property
    def _name(self):
        # A single-input elementwise scalar expression inherits its
        # child's name (e.g. ``t.amount + 1`` is still named 'amount').
        if (isscalar(self.dshape.measure) and
                len(self._inputs) == 1 and
                isscalar(self._child.dshape.measure)):
            return self._child._name
    def __enter__(self):
        """ Enter context """
        return self
    def __exit__(self, *args):
        """ Exit context
        Close any open resource if we are called in context
        """
        for value in self._resources().values():
            try:
                value.close()
            except AttributeError:
                # Resource has no close(); best-effort cleanup.
                pass
        return True
# Cache of interned Symbol instances, keyed by (name, dshape, token).
_symbol_cache = dict()
def _symbol_key(args, kwargs):
    """Build the memoization key for :func:`symbol` from its raw call
    arguments.

    Accepts any mix of positional and keyword arguments that ``symbol``
    itself accepts.  Fixed: the original unpacked only ``len(args)`` of
    1/2/3, so a fully-keyword call (``symbol(name='x', dshape='int')``)
    raised UnboundLocalError on ``name``.
    """
    name = args[0] if args else kwargs.get('name')
    ds = args[1] if len(args) > 1 else None
    token = args[2] if len(args) > 2 else None
    # Keyword arguments supply (or override) the missing positions.
    ds = kwargs.get('dshape', ds)
    token = kwargs.get('token', token)
    # Normalize so equivalent datashape spellings share one cache slot.
    ds = dshape(ds)
    return (name, ds, token)
@memoize(cache=_symbol_cache, key=_symbol_key)
def symbol(name, dshape, token=None):
    """Memoized Symbol constructor: equal (name, dshape, token) arguments
    always return the identical Symbol instance."""
    return Symbol(name, dshape, token=token)
class Symbol(Expr):
    """
    Symbolic data. The leaf of a Blaze expression
    Examples
    --------
    >>> points = symbol('points', '5 * 3 * {x: int, y: int}')
    >>> points
    points
    >>> points.dshape
    dshape("5 * 3 * {x: int32, y: int32}")
    """
    # _token distinguishes otherwise-identical symbols (same name/dshape).
    __slots__ = '_hash', '_name', 'dshape', '_token'
    __inputs__ = ()
    def __init__(self, name, dshape, token=None):
        self._name = name
        # Accept a string or a bare Mono measure and normalize both to a
        # full DataShape.
        if isinstance(dshape, _strtypes):
            dshape = datashape.dshape(dshape)
        if isinstance(dshape, Mono) and not isinstance(dshape, DataShape):
            dshape = DataShape(dshape)
        self.dshape = dshape
        self._token = token
    def __str__(self):
        # Anonymous symbols (name=None) render as the empty string.
        return self._name or ''
    def _resources(self):
        # A bare symbol is not bound to any concrete data resource.
        return dict()
@dispatch(Symbol, dict)
def _subs(o, d):
    """ Subs symbols using symbol function
    Supports caching"""
    # Rebuild through symbol() so substituted symbols are interned in the
    # shared _symbol_cache instead of constructed directly.
    newargs = [subs(arg, d) for arg in o._args]
    return symbol(*newargs)
class ElemWise(Expr):
    """
    Elementwise operation.
    The shape of this expression matches the shape of the child.
    """
    @property
    def dshape(self):
        # Child's dimensions with this expression's own schema appended
        # as the new measure.
        return datashape.DataShape(*(self._child.dshape.shape
                                     + tuple(self.schema)))
class Field(ElemWise):
    """
    A single field from an expression.
    Get a single field from an expression with record-type schema.
    We store the name of the field in the ``_name`` attribute.
    Examples
    --------
    >>> points = symbol('points', '5 * 3 * {x: int32, y: int32}')
    >>> points.x.dshape
    dshape("5 * 3 * int32")
    For fields that aren't valid Python identifiers, use ``[]`` syntax:
    >>> points = symbol('points', '5 * 3 * {"space station": float64}')
    >>> points['space station'].dshape
    dshape("5 * 3 * float64")
    """
    __slots__ = '_hash', '_child', '_name'
    def __str__(self):
        # Render as attribute access when possible, else as item access.
        fmt = '%s.%s' if isvalid_identifier(self._name) else '%s[%r]'
        return fmt % (self._child, self._name)
    @property
    def _expr(self):
        # A standalone symbol representing just this field.
        return symbol(self._name, datashape.DataShape(self.dshape.measure))
    @property
    def dshape(self):
        shape = self._child.dshape.shape
        schema = self._child.dshape.measure.dict[self._name]
        # The field may itself be dimensional: append its shape to the
        # child's and keep only its measure as the new schema.
        shape = shape + schema.shape
        schema = (schema.measure,)
        return DataShape(*(shape + schema))
class Projection(ElemWise):
    """Select a subset of fields from data.
    Examples
    --------
    >>> accounts = symbol('accounts',
    ...                   'var * {name: string, amount: int, id: int}')
    >>> accounts[['name', 'amount']].schema
    dshape("{name: string, amount: int32}")
    >>> accounts[['name', 'amount']]
    accounts[['name', 'amount']]
    See Also
    --------
    blaze.expr.expressions.Field
    """
    # _fields is stored as a tuple (hashable); see projection() below.
    __slots__ = '_hash', '_child', '_fields'
    @property
    def fields(self):
        return list(self._fields)
    @property
    def schema(self):
        # Rebuild the record measure keeping only the projected fields,
        # preserving the requested order.
        d = self._child.schema[0].dict
        return DataShape(Record([(name, d[name]) for name in self.fields]))
    def __str__(self):
        return '%s[%s]' % (self._child, self.fields)
    def _project(self, key):
        # Collapse nested projections: project directly off the child.
        if isinstance(key, list) and set(key).issubset(set(self.fields)):
            return self._child[key]
        raise ValueError("Column Mismatch: %s" % key)
    def _get_field(self, fieldname):
        # Field access also short-circuits through to the child.
        if fieldname in self.fields:
            return Field(self._child, fieldname)
        raise ValueError("Field %s not found in columns %s" % (fieldname,
                                                               self.fields))
@copydoc(Projection)
def projection(expr, names):
    # Validate eagerly so errors point at the offending name list rather
    # than surfacing later during computation.
    if not names:
        raise ValueError("Projection with no names")
    if not isinstance(names, (tuple, list)):
        raise TypeError("Wanted list of strings, got %s" % names)
    if not set(names).issubset(expr.fields):
        raise ValueError("Mismatched names. Asking for names %s "
                         "where expression has names %s" %
                         (names, expr.fields))
    # Store names as a tuple so the Projection node is hashable.
    return Projection(expr, tuple(names))
def sanitize_index_lists(ind):
    """ Handle lists/arrays of integers/bools as indexes
    >>> sanitize_index_lists([2, 3, 5])
    [2, 3, 5]
    >>> sanitize_index_lists([True, False, True, False])
    [0, 2]
    >>> sanitize_index_lists(np.array([1, 2, 3]))
    [1, 2, 3]
    >>> sanitize_index_lists(np.array([False, True, True]))
    [1, 2]
    """
    # Anything that is not a list/array (ints, slices, None, ...) passes
    # through untouched.
    if not isinstance(ind, (list, np.ndarray)):
        return ind
    values = ind.tolist() if isinstance(ind, np.ndarray) else ind
    # A boolean mask becomes the list of positions where it is True.
    if values and isinstance(values[0], bool):
        values = [pos for pos, flag in enumerate(values) if flag]
    return values
def sliceit(child, index):
    # Normalize boolean/array index components, then convert slice objects
    # into a hashable representation before building the Slice node.
    index2 = tuple(map(sanitize_index_lists, index))
    index3 = hashable_index(index2)
    s = Slice(child, index3)
    # NOTE(review): hash(s) is called for its side effect — presumably to
    # compute/validate the node's hash eagerly; confirm before relying on it.
    hash(s)
    return s
class Slice(Expr):
    """Elements `start` until `stop`. On many backends, a `step` parameter
    is also allowed.
    Examples
    --------
    >>> from blaze import symbol
    >>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
    >>> accounts[2:7].dshape
    dshape("5 * {name: string, amount: int32}")
    >>> accounts[2:7:2].dshape
    dshape("3 * {name: string, amount: int32}")
    """
    # _index holds the hashable form produced by hashable_index(); the
    # ``index`` property converts it back to real slice objects.
    __slots__ = '_hash', '_child', '_index'
    @property
    def dshape(self):
        return self._child.dshape.subshape[self.index]
    @property
    def index(self):
        return replace_slices(self._index)
    def __str__(self):
        if isinstance(self.index, tuple):
            # NOTE(review): joins the raw hashable ``_index`` rather than
            # the slice-restored ``index`` — confirm whether intentional.
            index = ', '.join(map(str, self._index))
        else:
            index = str(self._index)
        return '%s[%s]' % (self._child, index)
class Selection(Expr):
    """ Filter elements of expression based on predicate
    Examples
    --------
    >>> accounts = symbol('accounts',
    ...                   'var * {name: string, amount: int, id: int}')
    >>> deadbeats = accounts[accounts.amount < 0]
    """
    __slots__ = '_hash', '_child', 'predicate'
    def __str__(self):
        return "%s[%s]" % (self._child, self.predicate)
    @property
    def dshape(self):
        # Filtering keeps an unknown number of rows, so the outermost
        # dimension becomes ``var``; the measure is unchanged.
        shape = list(self._child.dshape.shape)
        shape[0] = Var()
        return DataShape(*(shape + [self._child.dshape.measure]))
@copydoc(Selection)
def selection(table, predicate):
    # The predicate must share a common root with the table, and both must
    # reach that root only through elementwise ops and symbols — otherwise
    # the filter has no well-defined row alignment.
    subexpr = common_subexpression(table, predicate)
    if not builtins.all(isinstance(node, (ElemWise, Symbol))
                        or node.isidentical(subexpr)
                        for node in concat([path(predicate, subexpr),
                                            path(table, subexpr)])):
        raise ValueError("Selection not properly matched with table:\n"
                         "child: %s\n"
                         "apply: %s\n"
                         "predicate: %s" % (subexpr, table, predicate))
    if not isboolean(predicate.dshape):
        raise TypeError("Must select over a boolean predicate. Got:\n"
                        "%s[%s]" % (table, predicate))
    # Push the Selection down onto the shared subexpression.
    return table._subs({subexpr: Selection(subexpr, predicate)})
class Label(ElemWise):
    """An expression with a name.
    Examples
    --------
    >>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
    >>> expr = accounts.amount * 100
    >>> expr._name
    'amount'
    >>> expr.label('new_amount')._name
    'new_amount'
    See Also
    --------
    blaze.expr.expressions.ReLabel
    """
    __slots__ = '_hash', '_child', 'label'
    @property
    def schema(self):
        # Renaming does not change the row type.
        return self._child.schema
    @property
    def _name(self):
        return self.label
    def _get_field(self, key):
        # Only the (single) new name is addressable as a field.
        if key[0] == self.fields[0]:
            return self
        raise ValueError("Column Mismatch: %s" % key)
    def __str__(self):
        return 'label(%s, %r)' % (self._child, self.label)
@copydoc(Label)
def label(expr, lab):
    """Attach ``lab`` as the expression's name, reusing ``expr`` when its
    name already matches."""
    return expr if expr._name == lab else Label(expr, lab)
class ReLabel(ElemWise):
    """
    Table with same content but with new labels
    Examples
    --------
    >>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
    >>> accounts.schema
    dshape("{name: string, amount: int32}")
    >>> accounts.relabel(amount='balance').schema
    dshape("{name: string, balance: int32}")
    >>> accounts.relabel(not_a_column='definitely_not_a_column')
    Traceback (most recent call last):
    ...
    ValueError: Cannot relabel non-existent child fields: {'not_a_column'}
    >>> s = symbol('s', 'var * {"0": int64}')
    >>> s.relabel({'0': 'foo'})
    s.relabel({'0': 'foo'})
    >>> s.relabel(0='foo') # doctest: +SKIP
    Traceback (most recent call last):
    ...
    SyntaxError: keyword can't be an expression
    Notes
    -----
    When names are not valid Python names, such as integers or string with
    spaces, you must pass a dictionary to ``relabel``. For example
    .. code-block:: python
    >>> s = symbol('s', 'var * {"0": int64}')
    >>> s.relabel({'0': 'foo'})
    s.relabel({'0': 'foo'})
    >>> t = symbol('t', 'var * {"whoo hoo": ?float32}')
    >>> t.relabel({"whoo hoo": 'foo'})
    t.relabel({'whoo hoo': 'foo'})
    See Also
    --------
    blaze.expr.expressions.Label
    """
    # labels is a sorted tuple of (old_name, new_name) pairs (hashable).
    __slots__ = '_hash', '_child', 'labels'
    @property
    def schema(self):
        subs = dict(self.labels)
        # Rebuild the record measure, renaming fields via the mapping
        # while keeping the original field order and types.
        param = self._child.dshape.measure.parameters[0]
        return DataShape(Record([[subs.get(name, name), dtype]
                                 for name, dtype in param]))
    def __str__(self):
        labels = self.labels
        # Prefer keyword syntax when every old name is a valid identifier;
        # otherwise fall back to a dict literal.
        if all(map(isvalid_identifier, map(first, labels))):
            rest = ', '.join('%s=%r' % l for l in labels)
        else:
            rest = '{%s}' % ', '.join('%r: %r' % l for l in labels)
        return '%s.relabel(%s)' % (self._child, rest)
@copydoc(ReLabel)
def relabel(child, labels=None, **kwargs):
    labels = labels or dict()
    labels = toolz.merge(labels, kwargs)
    # Drop no-op renames (old name == new name).
    labels = dict((k, v) for k, v in labels.items() if k != v)
    label_keys = set(labels)
    fields = child.fields
    if not label_keys.issubset(fields):
        non_existent_fields = label_keys.difference(fields)
        raise ValueError("Cannot relabel non-existent child fields: {%s}" %
                         ', '.join(map(repr, non_existent_fields)))
    if not labels:
        return child
    if isinstance(labels, dict):  # Turn dict into sorted tuple of pairs
        labels = tuple(sorted(labels.items()))
    # Scalar (non-record) children are renamed via label() instead of a
    # ReLabel node; only a rename matching the child's own name applies.
    if isscalar(child.dshape.measure):
        if child._name == labels[0][0]:
            return child.label(labels[0][1])
        else:
            return child
    return ReLabel(child, labels)
class Map(ElemWise):
    """ Map an arbitrary Python function across elements in a collection
    Examples
    --------
    >>> from datetime import datetime
    >>> t = symbol('t', 'var * {price: real, time: int64}')  # times as integers
    >>> datetimes = t.time.map(datetime.utcfromtimestamp)
    Optionally provide extra schema information
    >>> datetimes = t.time.map(datetime.utcfromtimestamp,
    ...                        schema='{time: datetime}')
    See Also
    --------
    blaze.expr.expresions.Apply
    """
    # _name0 holds an explicit label; _name falls back to the child's name.
    __slots__ = '_hash', '_child', 'func', '_schema', '_name0'
    @property
    def schema(self):
        # The output type of an arbitrary function cannot be inferred; it
        # must have been supplied at construction time.
        if self._schema:
            return dshape(self._schema)
        else:
            raise NotImplementedError("Schema of mapped column not known.\n"
                                      "Please specify datashape keyword in "
                                      ".map method.\nExample: "
                                      "t.columnname.map(function, 'int64')")
    def label(self, name):
        # Relabeling a Map rebuilds it with the new name baked in.
        assert isscalar(self.dshape.measure)
        return Map(self._child,
                   self.func,
                   self.schema,
                   name)
    @property
    def shape(self):
        return self._child.shape
    @property
    def ndim(self):
        return self._child.ndim
    @property
    def _name(self):
        if self._name0:
            return self._name0
        else:
            return self._child._name
# Attach Expr.map's docstring to the Map class; Python 2 exposes the
# underlying function of an unbound method via ``im_func``.
if PY2:
    copydoc(Map, Expr.map.im_func)
else:
    copydoc(Map, Expr.map)
class Apply(Expr):
    """ Apply an arbitrary Python function onto an expression
    Examples
    --------
    >>> t = symbol('t', 'var * {name: string, amount: int}')
    >>> h = t.apply(hash, dshape='int64')  # Hash value of resultant dataset
    You must provide the datashape of the result with the ``dshape=`` keyword.
    For datashape examples see
    http://datashape.pydata.org/grammar.html#some-simple-examples
    If using a chunking backend and your operation may be safely split and
    concatenated then add the ``splittable=True`` keyword argument
    >>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP
    See Also
    --------
    blaze.expr.expressions.Map
    """
    __slots__ = '_hash', '_child', 'func', '_dshape', '_splittable'
    @property
    def schema(self):
        # Only collection results have a row schema.
        if iscollection(self.dshape):
            return self.dshape.subshape[0]
        else:
            raise TypeError("Non-tabular datashape, %s" % self.dshape)
    @property
    def dshape(self):
        # The user-supplied result datashape, parsed on demand.
        return dshape(self._dshape)
@copydoc(Apply)
def apply(expr, func, dshape, splittable=False):
    """Construct an Apply node, normalizing ``dshape`` to a DataShape."""
    result_dshape = datashape.dshape(dshape)
    return Apply(expr, func, result_dshape, splittable)
class Coerce(Expr):
    """Coerce an expression to a different type.
    Examples
    --------
    >>> t = symbol('t', '100 * float64')
    >>> t.coerce(to='int64')
    t.coerce(to='int64')
    >>> t.coerce('float32')
    t.coerce(to='float32')
    >>> t.coerce('int8').dshape
    dshape("100 * int8")
    """
    # ``to`` is the target measure (already parsed to a datashape type).
    __slots__ = '_hash', '_child', 'to'
    @property
    def schema(self):
        return self.to
    @property
    def dshape(self):
        # Coercion changes only the measure, never the dimensions.
        return DataShape(*(self._child.shape + (self.schema,)))
    def __str__(self):
        return '%s.coerce(to=%r)' % (self._child, str(self.schema))
@copydoc(Coerce)
def coerce(expr, to):
    """Build a Coerce node, parsing ``to`` when it is given as a string."""
    if isinstance(to, _strtypes):
        to = dshape(to)
    return Coerce(expr, to)
# Registries of (predicate, {methods}) pairs: a method becomes available on
# an expression when its dshape (or schema) satisfies the predicate.  They
# are populated at the bottom of this module.
dshape_method_list = list()
schema_method_list = list()
# Functions in this set are exposed as properties rather than bound methods
# (see Expr.__getattr__).
method_properties = set()
# Memoized per-dshape lookups used by Expr.__getattr__ / __dir__.
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
@dispatch(DataShape)
def shape(ds):
    # Convert Fixed dimensions to plain ints, leaving var dims untouched.
    s = ds.shape
    s = tuple(int(d) if isinstance(d, Fixed) else d for d in s)
    return s
@dispatch(object)
def shape(expr):
    """ Shape of expression
    >>> symbol('s', '3 * 5 * int32').shape
    (3, 5)
    Works on anything discoverable
    >>> shape([[1, 2], [3, 4]])
    (2, 2)
    """
    s = list(discover(expr).shape)
    for i, elem in enumerate(s):
        try:
            s[i] = int(elem)
        except TypeError:
            # Non-numeric dimension (e.g. var): keep the datashape object.
            pass
    return tuple(s)
def ndim(expr):
    """ Number of dimensions of expression
    >>> symbol('s', '3 * var * int32').ndim
    2
    """
    # Symbolic dimensions (e.g. ``var``) count toward the rank as well.
    return len(shape(expr))
# Register which methods become available on expressions, keyed by
# predicates over their datashape (collections get shape/ndim, etc.).
dshape_method_list.extend([
    (lambda ds: True, set([apply])),
    (iscollection, set([shape, ndim])),
    (lambda ds: iscollection(ds) and isscalar(ds.measure), set([coerce]))
])
# Methods keyed by the element schema rather than the full datashape.
schema_method_list.extend([
    (isscalar, set([label, relabel, coerce])),
    (isrecord, set([relabel])),
])
# Expose shape and ndim as properties (no call parentheses needed).
method_properties.update([shape, ndim])
@dispatch(Expr)
def discover(expr):
    # An expression's discovered datashape is just its declared dshape.
    return expr.dshape
| maxalbert/blaze | blaze/expr/expressions.py | Python | bsd-3-clause | 22,980 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe import _
def execute(filters=None):
    """Build the Inactive Customers report.

    A customer is listed when the number of days since their last order is
    at least the ``days_since_last_order`` filter value.  Returns the
    ``(columns, data)`` pair expected by Frappe script reports.
    """
    if not filters:
        filters = {}

    days_since_last_order = filters.get("days_since_last_order")
    doctype = filters.get("doctype")
    if cint(days_since_last_order) <= 0:
        # Bug fix: the check rejects zero as well, so the message must say
        # "greater than zero", not "greater than or equal to zero".
        frappe.throw(_("'Days Since Last Order' must be greater than zero"))

    columns = get_columns()
    customers = get_sales_details(doctype)

    data = []
    for cust in customers:
        # cust[8] is 'days_since_last_order' from get_sales_details().
        if cint(cust[8]) >= cint(days_since_last_order):
            # Insert the last order amount just before the last-order-date
            # column so the row matches get_columns() ordering.
            cust.insert(7, get_last_sales_amt(cust[0], doctype))
            data.append(cust)

    return columns, data
def get_sales_details(doctype):
    # Returns one list per customer: name, customer_name, territory,
    # customer_group, order count, total value, total considered,
    # last order date and days since last order -- most-inactive first.
    cond = """sum(so.base_net_total) as 'total_order_considered',
        max(so.posting_date) as 'last_order_date',
        DATEDIFF(CURDATE(), max(so.posting_date)) as 'days_since_last_order' """
    if doctype == "Sales Order":
        # Stopped orders only count the delivered fraction of their total.
        cond = """sum(if(so.status = "Stopped",
                so.base_net_total * so.per_delivered/100,
                so.base_net_total)) as 'total_order_considered',
            max(so.transaction_date) as 'last_order_date',
            DATEDIFF(CURDATE(), max(so.transaction_date)) as 'days_since_last_order'"""
    return frappe.db.sql("""select
        cust.name,
        cust.customer_name,
        cust.territory,
        cust.customer_group,
        count(distinct(so.name)) as 'num_of_order',
        sum(base_net_total) as 'total_order_value', {0}
        from `tabCustomer` cust, `tab{1}` so
        where cust.name = so.customer and so.docstatus = 1
        group by cust.name
        order by 'days_since_last_order' desc """.format(cond, doctype), as_list=1)
def get_last_sales_amt(customer, doctype):
    # Net total of the customer's most recent submitted document, or 0 when
    # none exists.  Sales Orders are dated by transaction_date, invoices by
    # posting_date.
    cond = "posting_date"
    if doctype =="Sales Order":
        cond = "transaction_date"
    res = frappe.db.sql("""select base_net_total from `tab{0}`
        where customer = %s and docstatus = 1 order by {1} desc
        limit 1""".format(doctype, cond), customer)
    return res and res[0][0] or 0
def get_columns():
    # Column definitions in Frappe's "Label:Fieldtype:Width" report format.
    return [
        _("Customer") + ":Link/Customer:120",
        _("Customer Name") + ":Data:120",
        _("Territory") + "::120",
        _("Customer Group") + "::120",
        _("Number of Order") + "::120",
        _("Total Order Value") + ":Currency:120",
        _("Total Order Considered") + ":Currency:160",
        _("Last Order Amount") + ":Currency:160",
        _("Last Order Date") + ":Date:160",
        _("Days Since Last Order") + "::160"
    ]
| manassolanki/erpnext | erpnext/selling/report/inactive_customers/inactive_customers.py | Python | gpl-3.0 | 2,411 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Docstrings follow the numpy conventions described at:
# https://numpydoc.readthedocs.io/en/latest/example.html#example
""" Discovers the changes made by ``PortChange_Generatr`` and notifies a Slack Channel.
Triggered by cloud watch events that are monitoring for security group changes.
Once discovered a notification will be sent to the specified Slack Channel.
Raises
------
HTTPError
If there is a problem with processing the request.
URLError
If there is a problem connecting to the server.
"""
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
import os
import logging
import json
SLACK_CHANNEL = os.environ['channel']
HOOK_URL = os.environ['hook']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
severity = 'warning'
def lambda_handler(event, context):
    """Notify Slack about a security-group ingress change.

    Extracts the changed protocols, ports and CIDR ranges from the
    CloudWatch event detail, classifies the change severity, and posts a
    formatted attachment to the configured Slack channel.
    """
    protocols = []
    ports = []
    ranges = []

    for item in event['detail']['requestParameters']['ipPermissions']['items']:
        # NOTE(review): these comparisons assume IANA protocol numbers
        # (17=UDP, 6=TCP, -1=all); CloudTrail often reports ipProtocol as a
        # string ("tcp"/"udp"/"-1") -- confirm against a real event payload.
        if item['ipProtocol'] == 17:
            protocols.append("UDP")
        elif item['ipProtocol'] == 6:
            protocols.append("TCP")
        elif item['ipProtocol'] == -1:
            protocols.append("All Protocols")
        else:
            protocols.append(item['ipProtocol'])

        # Collapse single-port rules to one number, else show the range.
        if item['toPort'] == item['fromPort']:
            ports.append(item['toPort'])
        else:
            ports.append('{}-{}'.format(item['fromPort'], item['toPort']))

        ranges.append(item['ipRanges']['items'][0]['cidrIp'])

    # Classify: common service ports are "good", ports from AWS Trusted
    # Advisor warnings are "danger", anything else is a "warning".
    if 80 in ports or 443 in ports or 25 in ports or 465 in ports:
        severity = 'good'
        description = 'Normal port changes'
    elif 20 in ports or 21 in ports or 1433 in ports:
        severity = 'danger'
        description = 'Ports changed are from AWS Trusted Advisory Warnings'
    else:
        severity = 'warning'
        description = 'Unexpected port changes'

    # Bug fix: the classified description was previously overwritten by a
    # hard-coded 'Warning: There were port changes made' string when the
    # message dict was rebuilt, making the branch assignments dead stores.
    message = {
        'Name': event['detail']['userIdentity']['arn'].split(':').pop(),
        'Description': description,
        'SecurityGroup': event['detail']['requestParameters']['groupId'],
        'Ports': ports,
        'Protocols': protocols,
        'Ranges': ranges
    }

    slack_message = {
        'channel': SLACK_CHANNEL,
        'username': 'AWS SNS via Lamda :: ChaoSlingr',
        'text': '*'+event['detail']['eventName']+'*',
        'icon_emoji': ':hear_no_evil:',
        'attachments': [{
            'color': severity,
            'text': json.dumps(message, indent=2, separators=(',', ': '))
        }]
    }

    req = Request(
        url=HOOK_URL,
        data=json.dumps(slack_message).encode('utf-8'),
        headers={'content-type': 'application/json'}
    )
    try:
        response = urlopen(req)
        response.read()
        logger.info("Message posted to %s", slack_message['channel'])
    except HTTPError as e:
        logger.error("Request failed: %d %s", e.code, e.reason)
    except URLError as e:
        logger.error("Server connection failed: %s", e.reason)
| Optum/ChaoSlingr | src/lambda/PortChange_Slack_Trackr.py | Python | apache-2.0 | 3,142 |
"""
Do not use the special characters in a filename.
Only alphabets, numbers and underbars can be used for a filename.
== Vilolation ==
/testdir/test-1.c <== Violation. - is used.
/testdir1/test!1.c <== Violation. ! is used
== Good ==
testdir/test.c
testdir1/test_1.c
"""
from nsiqcppstyle_reporter import * #@UnusedWildImport
from nsiqcppstyle_rulemanager import * #@UnusedWildImport
from nsiqcppstyle_checker import * #@UnusedWildImport
def RunRule(lexer, filename, dirname):
    """Report an error when the filename contains any special character."""
    # Only alphanumerics, underscores and dots are allowed in a filename.
    if Match(r"^[_A-Za-z0-9\.]*$", filename):
        return
    nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, "", 0, 0), __name__,
                                'Do not use special characters in file name (%s).' % filename)
# Register the rule to fire once at the start of each analyzed file.
ruleManager.AddFileStartRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    # Unit tests for the special-characters-in-filename rule.
    def setUpRule(self):
        ruleManager.AddFileStartRule(RunRule)
    def test1(self):
        # Filenames containing '-' or '!' must be reported as violations.
        self.Analyze("test/this-file.c", "")
        self.Analyze("test2/!thisfile22.c", "")
        assert CheckErrorContent(__name__)
    def test2(self):
        # Plain alphanumeric filenames must pass without errors.
        self.Analyze("test/thisfile.c", "")
        self.Analyze("test/thisfile.h", "")
        assert not CheckErrorContent(__name__)
| DLR-SC/tigl | thirdparty/nsiqcppstyle/rules/RULE_3_2_CD_do_not_use_special_characters_in_filename.py | Python | apache-2.0 | 1,419 |
# downscale the prepped cmip5 data downloaded using SYNDA for EPSCoR SC project
# author: Michael Lindgren -- June 09, 2016 (UPDATED: Oct 28, 2016)
if __name__ == '__main__':
    import glob, os, rasterio, itertools
    from functools import partial
    import downscale
    from downscale import preprocess, Mask, utils
    import numpy as np
    import argparse

    # parse the commandline arguments
    parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
    parser.add_argument( "-b", "--base_dir", action='store', dest='base_dir', type=str, help="base directory where data is stored in structured folders" )
    parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
    parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
    parser.add_argument( "-mv", "--mean_variable", action='store', dest='mean_variable', type=str, help="cmip5 mean variable name (exact)" )
    parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
    parser.add_argument( "-u", "--units", action='store', dest='units', type=str, help="cmip5 units name (exact)" )
    parser.add_argument( "-met", "--metric", action='store', dest='metric', type=str, help="cmip5 metric name (exact)" )
    parser.add_argument( "-lev", "--level", action='store', dest='level', const=None, type=int, help="optional level to extract for downscaling" )
    parser.add_argument( "-levn", "--level_name", action='store', dest='level_name', const=None, type=str, help="name of level variable" )
    args = parser.parse_args()

    # unpack the args
    variable = args.variable
    mean_variable = args.mean_variable
    scenario = args.scenario
    model = args.model
    units = args.units
    metric = args.metric
    base_dir = args.base_dir
    level = args.level
    level_name = args.level_name
    project = 'ar5'

    # some setup args
    base_path = os.path.join( base_dir, 'cmip5', 'prepped' )
    output_dir = os.path.join( base_dir, 'downscaled' )
    variables = [ variable ]
    scenarios = [ scenario ]
    models = [ model ]
    anom = True  # write out anoms (True) or not (False)

    # modelnames maps the CMIP5 model name used for file queries to the
    # display name used in output filenames (only NCAR-CCSM4 differs).
    all_models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ]
    modelnames = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'NCAR-CCSM4' ]
    modelnames = dict( zip( all_models, modelnames ) )

    if not os.path.exists( output_dir ):
        os.makedirs( output_dir )
    os.chdir( output_dir )

    for variable, model, scenario in itertools.product( variables, models, scenarios ):
        # historical runs span 1900-2005; projections span 2006-2100
        if scenario == 'historical':
            begin = 1900
            end = 2005
        else:
            begin = 2006
            end = 2100

        modelname = modelnames[ model ]

        # SETUP BASELINE -- downscaled `tas` is our baseline data for the tasmin tasmax
        clim_path = os.path.join( base_dir, 'downscaled', modelname, scenario, mean_variable )
        filelist = glob.glob( os.path.join( clim_path, '*.tif' ) )
        # sort these files chronologically and clip to the modeled period
        filelist = utils.only_years( utils.sort_files( filelist ), begin=begin, end=end )
        baseline = downscale.Baseline( filelist )

        input_path = os.path.join( base_path, model, scenario, variable )
        output_path = os.path.join( output_dir, model, scenario, variable )
        if not os.path.exists( output_path ):
            os.makedirs( output_path )
        print( input_path )

        # # NOTE:
        # ALL DATA FALL INTO THE historical ARGUMENT WITH THIS DOWNSCALING DUE TO NOT USING A CLIMATOLOGY TO
        # GENERATE ANOMALIES WE ARE GENERATING DELTAS OF MIN/MAX FROM MEAN OF THE SAME VARIABLE GROUP ie. tas/tasmax/tasmin
        # list files for this set of downscaling -- one per folder
        fn, = glob.glob( os.path.join( input_path, '*.nc' ) )
        historical = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric, begin=begin, end=end )

        # mean data -- hacky...
        mean_fn, = glob.glob( os.path.join( input_path.replace( variable, mean_variable ), '*.nc' ) )
        mean_ds = downscale.Dataset( mean_fn, mean_variable, model, scenario, project=project, units=units, metric=metric, begin=begin, end=end )
        future = None

        # DOWNSCALE
        mask = rasterio.open( baseline.filelist[0] ).read_masks( 1 )

        # convert from Kelvin to Celcius
        if variable in ['tas','tasmax','tasmin']:
            print( '>>> conversion to Celcius >>>' )
            if historical:
                historical.ds[ variable ] = historical.ds[ variable ] - 273.15
                historical.ds[ variable ][ 'units' ] = units
                mean_ds.ds[ mean_variable ] = mean_ds.ds[ mean_variable ] - 273.15
                mean_ds.ds[ mean_variable ][ 'units' ] = units
            else:
                # bug fix: the exception was previously constructed but never
                # raised, silently continuing with unconverted data
                raise Exception( 'minmax only works with a single series run in the `historical` arg slot' )

        # these have absolutely no effect but are here since they are a required variable to the super class DeltaDownscale...
        # we need a way to make this more nimble as this is not ideal...
        clim_begin = '1961'
        clim_end = '1990'

        # rounding switch: precip is rounded to whole numbers and downscaled
        # multiplicatively; everything else rounds to 1 decimal place
        if variable == 'pr':
            rounder = np.rint
            downscaling_operation = 'mult'
        elif variable in [ 'hur','cld','clt' ]:
            rounder = partial( np.round, decimals=1 )
            downscaling_operation = 'mult'
        else:
            rounder = partial( np.round, decimals=1 )
            downscaling_operation = 'add'

        def round_it( x, mask ):
            # round only the unmasked cells
            arr = np.ma.masked_array( data=x, mask=mask )
            return rounder( arr )

        round_data = partial( round_it, mask=( mask==0 ) )

        def round_data_clamp( x ):
            ''' hur specific: clamp percentages into the valid [0, 100] range '''
            x[ x < 0.0 ] = 0.0
            x[ x > 100.0 ] = 100.0
            return round_data( x )

        if variable == 'hur' or variable == 'clt':
            post_downscale_function = round_data_clamp
        else:
            post_downscale_function = round_data

        ar5 = downscale.DeltaDownscaleMinMax( baseline=baseline, clim_begin=clim_begin, clim_end=clim_end,
                historical=historical, future=future, downscaling_operation=downscaling_operation,
                mask=mask, mask_value=0, ncpus=32, src_crs={'init':'epsg:4326'}, src_nodata=None,
                dst_nodata=None, post_downscale_function=post_downscale_function, varname=variable,
                modelname=modelname, anom=anom, mean_ds=mean_ds, mean_variable=mean_variable )
        ar5.downscale( output_dir=output_path )
| ua-snap/downscale | snap_scripts/epscor_sc/downscale_cmip5_epscor_sc_minmax.py | Python | mit | 6,870 |
# -*- coding: utf-8 -*-
"""
pyvisa.compat
~~~~~~~~~~~~~
Compatibility layer.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
# True when running on Python 3; selects the right set of compat aliases.
PYTHON3 = sys.version >= '3'
if PYTHON3:
    string_types = str
    def u(x):
        # Text literals are already unicode on Python 3.
        return x
    integer_types = (int, )
    input = input
else:
    string_types = basestring
    import codecs
    def u(x):
        # Decode escape sequences to produce a unicode string on Python 2.
        return codecs.unicode_escape_decode(x)[0]
    integer_types = (int, long)
    input = raw_input
# unittest2 backports the Python 2.7 unittest API to Python 2.6.
if sys.version_info < (2, 7):
    try:
        # noinspection PyPackageRequirements
        import unittest2 as unittest
    except ImportError:
        raise Exception("Testing PyVISA in Python 2.6 requires package 'unittest2'")
else:
    import unittest
# Fallbacks for stdlib features that appeared after Python 2.6.
try:
    from collections import OrderedDict
except ImportError:
    from .ordereddict import OrderedDict
try:
    from logging import NullHandler
except ImportError:
    from .nullhandler import NullHandler
try:
    from subprocess import check_output
except ImportError:
    from .check_output import check_output
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass, portably across Python 2 and 3."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            # Swap in the real metaclass and the real bases when the user's
            # class body is instantiated.
            return meta(name, bases, d)
    # str() keeps the class name a native str on both Python 2 and 3.
    return type.__new__(metaclass, str('temporary_class'), (), {})
| rubund/debian-pyvisa | pyvisa/compat/__init__.py | Python | mit | 1,655 |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import json
from datetime import datetime, timedelta
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.unittest import SkipTest
from survey.models import Survey, Question, Choice, Answer, Ballot, Preset, PresetChoice
from survey.helpers import now, get_current_timezone
try:
from django.utils.timezone import utc
except ImportError:
utc = None
# pylint: disable=R0902
class QuestionTest(TestCase):
    """Exercises the Question model's unicode representation."""

    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.survey = Survey.objects.create(
            title="My new survey", slug="my-new-survey", creator=self.user)
        self.question = Question.objects.create(
            message="What do you like best?", survey=self.survey)

    def test_unicode(self):
        # A question must render to a non-empty unicode string.
        self.assertTrue(unicode(self.question))
class ChoiceTest(TestCase):
    """Exercises the Choice model's unicode representation."""

    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.survey = Survey.objects.create(
            title="My new survey", slug="my-new-survey", creator=self.user)
        self.question = Question.objects.create(
            message="What do you like best?", survey=self.survey)
        self.choice = Choice.objects.create(
            question=self.question, message="Word up dog")

    def test_unicode(self):
        # A choice must render to a non-empty unicode string.
        self.assertTrue(unicode(self.choice))
class IndexViewTest(TestCase):
    """Smoke-tests the survey index view for a logged-in user."""

    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')

    def test_get_index(self):
        # The index must load when at least one survey exists.
        Survey.objects.create(
            title="My new survey", slug="my-new-survey", creator=self.user)
        resp = self.client.get(reverse('index'), follow=True)
        self.assertEqual(resp.status_code, 200)
class BallotResultsViewTest(TestCase):
    # Tests the per-ballot results view, including paging edge cases.
    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="My new survey", slug="my-new-survey", creator=self.user)
        self.questionRA = Question.objects.create(message="What time is it", survey=self.survey, type="RA")
        self.choiceRA = Choice.objects.create(question=self.questionRA, message="5 oclock")
        self.questionTB = Question.objects.create(message="Textbox question", survey=self.survey, type="TB")
        self.choiceTB = Choice.objects.create(question=self.questionTB, message="QuestionText")
        self.ballot = Ballot.objects.create(survey=self.survey)
    def test_view(self):
        response = self.client.get(reverse('ballot', kwargs={'slug': self.survey.slug}), follow=True)
        self.assertEqual(response.status_code, 200)
    def test_empty_page(self):
        # Page 0 is out of range; the view should still render.
        response = self.client.get(reverse('ballot', kwargs={'slug': self.survey.slug, }), {'page': 0}, follow=True)
        self.assertEqual(response.status_code, 200)
    def test_no_ballots_shows_no_ballots(self):
        # With no ballots, all ballot context entries must be None.
        self.ballot.delete()
        response = self.client.get(reverse('ballot', kwargs={'slug': self.survey.slug}), follow=True)
        self.assertIsNone(response.context['ballot'])
        self.assertIsNone(response.context['next_ballot'])
        self.assertIsNone(response.context['previous_ballot'])
    def test_get_ballot_or_404(self):
        # A nonexistent ballot id must 404.
        response = self.client.get(reverse('ballot', kwargs={'slug': self.survey.slug, 'ballot_id': 123456789}), follow=True)
        self.assertEqual(response.status_code, 404)
class SurveyViewTest(TestCase):
    # Covers survey activation windows, ballot submission validation, and
    # survey creation via posted JSON.
    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="My new survey", slug="my-new-survey", creator=self.user)
        self.survey_url = self.survey.get_absolute_url()
        self.survey_results_url = reverse('surveyresults', kwargs={'slug': self.survey.slug})
        self.questionRA = Question.objects.create(message="What time is it", survey=self.survey, type="RA")
        self.choiceRA = Choice.objects.create(question=self.questionRA, message="5 oclock")
        self.questionTB = Question.objects.create(message="Textbox question", survey=self.survey, type="TB")
        self.choiceTB = Choice.objects.create(question=self.questionTB, message="QuestionText")
        self.now = now()
        self.one_hour = timedelta(hours=1)
    def test_date_range_makes_survey_active(self):
        self.survey.start_date = self.now - self.one_hour
        self.survey.end_date = self.now + self.one_hour
        self.survey.save()
        self.assertTrue(self.survey.is_active)
    def test_date_range_makes_survey_inactive(self):
        self.survey.start_date = self.now - 2 * self.one_hour
        self.survey.end_date = self.now - self.one_hour
        self.survey.save()
        self.assertFalse(self.survey.is_active)
    def test_date_range_with_no_end_date_makes_survey_active(self):
        # No end date means the survey stays open once started.
        self.survey.start_date = self.now - self.one_hour
        self.survey.end_date = None
        self.survey.save()
        self.assertTrue(self.survey.is_active)
    def test_date_range_with_no_end_date_makes_survey_inactive(self):
        self.survey.start_date = self.now + self.one_hour
        self.survey.end_date = None
        self.survey.save()
        self.assertFalse(self.survey.is_active)
    def test_is_active_false_closes_survey(self):
        # Anonymous GET of an expired survey must 403 with a 'closed' notice.
        self.client.logout()
        self.survey.start_date = self.now - 2 * self.one_hour
        self.survey.end_date = self.now - self.one_hour
        self.survey.save()
        response = self.client.get(self.survey_url)
        self.assertIn(u'closed', unicode(response))
        self.assertEqual(response.status_code, 403)
    def test_is_active_false_closes_survey_post(self):
        self.survey.start_date = self.now - 2 * self.one_hour
        self.survey.end_date = self.now - self.one_hour
        self.survey.save()
        response = self.client.post(self.survey_url)
        self.assertIn(u'closed', unicode(response))
        self.assertEqual(response.status_code, 403)
    def test_get_survey(self):
        self.survey.publish()
        response = self.client.get(self.survey_url, follow=True)
        self.assertEqual(response.status_code, 200)
    def test_post_survey(self):
        self.survey.publish()
        postdata = {"q" + unicode(self.questionRA.pk): "c" + unicode(self.choiceRA.pk), "q" + unicode(self.questionTB.pk): "word up dawg"}
        response = self.client.post(self.survey_url, postdata)
        self.assertEqual(response.status_code, 200)
    def test_survey_results(self):
        postdata = {"q" + unicode(self.questionRA.pk): "c" + unicode(self.choiceRA.pk), "q" + unicode(self.questionTB.pk): "word up dawg"}
        response = self.client.post(self.survey_url, postdata)
        response = self.client.get(self.survey_results_url)
        self.assertEqual(response.status_code, 200)
    def test_post_survey_empty_post_makes_no_answers(self):
        self.survey.publish()
        response = self.client.post(self.survey_url, {})
        self.assertEqual(Answer.objects.all().count(), 0)
        self.assertEqual(response.status_code, 200)
    def test_post_survey_empty_value_makes_no_answers(self):
        self.survey.publish()
        postdata = {
            u'q%s' % self.questionTB.pk: '', # Empty value in the textbox
        }
        response = self.client.post(self.survey_url, postdata)
        self.assertEqual(Answer.objects.all().count(), 0)
        self.assertEqual(response.status_code, 200)
    def test_post_survey_bad_choice_ignores_it(self):
        # A choice id that doesn't exist must be silently discarded.
        postdata = {
            u'q%s' % self.questionRA.pk: 'c328947293847',
        }
        self.client.post(self.survey_url, postdata)
        self.assertEqual(Answer.objects.all().count(), 0)
    def test_post_survey_choice_for_wrong_question_ignores_it(self):
        postdata = {
            u'q%s' % self.questionRA.pk: 'c%s' % self.choiceTB.pk, # choiceTB belongs to questionTB, not question 1!
        }
        self.client.post(self.survey_url, postdata)
        self.assertEqual(Answer.objects.all().count(), 0)
    def test_post_survey_text_answer_for_multichoice_ignores_it(self):
        postdata = {
            u'q%s' % self.questionRA.pk: 'I love pizza.', # questionRA has radio buttons!
        }
        self.client.post(self.survey_url, postdata)
        self.assertEqual(Answer.objects.all().count(), 0)
    def test_new_survey_adds_survey(self):
        # Needs maor casperjs!
        self.client.get(reverse('newsurvey'))
        data = """
        {"title":"a new survey for post data",
        "slug":"post-data-survey",
        "description":"fdsasdf",
        "questions":[
            {"type":"DD",
            "message":"ddl",
            "required":false,
            "order_number":0,
            "choices":[
                {"message":"1","order_number":0},
                {"message":"2","order_number":1}
            ]}
        ]}
        """
        postdata = {'r': data}
        self.client.post(reverse('newsurvey'), postdata)
        self.assertEqual(Survey.objects.get(slug='post-data-survey').title, "a new survey for post data")
    def test_new_survey_unique_slug(self):
        # Posting a survey whose slug already exists must return a warning.
        data = """
        {"title":"a new survey for post data",
        "slug":"my-new-survey",
        "description":"fdsasdf",
        "questions":[
            {"type":"DD",
            "message":"ddl",
            "required":false,
            "order_number":0,
            "choices":[
                {"message":"1","order_number":0},
                {"message":"2","order_number":1}
            ]}
        ]}
        """
        postdata = {'r': data}
        response = self.client.post(reverse('newsurvey'), postdata)
        response_data = json.JSONDecoder().decode(response.content)
        self.assertIn(
            'That title is already taken. Please choose a different one.',
            response_data['warnings'],
            'Integrity error: slug uniqueness is not being inforced')
class SurveyEditViewTest(TestCase):
    # Tests the survey editor endpoint: full-survey JSON round-trips,
    # access control after go-live, and slug/title validation.
    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="Text Only Survey", slug="text-only-survey", creator=self.user)
        self.survey2 = Survey.objects.create(title="Text Only Survey", slug="survey", creator=self.user)
        self.arbitrary_start_date_str = 'Mon, 01 Jan 2013 06:00:00 GMT'
        self.arbitrary_start_date = datetime.strptime(self.arbitrary_start_date_str, '%a, %d %b %Y %H:%M:%S %Z').replace(tzinfo=utc)
        self.preset = Preset.objects.create(title="States")
        PresetChoice.objects.create(preset=self.preset, option="MO")
    def test_get_context_data(self):
        response = self.client.get(reverse('surveyedit', args=[self.survey.slug]), follow=True)
        self.assertEqual(response.status_code, 200)
    def test_post(self):
        # One question of each supported type, posted as the editor's JSON.
        data = """
        {
            "title":"Text Only Survey",
            "slug":"text-onlysurvey",
            "description":"",
            "questions":
            [
                {
                    "type":"TA",
                    "message":"Text Area Question",
                    "required":false,
                    "order_number":0
                },
                {
                    "type":"TB",
                    "message":"Text Box Question",
                    "required":false,
                    "order_number":1
                },
                {
                    "type":"CH",
                    "message":"Check Box Question",
                    "required":false,
                    "order_number":2,
                    "choices":
                    [
                        {
                            "message":"Check Box Choice 1",
                            "order_number":0
                        },
                        {
                            "message":"Check Box Choice 2",
                            "order_number":1
                        },
                        {
                            "message":"Check Box Choice 3",
                            "order_number":2
                        }
                    ]
                },
                {
                    "type":"RA",
                    "message":"Radio Button Question",
                    "required":false,
                    "order_number":3,
                    "choices":
                    [
                        {
                            "message":"Radio Choice 1",
                            "order_number":0
                        },
                        {
                            "message":"Radio Choice 2",
                            "order_number":1
                        },
                        {
                            "message":"Radio Choice 3",
                            "order_number":2
                        }
                    ]
                },
                {
                    "type":"DD",
                    "message":"Drop Down List Question",
                    "required":false,
                    "order_number":4,
                    "choices":
                    [
                        {
                            "message":"Drop Down Choice 1",
                            "order_number":0
                        },
                        {
                            "message":"Drop Down Choice 2",
                            "order_number":1
                        },
                        {
                            "message":"Drop Down Choice 3",
                            "order_number":2
                        }
                    ]
                }
            ]
        }
        """
        postdata = {'r': data}
        response = self.client.post(reverse('surveyedit', args=[self.survey.slug]), postdata)
        self.assertEqual(response.status_code, 200, "The page didn't return a 200")
        self.assertEqual(self.survey.title, 'Text Only Survey', "The Survey didn't update properly")
        self.assertEqual(self.survey.question_set.all()[0].message, 'Text Area Question', "Question didn't update properly")
        self.assertEqual(self.survey.question_set.all()[2].choice_set.all()[1].message, 'Check Box Choice 2', "Question Order is broken")
        self.assertEqual(self.survey.question_set.all()[4].choice_set.all()[2].order_number, 2, "Choice Order is broken")
    def test_has_access(self):
        # Editing a survey that has already started must 404.
        self.survey.start_date = self.arbitrary_start_date
        self.survey.save()
        data = """
        {
            "title":"Text Only Survey",
            "slug":"survey",
            "description":"",
            "questions":
            [
                {
                    "type":"TA",
                    "message":"Text Area Question",
                    "required":false,
                    "order_number":0
                }
            ]
        }
        """
        postdata = {'r': data}
        response = self.client.post(reverse('surveyedit', args=[self.survey.slug]), postdata)
        self.assertEqual(response.status_code, 404, "The page didn't return a 404")
    def test_survey_slug_unique(self):
        # "survey" is already taken by self.survey2, so a warning is expected.
        data = """
        {
            "title":"Text Only Survey",
            "slug":"survey",
            "description":"",
            "questions":
            [
                {
                    "type":"TA",
                    "message":"Text Area Question",
                    "required":false,
                    "order_number":0
                }
            ]
        }
        """
        postdata = {'r': data}
        response = self.client.post(reverse('surveyedit', args=[self.survey.slug]), postdata)
        response_data = json.JSONDecoder().decode(response.content)
        self.assertIn(
            'That title is already taken. Please choose a different one.',
            response_data['warnings'],
            'Integrity error: slug uniqueness is not being inforced')
    def test_survey_slug_or_title_required(self):
        # Both title and slug empty must produce a validation warning.
        data = """
        {
            "title":"",
            "slug":"",
            "description":"",
            "questions":
            [
                {
                    "type":"TA",
                    "message":"Text Area Question",
                    "required":false,
                    "order_number":0
                }
            ]
        }
        """
        postdata = {'r': data}
        response = self.client.post(reverse('surveyedit', args=[self.survey.slug]), postdata)
        response_data = json.JSONDecoder().decode(response.content)
        self.assertIn('Please enter a valid title.', response_data['warnings'], 'Integrity error: slug must be defined')
class SurveyResultsViewTest(TestCase):
    # Builds a survey with three completed ballots and checks that the
    # results view renders when filtered by a choice.
    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="Text Only Survey", slug="text-only-survey", creator=self.user)
        self.question1 = Question.objects.create(survey=self.survey, message="Question1(CH)", type="CH", required=False, order_number=0)
        self.choice1 = Choice.objects.create(question=self.question1, message="Q1C1", order_number=0)
        self.choice2 = Choice.objects.create(question=self.question1, message="Q1C2", order_number=1)
        self.choice3 = Choice.objects.create(question=self.question1, message="Q1C2", order_number=2)
        self.question2 = Question.objects.create(survey=self.survey, message="Question(RA)", type="RA", required=False, order_number=1)
        self.choice4 = Choice.objects.create(question=self.question2, message="Q2C1", order_number=0)
        self.choice5 = Choice.objects.create(question=self.question2, message="Q2C2", order_number=1)
        self.choice6 = Choice.objects.create(question=self.question2, message="Q2C2", order_number=2)
        self.ballot1 = Ballot.objects.create(survey=self.survey)
        self.ballot2 = Ballot.objects.create(survey=self.survey)
        self.ballot3 = Ballot.objects.create(survey=self.survey)
        # Each ballot answers both questions with a different choice mix.
        self.question1.answer_with_choices((self.choice1, self.choice2, self.choice3), self.ballot1)
        self.question2.answer_with_choices((self.choice4,), self.ballot1)
        self.question1.answer_with_choices((self.choice1, self.choice2), self.ballot2)
        self.question2.answer_with_choices((self.choice4,), self.ballot2)
        self.question1.answer_with_choices((self.choice1,), self.ballot3)
        self.question2.answer_with_choices((self.choice4,), self.ballot3)
    def test_get_context_data(self):
        response = self.client.get(reverse('surveyresults', args=[self.survey.slug, self.choice1.pk]))
        self.assertEqual(response.status_code, 200, "The page didn't return a 200")
class SurveyDetailsViewTest(TestCase):
    """Tests the survey details endpoint: dates, flags and lock-after-live."""

    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="My new survey", slug="my-new-survey", creator=self.user)
        self.arbitrary_start_date_str = 'Tue, 01 Jan 2013 11:00:00 GMT'
        self.arbitrary_start_date = datetime.strptime(self.arbitrary_start_date_str, '%a, %d %b %Y %H:%M:%S %Z').replace(tzinfo=utc)
        self.arbitrary_end_date_str = 'Wed, 01 Jan 2020 11:00:00 GMT'
        self.arbitrary_end_date = datetime.strptime(self.arbitrary_end_date_str, '%a, %d %b %Y %H:%M:%S %Z').replace(tzinfo=utc)

    def test_set_start_and_end_dates(self):
        # Localize before formatting so the posted strings match what the
        # view parses in the current timezone.
        if self.arbitrary_start_date.tzinfo:
            self.arbitrary_start_date = self.arbitrary_start_date.astimezone(get_current_timezone())
        if self.arbitrary_end_date.tzinfo:
            self.arbitrary_end_date = self.arbitrary_end_date.astimezone(get_current_timezone())
        post_data = {
            'start_date': self.arbitrary_start_date.strftime('%m/%d/%Y'),
            'start_time': self.arbitrary_start_date.strftime('%I:%M%p'),
            'end_date': self.arbitrary_end_date.strftime('%m/%d/%Y'),
            'end_time': self.arbitrary_start_date.strftime('%I:%M%p'),
            'set_duration': '',
        }
        self.client.post(reverse('surveydetails', args=[self.survey.slug]), post_data)
        self.survey = Survey.objects.get(slug="my-new-survey")
        self.assertEqual(self.survey.start_date, self.arbitrary_start_date)
        self.assertEqual(self.survey.end_date, self.arbitrary_end_date)

    def test_set_social(self):
        # NOTE(review): posting show_social=True is expected to leave the
        # flag False -- presumably the view treats the field as a disable
        # toggle; confirm against the view implementation.
        self.client.post(reverse('surveydetails', args=[self.survey.slug]), {'show_social': True})
        self.survey = Survey.objects.get(slug="my-new-survey")
        self.assertEqual(self.survey.show_social, False)

    def test_set_track(self):
        # Posting disable_cookies turns off cookie-based tracking.
        self.client.post(reverse('surveydetails', args=[self.survey.slug]), {'disable_cookies': True})
        self.survey = Survey.objects.get(slug="my-new-survey")
        self.assertEqual(self.survey.use_cookies, False)

    def test_blank_dates(self):
        response = self.client.post(reverse('surveydetails', args=[self.survey.slug]), {'start_date': '', 'start_time': '',
                                    'end_date': '', 'end_time': '', 'set_duration': ''})
        self.assertEqual(response.status_code, 200, "This page should return a 200")
        self.assertEqual(self.survey.start_date, None)

    def test_valid_date_values(self):
        # Bug fix: the response was previously assigned and never asserted,
        # so this test could not fail; verify the post succeeds.
        response = self.client.post(reverse('surveydetails', args=[self.survey.slug]), {'start_date': '01/01/2011', 'start_time': '',
                                    'end_date': '', 'end_time': '', 'set_duration': ''})
        self.assertEqual(response.status_code, 200, "This page should return a 200")

    def test_def_get(self):
        # The details endpoint is POST-only; GET must 404.
        response = self.client.get(reverse('surveydetails', args=[self.survey.slug]))
        self.assertEqual(response.status_code, 404, "The page didn't return a 404")

    def test_survey_change_publish_date_after_gone_live(self):
        # Once a ballot exists, changing the publish date must be rejected.
        ballot1 = Ballot.objects.create(survey=self.survey)
        self.survey.start_date = self.arbitrary_start_date
        self.survey.save()
        response = self.client.post(reverse('surveydetails', args=[self.survey.slug]), {'start_date': '01/01/2013', 'start_time': '12:01am',
                                    'end_date': '01/01/2020', 'end_time': '12:00am', 'set_duration': ''})
        response_data = json.JSONDecoder().decode(response.content)
        self.assertEqual(response.status_code, 200, "This page should return a 200")
        self.assertEqual(response_data['errors'][0], "A surveys publish date cannot be changed if it has already gone live.",
                         "Error: Survey publish dates are being allowed to change once ballots are present")
class SurveyDeleteViewTest(TestCase):
    """Tests for the survey-delete endpoint (staff-only)."""

    def setUp(self):
        # Deletion requires a staff account.
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.user.is_staff = True
        self.user.save()
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="My new survey", slug="my-new-survey", creator=self.user)

    def test_post(self):
        """Smoke test: the delete endpoint accepts a POST without raising."""
        # NOTE(review): no assertions here -- consider asserting on the response
        # status and that the survey row was actually removed.
        response = self.client.post(reverse('surveydelete', args=[self.survey.slug]))
class SurveyCloneViewTest(TestCase):
    """Tests for the survey-clone JSON endpoint (staff-only)."""

    def setUp(self):
        # Cloning requires a staff account.
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.user.is_staff = True
        self.user.save()
        self.client.login(username='admin', password='asdf')
        # Source survey with a single multiple-choice question to clone.
        self.survey = Survey.objects.create(title="My new survey", slug="my-new-survey", creator=self.user)
        self.question1 = Question.objects.create(survey=self.survey, message="Question1(CH)", type="CH", required=False, order_number=0)
        self.choice1 = Choice.objects.create(question=self.question1, message="Q1C1", order_number=0)
        self.choice2 = Choice.objects.create(question=self.question1, message="Q1C2", order_number=1)

    def test_post_non_unique_slug(self):
        """Cloning under an existing title is rejected (slugs must stay unique)."""
        response = self.client.post(reverse('surveyclone', args=[self.survey.slug]), {'title': 'My new survey'})
        response_data = json.JSONDecoder().decode(response.content)
        self.assertEqual(response.status_code, 200, "This request should return a 200")
        self.assertEqual(response_data['error'], 'A survey with that title already exists.', 'Unique slugs are not being enforced')

    def test_post_unique_slug(self):
        """A fresh title clones successfully."""
        response = self.client.post(reverse('surveyclone', args=[self.survey.slug]), {'title': 'food survey'})
        response_data = json.JSONDecoder().decode(response.content)
        self.assertEqual(response.status_code, 200, "This request should return a 200")
        self.assertEqual(response_data['status'], 'success', 'The procedure did not succeed')

    def test_post_not_auth_user(self):
        """Non-staff users get an auth error payload (still HTTP 200)."""
        self.user.is_staff = False
        self.user.save()
        response = self.client.post(reverse('surveyclone', args=[self.survey.slug]), {'title': 'My new survey'})
        response_data = json.JSONDecoder().decode(response.content)
        self.assertEqual(response.status_code, 200, "This request should return a 200")
        self.assertEqual(response_data['status'], 'Auth Error', 'The should return an Auth Error')
class SurveyQRCodeView(TestCase):
    """Tests for the survey QR-code endpoint."""

    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        self.survey = Survey.objects.create(title="My new survey", slug="my-new-survey", creator=self.user)

    def test_get(self):
        """The QR endpoint returns 200; skipped if the optional qrcode lib is absent."""
        try:
            response = self.client.get(reverse('qrcode', args=[self.survey.slug]))
            self.assertEqual(response.status_code, 200, "This request should return a 200")
        except ImportError:
            # Skip rather than fail when the optional dependency is missing.
            raise SkipTest("QRCode testing requires the qrcode library")
class SurveyExportViewTest(TestCase):
    """Tests for exporting survey results (Full / Summary reports, staff-only)."""

    def setUp(self):
        # Exports are restricted to staff accounts.
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.user.is_staff = True
        self.user.save()
        self.client.login(username='admin', password='asdf')
        # Survey with one choice (CH), one radio (RA), and one textbox (TB)
        # question, each with its choices.
        self.survey = Survey.objects.create(title="Text Only Survey", slug="text-only-survey", creator=self.user)
        self.question1 = Question.objects.create(survey=self.survey, message="Question1(CH)", type="CH", required=False, order_number=0)
        self.choice1 = Choice.objects.create(question=self.question1, message="Q1C1", order_number=0)
        self.choice2 = Choice.objects.create(question=self.question1, message="Q1C2", order_number=1)
        self.choice3 = Choice.objects.create(question=self.question1, message="Q1C2", order_number=2)
        self.question2 = Question.objects.create(survey=self.survey, message="Question(RA)", type="RA", required=False, order_number=1)
        self.choice4 = Choice.objects.create(question=self.question2, message="Q2C1", order_number=0)
        self.choice5 = Choice.objects.create(question=self.question2, message="Q2C2", order_number=1)
        self.choice6 = Choice.objects.create(question=self.question2, message="Q2C2", order_number=2)
        self.question3 = Question.objects.create(survey=self.survey, message="Question(TB)", type="TB", required=False, order_number=2)
        self.choice7 = Choice.objects.create(question=self.question3, message="Q3C1", order_number=0)
        # Four ballots with varying numbers of selected choices, so the export
        # has real data to aggregate.
        self.ballot1 = Ballot.objects.create(survey=self.survey)
        self.ballot2 = Ballot.objects.create(survey=self.survey)
        self.ballot3 = Ballot.objects.create(survey=self.survey)
        self.ballot4 = Ballot.objects.create(survey=self.survey)
        self.question1.answer_with_choices((self.choice1, self.choice2, self.choice3), self.ballot1)
        self.question2.answer_with_choices((self.choice4,), self.ballot1)
        self.question1.answer_with_choices((self.choice1, self.choice2), self.ballot2)
        self.question2.answer_with_choices((self.choice4,), self.ballot2)
        self.question1.answer_with_choices((self.choice1,), self.ballot3)
        self.question2.answer_with_choices((self.choice4,), self.ballot3)
        # NOTE(review): answer_with_text is being handed a tuple containing a
        # Choice, mirroring answer_with_choices -- verify that matches the
        # answer_with_text signature (a text answer would be expected here).
        self.question3.answer_with_text((self.choice7,), self.ballot4)

    def test_get_full_report(self):
        """The per-ballot ("Full") export renders successfully."""
        response = self.client.get(reverse('exportresults', args=[self.survey.slug]), {'rtype': 'Full'})
        self.assertEqual(response.status_code, 200, "This request should return a 200")

    def test_get_summary_report(self):
        """The aggregated ("Summary") export renders successfully."""
        response = self.client.get(reverse('exportresults', args=[self.survey.slug]), {'rtype': 'Summary'})
        self.assertEqual(response.status_code, 200, "This request should return a 200")

    def test_no_report_type_selected(self):
        """Omitting rtype still yields a 200 response."""
        response = self.client.get(reverse('exportresults', args=[self.survey.slug]))
        self.assertEqual(response.status_code, 200, "This request should return a 200")

    def test_user_not_staff(self):
        """Non-staff users are forbidden from exporting."""
        self.user.is_staff = False
        self.user.save()
        response = self.client.get(reverse('exportresults', args=[self.survey.slug]))
        self.assertEqual(response.status_code, 403, "This request should return a 403")
class PresetSearchView(TestCase):
    """Tests for searching answer presets by title."""

    def setUp(self):
        self.user = User.objects.create_user('admin', email="a@a.com", password='asdf')
        self.client.login(username='admin', password='asdf')
        # One preset ("States") with a single option to search for.
        self.preset = Preset.objects.create(title="States")
        PresetChoice.objects.create(preset=self.preset, option="MO")

    def test_get(self):
        """Searching by an existing title returns 200."""
        response = self.client.get(reverse('preset_search_view'), {'title': 'States'})
        self.assertEqual(response.status_code, 200, "This request should return a 200")
| mostateresnet/django-resnet-survey | survey/tests.py | Python | mit | 30,921 |
import io
from builtins import bytes, str
from fro._implementation import chompers
class Parser(object):
    """
    An immutable parser.

    A ``Parser`` is a thin wrapper around a "chomper" -- the object that
    actually consumes input and produces values. Every combinator method
    returns a new ``Parser`` instead of mutating ``self``.
    """

    def __init__(self, chomper):
        # chomper: the underlying implementation object; never mutated.
        self._chomper = chomper

    # public interface

    def parse(self, lines, loud=True):
        """
        Parse an iterable collection of chunks. Returns the produced value, or throws a ``FroParseError``
        explaining why the parse failed (or returns ``None`` if ``loud`` is ``False``).

        :param Iterable[str] lines:
        :param bool loud: if parsing failures should result in an exception
        :return: Value produced by parse
        """
        tracker = chompers.abstract.FroParseErrorTracker()
        state = chompers.state.ChompState(lines)
        box = self._chomper.chomp(state, tracker)
        if box is None:
            # The chomp itself failed.
            return self._failed_parse(state, tracker, False, loud)
        elif not state.at_end():
            # The chomp succeeded but left unconsumed input behind.
            return self._failed_parse(state, tracker, True, loud)
        return box.value

    def parse_str(self, string_to_parse, loud=True):
        """
        Attempts to parse ``string_to_parse``. Treats the entire string ``string_to_parse`` as a single
        chunk. Returns the produced value, or throws a ``FroParseError`` explaining why
        the parse failed (or returns ``None`` if ``loud`` is ``False``).

        :param str string_to_parse: string to parse
        :param loud: if parsing failures should result in an exception
        :return: value produced by parse
        """
        return self.parse([string_to_parse], loud)

    def parse_file(self, filename, encoding="utf-8", loud=True):
        """
        Parse the contents of a file with the given filename, treating each line as a separate chunk.
        Returns the produced value, or throws a ``FroParseError`` explaining why
        the parse failed (or returns ``None`` if ``loud`` is ``False``).

        :param filename: filename of file to parse
        :param encoding: encoding of filename to parse
        :param loud: if parsing failures should result in an exception
        :return: value produced by parse
        """
        with io.open(filename, encoding=encoding) as file_to_parse:
            return self.parse(file_to_parse, loud=loud)

    def name(self, name):
        """
        Returns a parser equivalent to ``self``, but with the given name.

        :param str name: name for new parser
        :return: a parser identical to this, but with specified name
        :rtype: Parser
        """
        return Parser(self._chomper.clone(name=name))

    def maybe(self, default=None):
        """
        Returns a parser equivalent to ``self``, but defaults to consuming none of the input
        string and producing ``default`` when ``self`` fails to chomp a string. See :doc:`parser` for an explanation of
        chomping.

        :param Any default: default value to produce
        :return: parser that defaults to consuming nothing and producing ``default`` instead of failing
        :rtype: Parser

        Example::

            parser = fro.comp([fro.rgx(r"ab+").maybe("a"), fro.intp])
            parser.parse_str("abb3")  # evaluates to ("abb", 3)
            parser.parse_str("87")  # evaluates to ("a", 87)
        """
        return Parser(chompers.util.OptionalChomper(
            self._chomper,
            default=default,
            significant=self._chomper.significant(),
            name=self._chomper.name()))

    def append(self, value):
        """
        Returns a parser that chomps with the called parser, chomps with the ``Parser``
        represented by ``value``, and produces the value produced by the called parser. The returned parser has
        the same name as significance as ``self``.

        :param Union[Parser,str] value: parser to "append" to ``self``
        :return: ``value`` appended to ``self``
        :rtype: Parser
        """
        # Only self is significant, so .get() unpacks self's produced value.
        return Parser(chompers.composition.CompositionChomper(
            [self._chomper.clone(significant=True),
             _extract(value).clone(significant=False)],
            significant=self._chomper.significant(),
            name=self._chomper.name())).get()

    def prepend(self, value):
        """
        Returns a parser that chomps with the ``Parser`` represented by ``value``, then chomps
        with ``self``, and produces the value produced by ``self``. The returned parser has
        the same name as significance as ``self``.

        :param Union[Parser,str] value: parser to "prepend" to ``self``
        :return: ``value`` prepended to ``self``
        :rtype: Parser
        """
        # Only self is significant, so .get() unpacks self's produced value.
        return Parser(chompers.composition.CompositionChomper(
            [_extract(value).clone(significant=False),
             self._chomper.clone(significant=True)],
            significant=self._chomper.significant(),
            name=self._chomper.name())).get()

    def lstrip(self):
        """
        Returns a parser that is equivalent to ``self``, but ignores and
        consumes any leading whitespace inside a single chunk. Equivalent to
        ``fro.comp([r"~\s*", self]).get()``, but with the same name and significance as ``self``.

        :return: a parser that ignores leading whitespace inside a single chunk
        :rtype: Parser

        Example::

            parser = fro.rgx(r"[a-z]+").lstrip()

            # Will succeed, producing "hello". It's okay if there's no whitespace
            parser.parse_str("hello")

            # Will succeed, producing "world"
            parser.parse_str("\\nworld")

            # Will succeed, producing "planet". Note that the leading whitespace is
            # confined to a single chunk (even though this chunk is different than
            # the chunk that "planet" appears in)
            parser.parse([" ", "planet"])

            # Will fail, leading whitespace is across multiple chunks
            parser.parse(["  ", "\\tgalaxy"])
        """
        return self.prepend(r"~\s*")

    def lstrips(self):
        """
        Returns a parser that is equivalent to ``self``, but ignores and
        consumes any leading whitespace across multiple chunks.

        :return: a parser that ignored leading whitespace
        :rtype: Parser

        Example::

            parser = fro.rgx(r"[a-z]+").lstrips()

            # Will succeed, producing "hello". It's okay if there's no whitespace
            parser.parse_str("hello")

            # Will succeed, producing "world"
            parser.parse_str("\\nworld")

            # Will succeed, producing "planet". Note that the leading whitespace is
            # confined to a single chunk (even though this chunk is different than
            # the chunk that "planet" appears in)
            parser.parse([" ", "planet"])

            # Will succeed, producing "galaxy". Unlike lstrip(), lstrips() can handle
            # whitespace across multiple chunks
            parser.parse(["  ", "\\r\\r", "\\tgalaxy"])
        """
        # until() can span chunk boundaries, unlike a single regex match.
        return self.prepend(until(r"~[^\s]"))

    def rstrip(self):
        """
        Returns a parser that is equivalent to ``self``, but ignores and consumes
        trailing whitespace inside a single chunk. Equivalent to ``fro.comp([self, r"~\s*"]).get()``,
        but with the same name and significance as ``self``.

        :return: parser that ignore trailing whitespace inside a single chunk
        :rtype: Parser

        Example::

            parser = fro.rgx(r"[a-z]+").rstrip()

            # Will succeed, producing "hello". It's okay if there's no whitespace
            parser.parse_str("hello")

            # Will succeed, producing "world"
            parser.parse_str("world\\n")

            # Will succeed, producing "planet". Note that the trailing whitespace is
            # confined to a single chunk (even though this chunk is different than
            # the chunk that "planet" appears in)
            parser.parse(["planet", "  "])

            # Will fail, trailing whitespace is across multiple chunks
            parser.parse(["galaxy\\t", "\\r"])
        """
        return self.append(r"~\s*")

    def rstrips(self):
        """
        Returns a parser that is equivalent to ``self``, but ignores and
        consumes any trailing whitespace across multiple chunks.

        :return: parser that ignores trailing whitespace
        :rtype: Parser

        Example::

            parser = fro.rgx(r"[a-z]+").rstrips()

            # Will succeed, producing "hello". It's okay if there's no whitespace
            parser.parse_str("hello")

            # Will succeed, producing "world"
            parser.parse_str("world\\n")

            # Will succeed, producing "planet".
            parser.parse(["planet", "  "])

            # Will succeed, producing "galaxy". Unlike rstrip(), rstrips() can handle
            # whitespace spread across multiple chunks
            parser.parse(["galaxy\\n\\n", " ", "\\r\\r"])
        """
        # until() can span chunk boundaries, unlike a single regex match.
        return self.append(until(r"~[^\s]"))

    def strip(self):
        """
        Returns a parser that is equivalent to ``self``, but ignores and consumes leading and
        trailing whitespace inside a single chunk. ``self.strip()`` is equivalent to
        ``self.lstrip().rstrip()``.

        :return: parser that ignores leading and trailing whitespace inside a single chunk
        :rtype: Parser

        Example::

            parser = fro.rgx(r"[a-z]+").strip()

            # This will succeed, producing "abc". All whitespace is inside a single chunk.
            parser.parse([" abc \\t"])

            # This will also succeed, producing "abc". All leading whitespace is inside
            # a single chunk, as is all trailing whitespace (even though those chunks
            # are different!)
            parser.parse(["\\n\\n", "abc \\t"])

            # This will not succeed. Leading whitespace is spread across multiple chunks.
            parser.parse(["\\n\\n", "\\n abc\\t\\r"])
        """
        return self.lstrip().rstrip()

    def strips(self):
        """
        Returns a parser object that is equivalent to ``self``, but ignores and consumes leading
        and trailing whitespace, across chunk boundaries. ``self.strips()`` is equivalent to
        ``self.lstrips().rstrips()``.

        :return: parser that ignores leading and trailing whitespace
        :rtype: Parser

        Example::

            parser = fro.rgx(r"[a-z]+").strips()

            # This will succeed, producing "abc". All whitespace is inside a single chunk.
            parser.parse([" abc \\t"])

            # This will also succeed, producing "abc".
            parser.parse(["\\n\\n", "abc \\t"])

            # This will succeed, producing "abc". Unlike strip(), strips() can handle
            # whitespace that spans multiple chunks.
            parser.parse(["\\n\\n", "\\n abc\\t\\r"])
        """
        return self.lstrips().rstrips()

    def unname(self):
        """
        Returns a copy of the called parser that does not have a name.

        :return: a copy of the called parser that does not have a name
        :rtype: Parser
        """
        return Parser(self._chomper.unname())

    def get(self):
        """
        Returns a ``Parser`` object that retrieves the sole first element of the value produced by ``self``, and
        throws an error if ``self`` produces an non-iterable value or an iterable value that does not have exactly
        one element. Equivalent to ``self >> lambda x: x``.

        :return: parser that unpacks the sole produced value
        :rtype: Parser

        Example::

            # Recall that comp(..) always produces a tuple, in this case a tuple with one value
            parser = fro.comp(r"~\(", fro.intp, r"~\)").get()
            parser.parse_str("(-3)")  # evaluates to -3
        """
        # __rshift__ unpacks the produced iterable into the identity function's
        # arguments, so exactly one element is required.
        return self >> (lambda x: x)

    def __invert__(self):
        """
        Returns a new ``Parser`` that is equivalent to ``self`` but is insignificant.

        :return: an insignificant copy of the called parser
        :rtype: Parser

        Example::

            commap = fro.rgx(r",")
            composition = fro.comp([~fro.intp, ~commap, fro.intp]).get()
            composition.parse("2,3")  # evaluates to 3
        """
        return Parser(self._chomper.clone(significant=False))

    def significant(self):
        """
        Returns a parser that is equivalent to ``self`` but is significant.

        :return: a significant copy of the called parser
        :rtype: Parser
        """
        return Parser(self._chomper.clone(significant=True))

    def __or__(self, func):
        """
        Returns a new ``Parser`` object that applies ``func`` to the values produced
        by ``self``. The new parser has the same name and significance as ``self``.

        :param Callable[[T], U] func: function applied to produced values
        :return: a new parser that maps produced values using ``func``
        :rtype: Parser

        Example::

            parser = fro.intp | (lambda x: x * x)
            parser.parse_str("4")  # evaluates to 16
        """
        return Parser(self._chomper.clone(func=func))

    def __rshift__(self, func):
        """
        Returns a ``Parser`` object that unpacks the values produced by ``self`` and then applies ``func`` to
        them. Throws an error if the number of unpacked arguments does not equal a number of arguments that ``func``
        can take, or if the value by produced ``self`` is not unpackable. Equivalent to ``self | lambda x: func(*x)``.
        The new parser has the same name and significance as ``self``.

        :param Callable[?, U] func: function applied to unpacked produced values
        :return: a new parser that maps produced values using ``func``
        :rtype: Parser

        Example::

            parser = fro.comp([fro.intp, r"~,", fro.intp]) >> (lambda x, y: x + y)
            parser.parse_str("4,5")  # evaluates to 9
        """
        return Parser(self._chomper.clone(func=lambda x: func(*x)))

    # internals

    def _failed_parse(self, state, tracker, valid_value, loud):
        # valid_value is True when a value was produced but input remained,
        # i.e. the failure is "unexpected trailing character" rather than a
        # chomp failure (whose errors are already in the tracker).
        if valid_value:
            curr = state.current()
            col = state.column()
            msg = "Unexpected character {}".format(curr[col])
            chomp_err = chompers.chomp_error.ChompError(msg, state.location())
            tracker.report_error(chomp_err)
        return self._raise(tracker.retrieve_error(), loud)

    def _raise(self, err, loud):
        # When not loud, swallow the failure and signal it with None.
        if not loud:
            return None
        if err is None:
            # Invariant violation: a failure path must always have an error.
            raise AssertionError("err to raise is None")
        raise err
# --------------------------------------------------------------------
# internals (put first to avoid use before def'n issues)
def _extract(value):
    """Coerce a parser-like value to its underlying chomper.

    Accepts ``None`` (passed through), a ``str``/``bytes`` regex shorthand
    (wrapped via ``rgx``), or a ``Parser``. Anything else raises ``ValueError``.
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes)):
        # A plain (byte)string is shorthand for a regex parser.
        return rgx(value)._chomper
    if isinstance(value, Parser):
        return value._chomper
    raise ValueError("{} does not represent a parser".format(repr(value)))
def _parse_rgx(regex_string):
    """Split a leading significance marker off a regex string.

    :return: a tuple of (modified regex_string, whether significant)
    """
    # Slicing (rather than startswith) keeps bytes inputs falling through
    # harmlessly, since a bytes slice never equals a str literal.
    leading_one = regex_string[0:1]
    if leading_one == r"~":
        # A bare leading tilde marks the parser as insignificant.
        return regex_string[1:], False
    leading_two = regex_string[0:2]
    if leading_two == r"\~":
        # An escaped tilde: drop the backslash, stay significant.
        return regex_string[1:], True
    return regex_string, True
# --------------------------------------------------------------------
# public interface
def alt(parser_values, name=None):
    """
    Returns a parser that is the alternation of the parsers in ``parser_values``.

    The returned parser tries each parser in ``parser_values`` in order and
    produces the value of the first successful chomp; it fails if every
    alternative fails.

    :param Iterable[Union[Parser | str]] parser_values: collection of parser values
    :param str name: name of the created parser
    :return: a parser that is the alternation of the parsers in ``parser_values``
    :rtype: Parser

    Example::

        parser = fro.alt([r"a*b*c*", r"[0-9]{3}", fro.intp])
        parser.parse_str("aac")  # evaluates to "aac"
        parser.parse_str("12")  # evaluates to 12
        parser.parse_str("235")  # evaluates to "235"
        parser.parse_str("abc123")  # fails
        parser.parse_str("")  # evaluates to ""
        parser.parse_str("1234")  # fails

        # The last one is tricky: r"a*b*c*" fails on "1234", then r"[0-9]{3}"
        # chomps "123" and wins -- but "4" is left unconsumed, so the overall
        # parse of "1234" fails.
    """
    extracted = [_extract(value) for value in parser_values]
    return Parser(chompers.alternation.AlternationChomper(extracted, name=name))
def chain(func, name=None):
    """
    Given a function ``func`` mapping one parser to another, returns a parser
    conceptually equivalent to ``func(func(func(...)))``; successive calls to
    ``func`` are made lazily, on an as-needed basis, during parsing.

    Fro parsers parse top-down, so take care to avoid left recursion: the
    parser ``func(parser)`` should consume input before delegating parsing to
    its ``parser`` argument.

    :param Callable[[Parser],Union[Parser,str]] func: function from ``Parser`` to parser value
    :param name: name for created parser
    :return: lazily-evaluated infinite parser
    :rtype: Parser

    Example::

        box = fro.BoxedValue(None)

        def wrap(parser):
            openp = fro.rgx(r"[a-z]", name="open") | box.update_and_get
            closep = fro.thunk(lambda: box.get(), name="close")
            return fro.comp([~openp, parser.maybe(0), ~closep]) >> (lambda n: n + 1)

        parser = fro.chain(wrap)
        parser.parse_str("aa")  # evaluates to 1
        parser.parse_str("ab")  # fails
        parser.parse_str("aeiiea")  # evaluates to 3
        parser.parse_str("aeiie")  # fails
    """
    def chomper_func(chomper):
        # Lift func from the Parser level down to the chomper level.
        return _extract(func(Parser(chomper)))
    return Parser(chompers.util.ChainChomper(chomper_func, name=name))
def comp(parser_values, sep=None, name=None):
    """
    Returns a parser that is the composition of the parsers in ``parser_values``.

    The returned parser chomps with each parser in ``parser_values`` in turn
    and produces a tuple of the values they produce. If ``sep`` is not ``None``,
    it is chomped (and its value discarded) between consecutive elements.

    :param Iterable[Union[Parser,str]] parser_values: collection of parser values to compose
    :param Union[Parser,str] sep: separating parser to use between composition elements
    :param str name: name for the parser
    :return: a parser that is the composition of the parsers ``parser_values``
    :rtype: Parser

    Example::

        parser = fro.comp([r"ab?c+", r"~,", fro.intp])
        parser.parse_str("abcc,4")  # evaluates to ("abcc", 4)
        parser.parse_str("ac,-1")  # evaluates to ("ac", -1)
        parser.parse_str("abc,0,")  # fails
    """
    # A bare string would be iterated character by character -- almost
    # certainly a caller mistake, so reject it loudly.
    if isinstance(parser_values, (str, bytes)):
        raise TypeError("Do not pass a string/bytes for the parser_values argument")
    extracted = [_extract(value) for value in parser_values]
    return Parser(chompers.composition.CompositionChomper(extracted, sep, name=name))
def group_rgx(regex_string, name=None):
    """
    Returns a parser that consumes the regular expression ``regex_string`` and
    produces a tuple of the groups of the corresponding match. See the
    `re module <https://docs.python.org/3/library/re.html>`_ for regex syntax
    and a description of regular expression groups.

    :param str regex_string: regular expression
    :param str name: name for the parser
    :return: parser producing the tuple of match groups for ``regex_string``
    :rtype: Parser

    Example::

        parser = fro.group_rgx(r"(x*)(y*)(z*)")
        parser.parse_str("xxz")  # evaluates to ("xx", "", "z")
        parser.parse_str("wxyz")  # fails
    """
    # A leading "~" in regex_string marks the parser insignificant.
    pattern, significant = _parse_rgx(regex_string)
    return Parser(chompers.regex.GroupRegexChomper(
        pattern, significant=significant, name=name))
def nested(open_regex_string, close_regex_string, reducer="".join, name=None):
    """
    Returns a ``Parser`` that parses well-nested sequences where the opening token is given by
    ``open_regex_string`` and the closing token given by ``close_regex_string``.

    The parser passes an iterator containing the chunks of content between the first opening token
    and final closing token into ``reducer``, and produces the resulting value. The default behavior
    is to concatenate the chunks.

    If there are overlapping opening and closing tokens, the token with the earliest start positions wins,
    with ties going to opening tokens.

    :param str open_regex_string: regex for opening tokens
    :param str close_regex_string: regex for closing tokens
    :param Callable[[Iterable[str],T] reducer: function from iterator of chunks to produced value
    :param str name: name for the parser
    :return: a parser for well-nested sequences delimited by the given tokens
    :rtype: Parser

    Example::

        parser = fro.nested(r"\(", r"\)")
        parser.parse_str("(hello (there))")  # evaluates to "hello (there)"
        parser.parse_str("(hello (there)")  # fails, no closing ) for the first (
    """
    return Parser(chompers.nested.NestedChomper(
        open_regex_string,
        close_regex_string,
        reducer,
        name=name))
def rgx(regex_string, name=None):
    """
    Returns a parser that parses strings matching the given regular expression
    and produces the string it consumed. Regex syntax follows the
    `re module <https://docs.python.org/3/library/re.html>`_.

    :param str regex_string: regex that parser should match
    :param str name: name for the parser
    :return: parser that parses strings that match the given regular expression
    :rtype: Parser

    Example::

        parser = fro.rgx(r"abc+")
        parser.parse_str("abccc")  # evaluates to "abccc"
        parser.parse_str("abd")  # fails
    """
    # A leading "~" in regex_string marks the parser insignificant.
    pattern, significant = _parse_rgx(regex_string)
    return Parser(chompers.regex.RegexChomper(
        pattern, significant=significant, name=name))
def seq(parser_value, reducer=list, sep=None, name=None):
    """
    Returns a parser that parses sequences of the values parsed by ``parser_value``.

    More specifically, the returned parser repeatedly chomps with ``parser_value`` until it fails,
    passes an iterator of the produced values as argument to ``reducer``, and produces the
    resulting value. ``reducer`` defaults to producing a list of the produced values. If ``sep``
    is not ``None``, the returned parser chomps using
    ``sep`` between each ``parser_value`` chomp (and discards the produced value).

    :param Union[Parser,str] parser_value: Parser-like value
    :param Callable[[Iterable[str]],T] reducer: function from iterator of chunks to produced value
    :param Union[Parser,str] sep: separating parser to use between adjacent sequence elements
    :param str name: name for the parser
    :return: a parser that parses sequences of the values parsed by ``parser_value``
    :rtype: Parser

    Example::

        parser = fro.seq(fro.intp, sep=r",")
        parser.parse_str("")  # evaluates to []
        parser.parse_str("1")  # evaluates to [1]
        parser.parse_str("1,2,3")  # evaluates to [1, 2, 3]
        parser.parse_str("1,2,3,")  # fails
    """
    return Parser(chompers.sequence.SequenceChomper(
        _extract(parser_value), reducer, _extract(sep), name=name))
def thunk(func, name=None):
    """
    Given a function ``func``, which takes no argument and produces a parser value, returns a parser
    that when chomping, calls ``func()`` and chomps with the resulting parser. This function is primarily
    intended for creating parsers whose behavior is dependent on some sort of external state.

    :param Callable[[],Parser] func:
    :param str name: name for the parser
    :return: a parser that parses with the parsers generated by ``func``
    :rtype: Parser

    Example::

        regex_box = fro.BoxedValue(r"ab*")
        parser = fro.thunk(lambda: regex_box.get(), name="Boxed regex")
        parser.parse_str("abb")  # evaluates to "abb"
        parser.parse_str("aab")  # fails
        regex_box.update(r"cd*")
        parser.parse_str("cdddd")  # evaluates to "cdddd"
        parser.parse_str("abb")  # fails
    """
    # func() is re-evaluated on every chomp, then coerced to a chomper.
    return Parser(chompers.util.ThunkChomper(
        lambda: _extract(func()), name=name))
def tie(func, name=None):
    """
    Given a function ``func``, which maps one parser to another parser, returns a cyclic parser whose
    structure matches the parsers returned by ``func``.

    Conceptually, what happens is::

        stub = some_placeholder
        result = func(stub)
        ...  # in result, replace all references to stub to instead point back to result

    The parser ``tie(func)`` is equivalent to ``chain(func)``, except that ``tie(func)`` is a
    cyclic parser, whereas ``chain(func)`` is a lazily-evaluated infinite parser.
    This difference is relevant only when the corresponding parsers are dependent on external state.
    In other cases, it is more memory-efficient to use ``tie(func)``.

    Fro parsers parse top-down, so users of this function should take care to avoid left recursion.
    In general the parser ``func(parser)`` should consume input before delegating
    parsing to the ``parser`` argument.

    Since parsers are immutable, the only way to create a self-referencing parser is via ``tie(..)``.

    :param Callable[[Parser],Parser] func: function for generating cyclic parser
    :param str name: name for the parser
    :return: a cyclic parser whose structure matches the parsers returned by ``func``
    :rtype: Parser

    Example::

        def func(parser):
            return fro.comp([r"~\(", parser.maybe(0), r"~\)"]) | (lambda n: n + 1)

        parser = fro.tie(func)
        parser.parse("(())")  # evaluates to 2
        parser.parse("(((())))")  # evaluates to 4
        parser.parse("((()")  # fails
    """
    # Build the cycle: hand func a stub, then point the stub back at the
    # parser func constructed around it.
    stub_chomper = chompers.util.StubChomper(name=name)
    stub_parser = Parser(stub_chomper)
    result = func(stub_parser)
    stub_chomper.set_delegate(result._chomper)
    if name is not None:
        result = result.name(name)
    return result
def until(regex_str, reducer=lambda _: None, name=None):
    """
    Build a parser that consumes everything up to (but not including) the
    first match of ``regex_str``, or to the end of the input if there is no
    match.

    The consumed chunks are handed to ``reducer`` as an iterator, and the
    parser produces whatever ``reducer`` returns (``None`` with the default
    reducer). The match itself is left unconsumed.

    :param str regex_str: regex at which consumption stops
    :param Callable[[Iterable[str]],T] reducer: maps the iterator of consumed
        chunks to the produced value
    :param str name: name for the parser
    :return: a parser that consumes all input until it encounters a match to
        ``regex_str`` or the end of the input
    :rtype: Parser

    Example::

        untilp = fro.until(r"a|b",
                           reducer=lambda chunks: sum(len(chunk) for chunk in chunks),
                           name="until a or b")
        parser = fro.comp([untilp, r"apples"], name="composition")
        parser.parse(["hello\\n", "world\\n", "apples"])  # evaluates to (12, apples)
    """
    pattern, significant = _parse_rgx(regex_str)
    chomper = chompers.until.UntilChomper(pattern, reducer, name=name,
                                          significant=significant)
    return Parser(chomper)
# Regex for the mantissa: either nothing before the decimal point
# (e.g. ".5", "-.5") or something before it (e.g. "3", "3.", "3.14").
# nothing before decimal or something before decimal
_floatp = r"(-?\.[0-9]+)|(-?[0-9]+(\.[0-9]*)?)"
#: A parser that parses floating-point values from their string representations.
#: Accepts an optional scientific-notation exponent (e.g. "1.5e-3").
#:
#: :type: Parser
floatp = (rgx(r"{}([eE][-+]?[0-9]+)?".format(_floatp)) | float).name("float")
#: A parser that parses int values from their string representations.
#:
#: :type: Parser
intp = (rgx(r"-?[0-9]+") | int).name("int")
#: A parser that parses non-negative integers (i.e. natural numbers) from their string representations.
#:
#: :type: Parser
natp = (rgx(r"[0-9]+") | int).name("non-negative int")
#: A parser that parses positive integers from their string representations.
#: Leading zeros are allowed, but the value must be > 0.
#:
#: :type: Parser
posintp = (rgx(r"0*[1-9][0-9]*") | int).name("positive int")
| ethantkoenig/fro | fro/_implementation/parser.py | Python | mit | 29,302 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-28 10:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``sms_expiration_date`` field to SiteConfiguration."""
    # NOTE(review): the help_text reads "will beremoved" (missing space).
    # Migrations are frozen snapshots, so fix the typo on the model field and
    # generate a new migration rather than editing this string in place.

    dependencies = [("site_config", "0008_siteconfiguration_default_number_prefix")]

    operations = [
        migrations.AddField(
            model_name="siteconfiguration",
            name="sms_expiration_date",
            field=models.DateField(
                help_text="If this date is set, any messages older than this will beremoved from the database.",
                null=True,
                verbose_name="SMS Expiration Date",
            ),
        )
    ]
| monty5811/apostello | site_config/migrations/0009_siteconfiguration_sms_expiration_date.py | Python | mit | 674 |
#!/usr/bin/python3
"""v through is an arbitrary type corresponding to a node value in a
tree. label is another arbitrary type representing all labels."""
""" root = None | info"""
""" info = collections.namedtuple(label, v, [branchings])"""
""" branching = root | (root, root)"""
# The major differences between my current implementation and the Yakker implementation are:
# 1. The non-packed nodes in their tree are binarized.
# 2. Nodes embed both packing and the normal branching in the same data structure. Each node contains a list of pairs of children.
# 3. Every node contains, in addition to its children, an arbitrary semantic value and a label.
# 4. Both have a weak hash table containing links to all the partial trees in the forest. However, their implementation seems to use the hash table like a set, with a function that checks directly if a partial tree is in the table and returning it if so.
| ceridwen/combinators | yakker/history.py | Python | mit | 925 |
class DecisionNode:
    """
    A single node of a decision tree.

    Interior nodes store the split (``col``/``value``) together with their
    two subtrees (``tb``/``fb``); leaf nodes store only ``result``.

    :param col: Column of the variable to split in this node (-1 for leaves)
    :param value: Value of the variable used to split
    :param result: Result contained in leaf nodes
    :param tb: Left branch of the split
    :param fb: Right branch of the split
    """

    def __init__(self, col=-1, value=None, result=None, tb=None, fb=None):
        self.col, self.value, self.result = col, value, result
        self.tb, self.fb = tb, fb
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Grab the list of databases to be exported in the MSG configuration file and
export them to cloud storage.
Files beyond a maximum limit are split according to the number of chunks set
in the config file.
Usage:
python exportDBsToCloud.py
"""
__author__ = 'Daniel Zhang (張道博)'
__copyright__ = 'Copyright (c) 2014, University of Hawaii Smart Energy Project'
__license__ = 'https://raw.github' \
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \
'-LICENSE.txt'
from sek.logger import SEKLogger
from msg_notifier import MSGNotifier
from msg_db_exporter import MSGDBExporter
import argparse
import time
COMMAND_LINE_ARGS = None
def processCommandLineArguments():
    """
    Parse the process command line and store the parsed options in the
    module-level COMMAND_LINE_ARGS variable.
    """
    global COMMAND_LINE_ARGS

    argParser = argparse.ArgumentParser(description = '')
    argParser.add_argument('--dbname', help = 'Database file to be uploaded.')
    argParser.add_argument('--fullpath',
                           help = 'Full path to database file to be uploaded.')
    argParser.add_argument('--testing', action = 'store_true', default = False)

    COMMAND_LINE_ARGS = argParser.parse_args()
if __name__ == '__main__':
    # Export the configured list of databases to cloud storage, report
    # timing, and POST the resulting downloadable file list.
    logger = SEKLogger(__name__, 'INFO')
    logger.log("Exporting DBs to cloud.")
    processCommandLineArguments()
    exporter = MSGDBExporter()
    notifier = MSGNotifier()  # NOTE(review): unused here; kept for its side effects, if any.
    exporter.logger.shouldRecord = True
    startTime = time.time()
    dbs = exporter.configer.configOptionValue('Export', 'dbs_to_export').split(
        ',')
    fileIDs = exporter.exportDBs(databases = dbs, toCloud = True,
                                 testing = COMMAND_LINE_ARGS.testing,
                                 deleteOutdated = True)
    wallTime = time.time() - startTime
    wallTimeMin = int(wallTime / 60.0)
    # Seconds remaining after whole minutes are removed.
    wallTimeSec = (wallTime - wallTimeMin * 60.0)
    # One file ID per successfully exported DB.
    if len(fileIDs) == len(dbs):
        exporter.logger.log('No errors occurred during export.', 'info')
    else:
        exporter.logger.log('ERRORS occurred during export.', 'warning')
    exporter.logger.log('Free space remaining: %d' % exporter.freeSpace(),
                        'info')
    # BUG FIX: the minutes were previously subtracted a second time inside the
    # format call, which produced negative second counts.
    exporter.logger.log(
        'Wall time: {:d} min {:.2f} s.'.format(wallTimeMin, wallTimeSec),
        'info')

    # Send the available file list by POST.
    exporter.sendDownloadableFiles()

    # Testing recording log output.
    # NOTE(review): file is opened in binary mode but ``recording`` is
    # presumably a str (Python 2 era code) -- confirm before porting to py3.
    myPath = '{}/{}'.format(exporter.exportTempWorkPath, 'export-report.txt')
    fp = open(myPath, 'wb')
    fp.write(exporter.logger.recording)
    fp.close()
| Hawaii-Smart-Energy-Project/Maui-Smart-Grid | src/automated-scripts/exportDBsToCloud.py | Python | bsd-3-clause | 2,686 |
# Copyright 2016 Ananya Mishra (am747@cornell.edu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import flask_login
# Need to expose these downstream
# pylint: disable=unused-import
from flask_login import (current_user,
logout_user,
login_required,
login_user)
# pylint: enable=unused-import
from flask import url_for, redirect, request
from flask_oauthlib.client import OAuth
from airflow import models, configuration, settings
from airflow.configuration import AirflowConfigException
_log = logging.getLogger(__name__)
def get_config_param(param):
    """Return the value of *param* from the ``[google]`` config section as a str."""
    return str(configuration.get('google', param))
class GoogleUser(models.User):
    """Flask-Login user adapter wrapping an airflow ``models.User``."""

    def __init__(self, user):
        self.user = user

    def is_active(self):
        '''Required by flask_login; every Google user is considered active'''
        return True

    def is_authenticated(self):
        '''Required by flask_login; existence of this object implies auth'''
        return True

    def is_anonymous(self):
        '''Required by flask_login; authenticated users are never anonymous'''
        return False

    def get_id(self):
        '''Returns the current user id as required by flask_login'''
        return self.user.get_id()

    def data_profiling(self):
        '''Provides access to data profiling tools'''
        return True

    def is_superuser(self):
        '''Access all the things'''
        return True
class AuthenticationError(Exception):
    """Raised when the Google OAuth flow fails or returns an invalid response."""
    pass
class GoogleAuthBackend(object):
    """Airflow auth backend that signs users in through Google OAuth2."""

    def __init__(self):
        # self.google_host = get_config_param('host')
        self.login_manager = flask_login.LoginManager()
        self.login_manager.login_view = 'airflow.login'
        self.flask_app = None
        self.google_oauth = None
        self.api_rev = None

    def init_app(self, flask_app):
        """Register the login manager, the OAuth client and the callback route."""
        self.flask_app = flask_app

        self.login_manager.init_app(self.flask_app)

        self.google_oauth = OAuth(self.flask_app).remote_app(
            'google',
            consumer_key=get_config_param('client_id'),
            consumer_secret=get_config_param('client_secret'),
            request_token_params={'scope': '''https://www.googleapis.com/auth/userinfo.profile
                https://www.googleapis.com/auth/userinfo.email'''},
            base_url='https://www.google.com/accounts/',
            request_token_url=None,
            access_token_method='POST',
            access_token_url='https://accounts.google.com/o/oauth2/token',
            authorize_url='https://accounts.google.com/o/oauth2/auth')

        self.login_manager.user_loader(self.load_user)

        self.flask_app.add_url_rule(get_config_param('oauth_callback_route'),
                                    'google_oauth_callback',
                                    self.oauth_callback)

    def login(self, request):
        """Redirect the browser to Google's OAuth consent screen."""
        _log.debug('Redirecting user to Google login')
        return self.google_oauth.authorize(callback=url_for(
            'google_oauth_callback',
            _external=True,
            next=request.args.get('next') or request.referrer or None))

    def get_google_user_profile_info(self, google_token):
        """Return (name, email) for the token owner; raise AuthenticationError on failure."""
        resp = self.google_oauth.get('https://www.googleapis.com/oauth2/v1/userinfo',
                                     token=(google_token, ''))
        if not resp or resp.status != 200:
            raise AuthenticationError(
                'Failed to fetch user profile, status ({0})'.format(
                    resp.status if resp else 'None'))

        return resp.data['name'], resp.data['email']

    def domain_check(self, email):
        """Return True iff *email* belongs to the configured Google Apps domain."""
        domain = email.split('@')[1]
        if domain == get_config_param('domain'):
            return True
        return False

    def load_user(self, userid):
        """flask_login user loader: map a stored id back to a GoogleUser (or None)."""
        if not userid or userid == 'None':
            return None

        session = settings.Session()
        user = session.query(models.User).filter(
            models.User.id == int(userid)).first()
        session.expunge_all()
        session.commit()
        session.close()
        return GoogleUser(user)

    def oauth_callback(self):
        """Handle Google's redirect: validate token/domain, upsert user, log in."""
        _log.debug('Google OAuth callback called')

        next_url = request.args.get('next') or url_for('admin.index')

        resp = self.google_oauth.authorized_response()

        try:
            if resp is None:
                raise AuthenticationError(
                    'Null response from Google, denying access.'
                )

            google_token = resp['access_token']

            username, email = self.get_google_user_profile_info(google_token)
            # Only members of the configured domain may log in.
            if not self.domain_check(email):
                return redirect(url_for('airflow.noaccess'))

        except AuthenticationError:
            _log.exception('')
            return redirect(url_for('airflow.noaccess'))

        session = settings.Session()

        user = session.query(models.User).filter(
            models.User.username == username).first()

        # First login: create the user record on the fly.
        if not user:
            user = models.User(
                username=username,
                email=email,
                is_superuser=False)

        session.merge(user)
        session.commit()
        login_user(GoogleUser(user))
        session.commit()
        session.close()
        return redirect(next_url)
# Module-level singleton expected by airflow's auth backend API.
login_manager = GoogleAuthBackend()


def login(self, request):
    # Module-level entry point airflow calls; the unused ``self`` mirrors the
    # signature of the auth-backend interface.
    return login_manager.login(request)
| juvoinc/airflow | airflow/contrib/auth/backends/google_auth.py | Python | apache-2.0 | 5,866 |
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute for Medical Research, Heidelberg, Germany
# Authors: Sven Dorkenwald, Philipp Schubert, Jörgen Kornfeld
import sys
import cPickle as pkl
from syconnfs.representations import super_segmentation_helper as ssh
# Paths are supplied by the QSUB dispatcher: a pickle file holding the
# argument payload and a destination file for the pickled result.
path_storage_file = sys.argv[1]
path_out_file = sys.argv[2]

with open(path_storage_file) as f:
    args = []
    while True:
        try:
            args.append(pkl.load(f))
        except:
            # EOFError ends the load loop; NOTE(review): the bare except also
            # hides genuine unpickling errors (Python 2 era code).
            break

out = ssh.aggregate_segmentation_object_mappings_thread(args)

with open(path_out_file, "wb") as f:
    pkl.dump(out, f)
| StructuralNeurobiologyLab/SyConnFS | syconnfs/qsub_scripts/QSUB_aggregate_segmentation_object_mappings.py | Python | gpl-2.0 | 670 |
import mcpi.minecraft as minecraft
# Connect to the running Minecraft server on the default address/port.
mc = minecraft.Minecraft.create()
# Make the world editable (turn off read-only mode).
mc.setting("world_immutable", False)
| mohsraspi/mhscs14 | liam/settings.py | Python | gpl-2.0 | 107 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | GoogleCloudPlatform/tensorflow-without-a-phd | tensorflow-rnn-tutorial/old-school-tensorflow/tutorial/__init__.py | Python | apache-2.0 | 573 |
#!/usr/bin/env python
# use python3
import numpy as np
import time
#from dpLoadh5 import dpLoadh5
from dpFRAG import dpFRAG
# Driver script: builds a FRAG (fragment region adjacency graph) for one
# labeled chunk, exports its adjacency matrix and (optionally) boundary
# voxels, and dumps the supervoxels to a raw file.
# labeled chunks
#chunk_range_beg = 17,19,2, 17,23,1, 22,23,1, 22,18,1, 22,23,2, 19,22,2
chunk = [17,19,2]
size = [128,128,128]
offset = [0,0,0]
has_ECS = True
#username = 'watkinspv'
username = 'patilra'
# Input supervoxel labels (hdf5)
labelfile = '/Data/' + username + '/full_datasets/neon_sixfold/mbfergus32/huge_supervoxels.h5'
label_subgroups = ['with_background','0.99999000']
# Input probability data (hdf5)
probfile = '/Data/' + username + '/full_datasets/neon_sixfold/mbfergus32/huge_probs.h5'
# Input segmented labels (hdf5)
gtfile = '/Data/datasets/labels/gt/M0007_33_labels_briggmankl_watkinspv_39x35x7chunks_Forder.h5'
gt_dataset = 'labels'
# Input raw EM data
rawfile = '/Data/datasets/raw/M0007_33_39x35x7chunks_Forder.h5'
raw_dataset = 'data_mag1'
# Output agglomerated labels
outfile = '/Data/' + username + '/tmp_agglo_out.h5'
# Input probability augmented data
probaugfile = ''
#probaugfile = '/Data/' + username + '/full_datasets/neon_sixfold/mbfergus32/huge_probs.h5'
# Input raw EM augmented data
rawaugfile = ''
#rawaugfile = '/Data/datasets/raw/M0007_33_39x35x7chunks_Forder_aug.h5'
# output raw supervoxels (with empty labels removed)
rawout = '/home/' + username + ('/Downloads/svox_%dx%dx%d.raw' % tuple(size))
feature_set = 'minimal'
progressBar = True
verbose = True
# use getFeatures=False to only get the RAG (without boundary voxels or features)
getFeatures = False
# instantiate frag and load data
frag = dpFRAG.makeBothFRAG(labelfile, chunk, size, offset,
    [probfile, probaugfile], [rawfile, rawaugfile],
    raw_dataset, gtfile, outfile, label_subgroups, ['training','thr'],
    progressBar=progressBar, feature_set=feature_set, has_ECS=has_ECS,
    verbose=verbose)
# hack to save raveled indices of overlap in context of whole volume (including boundary)
# boundary size is saved in frag.eperim
frag.ovlp_attrs += ['ovlp_cur_dilate']
# create graph
frag.createFRAG(features=getFeatures)
# just to use same name for RAG networkx object as was in driver-cpu.py (from gala example.py)
g_train = frag.FRAG
# save adjacency matrix
print('Exporting adjacency matrix'); t=time.time()
import networkx as nx
am=nx.to_numpy_matrix(g_train)
#np.savetxt("tmp-adjacency_matrix-cpu.txt",am, fmt="%d", delimiter='')
print(am.dtype, am.shape)
# round-trip through a raw file as a sanity check of dtype/shape
fn = 'tmp-adjacency-matrix-cpu-%dx%d-%s.raw' % (am.shape[0], am.shape[1], str(am.dtype))
am.tofile(fn)
am2 = np.fromfile(fn, dtype=np.float64).reshape(am.shape)
print(am2.dtype, am2.shape)
print('\tdone in %.4f s' % (time.time() - t))
# dump supervoxels
frag.supervoxels_noperim.transpose((2,1,0)).tofile(rawout)
if getFeatures:
    print('Outputting boundary voxels'); t=time.time()
    fout = open("tmp-boundary_pixel_indices-cpu.txt","w")
    edges = g_train.edges()
    edges.sort()
    for edge in edges:
        fout.write("(%d, %d): "%(edge[0],edge[1]))
        #for b in g_train[edge[0]][edge[1]]['boundary']:
        #    fout.write("%d "%b)
        boundary_subs = np.transpose(np.nonzero(g_train[edge[0]][edge[1]]['ovlp_attrs']['ovlp_cur_dilate']))
        start_sub = np.array([x.start for x in g_train[edge[0]][edge[1]]['ovlp_attrs']['aobnd']])
        #global_subs_padded = boundary_subs + start_sub
        #global_inds = np.ravel_multi_index(global_subs_padded.T.reshape(3,-1), frag.supervoxels.shape)
        #for b in global_inds:
        #    fout.write("%d "%b)
        # shift from padded-volume coordinates back to unpadded coordinates
        global_subs_unpadded = boundary_subs + start_sub - frag.eperim
        for b in range(global_subs_unpadded.shape[0]):
            fout.write("(%d,%d,%d) " % tuple(global_subs_unpadded[b,:].tolist()))
        fout.write("\n")
    fout.close()
    print('\tdone in %.4f s' % (time.time() - t))
| elhuhdron/emdrp | emdrp/emdrp/scripts/driver-cpu-FRAG.py | Python | mit | 3,923 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
def file_content(filename):
    '''Return the content of ``filename`` decoded as UTF-8.

    An explicit encoding is used so README/CHANGELOG files containing
    non-ASCII characters load correctly regardless of the build host locale
    (this setup.py targets Python >= 3.7, where ``encoding=`` is available).
    '''
    with open(filename, encoding='utf-8') as ifile:
        return ifile.read()
def pip(filename):
    """Return the content of a pip requirements file under ``requirements/``."""
    return file_content(os.path.join('requirements', filename))
# Concatenate README and changelog for the PyPI long description
# (trailing '' yields a final newline).
long_description = '\n'.join((
    file_content('README.md'),
    file_content('CHANGELOG.md'),
    ''
))

install_requires = pip('install.pip')

setup(
    name='udata',
    version=__import__('udata').__version__,
    description=__import__('udata').__description__,
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/opendatateam/udata',
    author='Opendata Team',
    author_email='opendatateam@data.gouv.fr',
    packages=find_packages(),
    include_package_data=True,
    python_requires='>=3.7',
    install_requires=install_requires,
    # Plugin hooks: other packages (and udata itself) register extensions
    # through these entry-point groups.
    entry_points={
        'console_scripts': [
            'udata = udata.commands:cli',
        ],
        'udata.harvesters': [
            'dcat = udata.harvest.backends.dcat:DcatBackend',
        ],
        'udata.avatars': [
            'internal = udata.features.identicon.backends:internal',
            'adorable = udata.features.identicon.backends:adorable',
            'robohash = udata.features.identicon.backends:robohash',
        ],
        'pytest11': [
            'udata = udata.tests.plugin',
        ],
    },
    license='GNU AGPLv3+',
    keywords='udata opendata portal data',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python',
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: System :: Software Distribution',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
        ('License :: OSI Approved :: GNU Affero General Public License v3'
         ' or later (AGPLv3+)'),
    ],
)
| opendatateam/udata | setup.py | Python | agpl-3.0 | 2,170 |
#!/usr/bin/python
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
    """Return a caption for *image*: "<basename> (<FORMAT> WxH MODE)".

    The format name is omitted when ``image.format`` is unset (e.g. images
    created in memory rather than loaded from a file).
    """
    title = os.path.splitext(os.path.split(filepath)[1])[0]
    # Renamed from ``format`` to avoid shadowing the builtin.
    fmt = " (%dx%d "
    if image.format:
        fmt = " (" + image.format + " %dx%d "
    return title + fmt % image.size + image.mode + ")"
# No files given: print usage and exit.
if len(sys.argv) == 1:
    print("PIL Print 0.2a1/96-10-04 -- print image files")
    print("Usage: pilprint files...")
    print("Options:")
    print(" -c colour printer (default is monochrome)")
    print(" -p print via lpr (default is stdout)")
    print(" -P <printer> same as -p but use given printer")
    sys.exit(1)

try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
    print(v)
    sys.exit(1)

printer = None       # print to stdout
monochrome = 1       # reduce file size for most common case

for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print(Image.ID)
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printer = "lpr"
    elif o == "-P":
        # printer channel
        printer = "lpr -P%s" % a

# Render each image as a one-page PostScript document.
for filepath in argv:
    try:
        im = Image.open(filepath)
        title = description(filepath, im)
        if monochrome and im.mode not in ["1", "L"]:
            im.draft("L", im.size)
            im = im.convert("L")
        if printer:
            fp = os.popen(printer, "w")
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
    except:
        # NOTE(review): bare except swallows every failure for this file
        # (including KeyboardInterrupt) and moves on to the next one.
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| paran0ids0ul/infernal-twin | build/pillow/build/scripts-2.7/pilprint.py | Python | gpl-3.0 | 2,393 |
"""
raven.contrib.django.urls
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
    from django.conf.urls import patterns, url
except ImportError:
    # for Django versions less than 1.4
    from django.conf.urls.defaults import patterns, url # NOQA

# Sentry-compatible endpoints: the store API (optionally project-scoped)
# and the legacy /report/ endpoint, both handled by the same view.
urlpatterns = patterns('',
    url(r'^api/(?:(?P<project_id>[\w_-]+)/)?store/$', 'raven.contrib.django.views.report', name='raven-report'),
    url(r'^report/', 'raven.contrib.django.views.report'),
)
| collective/mr.poe | raven/contrib/django/urls.py | Python | bsd-3-clause | 560 |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# NOTE: this file is a cookiecutter template; the {% raw %}{{...}}/{%...%}{% endraw %}-style
# tags are rendered when a project is generated, not at Django runtime.
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')

# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025

{%if cookiecutter.use_mailhog == "n" -%}
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
{%- endif %}

# CACHING
# ------------------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

{% if cookiecutter.use_celery == "y" %}
########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY
{% endif %}

# Your local stuff: Below this line define 3rd party library settings
| crdoconnor/cookiecutter-django | {{cookiecutter.repo_name}}/config/settings/local.py | Python | bsd-3-clause | 2,171 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesClearTest(test.TestCase):

  # Verifies behavior of tf.Session.reset().
  # TODO(b/34465411): Starting multiple servers with different configurations
  # in the same test is flaky. Move this test case back into
  # "server_lib_test.py" when this is no longer the case.
  @test_util.run_deprecated_v1
  def testSameVariablesClear(self):
    """Session.reset() drops server state; reconnecting sees uninitialized vars."""
    server = server_lib.Server.create_local_server()

    # Creates a graph with 2 variables.
    v0 = variables.Variable([[2, 1]], name="v0")
    v1 = variables.Variable([[1], [2]], name="v1")
    v2 = math_ops.matmul(v0, v1)

    # Verifies that both sessions connecting to the same target return
    # the same results.
    sess_1 = session.Session(server.target)
    sess_2 = session.Session(server.target)
    sess_1.run(variables.global_variables_initializer())
    self.assertAllEqual([[4]], sess_1.run(v2))
    self.assertAllEqual([[4]], sess_2.run(v2))

    # Resets target. sessions abort. Use sess_2 to verify.
    session.Session.reset(server.target)
    with self.assertRaises(errors_impl.AbortedError):
      self.assertAllEqual([[4]], sess_2.run(v2))

    # Connects to the same target. Device memory for the variables would have
    # been released, so they will be uninitialized.
    sess_2 = session.Session(server.target)
    with self.assertRaises(errors_impl.FailedPreconditionError):
      sess_2.run(v2)
    # Reinitializes the variables.
    sess_2.run(variables.global_variables_initializer())
    self.assertAllEqual([[4]], sess_2.run(v2))
    sess_2.close()
sess_2.close()
if __name__ == "__main__":
test.main()
| adit-chandra/tensorflow | tensorflow/python/training/server_lib_same_variables_clear_test.py | Python | apache-2.0 | 2,744 |
#!/usr/bin/env python
#
# Copyright 2008,2009,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# Pubsub keys used to publish/subscribe flowgraph state.
DESC_KEY = 'desc'
SAMP_RATE_KEY = 'samp_rate'
LINK_RATE_KEY = 'link_rate'
GAIN_KEY = 'gain'
TX_FREQ_KEY = 'tx_freq'
DSP_FREQ_KEY = 'dsp_freq'
RF_FREQ_KEY = 'rf_freq'
AMPLITUDE_KEY = 'amplitude'
AMPL_RANGE_KEY = 'ampl_range'
WAVEFORM_FREQ_KEY = 'waveform_freq'
WAVEFORM_OFFSET_KEY = 'waveform_offset'
WAVEFORM2_FREQ_KEY = 'waveform2_freq'
FREQ_RANGE_KEY = 'freq_range'
GAIN_RANGE_KEY = 'gain_range'
TYPE_KEY = 'type'

# Store ``val`` under ``key`` in the pubsub-like mapping ``ps``.
def setter(ps, key, val): ps[key] = val
from gnuradio import gr, gru, uhd, eng_notation
from gnuradio import analog
from gnuradio import blocks
from gnuradio.gr.pubsub import pubsub
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import math
# Shorthand for engineering-notation number formatting.
n2s = eng_notation.num_to_str

# Maps waveform type (gnuradio enum or custom string) to a display name.
waveforms = { analog.GR_SIN_WAVE : "Complex Sinusoid",
              analog.GR_CONST_WAVE : "Constant",
              analog.GR_GAUSSIAN : "Gaussian Noise",
              analog.GR_UNIFORM : "Uniform Noise",
              "2tone" : "Two Tone",
              "sweep" : "Sweep" }
#
# GUI-unaware GNU Radio flowgraph. This may be used either with command
# line applications or GUI applications.
#
class top_block(gr.top_block, pubsub):
    def __init__(self, options, args):
        """Build the signal-generator flowgraph from parsed command-line options."""
        gr.top_block.__init__(self)
        pubsub.__init__(self)
        self._verbose = options.verbose
        #initialize values from options
        self._setup_usrpx(options)
        self[SAMP_RATE_KEY] = options.samp_rate
        self[TX_FREQ_KEY] = options.tx_freq
        self[AMPLITUDE_KEY] = options.amplitude
        self[WAVEFORM_FREQ_KEY] = options.waveform_freq
        self[WAVEFORM_OFFSET_KEY] = options.offset
        self[WAVEFORM2_FREQ_KEY] = options.waveform2_freq
        self[DSP_FREQ_KEY] = 0
        self[RF_FREQ_KEY] = 0
        #subscribe set methods
        self.subscribe(SAMP_RATE_KEY, self.set_samp_rate)
        self.subscribe(GAIN_KEY, self.set_gain)
        self.subscribe(TX_FREQ_KEY, self.set_freq)
        # NOTE(review): only _set_tx_amplitude is visible here; confirm
        # set_amplitude is defined/aliased elsewhere in this class.
        self.subscribe(AMPLITUDE_KEY, self.set_amplitude)
        self.subscribe(WAVEFORM_FREQ_KEY, self.set_waveform_freq)
        self.subscribe(WAVEFORM2_FREQ_KEY, self.set_waveform2_freq)
        self.subscribe(TYPE_KEY, self.set_waveform)
        #force update on pubsub keys
        for key in (SAMP_RATE_KEY, GAIN_KEY, TX_FREQ_KEY,
                    AMPLITUDE_KEY, WAVEFORM_FREQ_KEY,
                    WAVEFORM_OFFSET_KEY, WAVEFORM2_FREQ_KEY):
            self[key] = self[key]
        self[TYPE_KEY] = options.type #set type last
    def _setup_usrpx(self, options):
        """Create and configure the UHD sink (rate, subdev, gain, antenna) and
        publish its description/ranges on the pubsub bus."""
        self._u = uhd.usrp_sink(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
        self._u.set_samp_rate(options.samp_rate)
        # Set the subdevice spec
        if(options.spec):
            self._u.set_subdev_spec(options.spec, 0)
        # Set the gain on the usrp from options
        if(options.gain):
            self._u.set_gain(options.gain)
        # Set the antenna
        if(options.antenna):
            self._u.set_antenna(options.antenna, 0)
        # Setup USRP Configuration value
        try:
            usrp_info = self._u.get_usrp_info()
            mboard_id = usrp_info["mboard_id"]
            mboard_serial = usrp_info["mboard_serial"]
            if mboard_serial == "":
                mboard_serial = "no serial"
            dboard_subdev_name = usrp_info["tx_subdev_name"]
            dboard_serial = usrp_info["tx_serial"]
            if dboard_serial == "":
                dboard_serial = "no serial"
            subdev = self._u.get_subdev_spec()
            antenna = self._u.get_antenna()
            desc_key_str = "Motherboard: %s [%s]\n" % (mboard_id, mboard_serial)
            # B2xx boards have no separate daughterboard serial.
            if "B200" in mboard_id or "B210" in mboard_id:
                desc_key_str += "Daughterboard: %s\n" % dboard_subdev_name
            else:
                desc_key_str += "Daughterboard: %s [%s]\n" % (dboard_subdev_name, dboard_serial)
            desc_key_str += "Subdev: %s\n" % subdev
            desc_key_str += "Antenna: %s" % antenna
        except:
            # presumably hit on UHD versions without get_usrp_info() -- verify
            desc_key_str = "USRP configuration output not implemented in this version"
        self.publish(DESC_KEY, lambda: desc_key_str)
        self.publish(FREQ_RANGE_KEY, self._u.get_freq_range)
        self.publish(GAIN_RANGE_KEY, self._u.get_gain_range)
        self.publish(GAIN_KEY, self._u.get_gain)
        print "UHD Signal Generator"
        print "Version: %s" % uhd.get_version_string()
        print "\nUsing USRP configuration:"
        print desc_key_str + "\n"
        # Direct asynchronous notifications to callback function
        if options.show_async_msg:
            self.async_msgq = gr.msg_queue(0)
            self.async_src = uhd.amsg_source("", self.async_msgq)
            self.async_rcv = gru.msgq_runner(self.async_msgq, self.async_callback)
    def async_callback(self, msg):
        """Print channel/time/event code for asynchronous USRP messages."""
        md = self.async_src.msg_to_async_metadata_t(msg)
        print "Channel: %i Time: %f Event: %i" % (md.channel, md.time_spec.get_real_secs(), md.event_code)
    def _set_tx_amplitude(self, ampl):
        """
        Sets the transmit amplitude sent to the USRP

        Args:
            ampl: the amplitude or None for automatic
        """
        ampl_range = self[AMPL_RANGE_KEY]
        if ampl is None:
            # automatic: 15% of the way into the supported amplitude range
            ampl = (ampl_range[1] - ampl_range[0])*0.15 + ampl_range[0]
        # clamp into the supported range before publishing
        self[AMPLITUDE_KEY] = max(ampl_range[0], min(ampl, ampl_range[1]))
    def set_samp_rate(self, sr):
        """Apply the sample rate to the sink and re-tune the active source(s)."""
        self._u.set_samp_rate(sr)
        # read back the rate the hardware actually coerced to
        sr = self._u.get_samp_rate()
        if self[TYPE_KEY] in (analog.GR_SIN_WAVE, analog.GR_CONST_WAVE):
            self._src.set_sampling_freq(self[SAMP_RATE_KEY])
        elif self[TYPE_KEY] == "2tone":
            self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
            self._src2.set_sampling_freq(self[SAMP_RATE_KEY])
        elif self[TYPE_KEY] == "sweep":
            self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
            self._src2.set_sampling_freq(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
        else:
            return True # Waveform not yet set
        if self._verbose:
            print "Set sample rate to:", sr
        return True
    def set_gain(self, gain):
        """Set the TX gain in dB; None selects the midpoint of the range.

        When gain is None the computed midpoint is re-applied through the
        GAIN_KEY pubsub so subscribers see the resolved value.
        """
        if gain is None:
            g = self[GAIN_RANGE_KEY]
            gain = float(g.start()+g.stop())/2
            if self._verbose:
                print "Using auto-calculated mid-point TX gain"
            self[GAIN_KEY] = gain
            return
        self._u.set_gain(gain)
        if self._verbose:
            print "Set TX gain to:", gain
def set_freq(self, target_freq):
if target_freq is None:
f = self[FREQ_RANGE_KEY]
target_freq = float(f.start()+f.stop())/2.0
if self._verbose:
print "Using auto-calculated mid-point frequency"
self[TX_FREQ_KEY] = target_freq
return
tr = self._u.set_center_freq(target_freq)
fs = "%sHz" % (n2s(target_freq),)
if tr is not None:
self._freq = target_freq
self[DSP_FREQ_KEY] = tr.actual_dsp_freq
self[RF_FREQ_KEY] = tr.actual_rf_freq
if self._verbose:
print "Set center frequency to", self._u.get_center_freq()
print "Tx RF frequency: %sHz" % (n2s(tr.actual_rf_freq),)
print "Tx DSP frequency: %sHz" % (n2s(tr.actual_dsp_freq),)
elif self._verbose:
print "Failed to set freq."
return tr
    def set_waveform_freq(self, freq):
        """Set the primary waveform frequency for the current waveform type."""
        if self[TYPE_KEY] == analog.GR_SIN_WAVE:
            self._src.set_frequency(freq)
        elif self[TYPE_KEY] == "2tone":
            self._src1.set_frequency(freq)
        elif self[TYPE_KEY] == 'sweep':
            #there is no set sensitivity, redo fg
            # Re-assigning TYPE_KEY to itself re-triggers set_waveform(),
            # rebuilding the flowgraph with the new sweep width.
            self[TYPE_KEY] = self[TYPE_KEY]
        return True
    def set_waveform2_freq(self, freq):
        """Set the secondary frequency: tone 2 for "2tone", sweep rate for
        "sweep". None mirrors the negated primary frequency via pubsub."""
        if freq is None:
            self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
            return
        if self[TYPE_KEY] == "2tone":
            self._src2.set_frequency(freq)
        elif self[TYPE_KEY] == "sweep":
            self._src1.set_frequency(freq)
        return True
    def set_waveform(self, type):
        """Rebuild the source chain of the flowgraph for a waveform type.

        type: analog.GR_SIN_WAVE / GR_CONST_WAVE / GR_GAUSSIAN /
        GR_UNIFORM, or the strings "2tone" / "sweep". Raises RuntimeError
        for anything else. (The parameter name shadows the builtin `type`;
        kept as-is for keyword-argument compatibility.)
        """
        self.lock()
        self.disconnect_all()
        if type == analog.GR_SIN_WAVE or type == analog.GR_CONST_WAVE:
            self._src = analog.sig_source_c(self[SAMP_RATE_KEY],       # Sample rate
                                            type,                      # Waveform type
                                            self[WAVEFORM_FREQ_KEY],   # Waveform frequency
                                            self[AMPLITUDE_KEY],       # Waveform amplitude
                                            self[WAVEFORM_OFFSET_KEY]) # Waveform offset
        elif type == analog.GR_GAUSSIAN or type == analog.GR_UNIFORM:
            self._src = analog.noise_source_c(type, self[AMPLITUDE_KEY])
        elif type == "2tone":
            # Two equal-amplitude tones summed together (IMD testing).
            self._src1 = analog.sig_source_c(self[SAMP_RATE_KEY],
                                             analog.GR_SIN_WAVE,
                                             self[WAVEFORM_FREQ_KEY],
                                             self[AMPLITUDE_KEY]/2.0,
                                             0)
            if(self[WAVEFORM2_FREQ_KEY] is None):
                self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
            self._src2 = analog.sig_source_c(self[SAMP_RATE_KEY],
                                             analog.GR_SIN_WAVE,
                                             self[WAVEFORM2_FREQ_KEY],
                                             self[AMPLITUDE_KEY]/2.0,
                                             0)
            self._src = blocks.add_cc()
            self.connect(self._src1,(self._src,0))
            self.connect(self._src2,(self._src,1))
        elif type == "sweep":
            # rf freq is center frequency
            # waveform_freq is total swept width
            # waveform2_freq is sweep rate
            # will sweep from (rf_freq-waveform_freq/2) to (rf_freq+waveform_freq/2)
            if self[WAVEFORM2_FREQ_KEY] is None:
                self[WAVEFORM2_FREQ_KEY] = 0.1
            # Triangle wave drives an FM modulator to generate the sweep.
            self._src1 = analog.sig_source_f(self[SAMP_RATE_KEY],
                                             analog.GR_TRI_WAVE,
                                             self[WAVEFORM2_FREQ_KEY],
                                             1.0,
                                             -0.5)
            self._src2 = analog.frequency_modulator_fc(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
            self._src = blocks.multiply_const_cc(self[AMPLITUDE_KEY])
            self.connect(self._src1,self._src2,self._src)
        else:
            raise RuntimeError("Unknown waveform type")
        self.connect(self._src, self._u)
        self.unlock()
        if self._verbose:
            print "Set baseband modulation to:", waveforms[type]
            if type == analog.GR_SIN_WAVE:
                print "Modulation frequency: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
                print "Initial phase:", self[WAVEFORM_OFFSET_KEY]
            elif type == "2tone":
                print "Tone 1: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
                print "Tone 2: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
            elif type == "sweep":
                print "Sweeping across %sHz to %sHz" % (n2s(-self[WAVEFORM_FREQ_KEY]/2.0),n2s(self[WAVEFORM_FREQ_KEY]/2.0))
                print "Sweep rate: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
            print "TX amplitude:", self[AMPLITUDE_KEY]
    def set_amplitude(self, amplitude):
        """Set the baseband amplitude (0.0-1.0).

        Returns False when out of range; True otherwise (including when
        no waveform has been set up yet).
        """
        if amplitude < 0.0 or amplitude > 1.0:
            if self._verbose:
                print "Amplitude out of range:", amplitude
            return False
        if self[TYPE_KEY] in (analog.GR_SIN_WAVE, analog.GR_CONST_WAVE, analog.GR_GAUSSIAN, analog.GR_UNIFORM):
            self._src.set_amplitude(amplitude)
        elif self[TYPE_KEY] == "2tone":
            # Each tone carries half the total amplitude.
            self._src1.set_amplitude(amplitude/2.0)
            self._src2.set_amplitude(amplitude/2.0)
        elif self[TYPE_KEY] == "sweep":
            self._src.set_k(amplitude)
        else:
            return True # Waveform not yet set
        if self._verbose:
            print "Set amplitude to:", amplitude
        return True
def get_options():
    """Build the optparse parser for the signal generator and parse argv.

    The mutually-exclusive waveform flags (--sine, --const, --gaussian,
    --uniform, --2tone, --sweep) all store into options.type; --sine is
    the default. Returns the (options, args) tuple from parse_args().
    """
    usage="%prog: [options]"
    parser = OptionParser(option_class=eng_option, usage=usage)
    parser.add_option("-a", "--args", type="string", default="",
                      help="UHD device address args , [default=%default]")
    parser.add_option("", "--spec", type="string", default=None,
                      help="Subdevice of UHD device where appropriate")
    parser.add_option("-A", "--antenna", type="string", default=None,
                      help="select Rx Antenna where appropriate")
    parser.add_option("-s", "--samp-rate", type="eng_float", default=1e6,
                      help="set sample rate (bandwidth) [default=%default]")
    parser.add_option("-g", "--gain", type="eng_float", default=None,
                      help="set gain in dB (default is midpoint)")
    parser.add_option("-f", "--tx-freq", type="eng_float", default=None,
                      help="Set carrier frequency to FREQ [default=mid-point]",
                      metavar="FREQ")
    parser.add_option("-x", "--waveform-freq", type="eng_float", default=0,
                      help="Set baseband waveform frequency to FREQ [default=%default]")
    parser.add_option("-y", "--waveform2-freq", type="eng_float", default=None,
                      help="Set 2nd waveform frequency to FREQ [default=%default]")
    parser.add_option("--sine", dest="type", action="store_const", const=analog.GR_SIN_WAVE,
                      help="Generate a carrier modulated by a complex sine wave",
                      default=analog.GR_SIN_WAVE)
    parser.add_option("--const", dest="type", action="store_const", const=analog.GR_CONST_WAVE,
                      help="Generate a constant carrier")
    parser.add_option("--offset", type="eng_float", default=0,
                      help="Set waveform phase offset to OFFSET [default=%default]")
    parser.add_option("--gaussian", dest="type", action="store_const", const=analog.GR_GAUSSIAN,
                      help="Generate Gaussian random output")
    parser.add_option("--uniform", dest="type", action="store_const", const=analog.GR_UNIFORM,
                      help="Generate Uniform random output")
    parser.add_option("--2tone", dest="type", action="store_const", const="2tone",
                      help="Generate Two Tone signal for IMD testing")
    parser.add_option("--sweep", dest="type", action="store_const", const="sweep",
                      help="Generate a swept sine wave")
    parser.add_option("", "--amplitude", type="eng_float", default=0.15,
                      help="Set output amplitude to AMPL (0.0-1.0) [default=%default]",
                      metavar="AMPL")
    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="Use verbose console output [default=%default]")
    parser.add_option("", "--show-async-msg", action="store_true", default=False,
                      help="Show asynchronous message notifications from UHD [default=%default]")
    (options, args) = parser.parse_args()
    return (options, args)
# If this script is executed, the following runs. If it is imported,
# the below does not run.
def test_main():
    """Entry point: enable realtime scheduling, build the top block from
    command-line options, then run until the user presses Enter."""
    if gr.enable_realtime_scheduling() != gr.RT_OK:
        print "Note: failed to enable realtime scheduling, continuing"
    # Grab command line options and create top block
    try:
        (options, args) = get_options()
        tb = top_block(options, args)
    except RuntimeError, e:
        print e
        sys.exit(1)
    tb.start()
    raw_input('Press Enter to quit: ')
    tb.stop()
    tb.wait()

# Make sure to create the top block (tb) within a function:
# That code in main will allow tb to go out of scope on return,
# which will call the destructor on usrp and stop transmit.
# Whats odd is that grc works fine with tb in the __main__,
# perhaps its because the try/except clauses around tb.
if __name__ == "__main__":
    test_main()
| alkyl1978/gnuradio | gr-uhd/apps/uhd_siggen_base.py | Python | gpl-3.0 | 17,148 |
# camera.py
# Source: https://github.com/DrGFreeman/rps-cv
#
# MIT License
#
# Copyright (c) 2017-2019 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module defines the Camera class, a wrapper for the Raspberry Pi camera
# based on the picamera library to be used with OpenCV computer vision library.
import time
import cv2
import numpy as np
#from picamera import PiCamera, PiCameraCircularIO
from rpscv.utils import Filter1D, Timer
class Camera():

    def __init__(self, size=10, frameRate=40, hflip=False, vflip=False):
        """A wrapper class for the Raspberry Pi camera using the picamera
        python library. The size parameter sets the camera resolution to
        size * (64, 48)."""
        # Imported locally so the module can be imported on machines
        # without the picamera package installed.
        from picamera import PiCamera, PiCameraCircularIO
        self.active = False
        # (fix) The original wrapped this validation in a try/except whose
        # clause was "except TypeError or ValueError:" -- which only names
        # TypeError -- and whose handler simply re-raised. Raising directly
        # is behaviorally identical and removes the misleading guard.
        if type(size) is not int:
            raise TypeError("Size must be an integer")
        elif 1 <= size and size <= 51:
            self.size = size
            self.hRes = size * 64
            self.vRes = size * 48
        else:
            raise ValueError("Size must be in range 1 to 51")
        self.picam = PiCamera()
        self.picam.resolution = (self.hRes, self.vRes)
        self.picam.framerate = frameRate
        self.picam.hflip = hflip
        self.picam.vflip = vflip
        # Give the sensor time to settle before recording starts.
        time.sleep(1)
        self.stream = PiCameraCircularIO(self.picam, seconds=1)
        self.frameRateTimer = Timer()
        self.frameRateFilter = Filter1D(maxSize=21)
        self.start()

    def close(self):
        """Stops the running thread and closes the PiCamera instance."""
        self.stop()
        self.picam.close()

    def doWhiteBalance(self, awbFilename='awb_gains.txt', mode='auto'):
        """A method that performs white balance calibration, sets the PiCamera
        awb_gains to fixed values and write these values in a file. For best
        results, put a white objet in the camera field of view (a sheet of paper
        ) during the calibration process."""
        ## Set AWB mode for calibration
        self.picam.awb_mode = mode
        print('Calibrating white balance gains...')
        time.sleep(1)
        ## Read AWB gains (averaged over nbReadings samples)
        gRed = 0
        gBlue = 0
        nbReadings = 100
        for i in range(nbReadings):
            gains = self.picam.awb_gains
            gRed += gains[0]
            gBlue += gains[1]
            time.sleep(.01)
        gains = gRed / nbReadings, gBlue / nbReadings
        ## Set AWB mode to off (manual)
        self.picam.awb_mode = 'off'
        ## Set AWB gains to remain constant
        self.picam.awb_gains = gains
        ## Write AWB gains to file
        gRed = float(gains[0])
        gBlue = float(gains[1])
        f = open(awbFilename, 'w')
        f.flush()
        f.write(str(gRed) + ', ' + str(gBlue))
        f.close()
        print('AWB gains set to:', gRed, gBlue)
        print('AWB gains written to ' + awbFilename)

    def addFrameRateText(self, img, pos=(0, 25), bgr=(0,255,0), samples=21):
        """Returns an image with the frame rate added as text on the image
        passed as argument. The framerate is calculated based on the time
        between calls to this method and averaged over a number of samples.
        img: image to which the framerate is to be added,
        bgr: tuple defining the blue, green and red values of the text color,
        samples: number of samples used for averaging.
        """
        # Calculate framerate and reset timer
        self.frameRateFilter.addDataPoint(1 / self.frameRateTimer.getElapsed())
        self.frameRateTimer.reset()
        # Get averaged framerate as a string
        frString = '{}fps'.format(str(int(round(self.frameRateFilter.getMean(),
                                                0))))
        # Add text to image
        cv2.putText(img, frString, pos, cv2.FONT_HERSHEY_DUPLEX, 1, bgr)

    def getOpenCVImage(self):
        """Grabs a frame from the camera and returns an OpenCV image array."""
        img = np.empty((self.vRes * self.hRes * 3), dtype=np.uint8)
        self.picam.capture(img, 'bgr', use_video_port=True)
        return img.reshape((self.vRes, self.hRes, 3))

    def readWhiteBalance(self, awbFilename='awb_gains.txt'):
        """Reads white balance gains from a file created using the
        .doWhiteBalance() method and fixes the PiCamera awb_gains parameter
        to these values."""
        ## Read AWB gains from file
        f = open(awbFilename, 'r')
        line = f.readline()
        f.close()
        gRed, gBlue = [float(g) for g in line.split(', ')]
        ## Set AWB mode to off (manual)
        self.picam.awb_mode = 'off'
        ## Set AWB gains to remain constant
        self.picam.awb_gains = gRed, gBlue
        print('AWB gains set to:', gRed, gBlue)

    def start(self):
        """Starts continuous recording of the camera into a PicameraCircularIO
        buffer."""
        if not self.active:
            self.active = True
            self.picam.start_recording(self.stream, format='h264',
                                       resize=(self.hRes, self.vRes))

    def startPreview(self):
        """Starts the preview of the PiCamera. Works only on the display
        connected directly on the Raspberry Pi."""
        self.picam.start_preview()

    def stop(self):
        """Stops the camera continuous recording and stops the preview if
        active."""
        self.active = False
        self.picam.stop_recording()
        self.stopPreview()

    def stopPreview(self):
        """Stops the PiCamera preview if active."""
        self.picam.stop_preview()
| DrGFreeman/rps-cv | rpscv/camera.py | Python | mit | 6,769 |
# -*- coding: utf-8 -*-
from imgix import UrlBuilder
DOMAIN = 'testing.imgix.net'  # imgix source domain used by every test below
TOKEN = 'MYT0KEN'  # signing token; unused in this module (kept for parity with sibling test files)
JPG_PATH = 'image.jpg'  # image path appended to DOMAIN in generated URLs
def test_readme_500_to_2000():
    """Default-tolerance srcset from w=500 to w=2000 (README example)."""
    builder = UrlBuilder(DOMAIN, include_library_param=False)
    widths = [500, 580, 673, 780, 905, 1050, 1218, 1413, 1639, 1901, 2000]
    expected = ",\n".join(
        "https://testing.imgix.net/image.jpg?w={0} {0}w".format(w)
        for w in widths)
    actual = builder.create_srcset(JPG_PATH, start=500, stop=2000)
    assert (expected == actual)
def test_readme_100_to_384_at_20():
    """srcset from w=100 to w=384 with a 20% width tolerance."""
    builder = UrlBuilder(DOMAIN, include_library_param=False)
    widths = [100, 140, 196, 274, 384]
    expected = ",\n".join(
        "https://testing.imgix.net/image.jpg?w={0} {0}w".format(w)
        for w in widths)
    actual = builder.create_srcset(JPG_PATH, start=100, stop=384, tol=0.20)
    assert (expected == actual)
def test_readme_custom_widths():
    """srcset built from an explicit, caller-supplied list of widths."""
    builder = UrlBuilder(DOMAIN, include_library_param=False)
    widths = [144, 240, 320, 446, 640]
    expected = ",\n".join(
        "https://testing.imgix.net/image.jpg?w={0} {0}w".format(w)
        for w in widths)
    actual = builder.create_srcset(JPG_PATH, widths=widths)
    assert (expected == actual)
def test_readme_variable_quality():
    """Fixed-width srcset: one entry per dpr with decreasing quality."""
    builder = UrlBuilder(DOMAIN, include_library_param=False)
    dpr_quality = [(1, 75), (2, 50), (3, 35), (4, 23), (5, 20)]
    expected = ",\n".join(
        "https://testing.imgix.net/image.jpg?dpr=%d&q=%d&w=100 %dx" % (dpr, q, dpr)
        for dpr, q in dpr_quality)
    actual = builder.create_srcset(
        JPG_PATH, params={"w": 100}, disable_variable_quality=False)
    assert (expected == actual)
| imgix/imgix-python | tests/test_readme.py | Python | bsd-2-clause | 2,675 |
"""Created on Sat Sep 10 2016 14:04.
@author: Nathan Budd
"""
import unittest
import numpy as np
import numpy.matlib as np
import matplotlib.pyplot as plt
from ..thrust_constant import ThrustConstant
from ..model_coe import ModelCOE
from ..gauss_lagrange_planetary_eqns import GaussLagrangePlanetaryEqns
from ..perturb_zero import PerturbZero
from ...mcpi.mcpi import MCPI
from ...mcpi.mcpi_approx import MCPIapprox
from ...orbital_mech.element_sets.orb_coe import OrbCOE
class TestThrustConstant(unittest.TestCase):
    """Test class for ThrustConstant.

    NOTE(review): the original docstring said "ModelCOE", but the class
    under test is ThrustConstant. Also note the module imports numpy as np
    and then rebinds np to numpy.matlib -- presumably intentional, but
    worth confirming (np.array here resolves through numpy.matlib).
    """

    def setUp(self):
        """Build a ThrustConstant with a unit +y thrust vector and the COE
        Gauss-Lagrange state-transition matrix (mu = 1)."""
        mu = 1.
        self.vector = np.array([[0., 1., 0.]]).T
        stm = GaussLagrangePlanetaryEqns(mu).coe
        self.thrust = ThrustConstant(self.vector, stm)

    def test_instantiation(self):
        """Constructor returns a ThrustConstant instance."""
        self.assertIsInstance(self.thrust, ThrustConstant)

    def test_getattr(self):
        """The stored thrust vector keeps its (3, 1) column shape."""
        self.assertEqual(self.thrust.vector.shape, (3, 1))

    def test_setattr(self):
        """The vector attribute can be reassigned to a new array."""
        self.thrust.vector = np.array([[1.]])
        self.assertEqual(self.thrust.vector.shape, (1, 1))

    def test_dynamics(self):
        """Calling the thrust model on a (3, 6) state batch yields a (3, 6)
        derivative batch."""
        x = np.array([[2., .5, 1., .1, .1, 0.],
                      [4., .5, 1., .1, .1, 0.],
                      [8., .5, 1., .1, .1, 0.]])
        t = np.array([[0.], [1.], [2.]])
        Xdot = self.thrust(t, x)
        print(self.thrust.Xdot)
        self.assertEqual(Xdot.shape, (3, 6))
| lasr/orbital_mechanics | dynamics/tests/test_thrust_constant.py | Python | mit | 1,453 |
from django.conf.urls import url
from . import views
# Blog URL routes, most-specific first: entry detail, then day/month/year
# archives, then the index.
urlpatterns = [
    # e.g. /2016/jan/5/my-post/ -> single entry
    url(
        r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[\w-]+)/$',
        views.BlogDateDetailView.as_view(),
        name="entry"
    ),
    # e.g. /2016/jan/5/ -> all entries for one day
    url(
        r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$',
        views.BlogDayArchiveView.as_view(),
        name="archive-day"
    ),
    # e.g. /2016/jan/ -> all entries for one month
    url(
        r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
        views.BlogMonthArchiveView.as_view(),
        name="archive-month"
    ),
    # e.g. /2016/ -> all entries for one year
    url(
        r'^(?P<year>\d{4})/$',
        views.BlogYearArchiveView.as_view(),
        name="archive-year"
    ),
    # / -> blog index (latest entries)
    url(
        r'^$',
        views.BlogArchiveIndexView.as_view(),
        name="index"
    ),
]
| xavierdutreilh/djangoproject.com | blog/urls.py | Python | bsd-3-clause | 754 |
import PySide
import PySide.QtCore
from PySide.QtGui import QApplication, QDialog
from gui import Ui_About
class About(QDialog, Ui_About):
    """ Displays the About window.
    """
    def __init__(self, parent=None):
        super(About, self).__init__(parent)
        self.setupUi(self)
        running_app = PySide.QtCore.QCoreApplication.instance()
        # Populate the version labels from the running application and the
        # PySide/Qt libraries it is using.
        self.lbl_appname.setText("{}, version {}".format(running_app.name, running_app.__version__))
        self.lbl_pyside_ver.setText("PySide Version {}".format(PySide.__version__))
        self.lbl_qt_ver.setText("QtCore Version {}".format(PySide.QtCore.__version__))
        self.lbl_copyright.setText("Copyright 2015 Jonathan D. Barton")
| tmtowtdi/MontyLacuna | gui/ships/lib/widgets/about.py | Python | mit | 684 |
# swift_build_support/__init__.py - Helpers for building Swift -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# This file needs to be here in order for Python to treat the
# utils/swift_build_support/ directory as a module.
#
# ----------------------------------------------------------------------------
| khizkhiz/swift | utils/swift_build_support/swift_build_support/__init__.py | Python | apache-2.0 | 700 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-06 13:24
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: re-declares BadgeDesign.bg_color with a hex-color
    # regex validator (#RRGGBB) and a '#FFFFFF' default.

    dependencies = [
        ('badges', '0003_badgedesign_bg_color'),
    ]

    operations = [
        migrations.AlterField(
            model_name='badgedesign',
            name='bg_color',
            field=models.CharField(default='#FFFFFF', help_text='E.g. #00ff00', max_length=7, validators=[django.core.validators.RegexValidator('^#[a-fA-F0-9]{6}$')], verbose_name='Background color'),
        ),
    ]
| helfertool/helfertool | src/badges/migrations/0004_auto_20160306_1424.py | Python | agpl-3.0 | 638 |
#!/usr/bin/env python
"""Worker task: print the square of the integer given as argv[1]."""

import sys, time, random

# print >> sys.stderr, sys.argv

def square(n):
    """Return n squared."""
    return n * n


def main(argv=None):
    """Read an integer from the command line and write its square.

    The result is written to stdout without a trailing newline so a parent
    process capturing the output reads a bare number. Returns 0 on success.
    """
    if argv is None:
        argv = sys.argv
    n = int(argv[1])
    sys.stdout.write(str(square(n)))
    sys.stdout.flush()
    return 0


# (fix) Guarded entry point: the original ran (and sys.exit-ed) at module
# import time; script behavior from the command line is unchanged.
if __name__ == "__main__":
    sys.exit(main())
sys.exit(0) | jasonzliang/dcs | test_tasks/square.py | Python | unlicense | 158 |
'''
See more here: http://www.pymolwiki.org/index.php/com
DESCRIPTION
get the center of mass of selection or move selection to the origin.
ARGUMENTS
selection = string: a valid PyMOL selection {default: all}
center = 0 or 1: if center=1 center the selection {default: 0}
returns: center of mass: [ xCOM, yCOM, zCOM ]
SEE ALSO
get_extent, get_position, http://pymolwiki.org/index.php/Center_Of_Mass
# @AUTHOR: Jason Vertrees
# Copyright (c) 2008, Jason Vertrees
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# * disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# * disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived
# * from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# DATE : 2008-09-26
# REV : 1
'''
from pymol import cmd
from pymol import stored
from chempy import cpv
def COM(selection='all', center=0, quiet=1):
    """Return the center of mass of *selection* as [xCOM, yCOM, zCOM].

    selection: a valid PyMOL selection (default 'all')
    center: if 1, translate the selection so its COM sits at the origin
    quiet: if 0, print the computed COM to the console

    NOTE(review): coordinates are averaged without mass weighting, so this
    is the geometric centroid rather than a true center of mass.
    """
    model = cmd.get_model(selection)
    nAtom = len(model.atom)
    COM = cpv.get_null()
    for a in model.atom:
        COM = cpv.add(COM, a.coord)
    COM = cpv.scale(COM, 1. / nAtom)
    if not int(quiet):
        print ' COM: [%8.3f,%8.3f,%8.3f]' % tuple(COM)
    if int(center):
        # Shift every atom by -COM so the selection is centered at the origin.
        cmd.alter_state(1, selection, "(x,y,z)=sub((x,y,z), COM)",
                        space={'COM': COM, 'sub': cpv.sub})
    return COM

# Register as a PyMOL command: "COM selection, center=1"
cmd.extend("COM", COM)
| julianheinrich/viewpoints | com.py | Python | mit | 2,627 |
from math import sin
sin  # bare reference: GoTo target fixture for ycmd tests (keep line/column positions)
| indianajohn/ycmd | ycmd/tests/python/testdata/goto_file4.py | Python | gpl-3.0 | 25 |
#!/usr/bin/env python
import pytest
from utils import *
from fireplace.dsl import *
from fireplace.exceptions import *
from fireplace.card import Card
def test_selector():
    """Selector combinators (| and +); IN_HAND+DRAGON+FRIENDLY finds
    exactly the Alexstrasza placed in the player's hand."""
    game = prepare_game()
    game.player1.discard_hand()
    alex = game.player1.give("EX1_561")
    selector = PIRATE | DRAGON + MINION
    assert len(selector.eval(game.player1.hand, game.player1)) >= 1
    selector = IN_HAND + DRAGON + FRIENDLY
    targets = selector.eval(game, game.player1)
    assert len(targets) == 1
    assert targets[0] == alex
def test_empty_selector():
    """An IN_HAND selector matches nothing when both hands are empty."""
    game = prepare_game()
    game.player1.discard_hand()
    game.player2.discard_hand()
    selector = IN_HAND
    targets = selector.eval(game.player1.hand, game.player1)
    assert not targets
def test_random_card_picker():
    """RandomCardPicker never yields heroes, enchantments or hero powers."""
    picker = RandomCardPicker()
    ids = picker.find_cards()
    for id in ids:
        card = Card(id)
        assert card.type is not CardType.HERO
        assert card.type is not CardType.ENCHANTMENT
        assert card.type is not CardType.HERO_POWER
def test_controller():
    """Controller() evaluates to the player controlling an entity."""
    game = prepare_game()
    game.player1.discard_hand()
    wisp = game.player1.give(WISP)
    wisp.play()
    assert Controller().evaluate(wisp) is game.player1
def test_self_selector():
    """SELF evaluates to exactly the source entity, for every entity."""
    game = prepare_game()
    selector = SELF
    for source in game:
        targets = selector.eval(game, source)
        assert len(targets) == 1
        assert targets[0] is source
def test_owner_selector():
    """OWNER on a buff resolves to the entity carrying the buff."""
    game = prepare_game()
    wisp = game.player1.give(WISP)
    wisp.play()
    blessing_wisdom = game.player1.give("EX1_363")
    blessing_wisdom.play(target=wisp)
    targets = OWNER.eval(game, wisp.buffs[0])
    assert len(targets) == 1
    assert targets[0] is wisp
def test_id_selector():
    """ID(x) finds at least one entity with that card id, for every id in
    the game's entity list."""
    game = prepare_game()
    for entity in game.entities:
        id = getattr(entity, "id", None)
        if id:
            targets = ID(id).eval(game, game.player1)
            assert len(targets) >= 1
            assert targets[0].id == id
def test_target_selector():
    """TARGET evaluates to the source entity's current target."""
    game = prepare_game()
    moonfire = game.player1.give(MOONFIRE)
    target = game.player1.hero
    # Set this manually for the selector
    moonfire.target = target
    targets = TARGET.eval(game, moonfire)
    assert len(targets) == 1
    assert targets[0] == target
def test_high_low_atk_selectors():
    """HIGHEST_ATK / LOWEST_ATK pick a single extreme-attack card from the
    hand, even when several cards tie (both wisps)."""
    game = prepare_game()
    game.player1.discard_hand()
    game.player2.discard_hand()
    # Give two wisps and make sure we only get back one
    wisp = game.player1.give(WISP)
    wisp2 = game.player1.give(WISP)
    # Local rebinding shadows the imported IN_HAND on purpose.
    IN_HAND = EnumSelector(Zone.HAND)
    high_selector = HIGHEST_ATK(IN_HAND)
    low_selector = LOWEST_ATK(IN_HAND)
    targets = high_selector.eval(game, game.player1)
    assert len(targets) == 1
    assert targets[0] == wisp
    targets = low_selector.eval(game, game.player1)
    assert len(targets) == 1
    assert targets[0] == wisp
    alex = game.player1.give("EX1_561")
    mountain = game.player1.give("EX1_105")
    targets = high_selector.eval(game, game.player1)
    assert len(targets) == 1
    assert targets[0] in [alex, mountain]
    targets = low_selector.eval(game, game.player1)
    assert len(targets) == 1
    assert targets[0] == wisp
def test_controlled_by_selector():
    """CONTROLLED_BY and attribute-value selectors filter minions in play
    from both players' perspectives."""
    game = prepare_game()
    game.player1.discard_hand()
    game.player2.discard_hand()
    wisp = game.player1.give(WISP)
    goldshire = game.player1.give(GOLDSHIRE_FOOTMAN)
    wisp.play()
    goldshire.play()
    # Local rebindings shadow the imported selectors on purpose.
    MINION = EnumSelector(CardType.MINION)
    IN_PLAY = EnumSelector(Zone.PLAY)
    selector = MINION + IN_PLAY + FRIENDLY
    for source in [wisp, goldshire, game.player1]:
        targets = selector.eval(game, source)
        assert len(targets) == 2
        assert set(targets) == set([wisp, goldshire])
    selector = MINION + IN_PLAY + ENEMY
    targets = selector.eval(game, game.player2)
    assert len(targets) == 2
    assert set(targets) == set([wisp, goldshire])
    moonfire = game.player1.give(MOONFIRE)
    moonfire.target = game.player1.hero
    # Only the wisp has exactly 1 health among the target's minions.
    selector = MINION + IN_PLAY + CONTROLLED_BY(TARGET) + (AttrValue(GameTag.HEALTH) == 1)
    targets = selector.eval(game, moonfire)
    assert len(targets) == 1
    assert targets[0] == wisp
def test_random_selector():
    """RANDOM picks one target; multiplying a RANDOM selector picks that
    many."""
    game = prepare_game()
    selector = RANDOM(EnumSelector(CardType.MINION))
    targets = selector.eval(game, game.player1)
    assert len(targets) == 1
    targets = (selector * 3).eval(game, game.player1)
    assert len(targets) == 3
def test_positional_selectors():
    """LEFT_OF / RIGHT_OF / ADJACENT resolve board neighbours of a minion
    inserted mid-board."""
    game = prepare_game()
    wisp1 = game.player1.give(WISP)
    wisp1.play()
    wisp2 = game.player1.give(WISP)
    wisp2.play()
    wisp3 = game.player1.give(WISP)
    wisp3.play()
    wisp4 = game.player1.give(WISP)
    wisp4.play()
    # Insert the Defender of Argus between wisp2 and wisp3.
    defender = game.player1.give("EX1_093")
    defender.play(index=2)
    assert game.player1.field == [wisp1, wisp2, defender, wisp3, wisp4]
    left = LEFT_OF(SELF).eval(game, defender)
    assert len(left) == 1
    assert left[0] is wisp2
    right = RIGHT_OF(SELF).eval(game, defender)
    assert len(right) == 1
    assert right[0] is wisp3
    adjacent = ADJACENT(SELF).eval(game, defender)
    assert len(adjacent) == 2
    assert adjacent[0] is wisp2
    assert adjacent[1] is wisp3
def test_hijack():
    """hijacked() redirects RANDOM_ENEMY_MINION to the friendly hero, so
    playing the vial kills it and ends the game (GameOver)."""
    game = prepare_game()
    vial = game.player1.give("LOEA16_8")
    with hijacked(RANDOM_ENEMY_MINION, FRIENDLY_HERO):
        with pytest.raises(GameOver):
            vial.play()
| smallnamespace/fireplace | tests/test_dsl.py | Python | agpl-3.0 | 5,027 |
"""Initial migration
Revision ID: 771555241255
Revises: 1ed43776064f
Create Date: 2017-04-24 09:15:59.549855
"""
# revision identifiers, used by Alembic.
revision = '771555241255'       # this migration's identifier
down_revision = '1ed43776064f'  # parent revision this one applies on top of
branch_labels = ('hotel',)      # introduces the 'hotel' plugin branch
depends_on = None
from alembic import op
import sqlalchemy as sa
import sideboard.lib.sa
# Detect the backend so we can pick the right server-side UTC default and,
# for SQLite, turn on foreign-key enforcement (off by default there).
try:
    is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
    # (fix) was a bare "except:"; outside an active migration context
    # there is no dialect to inspect -- assume a non-sqlite backend.
    is_sqlite = False

if is_sqlite:
    op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
    utcnow_server_default = "(datetime('now', 'utc'))"
else:
    utcnow_server_default = "timezone('utc', current_timestamp)"
def upgrade():
    """Create the hotel tables (room, hotel_requests, room_assignment) and
    add the attendee.hotel_eligible flag."""
    op.create_table('room',
    sa.Column('id', sideboard.lib.sa.UUID(), nullable=False),
    sa.Column('notes', sa.Unicode(), server_default='', nullable=False),
    sa.Column('message', sa.Unicode(), server_default='', nullable=False),
    sa.Column('locked_in', sa.Boolean(), server_default='False', nullable=False),
    sa.Column('nights', sa.Unicode(), server_default='', nullable=False),
    sa.Column('created', sideboard.lib.sa.UTCDateTime(), server_default=sa.text(utcnow_server_default), nullable=False),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_room'))
    )
    # One request row per attendee (unique attendee_id).
    op.create_table('hotel_requests',
    sa.Column('id', sideboard.lib.sa.UUID(), nullable=False),
    sa.Column('attendee_id', sideboard.lib.sa.UUID(), nullable=False),
    sa.Column('nights', sa.Unicode(), server_default='', nullable=False),
    sa.Column('wanted_roommates', sa.Unicode(), server_default='', nullable=False),
    sa.Column('unwanted_roommates', sa.Unicode(), server_default='', nullable=False),
    sa.Column('special_needs', sa.Unicode(), server_default='', nullable=False),
    sa.Column('approved', sa.Boolean(), server_default='False', nullable=False),
    sa.ForeignKeyConstraint(['attendee_id'], ['attendee.id'], name=op.f('fk_hotel_requests_attendee_id_attendee')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_hotel_requests')),
    sa.UniqueConstraint('attendee_id', name=op.f('uq_hotel_requests_attendee_id'))
    )
    # Join table: which attendee is assigned to which room.
    op.create_table('room_assignment',
    sa.Column('id', sideboard.lib.sa.UUID(), nullable=False),
    sa.Column('room_id', sideboard.lib.sa.UUID(), nullable=False),
    sa.Column('attendee_id', sideboard.lib.sa.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['attendee_id'], ['attendee.id'], name=op.f('fk_room_assignment_attendee_id_attendee')),
    sa.ForeignKeyConstraint(['room_id'], ['room.id'], name=op.f('fk_room_assignment_room_id_room')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_room_assignment'))
    )
    op.add_column('attendee', sa.Column('hotel_eligible', sa.Boolean(), server_default='False', nullable=False))
op.add_column('attendee', sa.Column('hotel_eligible', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
    """Drop everything created in upgrade(), dependents first."""
    op.drop_column('attendee', 'hotel_eligible')
    op.drop_table('room_assignment')
    op.drop_table('hotel_requests')
    op.drop_table('room')
| magfest/hotel | alembic/versions/771555241255_initial_migration.py | Python | agpl-3.0 | 2,888 |
from sqlalchemy.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy import Integer, PickleType, String, ForeignKey
import operator
from sqlalchemy import testing
from sqlalchemy.util import OrderedSet
from sqlalchemy.orm import mapper, relationship, create_session, \
PropComparator, synonym, comparable_property, sessionmaker, \
attributes, Session, backref, configure_mappers
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.testing import eq_, ne_
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy import event, and_, case
from sqlalchemy.testing.schema import Table, Column
class MergeTest(_fixtures.FixtureTest):
"""Session.merge() functionality"""
run_inserts = None
    def load_tracker(self, cls, canary=None):
        """Attach a counting 'load' event listener to *cls*.

        Returns the canary callable; canary.called records how many times
        an instance of cls was loaded (merge() triggers load events).
        """
        if canary is None:
            def canary(instance, *args):
                canary.called += 1
            canary.called = 0
        event.listen(cls, 'load', canary)
        return canary
    def test_transient_to_pending(self):
        """merge() of a transient object yields a distinct pending copy in
        the session and fires exactly one load event."""
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        sess = create_session()
        load = self.load_tracker(User)

        u = User(id=7, name='fred')
        eq_(load.called, 0)
        u2 = sess.merge(u)
        eq_(load.called, 1)
        assert u2 in sess
        eq_(u2, User(id=7, name='fred'))
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(User).first(), User(id=7, name='fred'))
def test_transient_to_pending_no_pk(self):
"""test that a transient object with no PK attribute
doesn't trigger a needless load."""
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
u = User(name='fred')
def go():
sess.merge(u)
self.assert_sql_count(testing.db, go, 0)
def test_transient_to_pending_collection(self):
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
mapper(User, users, properties={
'addresses': relationship(Address, backref='user',
collection_class=OrderedSet)})
mapper(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
eq_(load.called, 0)
sess = create_session()
sess.merge(u)
eq_(load.called, 3)
merged_users = [e for e in sess if isinstance(e, User)]
eq_(len(merged_users), 1)
assert merged_users[0] is not u
sess.flush()
sess.expunge_all()
eq_(sess.query(User).one(),
User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
)
def test_transient_to_persistent(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
load = self.load_tracker(User)
sess = create_session()
u = User(id=7, name='fred')
sess.add(u)
sess.flush()
sess.expunge_all()
eq_(load.called, 0)
_u2 = u2 = User(id=7, name='fred jones')
eq_(load.called, 0)
u2 = sess.merge(u2)
assert u2 is not _u2
eq_(load.called, 1)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(), User(id=7, name='fred jones'))
eq_(load.called, 2)
def test_transient_to_persistent_collection(self):
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
mapper(User, users, properties={
'addresses':relationship(Address,
backref='user',
collection_class=OrderedSet,
order_by=addresses.c.id,
cascade="all, delete-orphan")
})
mapper(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
sess = create_session()
sess.add(u)
sess.flush()
sess.expunge_all()
eq_(load.called, 0)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
Address(id=4, email_address='fred4'),
]))
u = sess.merge(u)
# 1. merges User object. updates into session.
# 2.,3. merges Address ids 3 & 4, saves into session.
# 4.,5. loads pre-existing elements in "addresses" collection,
# marks as deleted, Address ids 1 and 2.
eq_(load.called, 5)
eq_(u,
User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
Address(id=4, email_address='fred4'),
]))
)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).one(),
User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
Address(id=4, email_address='fred4'),
]))
)
def test_detached_to_persistent_collection(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address,
backref='user',
order_by=addresses.c.id,
collection_class=OrderedSet)})
mapper(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
a = Address(id=1, email_address='fred1')
u = User(id=7, name='fred', addresses=OrderedSet([
a,
Address(id=2, email_address='fred2'),
]))
sess = create_session()
sess.add(u)
sess.flush()
sess.expunge_all()
u.name='fred jones'
u.addresses.add(Address(id=3, email_address='fred3'))
u.addresses.remove(a)
eq_(load.called, 0)
u = sess.merge(u)
eq_(load.called, 4)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(),
User(id=7, name='fred jones', addresses=OrderedSet([
Address(id=2, email_address='fred2'),
Address(id=3, email_address='fred3')])))
def test_unsaved_cascade(self):
"""Merge of a transient entity with two child transient
entities, with a bidirectional relationship."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
cascade="all", backref="user")
})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
u = User(id=7, name='fred')
a1 = Address(email_address='foo@bar.com')
a2 = Address(email_address='hoho@bar.com')
u.addresses.append(a1)
u.addresses.append(a2)
u2 = sess.merge(u)
eq_(load.called, 3)
eq_(u,
User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
eq_(u2,
User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
sess.flush()
sess.expunge_all()
u2 = sess.query(User).get(7)
eq_(u2, User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
eq_(load.called, 6)
def test_merge_empty_attributes(self):
User, dingalings = self.classes.User, self.tables.dingalings
mapper(User, dingalings)
sess = create_session()
# merge empty stuff. goes in as NULL.
# not sure what this was originally trying to
# test.
u1 = sess.merge(User(id=1))
sess.flush()
assert u1.data is None
# save another user with "data"
u2 = User(id=2, data="foo")
sess.add(u2)
sess.flush()
# merge User on u2's pk with
# no "data".
# value isn't whacked from the destination
# dict.
u3 = sess.merge(User(id=2))
eq_(u3.__dict__['data'], "foo")
# make a change.
u3.data = 'bar'
# merge another no-"data" user.
# attribute maintains modified state.
# (usually autoflush would have happened
# here anyway).
u4 = sess.merge(User(id=2))
eq_(u3.__dict__['data'], "bar")
sess.flush()
# and after the flush.
eq_(u3.data, "bar")
# new row.
u5 = User(id=3, data="foo")
sess.add(u5)
sess.flush()
# blow it away from u5, but don't
# mark as expired. so it would just
# be blank.
del u5.data
# the merge adds expiry to the
# attribute so that it loads.
# not sure if I like this - it currently is needed
# for test_pickled:PickleTest.test_instance_deferred_cols
u6 = sess.merge(User(id=3))
assert 'data' not in u6.__dict__
assert u6.data == "foo"
# set it to None. this is actually
# a change so gets preserved.
u6.data = None
u7 = sess.merge(User(id=3))
assert u6.__dict__['data'] is None
def test_merge_irregular_collection(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
backref='user',
collection_class=
attribute_mapped_collection('email_address')),
})
u1 = User(id=7, name='fred')
u1.addresses['foo@bar.com'] = Address(email_address='foo@bar.com')
sess = create_session()
sess.merge(u1)
sess.flush()
assert list(u1.addresses.keys()) == ['foo@bar.com']
def test_attribute_cascade(self):
"""Merge of a persistent entity with two child
persistent entities."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')
})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
# set up data and save
u = User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address = 'hoho@la.com')])
sess.add(u)
sess.flush()
# assert data was saved
sess2 = create_session()
u2 = sess2.query(User).get(7)
eq_(u2,
User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@la.com')]))
# make local changes to data
u.name = 'fred2'
u.addresses[1].email_address = 'hoho@lalala.com'
eq_(load.called, 3)
# new session, merge modified data into session
sess3 = create_session()
u3 = sess3.merge(u)
eq_(load.called, 6)
# ensure local changes are pending
eq_(u3, User(id=7, name='fred2', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@lalala.com')]))
# save merged data
sess3.flush()
# assert modified/merged data was saved
sess.expunge_all()
u = sess.query(User).get(7)
eq_(u, User(id=7, name='fred2', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@lalala.com')]))
eq_(load.called, 9)
# merge persistent object into another session
sess4 = create_session()
u = sess4.merge(u)
assert len(u.addresses)
for a in u.addresses:
assert a.user is u
def go():
sess4.flush()
# no changes; therefore flush should do nothing
self.assert_sql_count(testing.db, go, 0)
eq_(load.called, 12)
# test with "dontload" merge
sess5 = create_session()
u = sess5.merge(u, load=False)
assert len(u.addresses)
for a in u.addresses:
assert a.user is u
def go():
sess5.flush()
# no changes; therefore flush should do nothing
# but also, load=False wipes out any difference in committed state,
# so no flush at all
self.assert_sql_count(testing.db, go, 0)
eq_(load.called, 15)
sess4 = create_session()
u = sess4.merge(u, load=False)
# post merge change
u.addresses[1].email_address='afafds'
def go():
sess4.flush()
# afafds change flushes
self.assert_sql_count(testing.db, go, 1)
eq_(load.called, 18)
sess5 = create_session()
u2 = sess5.query(User).get(u.id)
eq_(u2.name, 'fred2')
eq_(u2.addresses[1].email_address, 'afafds')
eq_(load.called, 21)
def test_no_relationship_cascade(self):
"""test that merge doesn't interfere with a relationship()
target that specifically doesn't include 'merge' cascade.
"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses, properties={
'user':relationship(User, cascade="save-update")
})
mapper(User, users)
sess = create_session()
u1 = User(name="fred")
a1 = Address(email_address="asdf", user=u1)
sess.add(a1)
sess.flush()
a2 = Address(id=a1.id, email_address="bar", user=User(name="hoho"))
a2 = sess.merge(a2)
sess.flush()
# no expire of the attribute
assert a2.__dict__['user'] is u1
# merge succeeded
eq_(
sess.query(Address).all(),
[Address(id=a1.id, email_address="bar")]
)
# didn't touch user
eq_(
sess.query(User).all(),
[User(name="fred")]
)
def test_one_to_many_cascade(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses))})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
u = User(name='fred')
a1 = Address(email_address='foo@bar')
a2 = Address(email_address='foo@quux')
u.addresses.extend([a1, a2])
sess.add(u)
sess.flush()
eq_(load.called, 0)
sess2 = create_session()
u2 = sess2.query(User).get(u.id)
eq_(load.called, 1)
u.addresses[1].email_address = 'addr 2 modified'
sess2.merge(u)
eq_(u2.addresses[1].email_address, 'addr 2 modified')
eq_(load.called, 3)
sess3 = create_session()
u3 = sess3.query(User).get(u.id)
eq_(load.called, 4)
u.name = 'also fred'
sess3.merge(u)
eq_(load.called, 6)
eq_(u3.name, 'also fred')
def test_many_to_one_cascade(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses, properties={
'user':relationship(User)
})
mapper(User, users)
u1 = User(id=1, name="u1")
a1 =Address(id=1, email_address="a1", user=u1)
u2 = User(id=2, name="u2")
sess = create_session()
sess.add_all([a1, u2])
sess.flush()
a1.user = u2
sess2 = create_session()
a2 = sess2.merge(a1)
eq_(
attributes.get_history(a2, 'user'),
([u2], (), ())
)
assert a2 in sess2.dirty
sess.refresh(a1)
sess2 = create_session()
a2 = sess2.merge(a1, load=False)
eq_(
attributes.get_history(a2, 'user'),
((), [u1], ())
)
assert a2 not in sess2.dirty
def test_many_to_many_cascade(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items':relationship(mapper(Item, items),
secondary=order_items)})
load = self.load_tracker(Order)
self.load_tracker(Item, load)
sess = create_session()
i1 = Item()
i1.description='item 1'
i2 = Item()
i2.description = 'item 2'
o = Order()
o.description = 'order description'
o.items.append(i1)
o.items.append(i2)
sess.add(o)
sess.flush()
eq_(load.called, 0)
sess2 = create_session()
o2 = sess2.query(Order).get(o.id)
eq_(load.called, 1)
o.items[1].description = 'item 2 modified'
sess2.merge(o)
eq_(o2.items[1].description, 'item 2 modified')
eq_(load.called, 3)
sess3 = create_session()
o3 = sess3.query(Order).get(o.id)
eq_( load.called, 4)
o.description = 'desc modified'
sess3.merge(o)
eq_(load.called, 6)
eq_(o3.description, 'desc modified')
def test_one_to_one_cascade(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'address':relationship(mapper(Address, addresses),
uselist = False)
})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.address = a1
sess.add(u)
sess.flush()
eq_(load.called, 0)
sess2 = create_session()
u2 = sess2.query(User).get(7)
eq_(load.called, 1)
u2.name = 'fred2'
u2.address.email_address = 'hoho@lalala.com'
eq_(load.called, 2)
u3 = sess.merge(u2)
eq_(load.called, 2)
assert u3 is u
def test_value_to_none(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'address':relationship(mapper(Address, addresses),
uselist = False, backref='user')
})
sess = sessionmaker()()
u = User(id=7, name="fred",
address=Address(id=1, email_address='foo@bar.com'))
sess.add(u)
sess.commit()
sess.close()
u2 = User(id=7, name=None, address=None)
u3 = sess.merge(u2)
assert u3.name is None
assert u3.address is None
sess.close()
a1 = Address(id=1, user=None)
a2 = sess.merge(a1)
assert a2.user is None
def test_transient_no_load(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
u = User()
assert_raises_message(sa.exc.InvalidRequestError,
"load=False option does not support",
sess.merge, u, load=False)
def test_no_load_with_backrefs(self):
"""load=False populates relationships in both
directions without requiring a load"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')
})
u = User(id=7, name='fred', addresses=[
Address(email_address='ad1'),
Address(email_address='ad2')])
sess = create_session()
sess.add(u)
sess.flush()
sess.close()
assert 'user' in u.addresses[1].__dict__
sess = create_session()
u2 = sess.merge(u, load=False)
assert 'user' in u2.addresses[1].__dict__
eq_(u2.addresses[1].user, User(id=7, name='fred'))
sess.expire(u2.addresses[1], ['user'])
assert 'user' not in u2.addresses[1].__dict__
sess.close()
sess = create_session()
u = sess.merge(u2, load=False)
assert 'user' not in u.addresses[1].__dict__
eq_(u.addresses[1].user, User(id=7, name='fred'))
def test_dontload_with_eager(self):
"""
This test illustrates that with load=False, we can't just copy
the committed_state of the merged instance over; since it
references collection objects which themselves are to be merged.
This committed_state would instead need to be piecemeal
'converted' to represent the correct objects. However, at the
moment I'd rather not support this use case; if you are merging
with load=False, you're typically dealing with caching and the
merged objects shouldn't be 'dirty'.
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses))
})
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.addresses.append(a1)
sess.add(u)
sess.flush()
sess2 = create_session()
u2 = sess2.query(User).\
options(sa.orm.joinedload('addresses')).get(7)
sess3 = create_session()
u3 = sess3.merge(u2, load=False)
def go():
sess3.flush()
self.assert_sql_count(testing.db, go, 0)
def test_no_load_disallows_dirty(self):
"""load=False doesn't support 'dirty' objects right now
(see test_no_load_with_eager()). Therefore lets assert it.
"""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
sess.add(u)
sess.flush()
u.name = 'ed'
sess2 = create_session()
try:
sess2.merge(u, load=False)
assert False
except sa.exc.InvalidRequestError as e:
assert "merge() with load=False option does not support "\
"objects marked as 'dirty'. flush() all changes on "\
"mapped instances before merging with load=False." \
in str(e)
u2 = sess2.query(User).get(7)
sess3 = create_session()
u3 = sess3.merge(u2, load=False)
assert not sess3.dirty
def go():
sess3.flush()
self.assert_sql_count(testing.db, go, 0)
def test_no_load_sets_backrefs(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')})
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.addresses.append(a1)
sess.add(u)
sess.flush()
assert u.addresses[0].user is u
sess2 = create_session()
u2 = sess2.merge(u, load=False)
assert not sess2.dirty
def go():
assert u2.addresses[0].user is u2
self.assert_sql_count(testing.db, go, 0)
def test_no_load_preserves_parents(self):
"""Merge with load=False does not trigger a 'delete-orphan'
operation.
merge with load=False sets attributes without using events.
this means the 'hasparent' flag is not propagated to the newly
merged instance. in fact this works out OK, because the
'_state.parents' collection on the newly merged instance is
empty; since the mapper doesn't see an active 'False' setting in
this collection when _is_orphan() is called, it does not count
as an orphan (i.e. this is the 'optimistic' logic in
mapper._is_orphan().)
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user',
cascade="all, delete-orphan")})
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.addresses.append(a1)
sess.add(u)
sess.flush()
assert u.addresses[0].user is u
sess2 = create_session()
u2 = sess2.merge(u, load=False)
assert not sess2.dirty
a2 = u2.addresses[0]
a2.email_address='somenewaddress'
assert not sa.orm.object_mapper(a2)._is_orphan(
sa.orm.attributes.instance_state(a2))
sess2.flush()
sess2.expunge_all()
eq_(sess2.query(User).get(u2.id).addresses[0].email_address,
'somenewaddress')
# this use case is not supported; this is with a pending Address
# on the pre-merged object, and we currently don't support
# 'dirty' objects being merged with load=False. in this case,
# the empty '_state.parents' collection would be an issue, since
# the optimistic flag is False in _is_orphan() for pending
# instances. so if we start supporting 'dirty' with load=False,
# this test will need to pass
sess = create_session()
u = sess.query(User).get(7)
u.addresses.append(Address())
sess2 = create_session()
try:
u2 = sess2.merge(u, load=False)
assert False
# if load=False is changed to support dirty objects, this code
# needs to pass
a2 = u2.addresses[0]
a2.email_address='somenewaddress'
assert not sa.orm.object_mapper(a2)._is_orphan(
sa.orm.attributes.instance_state(a2))
sess2.flush()
sess2.expunge_all()
eq_(sess2.query(User).get(u2.id).addresses[0].email_address,
'somenewaddress')
except sa.exc.InvalidRequestError as e:
assert "load=False option does not support" in str(e)
def test_synonym_comparable(self):
users = self.tables.users
class User(object):
class Comparator(PropComparator):
pass
def _getValue(self):
return self._value
def _setValue(self, value):
setattr(self, '_value', value)
value = property(_getValue, _setValue)
mapper(User, users, properties={
'uid':synonym('id'),
'foobar':comparable_property(User.Comparator,User.value),
})
sess = create_session()
u = User()
u.name = 'ed'
sess.add(u)
sess.flush()
sess.expunge(u)
sess.merge(u)
def test_cascade_doesnt_blowaway_manytoone(self):
"""a merge test that was fixed by [ticket:1202]"""
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
s = create_session(autoflush=True, autocommit=False)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')})
a1 = Address(user=s.merge(User(id=1, name='ed')), email_address='x')
before_id = id(a1.user)
a2 = Address(user=s.merge(User(id=1, name='jack')),
email_address='x')
after_id = id(a1.user)
other_id = id(a2.user)
eq_(before_id, other_id)
eq_(after_id, other_id)
eq_(before_id, after_id)
eq_(a1.user, a2.user)
def test_cascades_dont_autoflush(self):
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
sess = create_session(autoflush=True, autocommit=False)
m = mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')})
user = User(id=8, name='fred',
addresses=[Address(email_address='user')])
merged_user = sess.merge(user)
assert merged_user in sess.new
sess.flush()
assert merged_user not in sess.new
def test_cascades_dont_autoflush_2(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address,
backref='user',
cascade="all, delete-orphan")
})
mapper(Address, addresses)
u = User(id=7, name='fred', addresses=[
Address(id=1, email_address='fred1'),
])
sess = create_session(autoflush=True, autocommit=False)
sess.add(u)
sess.commit()
sess.expunge_all()
u = User(id=7, name='fred', addresses=[
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
])
sess.merge(u)
assert sess.autoflush
sess.commit()
def test_dont_expire_pending(self):
"""test that pending instances aren't expired during a merge."""
users, User = self.tables.users, self.classes.User
mapper(User, users)
u = User(id=7)
sess = create_session(autoflush=True, autocommit=False)
u = sess.merge(u)
assert not bool(attributes.instance_state(u).expired_attributes)
def go():
eq_(u.name, None)
self.assert_sql_count(testing.db, go, 0)
def test_option_state(self):
"""test that the merged takes on the MapperOption characteristics
of that which is merged.
"""
users, User = self.tables.users, self.classes.User
class Option(MapperOption):
propagate_to_loaders = True
opt1, opt2 = Option(), Option()
sess = sessionmaker()()
umapper = mapper(User, users)
sess.add_all([
User(id=1, name='u1'),
User(id=2, name='u2'),
])
sess.commit()
sess2 = sessionmaker()()
s2_users = sess2.query(User).options(opt2).all()
# test 1. no options are replaced by merge options
sess = sessionmaker()()
s1_users = sess.query(User).all()
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path, ())
eq_(ustate.load_options, set())
for u in s2_users:
sess.merge(u)
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt2]))
# test 2. present options are replaced by merge options
sess = sessionmaker()()
s1_users = sess.query(User).options(opt1).all()
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt1]))
for u in s2_users:
sess.merge(u)
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt2]))
class M2ONoUseGetLoadingTest(fixtures.MappedTest):
    """Merge a one-to-many.  The many-to-one on the other side is set up
    so that use_get is False.   See if skipping the "m2o" merge
    vs. doing it saves on SQL calls.
    """
    @classmethod
    def define_tables(cls, metadata):
        Table('user', metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            Column('name', String(50)),
        )
        Table('address', metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            Column('user_id', Integer, ForeignKey('user.id')),
            Column('email', String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass
        class Address(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        User, Address = cls.classes.User, cls.classes.Address
        user, address = cls.tables.user, cls.tables.address
        mapper(User, user, properties={
            'addresses':relationship(Address, backref=
                            backref('user',
                                # needlessly complex primaryjoin so that the
                                # use_get flag is False
                                primaryjoin=and_(
                                    user.c.id==address.c.user_id,
                                    user.c.id==user.c.id
                                )
                            )
                        )
        })
        mapper(Address, address)
        configure_mappers()
        # the whole point of this suite: Address.user cannot use a simple
        # identity-map get, so merge may have to emit SQL for it.
        assert Address.user.property._use_get is False
    @classmethod
    def insert_data(cls):
        User, Address = cls.classes.User, cls.classes.Address
        s = Session()
        s.add_all([
            User(id=1, name='u1', addresses=[Address(id=1, email='a1'),
                                Address(id=2, email='a2')])
        ])
        s.commit()
    # "persistent" - we get at an Address that was already present.
    # With the "skip bidirectional" check removed, the "set" emits SQL
    # for the "previous" version in any case,
    # address.user_id is 1, you get a load.
    def test_persistent_access_none(self):
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        def go():
            u1 = User(id=1,
                    addresses =[Address(id=1), Address(id=2)]
            )
            u2 = s.merge(u1)
        self.assert_sql_count(testing.db, go, 2)
    def test_persistent_access_one(self):
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        def go():
            u1 = User(id=1,
                    addresses =[Address(id=1), Address(id=2)]
            )
            u2 = s.merge(u1)
            a1 = u2.addresses[0]
            assert a1.user is u2
        self.assert_sql_count(testing.db, go, 3)
    def test_persistent_access_two(self):
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        def go():
            u1 = User(id=1,
                    addresses =[Address(id=1), Address(id=2)]
            )
            u2 = s.merge(u1)
            a1 = u2.addresses[0]
            assert a1.user is u2
            a2 = u2.addresses[1]
            assert a2.user is u2
        self.assert_sql_count(testing.db, go, 4)
    # "pending" - we get at an Address that is new- user_id should be
    # None.  But in this case the set attribute on the forward side
    # already sets the backref.  commenting out the "skip bidirectional"
    # check emits SQL again for the other two Address objects already
    # persistent.
    def test_pending_access_one(self):
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        def go():
            u1 = User(id=1,
                    addresses =[Address(id=1), Address(id=2),
                            Address(id=3, email='a3')]
            )
            u2 = s.merge(u1)
            a3 = u2.addresses[2]
            assert a3.user is u2
        self.assert_sql_count(testing.db, go, 3)
    def test_pending_access_two(self):
        User, Address = self.classes.User, self.classes.Address
        s = Session()
        def go():
            u1 = User(id=1,
                    addresses =[Address(id=1), Address(id=2),
                            Address(id=3, email='a3')]
            )
            u2 = s.merge(u1)
            a3 = u2.addresses[2]
            assert a3.user is u2
            a2 = u2.addresses[1]
            assert a2.user is u2
        self.assert_sql_count(testing.db, go, 5)
class MutableMergeTest(fixtures.MappedTest):
    # Verifies merge() correctly replaces a mutable (PickleType) column
    # value; comparator=operator.eq lets change detection compare by value.
    @classmethod
    def define_tables(cls, metadata):
        Table("data", metadata,
            Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            Column('data', PickleType(comparator=operator.eq))
        )
    @classmethod
    def setup_classes(cls):
        class Data(cls.Basic):
            pass
    def test_list(self):
        Data, data = self.classes.Data, self.tables.data
        mapper(Data, data)
        sess = sessionmaker()()
        d = Data(data=["this", "is", "a", "list"])
        sess.add(d)
        sess.commit()
        d2 = Data(id=d.id, data=["this", "is", "another", "list"])
        d3 = sess.merge(d2)
        eq_(d3.data, ["this", "is", "another", "list"])
class CompositeNullPksTest(fixtures.MappedTest):
    # Exercises merge() against a composite primary key where one PK
    # column is None, with allow_partial_pks on (default) and off.
    @classmethod
    def define_tables(cls, metadata):
        Table("data", metadata,
            Column('pk1', String(10), primary_key=True),
            Column('pk2', String(10), primary_key=True),
        )
    @classmethod
    def setup_classes(cls):
        class Data(cls.Basic):
            pass
    def test_merge_allow_partial(self):
        Data, data = self.classes.Data, self.tables.data
        mapper(Data, data)
        sess = sessionmaker()()
        d1 = Data(pk1="someval", pk2=None)
        def go():
            return sess.merge(d1)
        # partial PK allowed: merge emits one SELECT to look up the row
        self.assert_sql_count(testing.db, go, 1)
    def test_merge_disallow_partial(self):
        Data, data = self.classes.Data, self.tables.data
        mapper(Data, data, allow_partial_pks=False)
        sess = sessionmaker()()
        d1 = Data(pk1="someval", pk2=None)
        def go():
            return sess.merge(d1)
        # partial PK disallowed: no SELECT is emitted at all
        self.assert_sql_count(testing.db, go, 0)
class LoadOnPendingTest(fixtures.MappedTest):
    """Test interaction of merge() with load_on_pending relationships"""
    @classmethod
    def define_tables(cls, metadata):
        rocks_table = Table("rocks", metadata,
            Column("id", Integer, primary_key=True),
            Column("description", String(10)),
        )
        bugs_table = Table("bugs", metadata,
            Column("id", Integer, primary_key=True),
            Column("rockid", Integer, ForeignKey('rocks.id')),
        )
    @classmethod
    def setup_classes(cls):
        class Rock(cls.Basic, fixtures.ComparableEntity):
            pass
        class Bug(cls.Basic, fixtures.ComparableEntity):
            pass
    def _setup_delete_orphan_o2o(self):
        # one-to-one Rock.bug with delete-orphan cascade and
        # load_on_pending=True: the combination covered by ticket #2374.
        mapper(self.classes.Rock, self.tables.rocks,
               properties={'bug': relationship(self.classes.Bug,
                           cascade='all,delete-orphan',
                           load_on_pending=True,
                           uselist=False)
                           })
        mapper(self.classes.Bug, self.tables.bugs)
        self.sess = sessionmaker()()
    def _merge_delete_orphan_o2o_with(self, bug):
        # create a transient rock with passed bug
        r = self.classes.Rock(id=0, description='moldy')
        r.bug = bug
        m = self.sess.merge(r)
        # we've already passed ticket #2374 problem since merge() returned,
        # but for good measure:
        assert m is not r
        eq_(m,r)
    def test_merge_delete_orphan_o2o_none(self):
        """one to one delete_orphan relationships marked load_on_pending
        should be able to merge() with attribute None"""
        self._setup_delete_orphan_o2o()
        self._merge_delete_orphan_o2o_with(None)
    def test_merge_delete_orphan_o2o(self):
        """one to one delete_orphan relationships marked load_on_pending
        should be able to merge()"""
        self._setup_delete_orphan_o2o()
        self._merge_delete_orphan_o2o_with(self.classes.Bug(id=1))
class PolymorphicOnTest(fixtures.MappedTest):
    """Test merge() of polymorphic object when polymorphic_on
    isn't a Column"""
    @classmethod
    def define_tables(cls, metadata):
        Table('employees', metadata,
            Column('employee_id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
            Column('type', String(1), nullable=False),
            Column('data', String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class Employee(cls.Basic, fixtures.ComparableEntity):
            pass
        class Manager(Employee):
            pass
        class Engineer(Employee):
            pass
    def _setup_polymorphic_on_mappers(self):
        # polymorphic_on is a case() SQL expression (not a Column) mapping
        # single-character type codes to identities — the scenario of
        # ticket #2449.
        employee_mapper = mapper(self.classes.Employee,
                                 self.tables.employees,
            polymorphic_on=case(value=self.tables.employees.c.type,
                whens={
                    'E': 'employee',
                    'M': 'manager',
                    'G': 'engineer',
                    'R': 'engineer',
                }),
            polymorphic_identity='employee')
        mapper(self.classes.Manager, inherits=employee_mapper,
               polymorphic_identity='manager')
        mapper(self.classes.Engineer, inherits=employee_mapper,
               polymorphic_identity='engineer')
        self.sess = sessionmaker()()
    def test_merge_polymorphic_on(self):
        """merge() should succeed with a polymorphic object even when
        polymorphic_on is not a Column
        """
        self._setup_polymorphic_on_mappers()
        m = self.classes.Manager(employee_id=55, type='M',
                                 data='original data')
        self.sess.add(m)
        self.sess.commit()
        self.sess.expunge_all()
        m = self.classes.Manager(employee_id=55, data='updated data')
        merged = self.sess.merge(m)
        # we've already passed ticket #2449 problem since
        # merge() returned, but for good measure:
        assert m is not merged
        eq_(m,merged)
| wfxiang08/sqlalchemy | test/orm/test_merge.py | Python | mit | 45,731 |
# General options
INTERVAL = 10  # default interval in seconds
STATSD_HOST = 'localhost'
STATSD_PREFIX = ''

# Sender class names that will be activated; each has a matching
# upper-cased options dict below.
INSTALLED_SENDERS = (
    'Diskf',
    'Rabbitmq',
)

# Senders options
DISKF = {
    'interval': 60,
    'filesystem': ('/dev/root',),
}

RABBITMQ = {
    'interval': 10,
    # NOTE(review): 'locashost' looks like a typo for 'localhost', but this
    # is a vhost name -- confirm against the broker config before changing.
    'vhost': 'locashost',
    # Items to show by the 'rabbitmq list_queues' command, separated by space
    # The name of the queue is included by default
    'items': "messages",
    # Names of the queues whose data will be sent to Statsd
    'names': ['celery',],
}
} | moiseshiraldo/pystats-sender | stats_sender/settings.py | Python | gpl-3.0 | 551 |
import functools
__all__ = [
'replace_args',
]
def replace_args(attribute=None):
    """
    Decorator factory that applies prefix-replacement rules to the argument
    list returned by the wrapped method.

    :param attribute: Name of the instance attribute holding the replacement
        rules as an ``{old_prefix: new_prefix}`` mapping.
    :type attribute: ``str``
    :return: Decorator that rewrites each returned argument matching a rule.
    :rtype: ``callable``
    """
    def _replace_args(f):
        @functools.wraps(f)
        def _wrapper(self, *args):
            def _replace(arg):
                # First rule whose key prefixes the argument wins; note that
                # str.replace substitutes every occurrence of the key.
                for rule in rules:
                    if arg.startswith(rule):
                        return arg.replace(rule, rules[rule])
                return arg
            rules = getattr(self, attribute)
            if not rules:
                return f(self, *args)
            # Materialize as a list: on Python 3 map() is a lazy iterator,
            # which was inconsistent with the list returned above.
            return [_replace(arg) for arg in f(self, *args)]
        return _wrapper
    return _replace_args
| armab/st2contrib | packs/ansible/actions/lib/shell.py | Python | apache-2.0 | 816 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright 2012 zod.yslin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: zod.yslin
# Email:
# File Name: Listing1.py
# Description:
# pyflakes example.
# Edit History:
# 2012-01-27 File created.
#===============================================================================
"""
"""
import string
module_variable = 0
def functionName(self, int):
    # NOTE: deliberately bad code -- this file is a pyflakes demo.
    # 'int' shadows the builtin; 'local' is assigned but never used.
    local = 5 + 5
    # This binds a new local; it does not touch the module-level name.
    module_variable = 5*5
    return module_variable
class my_class(object):
    # Deliberately buggy class used as a pyflakes demo: shadowed builtins,
    # unreachable code and a redefined method are all intentional.

    def __init__(self, arg1, string):
        # 'string' shadows the imported module; 'arg1' is unused.
        self.value = True
        return

    def method1(self, str):
        # 'str' shadows the builtin type.
        self.s = str
        return self.value

    def method2(self):
        return
        # Unreachable: follows the return (Python 2 print statement).
        print 'How did we get here?'

    def method1(self):
        # Redefinition: silently replaces the first method1 above.
        return self.value + 1
    method2 = method1
class my_subclass(my_class):
    # Part of the pyflakes demo: same intentionally odd signature as the base.
    def __init__(self, arg1, string):
        self.value = arg1
        return
| yslin/tools-zodlin | ubuntu/vim/script/debug/python/Listing1.py | Python | apache-2.0 | 1,526 |
from __future__ import division
from itertools import chain
from math import log10, floor, ceil
def largest_palindrome(max_factor, min_factor):
    """Return the largest in-range palindrome and its factor pairs."""
    return get_extreme_palindrome_with_factors(max_factor, min_factor,
                                               extreme="largest")
def smallest_palindrome(max_factor, min_factor):
    """Return the smallest in-range palindrome and its factor pairs."""
    return get_extreme_palindrome_with_factors(max_factor, min_factor,
                                               extreme="smallest")
def get_extreme_palindrome_with_factors(max_factor, min_factor, extreme):
    """Return (palindrome, factor_pairs) for the largest or smallest
    palindrome that is a product of two factors in [min_factor, max_factor].

    extreme is "largest" or "smallest"; returns (None, []) when no
    palindrome in range has a valid factorisation.
    """
    # Palindromes are generated nearest-extreme first, so the first one
    # with a valid factor pair is the answer.
    palindromes_found = palindromes(max_factor, min_factor,
                                    reverse=(extreme == "largest"))
    factor_pairs = None
    for palin in palindromes_found:
        factor_pairs = ((fact, palin // fact)
                        for fact in range(min_factor, max_factor + 1)
                        if palin % fact == 0)
        # Keep only pairs whose co-factor is also within range.
        factor_pairs = list(pair for pair in factor_pairs
                            if min_factor <= pair[1] <= max_factor)
        if len(factor_pairs) > 0:
            break
    if factor_pairs is None or len(factor_pairs) == 0:
        return (None, [])
    return (palin, factor_pairs)
def reverse_num(n):
    """Return the decimal digits of non-negative integer n, reversed."""
    result = 0
    while n > 0:
        n, digit = divmod(n, 10)
        result = result * 10 + digit
    return result
def num_digits(n):
    """Return the number of decimal digits of a positive integer.

    Uses len(str(n)) rather than floor(log10(n)) + 1: the float-based
    formula can be off by one for large integers because log10 rounds.
    """
    return len(str(n))
def palindromes(max_factor, min_factor, reverse=False):
    """Generates all palindromes between `min_factor`**2 and max_factor`**2

    If `reverse` is True, will produce the palindromes in decreasing order,
    from `max_factor`**2 down to `min_factor`**2. This is needed for
    `largest_palindrome`, since it won't have to iterate through a
    most of the palindromes just to find the one it needs.
    """
    if max_factor < min_factor:
        raise ValueError("invalid input: min is {min_factor} "
                         "and max is {max_factor}"
                         .format(min_factor=min_factor,
                                 max_factor=max_factor))

    minimum = min_factor ** 2
    maximum = max_factor ** 2

    def gen_palins_of_length(nd, reverse=reverse):
        """Generates all palindromes with `nd` number of digits that are
        within the desired range.

        Again, if `reverse` is True, the palindromes are generated in
        reverse order.
        """
        even_nd = (nd % 2 == 0)
        # Each palindrome is a "left half" mirrored; bound the half so the
        # resulting palindrome can fall inside [minimum, maximum].
        min_left_half = max(10 ** (int(ceil(nd / 2)) - 1),
                            minimum // (10 ** (nd // 2)))
        max_left_half = min((10 ** int(ceil(nd / 2))) - 1,
                            maximum // (10 ** (nd // 2)))
        current_left_half = min_left_half if not reverse else max_left_half

        def make_palindrome(left_half, even_nd=False):
            # Odd lengths share the middle digit, so drop the last digit of
            # the left half before mirroring it.
            right_half = (reverse_num(left_half)
                          if even_nd
                          else reverse_num(left_half // 10))
            return (left_half * (10 ** (nd // 2))) + right_half

        if not reverse:
            while current_left_half <= max_left_half:
                palin = make_palindrome(current_left_half, even_nd)
                if minimum <= palin <= maximum:
                    yield palin
                elif palin > maximum:
                    # since palindromes are generated in increasing order,
                    # we break out of the loop once we've exceeded the
                    # maximum value
                    break
                current_left_half += 1
        else:
            while current_left_half >= min_left_half:
                palin = make_palindrome(current_left_half, even_nd)
                if minimum <= palin <= maximum:
                    yield palin
                elif palin < minimum:
                    # since palindromes are generated in decreasing order,
                    # we break out of the loop once we've gone below the
                    # minimum value
                    break
                current_left_half -= 1

    min_nd, max_nd = num_digits(minimum), num_digits(maximum)
    lengths = (range(min_nd, max_nd + 1)
               if not reverse
               else range(max_nd, min_nd - 1, -1))
    return chain(*map(gen_palins_of_length, lengths))
| N-Parsons/exercism-python | exercises/palindrome-products/example.py | Python | mit | 4,232 |
class Update:
    """Value object describing a pending package update."""

    def __init__(self, name, candidate_version, baseversion_hash):
        self.name = name  # package name
        self.candidateVersion = candidate_version  # version available to install
        self.baseVersionHash = baseversion_hash  # hash of the installed base version
| upd89/agent | upd89/classes/update.py | Python | mit | 205 |
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from plone.theme.interfaces import IDefaultPloneLayer
from zope.viewlet.interfaces import IViewletManager
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPlacelessPortletManager
from plone.app.portlets.interfaces import IColumn
class ISpacesRecent(IViewletManager):
"""A viewlet manager to show the Recent Spaces Viewlet
"""
class IAllSpacesMenuManager(IViewletManager):
"""A viewlet manager to show the All Spaces Menu
"""
class ISearchBoxManager(IViewletManager):
"""A viewlet manager to show the Search Box
"""
class IMyMenu(IViewletManager):
"""A viewlet manager to show the My Menu
"""
class IMyAvatar(IViewletManager):
"""A viewlet manager to show the currently logged in user's Avatar and whatever other details we want to
"""
class ISpaceIcon(IViewletManager):
"""A viewlet manager to show the icon of the Space currently being viewed.
"""
class ISpaceName(IViewletManager):
"""A viewlet manager to show the name and other basic stats of the Space currently being viewed.
"""
class ISpaceStats(IViewletManager):
"""A viewlet manager to show the basic stats of the Space currently being viewed.
"""
class IBreadcrumbsManager(IViewletManager):
"""A viewlet manager to show the breadcrumbs bar
"""
class IItemtitleManager(IViewletManager):
"""A viewlet manager to show the current Item's title
"""
class ITypetitleManager(IViewletManager):
"""A viewlet manager to show the current Item's portal type
"""
class IAddNewMenuManager(IViewletManager):
"""A viewlet manager to show the add new menu
"""
class IItemDateManager(IViewletManager):
"""A viewlet manager to show Item date in a stylized manner
"""
class IItemDescriptionManager(IViewletManager):
"""A viewlet manager to show the current item's description
"""
class IOwnerInfoManager(IViewletManager):
"""A viewlet manager to show the current Item's Owner's info
"""
class IOwnerAvatarManager(IViewletManager):
"""A viewlet manager to show the current Item's Owner's Avatar
"""
class IUbifyColophon(IViewletManager):
"""A viewlet manager to show the Ubify colophon
"""
class ICynapseColophon(IViewletManager):
"""A viewlet manager to show the Cynapse colophon
"""
class IUContentViews(IViewletManager):
"""A viewlet manager to show the View tabs of the actions bar
"""
class IUContentActions(IViewletManager):
"""A viewlet manager to show the Actions bar
"""
class ISiteHome(IViewletManager):
"""A viewlet manager to show all items on site home page
"""
class ISpaceRecentUpdates(IViewletManager):
"""A viewlet manager to show all recently updated items under a space
"""
class ISpaceMyItems(IViewletManager):
"""A viewlet manager to show all items created by current user
"""
class IFullViewMindMap(IViewletManager):
"""A viewlet manager to show all items at particular URL
"""
class ISiteTitle(IViewletManager):
"""A viewlet manager to show title of the site.
"""
class ISiteLogo(IViewletManager):
"""A viewlet manager to show logo of the site.
"""
class ISiteDescription(IViewletManager):
"""A viewlet manager to show site description
"""
class IItemMetaData(IViewletManager):
"""A viewlet manager to show item's meta data
"""
class IWorkFlowHistoryManager(IViewletManager):
    """A viewlet manager to show an item's workflow history
    """
class IDocumentActionsManager(IViewletManager):
"""A viewlet manager to show document actions
"""
class ICyninColophon(IViewletManager):
"""A viewlet manager to show colophon for anonymous users
"""
class ICyninDashboard(IViewletManager):
"""A viewlet manager to load viewlet containing portlet manager
"""
class ISpaceMembers(IViewletManager):
"""A viewlet manager to load viewlet containing portlet manager for space members
"""
class ISpaceMembersPage(IViewletManager):
"""A viewlet manager to load viewlet for space members
"""
class IApplicationsTabs(IViewletManager):
"""A viewlet manager to show application tabs for all our applications
"""
class IMyAreaBlock(IViewletManager):
"""A viewlet manager to show a block display of a user's own area
"""
class IItemCounts(IViewletManager):
"""A viewlet manager to display an item's count related information below the description
"""
class IGotoTop(IViewletManager):
"""A viewlet manager to goto links
"""
class IUbifySEOProvider(IViewletManager):
"""A viewlet manager to provide an SEO area at bottom of page
"""
class ITagFilterPanel(IViewletManager):
"""A viewlet manager to provide filter panel
"""
class IApplicationsMenu(IViewletManager):
"""A viewlet manager to display a menu for the applications"""
class INavigationManager(IViewletManager):
"""A viewlet manager to display vertical navigation"""
class IAddDiscussionManager(IViewletManager):
"""A viewlet manager to show a add discussion block
"""
class ILanguageSelectionManager(IViewletManager):
"""A viewlet manager to allow user to choose UI language
"""
# Portlet Manager
class IDashboardColumn(IPortletManager):
"""Common base class for cynin dashboard.
"""
class IHomeContent(IPortletManager,IDashboardColumn):
    """A portlet manager of our own for the area above the content.
    """
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from functools import partial
from six.moves import range
import sys
from threading import Thread, Event
import time
from cassandra import ConsistencyLevel, OperationTimedOut
from cassandra.cluster import NoHostAvailable, Cluster
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.protocol import QueryMessage
from cassandra.connection import Connection
from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener
from cassandra.pool import HostConnectionPool
from tests import is_monkey_patched
from tests.integration import use_singledc, PROTOCOL_VERSION, get_node
try:
from cassandra.io.libevreactor import LibevConnection
except ImportError:
LibevConnection = None
def setup_module():
    # Bring up the shared single-datacenter test cluster once per module.
    use_singledc()
class ConnectionTimeoutTest(unittest.TestCase):

    def setUp(self):
        # Shrink the per-connection in-flight request limit so contention
        # is easy to provoke; restored in tearDown.
        self.defaultInFlight = Connection.max_in_flight
        Connection.max_in_flight = 2
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        self.session = self.cluster.connect()

    def tearDown(self):
        Connection.max_in_flight = self.defaultInFlight
        self.cluster.shutdown()

    def test_in_flight_timeout(self):
        """
        Test to ensure that connection id fetching will block when max_id is reached/
        In previous versions of the driver this test will cause a
        NoHostAvailable exception to be thrown, when the max_id is restricted

        @since 3.3
        @jira_ticket PYTHON-514
        @expected_result When many requests are run on a single node connection acquisition should block
        until connection is available or the request times out.

        @test_category connection timeout
        """
        futures = []
        query = '''SELECT * FROM system.local'''
        # Far more requests than max_in_flight (2): acquisition must block
        # rather than fail.
        for i in range(100):
            futures.append(self.session.execute_async(query))

        for future in futures:
            future.result()
class TestHostListener(HostStateListener):
    """Listener that records the most recent host reported down."""
    host_down = None

    def on_down(self, host):
        # Bug fix: the original assigned to a *local* variable named
        # 'host_down', so the event was never recorded on the instance and
        # assertions on 'listener.host_down' were vacuous.
        self.host_down = host
class HeartbeatTest(unittest.TestCase):
    """
    Test to validate failing a heartbeat check doesn't mark a host as down

    @since 3.3
    @jira_ticket PYTHON-286
    @expected_result host should not be marked down when heartbeat fails

    @test_category connection heartbeat
    """

    def setUp(self):
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1)
        self.session = self.cluster.connect(wait_for_all_pools=True)

    def tearDown(self):
        self.cluster.shutdown()

    def test_heart_beat_timeout(self):
        # Setup a host listener to ensure the nodes don't go down
        test_listener = TestHostListener()
        host = "127.0.0.1"
        node = get_node(1)
        initial_connections = self.fetch_connections(host, self.cluster)
        self.assertNotEqual(len(initial_connections), 0)
        self.cluster.register_listener(test_listener)
        # Pause the node
        try:
            node.pause()
            # Wait for connections associated with this host go away
            self.wait_for_no_connections(host, self.cluster)
            # Resume paused node
        finally:
            node.resume()
        # Run a query to ensure connections are re-established
        current_host = ""
        count = 0
        while current_host != host and count < 100:
            rs = self.session.execute_async("SELECT * FROM system.local", trace=False)
            rs.result()
            current_host = str(rs._current_host)
            count += 1
            time.sleep(.1)
        self.assertLess(count, 100, "Never connected to the first node")
        new_connections = self.wait_for_connections(host, self.cluster)
        self.assertIsNone(test_listener.host_down)
        # Make sure underlying new connections don't match previous ones
        for connection in initial_connections:
            self.assertFalse(connection in new_connections)

    def fetch_connections(self, host, cluster):
        # Given a cluster object and host grab all connection associated with that host
        connections = []
        holders = cluster.get_connection_holders()
        for conn in holders:
            if host == str(getattr(conn, 'host', '')):
                if isinstance(conn, HostConnectionPool):
                    if conn._connections is not None:
                        connections.append(conn._connections)
                else:
                    if conn._connection is not None:
                        connections.append(conn._connection)
        return connections

    def wait_for_connections(self, host, cluster):
        retry = 0
        while retry < 300:
            retry += 1
            connections = self.fetch_connections(host, cluster)
            # Bug fix: use '!=' -- the original 'len(...) is not 0' tests
            # object identity, which is unreliable for integers.
            if len(connections) != 0:
                return connections
            time.sleep(.1)
        self.fail("No new connections found")

    def wait_for_no_connections(self, host, cluster):
        retry = 0
        while retry < 100:
            retry += 1
            connections = self.fetch_connections(host, cluster)
            # Bug fix: '==' instead of the identity comparison 'is 0'.
            if len(connections) == 0:
                return
            time.sleep(.5)
        self.fail("Connections never cleared")
class ConnectionTests(object):
    # Mixin exercising a concrete Connection implementation; subclasses set
    # 'klass' to the connection class under test.

    klass = None

    def setUp(self):
        self.klass.initialize_reactor()

    def get_connection(self, timeout=5):
        """
        Helper method to solve automated testing issues within Jenkins.
        Officially patched under the 2.0 branch through
        17998ef72a2fe2e67d27dd602b6ced33a58ad8ef, but left as is for the
        1.0 branch due to possible regressions for fixing an
        automated testing edge-case.
        """
        conn = None
        last_error = None
        for i in range(5):
            try:
                conn = self.klass.factory(host='127.0.0.1', timeout=timeout, protocol_version=PROTOCOL_VERSION)
                break
            except (OperationTimedOut, NoHostAvailable) as e:
                # Bug fix: Python 3 unbinds the 'except ... as e' target at
                # block exit, so the old 'raise e' below raised NameError.
                last_error = e
                continue

        if conn:
            return conn
        else:
            raise last_error

    def test_single_connection(self):
        """
        Test a single connection with sequential requests.
        """
        conn = self.get_connection()
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
        event = Event()

        def cb(count, *args, **kwargs):
            count += 1
            if count >= 10:
                conn.close()
                event.set()
            else:
                conn.send_msg(
                    QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                    request_id=0,
                    cb=partial(cb, count))

        conn.send_msg(
            QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
            request_id=0,
            cb=partial(cb, 0))
        event.wait()

    def test_single_connection_pipelined_requests(self):
        """
        Test a single connection with pipelined requests.
        """
        conn = self.get_connection()
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
        responses = [False] * 100
        event = Event()

        def cb(response_list, request_num, *args, **kwargs):
            response_list[request_num] = True
            if all(response_list):
                conn.close()
                event.set()

        for i in range(100):
            conn.send_msg(
                QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                request_id=i,
                cb=partial(cb, responses, i))

        event.wait()

    def test_multiple_connections(self):
        """
        Test multiple connections with pipelined requests.
        """
        conns = [self.get_connection() for i in range(5)]
        events = [Event() for i in range(5)]
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(event, conn, count, *args, **kwargs):
            count += 1
            if count >= 10:
                conn.close()
                event.set()
            else:
                conn.send_msg(
                    QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                    request_id=count,
                    cb=partial(cb, event, conn, count))

        for event, conn in zip(events, conns):
            conn.send_msg(
                QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                request_id=0,
                cb=partial(cb, event, conn, 0))

        for event in events:
            event.wait()

    def test_multiple_threads_shared_connection(self):
        """
        Test sharing a single connections across multiple threads,
        which will result in pipelined requests.
        """
        num_requests_per_conn = 25
        num_threads = 5
        event = Event()

        conn = self.get_connection()
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(all_responses, thread_responses, request_num, *args, **kwargs):
            thread_responses[request_num] = True
            if all(map(all, all_responses)):
                conn.close()
                event.set()

        def send_msgs(all_responses, thread_responses):
            for i in range(num_requests_per_conn):
                qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
                with conn.lock:
                    request_id = conn.get_request_id()
                conn.send_msg(qmsg, request_id, cb=partial(cb, all_responses, thread_responses, i))

        all_responses = []
        threads = []
        for i in range(num_threads):
            thread_responses = [False] * num_requests_per_conn
            all_responses.append(thread_responses)
            t = Thread(target=send_msgs, args=(all_responses, thread_responses))
            threads.append(t)

        for t in threads:
            t.start()
        for t in threads:
            t.join()

        event.wait()

    def test_multiple_threads_multiple_connections(self):
        """
        Test several threads, each with their own Connection and pipelined
        requests.
        """
        num_requests_per_conn = 25
        num_conns = 5
        events = [Event() for i in range(5)]

        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(conn, event, thread_responses, request_num, *args, **kwargs):
            thread_responses[request_num] = True
            if all(thread_responses):
                conn.close()
                event.set()

        def send_msgs(conn, event):
            thread_responses = [False] * num_requests_per_conn
            for i in range(num_requests_per_conn):
                qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
                with conn.lock:
                    request_id = conn.get_request_id()
                conn.send_msg(qmsg, request_id, cb=partial(cb, conn, event, thread_responses, i))

            event.wait()

        threads = []
        for i in range(num_conns):
            conn = self.get_connection()
            t = Thread(target=send_msgs, args=(conn, events[i]))
            threads.append(t)

        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def test_connect_timeout(self):
        # Underlying socket implementations don't always throw a socket timeout even with min float
        # This can be timing sensitive, added retry to ensure failure occurs if it can
        max_retry_count = 10
        exception_thrown = False
        for i in range(max_retry_count):
            start = time.time()
            try:
                self.get_connection(timeout=sys.float_info.min)
            except Exception as e:
                end = time.time()
                self.assertAlmostEqual(start, end, 1)
                exception_thrown = True
                break
        self.assertTrue(exception_thrown)
class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase):
    # Runs the shared ConnectionTests suite against AsyncoreConnection.

    klass = AsyncoreConnection

    def setUp(self):
        if is_monkey_patched():
            raise unittest.SkipTest("Can't test asyncore with monkey patching")
        ConnectionTests.setUp(self)
class LibevConnectionTests(ConnectionTests, unittest.TestCase):
    # Runs the shared ConnectionTests suite against LibevConnection,
    # skipping when the libev extension is unavailable.

    klass = LibevConnection

    def setUp(self):
        if is_monkey_patched():
            raise unittest.SkipTest("Can't test libev with monkey patching")
        if LibevConnection is None:
            raise unittest.SkipTest(
                'libev does not appear to be installed properly')
        ConnectionTests.setUp(self)
| Richard-Mathie/cassandra_benchmark | vendor/github.com/datastax/python-driver/tests/integration/standard/test_connection.py | Python | apache-2.0 | 13,495 |
#!/usr/bin/env python
"""
Deployment file to facilitate releases of symmetry.
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Apr 29, 2012"
import glob
import os
from fabric.api import local, lcd
from symmetry import __version__ as ver
def makedoc():
    """Regenerate the Sphinx API docs, dropping test modules and folding
    each 'Subpackages' section inline, then build the HTML output."""
    with lcd("docs"):
        local("sphinx-apidoc -o . -f ../symmetry")
        local("rm symmetry*.tests.rst")
    for f in glob.glob("docs/*.rst"):
        # NOTE(review): assumes POSIX-style glob results ('docs/...');
        # confirm if this ever needs to run on Windows.
        if f.startswith('docs/symmetry') and f.endswith('rst'):
            newoutput = []
            suboutput = []
            subpackage = False
            with open(f, 'r') as fid:
                for line in fid:
                    clean = line.strip()
                    if clean == "Subpackages":
                        # From here on, buffer lines so the subpackage
                        # section can be re-emitted inline below.
                        subpackage = True
                    if not subpackage and not clean.endswith("tests"):
                        newoutput.append(line)
                    else:
                        if not clean.endswith("tests"):
                            suboutput.append(line)
                        if clean.startswith("symmetry") and not clean.endswith("tests"):
                            newoutput.extend(suboutput)
                            subpackage = False
                            suboutput = []
            with open(f, 'w') as fid:
                fid.write("".join(newoutput))
    local("make html")
def publish():
    """Build and upload a release via setup.py."""
    local("python setup.py release")
def test():
    """Run the test suite with nose."""
    local("nosetests")
def setver():
    """Rewrite the version= line in setup.py to match symmetry.__version__."""
    local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup".format(ver))
    local("mv newsetup setup.py")
def release():
    """Full release flow: stamp version, test, build docs, publish."""
    setver()
    test()
    makedoc()
    publish()
def opendoc():
    """Open the generated HTML documentation in the default browser."""
    import webbrowser
    index = os.path.join("docs", "_build", "html", "index.html")
    webbrowser.open("file://" + os.path.abspath(index))
| materialsvirtuallab/nano106 | fabfile.py | Python | bsd-3-clause | 2,036 |
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import datetime
import email.mime.text
import getpass
import os
import re
import smtplib
import subprocess
import sys
import tempfile
import urllib2
# Paths derived from this script's location inside native_client/build/.
BUILD_DIR = os.path.dirname(__file__)
NACL_DIR = os.path.dirname(BUILD_DIR)
TOOLCHAIN_REV_DIR = os.path.join(NACL_DIR, 'toolchain_revisions')
# Helper script used to read/write pinned toolchain revision numbers.
PKG_VER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
# Toolchain packages whose pinned revisions this script updates.
PKGS = ['pnacl_newlib', 'pnacl_translator']
REV_FILES = [os.path.join(TOOLCHAIN_REV_DIR, '%s.json' % package)
             for package in PKGS]
def ParseArgs(args):
    """Parse the revision updater's command-line arguments.

    args: argument list (e.g. sys.argv[1:]).
    Returns the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""Update pnacl_newlib.json PNaCl version.

LLVM and other projects are checked-in to the NaCl repository, but their
head isn't necessarily the one that we currently use in PNaCl. The
pnacl_newlib.json and pnacl_translator.json files point at subversion
revisions to use for tools such as LLVM. Our build process then
downloads pre-built tool tarballs from the toolchain build waterfall.

git repository before running this script:
        ______________________
       |                      |
       v                      |
...----A------B------C------D------ NaCl HEAD
       ^      ^      ^      ^
       |      |      |      |__ Latest pnacl_{newlib,translator}.json update.
       |      |      |
       |      |      |__ A newer LLVM change (LLVM repository HEAD).
       |      |
       |      |__ Oldest LLVM change since this PNaCl version.
       |
       |__ pnacl_{newlib,translator}.json points at an older LLVM change.

git repository after running this script:
                _______________
               |               |
               v               |
...----A------B------C------D------E------ NaCl HEAD

Note that there could be any number of non-PNaCl changes between each of
these changelists, and that the user can also decide to update the
pointer to B instead of C.

There is further complication when toolchain builds are merged.
""")
    parser.add_argument('--email', metavar='ADDRESS', type=str,
                        default=getpass.getuser()+'@chromium.org',
                        help="Email address to send errors to.")
    parser.add_argument('--svn-id', metavar='SVN_ID', type=int, default=0,
                        help="Update to a specific SVN ID instead of the most "
                        "recent SVN ID with a PNaCl change. This value must "
                        "be more recent than the one in the current "
                        "pnacl_newlib.json. This option is useful when multiple "
                        "changelists' toolchain builds were merged, or when "
                        "too many PNaCl changes would be pulled in at the "
                        "same time.")
    parser.add_argument('--dry-run', default=False, action='store_true',
                        help="Print the changelist that would be sent, but "
                        "don't actually send anything to review.")
    # TODO(jfb) The following options come from download_toolchain.py and
    #           should be shared in some way.
    parser.add_argument('--filter_out_predicates', default=[],
                        help="Toolchains to filter out.")
    # Bug fix: the 'args' parameter was previously ignored -- parse_args()
    # with no argument reads sys.argv[1:] directly.
    return parser.parse_args(args)
def ExecCommand(command):
    """Run *command*, returning its combined stdout/stderr output.

    On failure, report the command, exit code and output to stderr, then
    re-raise the CalledProcessError.
    """
    try:
        return subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        message = ('\nRunning `%s` returned %i, got:\n%s\n' %
                   (' '.join(err.cmd), err.returncode, err.output))
        sys.stderr.write(message)
        raise
def GetCurrentRevision():
    """Return the checked-in toolchain revision for each package in PKGS."""
    return [ExecCommand([sys.executable, PKG_VER,
                         'getrevision',
                         '--revision-package', package]).strip()
            for package in PKGS]
def SetCurrentRevision(revision_num):
    """Write revision_num into each package's revision file via PKG_VER."""
    for package in PKGS:
        ExecCommand([sys.executable, PKG_VER] +
                    # TODO(dschuff) pnacl_newlib shouldn't use cloud-bucket
                    # once we switch fully to toolchain_build.
                    (['--cloud-bucket', 'nativeclient-archive2/pnacl_buildsh'] if
                     package == 'pnacl_newlib' else []) +
                    ['setrevision',
                     '--revision-package', package,
                     '--revision', str(revision_num)])
def GitCurrentBranch():
    """Return the name of the currently checked-out git branch."""
    return ExecCommand(['git', 'symbolic-ref', 'HEAD', '--short']).strip()
def GitStatus():
    """List of statuses, one per path, of paths in the current git branch.
    Ignores untracked paths."""
    out = ExecCommand(['git', 'status', '--porcelain']).strip().split('\n')
    # '?? path' entries are untracked files; drop them.
    return [f.strip() for f in out if not re.match('^\?\? (.*)$', f.strip())]
def SyncSources():
    """Assumes a git-svn checkout of NaCl. See:
    www.chromium.org/nativeclient/how-tos/how-to-use-git-svn-with-native-client
    """
    # gclient pulls in all DEPS-specified dependencies.
    ExecCommand(['gclient', 'sync'])
def GitCommitInfo(info='', obj=None, num=None, extra=None):
    """Commit information, where info is one of the shorthands in git_formats.
    obj can be a path or a hash.
    num is the number of results to return.
    extra is a list of optional extra arguments."""
    # Shorthands for git's pretty formats.
    # See PRETTY FORMATS format:<string> in `git help log`.
    git_formats = {
        '': '',
        'hash': '%H',
        'date': '%ci',
        'author': '%aN',
        'subject': '%s',
        'body': '%b',
    }
    # Bug fix: 'extra' previously defaulted to a mutable list ([]), which is
    # shared across calls; default to None and substitute here instead.
    cmd = ['git', 'log', '--format=format:%s' % git_formats[info]] + (extra or [])
    if num: cmd += ['-n'+str(num)]
    if obj: cmd += [obj]
    return ExecCommand(cmd).strip()
def GitCommitsSince(date):
    """List of commit hashes since a particular date,
    in reverse chronological order."""
    # --since accepts any date format `git log` understands.
    return GitCommitInfo(info='hash',
                         extra=['--since="%s"' % date]).split('\n')
def GitFilesChanged(commit_hash):
    """List of files changed in a commit."""
    # --name-only with the requested format yields just the file paths.
    return GitCommitInfo(obj=commit_hash, num=1,
                         extra=['--name-only']).split('\n')
def GitChangesPath(commit_hash, path):
    """Returns True if the commit changes a file under the given path."""
    # NOTE(review): 'path' is used as an unescaped regex prefix.
    return any(re.search('^' + path, changed.strip())
               for changed in GitFilesChanged(commit_hash))
def GitBranchExists(name):
    """Return True if a local branch with the given name exists."""
    return len(ExecCommand(['git', 'branch', '--list', name]).strip()) != 0
def GitCheckout(branch, force=False):
    """Checkout an existing branch.
    force throws away local changes."""
    ExecCommand(['git', 'checkout'] +
                (['--force'] if force else []) +
                [branch])
def GitCheckoutNewBranch(branch):
    """Create and checkout a new git branch."""
    ExecCommand(['git', 'checkout', '-b', branch])
def GitDeleteBranch(branch, force=False):
    """Force-delete a branch."""
    # -D deletes even if unmerged; -d refuses in that case.
    ExecCommand(['git', 'branch', '-D' if force else '-d', branch])
def GitAdd(file):
    """Stage a single file for the next commit."""
    ExecCommand(['git', 'add', file])
def GitCommit(message):
  """Commit staged changes, passing the message through a temporary file."""
  with tempfile.NamedTemporaryFile() as tmp:
    tmp.write(message)
    tmp.flush()
    ExecCommand(['git', 'commit', '--file=' + tmp.name])
def UploadChanges():
  """Upload changes, don't prompt."""
  # TODO(jfb) Using the commit queue and avoiding git try + manual commit
  # would be much nicer. See '--use-commit-queue'
  cmd = ['git', 'cl', 'upload', '--send-mail', '-f']
  return ExecCommand(cmd)
def GitTry():
  """Kick off a try job for the current change and return its output."""
  return ExecCommand(['git', 'try'])
def FindCommitWithGitSvnId(git_svn_id):
  """Map an SVN revision number to its git commit hash."""
  # git-svn partially rebuilds its revision map for every commit and then
  # answers with a 'Partial-rebuilding' message; asking again after the
  # rebuild returns the actual hash.
  out = ExecCommand(['git', 'svn', 'find-rev', 'r' + git_svn_id]).strip()
  while re.match('^Partial-rebuilding ', out):
    out = ExecCommand(['git', 'svn', 'find-rev', 'r' + git_svn_id]).strip()
  return out
def CommitMessageToCleanDict(commit_message):
  """Extract and clean commit message fields that follow the NaCl commit
  message convention. Don't repeat them as-is, to avoid confusing our
  infrastructure."""
  # (field name, extraction regex, value to use when the field is absent)
  fields = [
      ['git svn id', ('\s*git-svn-id: '
                      'svn://[^@]+@([0-9]+) [a-f0-9\-]+'), '<none>'],
      ['reviewers tbr', '\s*TBR=([^\n]+)', ''],
      ['reviewers', '\s*R=([^\n]+)', ''],
      ['review url', '\s*Review URL: *([^\n]+)', '<none>'],
      ['bug', '\s*BUG=([^\n]+)', '<none>'],
      ['test', '\s*TEST=([^\n]+)', '<none>'],
  ]
  res = {}
  remainder = commit_message
  for key, regex, none in fields:
    match = re.search(regex, remainder)
    if match is None:
      res[key] = none
    else:
      # Strip the matched field out so it doesn't end up in the body.
      remainder = remainder.replace(match.group(0), '')
      res[key] = match.group(1).strip()
  res['body'] = remainder.strip()
  return res
def SendEmail(user_email, out):
  """Mail the report `out` to user_email via local SMTP, if one was given."""
  if not user_email:
    sys.stderr.write('\nNo email address specified.')
    return
  sys.stderr.write('\nSending email to %s.\n' % user_email)
  msg = email.mime.text.MIMEText(out)
  msg['Subject'] = '[PNaCl revision updater] failure!'
  msg['From'] = 'tool_revisions-bot@chromium.org'
  msg['To'] = user_email
  smtp = smtplib.SMTP('localhost')
  smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
  smtp.quit()
def DryRun(out):
  """Report an action that would have been taken, without taking it."""
  sys.stdout.write('DRY RUN: %s\n' % out)
def Done(out):
  """Print the final report and terminate successfully."""
  sys.stdout.write(out)
  sys.exit(0)
class CLInfo:
  """Changelist information: sorted dictionary of NaCl-standard fields."""
  def __init__(self, desc):
    # Human-readable description of what this CLInfo represents (debug aid;
    # not used in the formatted output).
    self._desc = desc
    # Ordered so that iteration follows the NaCl commit-message convention.
    # Values stay None until assigned; __setitem__ stringifies everything.
    self._vals = collections.OrderedDict([
      ('git svn id', None),
      ('hash', None),
      ('author', None),
      ('date', None),
      ('subject', None),
      ('commits since', None),
      ('bug', None),
      ('test', None),
      ('review url', None),
      ('reviewers tbr', None),
      ('reviewers', None),
      ('body', None),
      ])
  def __getitem__(self, key):
    return self._vals[key]
  def __setitem__(self, key, val):
    # Guard against typos: only the pre-declared field names may be set.
    assert key in self._vals.keys()
    # All values are stored as strings, whatever the caller passed in.
    self._vals[key] = str(val)
  def __str__(self):
    """Changelist to string.
    A short description of the change, e.g.:
      r12345: (tom@example.com) Subject of the change.
    If the change is itself pulling in other changes from
    sub-repositories then take its relevant description and append it to
    the string. These sub-directory updates are also script-generated
    and therefore have a predictable format. e.g.:
      r12345: (tom@example.com) Subject of the change.
        | dead123: (dick@example.com) Other change in another repository.
        | beef456: (harry@example.com) Yet another cross-repository change.
    """
    desc = ('  r' + self._vals['git svn id'] + ': (' +
            self._vals['author'] + ') ' +
            self._vals['subject'])
    # A change touching pnacl/COMPONENT_REVISIONS is itself a script-made
    # revision update; append its sub-repository lines (matched by the
    # 'hash: (email) ...' pattern) from the commit body.
    if GitChangesPath(self._vals['hash'], 'pnacl/COMPONENT_REVISIONS'):
      git_hash_abbrev = '[0-9a-fA-F]{7}'
      email = '[^@)]+@[^)]+\.[^)]+'
      desc = '\n'.join([desc] + [
          '    | ' + line for line in self._vals['body'].split('\n') if
          re.match('^ *%s: \(%s\) .*$' % (git_hash_abbrev, email), line)])
    return desc
def FmtOut(tr_points_at, pnacl_changes, err=[], msg=[]):
  """Format the revision-update report / commit message.

  tr_points_at: CLInfo describing the currently pointed-at PNaCl revision.
  pnacl_changes: list of CLInfo for the changes being pulled in.
  err: list of error strings; when non-empty the output is an error report.
  msg: list of informational strings to prepend.
  """
  # NOTE(review): err/msg are mutable list defaults; they are only read
  # here, but None-defaults would be safer if this ever mutates them.
  assert isinstance(err, list)
  assert isinstance(msg, list)
  old_svn_id = tr_points_at['git svn id']
  new_svn_id = pnacl_changes[-1]['git svn id'] if pnacl_changes else '?'
  changes = '\n'.join([str(cl) for cl in pnacl_changes])
  # Deduplicate BUG= lines and drop empties (changes without a bug).
  bugs = '\n'.join(list(set(
      ['BUG= ' + cl['bug'].strip() if cl['bug'] else '' for
       cl in pnacl_changes]) - set([''])))
  # Union of authors, TBR reviewers and reviewers across all changes,
  # deduplicated and with empty entries removed.
  reviewers = ', '.join(list(set(
      [r.strip() for r in
       (','.join([
           cl['author'] + ',' + cl['reviewers tbr'] + ',' + cl['reviewers']
           for cl in pnacl_changes])).split(',')]) - set([''])))
  return (('*** ERROR ***\n' if err else '') +
          '\n\n'.join(err) +
          '\n\n'.join(msg) +
          ('\n\n' if err or msg else '') +
          ('Update revision for PNaCl r%s->r%s\n\n'
           'Pull the following PNaCl changes into NaCl:\n%s\n\n'
           '%s\n'
           'R= %s\n'
           'TEST=git try\n'
           'NOTRY=true\n'
           '(Please LGTM this change and tick the "commit" box)\n' %
           (old_svn_id, new_svn_id, changes, bugs, reviewers)))
def Main():
  """Entry point: sync sources, collect the PNaCl changes that landed after
  the current revision pointer, then create, commit, upload and try a
  revision-update CL on a fresh branch (unless --dry-run)."""
  args = ParseArgs(sys.argv[1:])
  tr_points_at = CLInfo('revision update points at PNaCl version')
  pnacl_changes = []
  msg = []
  # Must start from a clean master checkout; the update is made on a new
  # branch created below.
  branch = GitCurrentBranch()
  assert branch == 'master', ('Must be on branch master, currently on %s' %
                              branch)
  try:
    status = GitStatus()
    assert len(status) == 0, ("Repository isn't clean:\n  %s" %
                              '\n  '.join(status))
    SyncSources()
    # The current revision file points at a specific PNaCl LLVM
    # version. LLVM is checked-in to the NaCl repository, but its head
    # isn't necessarily the one that we currently use in PNaCl.
    (pnacl_revision, translator_revision) = GetCurrentRevision()
    tr_points_at['git svn id'] = pnacl_revision
    tr_points_at['hash'] = FindCommitWithGitSvnId(tr_points_at['git svn id'])
    tr_points_at['date'] = GitCommitInfo(
        info='date', obj=tr_points_at['hash'], num=1)
    tr_points_at['subject'] = GitCommitInfo(
        info='subject', obj=tr_points_at['hash'], num=1)
    recent_commits = GitCommitsSince(tr_points_at['date'])
    tr_points_at['commits since'] = len(recent_commits)
    assert len(recent_commits) > 1
    if args.svn_id and args.svn_id <= int(tr_points_at['git svn id']):
      Done(FmtOut(tr_points_at, pnacl_changes,
                  err=["Can't update to SVN ID r%s, the current "
                       "PNaCl revision's SVN ID (r%s) is more recent." %
                       (args.svn_id, tr_points_at['git svn id'])]))
    # Find the commits changing PNaCl files that follow the previous
    # PNaCl revision pointer.
    pnacl_pathes = ['pnacl/', 'toolchain_build/']
    pnacl_hashes = list(set(reduce(
        lambda acc, lst: acc + lst,
        [[cl for cl in recent_commits[:-1] if
          GitChangesPath(cl, path)] for
         path in pnacl_pathes])))
    for hash in pnacl_hashes:
      cl = CLInfo('PNaCl change ' + hash)
      cl['hash'] = hash
      for i in ['author', 'date', 'subject']:
        cl[i] = GitCommitInfo(info=i, obj=hash, num=1)
      for k,v in CommitMessageToCleanDict(
          GitCommitInfo(info='body', obj=hash, num=1)).iteritems():
        cl[k] = v
      pnacl_changes.append(cl)
    # The PNaCl hashes weren't ordered chronologically, make sure the
    # changes are.
    pnacl_changes.sort(key=lambda x: int(x['git svn id']))
    if args.svn_id:
      pnacl_changes = [cl for cl in pnacl_changes if
                       int(cl['git svn id']) <= args.svn_id]
    if len(pnacl_changes) == 0:
      Done(FmtOut(tr_points_at, pnacl_changes,
                  msg=['No PNaCl change since r%s.' %
                       tr_points_at['git svn id']]))
    new_pnacl_revision = pnacl_changes[-1]['git svn id']
    new_branch_name = ('pnacl-revision-update-to-%s' %
                       new_pnacl_revision)
    if GitBranchExists(new_branch_name):
      # TODO(jfb) Figure out if git-try succeeded, checkout the branch
      # and dcommit.
      raise Exception("Branch %s already exists, the change hasn't "
                      "landed yet.\nPlease check trybots and dcommit it "
                      "manually." % new_branch_name)
    if args.dry_run:
      DryRun("Would check out branch: " + new_branch_name)
    else:
      GitCheckoutNewBranch(new_branch_name)
    if args.dry_run:
      DryRun("Would update PNaCl revision to: %s" % new_pnacl_revision)
    else:
      SetCurrentRevision(new_pnacl_revision)
      for f in REV_FILES:
        GitAdd(f)
      GitCommit(FmtOut(tr_points_at, pnacl_changes))
      upload_res = UploadChanges()
      msg += ['Upload result:\n%s' % upload_res]
      try_res = GitTry()
      msg += ['Try result:\n%s' % try_res]
    GitCheckout('master', force=False)
    Done(FmtOut(tr_points_at, pnacl_changes, msg=msg))
  except SystemExit as e:
    # Normal exit.
    raise
  # NOTE(review): Exception is redundant here, it derives from BaseException.
  except (BaseException, Exception) as e:
    # Leave the branch around, if any was created: it'll prevent next
    # runs of the cronjob from succeeding until the failure is fixed.
    out = FmtOut(tr_points_at, pnacl_changes, msg=msg,
                 err=['Failed at %s: %s' % (datetime.datetime.now(), e)])
    sys.stderr.write(out)
    if not args.dry_run:
      SendEmail(args.email, out)
      GitCheckout('master', force=True)
    raise
if __name__ == '__main__':
  Main()
| wilsonianb/nacl_contracts | build/update_pnacl_tool_revisions.py | Python | bsd-3-clause | 16,688 |
from unittest import TestCase
from webtest import TestApp
from nose.tools import * # noqa
from ..main import app
class TestAUser(TestCase):
    def setUp(self):
        # Fresh WSGI test client for every test.
        self.app = TestApp(app)

    def tearDown(self):
        # Nothing to clean up between tests.
        pass

    def test_can_see_homepage(self):
        # Goes to homepage
        response = self.app.get('')
        assert_equal(response.status_code, 200)
        assert_in("All I want to do is", response)

    def test_can_see_a_page(self):
        # Goes to homepage
        response = self.app.get('')
        # Sees titles for a page
        assert_in('install Python', response)
        # Clicks on a title
        response = response.click('install Python 2 and/or 3')
        assert_equal(response.status_code, 200)
        # Is at the page: can see the title ...
        assert_in("Install Python", response)
        # ... and the OS's ...
        assert_in("macosx", response)
        # ... and the content.
        assert_in('brew install python3', response)

    def test_can_see_deps(self):
        # Goes to homepage
        response = self.app.get('')
        # Clicks on a page
        response = response.click('install Python 2 and/or 3')
        # The page has a dependency whose title is listed
        assert_in("install-homebrew", response)
        # Clicks on the dependency link (full instructions)
        response = response.click('full instructions', index=0)
        # Is at the dependency's page
        assert_in('ruby', response)
| vfulco/killtheyak.github.io | killtheyak/test/webtest_tests.py | Python | mit | 1,409 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-16 22:32
from __future__ import unicode_literals
import django.db.models.deletion
import jsonfield.fields
import mptt.fields
from django.db import migrations
from django.db import models
import kolibri.core.content.models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.7 (header above). Creates the initial
    # ChannelMetadataCache table for kolibri.core.content. Do not edit the
    # operations of a shipped migration; add a new migration instead.
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="ChannelMetadataCache",
            fields=[
                (
                    "id",
                    # Channel id, stored as a custom UUID primary key.
                    kolibri.core.content.models.UUIDField(
                        primary_key=True, serialize=False
                    ),
                ),
                ("name", models.CharField(max_length=200)),
                ("description", models.CharField(blank=True, max_length=400)),
                ("author", models.CharField(blank=True, max_length=400)),
                ("version", models.IntegerField(default=0)),
                ("thumbnail", models.TextField(blank=True)),
                # UUID of the channel's root content node.
                ("root_pk", kolibri.core.content.models.UUIDField()),
            ],
            options={"abstract": False},
        )
    ]
| lyw07/kolibri | kolibri/core/content/migrations/0001_initial.py | Python | mit | 1,165 |
"""
This page is in the table of contents.
Mill is a script to mill the outlines.
==Operation==
The default 'Activate Mill' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Add Loops===
====Add Inner Loops====
Default is on.
When selected, the inner milling loops will be added.
====Add Outer Loops====
Default is on.
When selected, the outer milling loops will be added.
===Cross Hatch===
Default is on.
When selected, there will be alternating horizontal and vertical milling paths, if it is off there will only be horizontal milling paths.
===Loop Outset===
====Loop Inner Outset over Perimeter Width====
Default is 0.5.
Defines the ratio of the amount the inner milling loop will be outset over the perimeter width.
====Loop Outer Outset over Perimeter Width====
Default is one.
Defines the ratio of the amount the outer milling loop will be outset over the perimeter width. The 'Loop Outer Outset over Perimeter Width' ratio should be greater than the 'Loop Inner Outset over Perimeter Width' ratio.
===Mill Width over Perimeter Width===
Default is one.
Defines the ratio of the mill line width over the perimeter width. If the ratio is one, all the material will be milled. The greater the 'Mill Width over Perimeter Width' the farther apart the mill lines will be and so less of the material will be directly milled, the remaining material might still be removed in chips if the ratio is not much greater than one.
==Examples==
The following examples mill the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and mill.py.
> python mill.py
This brings up the mill dialog.
> python mill.py Screw Holder Bottom.stl
The mill tool is parsing the file:
Screw Holder Bottom.stl
..
The mill tool has created the file:
Screw Holder Bottom_mill.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, gcodeText = '', repository=None):
	'Mill the file or gcodeText.'
	text = archive.getTextIfEmpty(fileName, gcodeText)
	return getCraftedTextFromText(text, repository)
def getCraftedTextFromText(gcodeText, repository=None):
	'Mill a gcode linear move gcodeText.'
	# Skip if the text is empty or mill has already been applied.
	if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'mill'):
		return gcodeText
	# `is None` instead of `== None`: identity test for the None singleton.
	if repository is None:
		repository = settings.getReadRepository( MillRepository() )
	if not repository.activateMill.value:
		return gcodeText
	return MillSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
	'Return a fresh MillRepository holding the default mill settings.'
	return MillRepository()
def getPointsFromSegmentTable(segmentTable):
	'Get the points from the segment table, in ascending key order.'
	# sorted() instead of the Python 2-only keys()/sort() pair, which
	# breaks on Python 3 where dict.keys() is a view without .sort().
	points = []
	for segmentTableKey in sorted(segmentTable.keys()):
		for segment in segmentTable[segmentTableKey]:
			for endpoint in segment:
				points.append(endpoint.point)
	return points
def isPointOfTableInLoop( loop, pointTable ):
	'Determine if a point in the point table is in the loop.'
	# any() with a generator short-circuits on the first point found,
	# replacing the manual loop-and-return.
	return any(point in pointTable for point in loop)
def writeOutput(fileName, shouldAnalyze=True):
	'Mill a gcode linear move file.'
	# Run the standard craft chain with the 'mill' noun.
	skeinforge_craft.writeChainTextWithNounMessage(fileName, 'mill', shouldAnalyze)
class Average:
	'A class to hold values and get the average.'
	def __init__(self):
		self.reset()
	def addValue( self, value ):
		'Add a value to the total and the number of values.'
		self.total += value
		self.numberOfValues += 1
	def getAverage(self):
		'Get the average.'
		if self.numberOfValues == 0:
			print('should never happen, self.numberOfValues in Average is zero')
			return 0.0
		return self.total / float( self.numberOfValues )
	def reset(self):
		'Set the number of values and the total to the default.'
		self.total = 0.0
		self.numberOfValues = 0
class MillRepository:
	'A class to handle the mill settings.'
	def __init__(self):
		'Set the default settings, execute title & settings fileName.'
		skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.mill.html', self )
		self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Mill', self, '')
		self.activateMill = settings.BooleanSetting().getFromValue('Activate Mill', self, True )
		settings.LabelDisplay().getFromName('- Add Loops -', self )
		self.addInnerLoops = settings.BooleanSetting().getFromValue('Add Inner Loops', self, True )
		self.addOuterLoops = settings.BooleanSetting().getFromValue('Add Outer Loops', self, True )
		self.crossHatch = settings.BooleanSetting().getFromValue('Cross Hatch', self, True )
		settings.LabelDisplay().getFromName('- Loop Outset -', self )
		# FloatSpin arguments appear to be (min, label, parent, max, default)
		# — TODO confirm against settings.FloatSpin.
		self.loopInnerOutsetOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.3, 'Loop Inner Outset over Perimeter Width (ratio):', self, 0.7, 0.5 )
		self.loopOuterOutsetOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.8, 'Loop Outer Outset over Perimeter Width (ratio):', self, 1.4, 1.0 )
		self.millWidthOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.8, 'Mill Width over Perimeter Width (ratio):', self, 1.8, 1.0 )
		self.executeTitle = 'Mill'
	def execute(self):
		'Mill button has been clicked.'
		fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
		for fileName in fileNames:
			writeOutput(fileName)
class MillSkein:
	'A class to mill a skein of extrusions.'
	def __init__(self):
		self.aroundPixelTable = {}
		self.average = Average()
		self.boundaryLayers = []
		self.distanceFeedRate = gcodec.DistanceFeedRate()
		self.isExtruderActive = False
		self.layerIndex = 0
		self.lineIndex = 0
		self.lines = None
		self.oldLocation = None
		# Default perimeter width; overwritten by the value parsed from the
		# gcode initialization section in parseInitialization.
		self.perimeterWidth = 0.6
	def addGcodeFromLoops(self, loops, z):
		'Add gcode from loops.'
		if self.oldLocation == None:
			self.oldLocation = Vector3()
		self.oldLocation.z = z
		for loop in loops:
			self.distanceFeedRate.addGcodeFromThreadZ(loop, z)
			euclidean.addToThreadsFromLoop(self.halfPerimeterWidth, 'loop', loop, self.oldLocation, self)
	def addGcodeFromThreadZ( self, thread, z ):
		'Add a thread to the output.'
		self.distanceFeedRate.addGcodeFromThreadZ( thread, z )
	def addMillThreads(self):
		'Add the mill threads to the skein.'
		boundaryLayer = self.boundaryLayers[self.layerIndex]
		endpoints = euclidean.getEndpointsFromSegmentTable( boundaryLayer.segmentTable )
		if len(endpoints) < 1:
			return
		paths = euclidean.getPathsFromEndpoints(endpoints, 5.0 * self.millWidth, self.aroundPixelTable, self.aroundWidth)
		# Mill at the average z of the extruded moves seen on this layer.
		averageZ = self.average.getAverage()
		if self.repository.addInnerLoops.value:
			self.addGcodeFromLoops( boundaryLayer.innerLoops, averageZ )
		if self.repository.addOuterLoops.value:
			self.addGcodeFromLoops( boundaryLayer.outerLoops, averageZ )
		for path in paths:
			simplifiedPath = euclidean.getSimplifiedPath( path, self.millWidth )
			self.distanceFeedRate.addGcodeFromThreadZ( simplifiedPath, averageZ )
	def addSegmentTableLoops( self, boundaryLayerIndex ):
		'Add the segment tables and loops to the boundary.'
		boundaryLayer = self.boundaryLayers[boundaryLayerIndex]
		# Remove the inner (keep-out) regions from the outer regions so only
		# the material to be milled remains.
		euclidean.subtractXIntersectionsTable(boundaryLayer.outerHorizontalTable, boundaryLayer.innerHorizontalTable)
		euclidean.subtractXIntersectionsTable(boundaryLayer.outerVerticalTable, boundaryLayer.innerVerticalTable)
		boundaryLayer.horizontalSegmentTable = self.getHorizontalSegmentTableForXIntersectionsTable(
			boundaryLayer.outerHorizontalTable)
		boundaryLayer.verticalSegmentTable = self.getVerticalSegmentTableForXIntersectionsTable(
			boundaryLayer.outerVerticalTable)
		betweenPoints = getPointsFromSegmentTable(boundaryLayer.horizontalSegmentTable)
		betweenPoints += getPointsFromSegmentTable(boundaryLayer.verticalSegmentTable)
		innerPoints = euclidean.getPointsByHorizontalDictionary(self.millWidth, boundaryLayer.innerHorizontalTable)
		innerPoints += euclidean.getPointsByVerticalDictionary(self.millWidth, boundaryLayer.innerVerticalTable)
		innerPointTable = {}
		for innerPoint in innerPoints:
			innerPointTable[innerPoint] = None
		boundaryLayer.innerLoops = []
		boundaryLayer.outerLoops = []
		millRadius = 0.75 * self.millWidth
		loops = triangle_mesh.getDescendingAreaOrientedLoops(betweenPoints, betweenPoints, millRadius)
		# A loop containing any inner point is an inner loop; others are outer.
		for loop in loops:
			if isPointOfTableInLoop(loop, innerPointTable):
				boundaryLayer.innerLoops.append(loop)
			else:
				boundaryLayer.outerLoops.append(loop)
		# Cross hatching alternates the fill direction: vertical segments on
		# odd layers, horizontal otherwise.
		if self.repository.crossHatch.value and boundaryLayerIndex % 2 == 1:
			boundaryLayer.segmentTable = boundaryLayer.verticalSegmentTable
		else:
			boundaryLayer.segmentTable = boundaryLayer.horizontalSegmentTable
	def getCraftedGcode(self, gcodeText, repository):
		'Parse gcode text and store the mill gcode.'
		self.repository = repository
		self.lines = archive.getTextLines(gcodeText)
		self.parseInitialization()
		self.parseBoundaries()
		for line in self.lines[self.lineIndex :]:
			self.parseLine(line)
		return self.distanceFeedRate.output.getvalue()
	def getHorizontalSegmentTableForXIntersectionsTable( self, xIntersectionsTable ):
		'Get the horizontal segment table from the xIntersectionsTable.'
		horizontalSegmentTable = {}
		# Python 2 idiom: keys() returns a list which is sorted in place.
		xIntersectionsTableKeys = xIntersectionsTable.keys()
		xIntersectionsTableKeys.sort()
		for xIntersectionsTableKey in xIntersectionsTableKeys:
			xIntersections = xIntersectionsTable[ xIntersectionsTableKey ]
			segments = euclidean.getSegmentsFromXIntersections( xIntersections, xIntersectionsTableKey * self.millWidth )
			horizontalSegmentTable[ xIntersectionsTableKey ] = segments
		return horizontalSegmentTable
	def getHorizontalXIntersectionsTable(self, loops):
		'Get the horizontal x intersections table from the loops.'
		horizontalXIntersectionsTable = {}
		euclidean.addXIntersectionsFromLoopsForTable(loops, horizontalXIntersectionsTable, self.millWidth)
		return horizontalXIntersectionsTable
	def getVerticalSegmentTableForXIntersectionsTable( self, xIntersectionsTable ):
		'Get the vertical segment table from the xIntersectionsTable which has the x and y swapped.'
		verticalSegmentTable = {}
		xIntersectionsTableKeys = xIntersectionsTable.keys()
		xIntersectionsTableKeys.sort()
		for xIntersectionsTableKey in xIntersectionsTableKeys:
			xIntersections = xIntersectionsTable[ xIntersectionsTableKey ]
			segments = euclidean.getSegmentsFromXIntersections( xIntersections, xIntersectionsTableKey * self.millWidth )
			# Swap the real and imaginary parts back so the segment points
			# are in normal (x, y) orientation again.
			for segment in segments:
				for endpoint in segment:
					endpoint.point = complex( endpoint.point.imag, endpoint.point.real )
			verticalSegmentTable[ xIntersectionsTableKey ] = segments
		return verticalSegmentTable
	def parseBoundaries(self):
		'Parse the boundaries and add them to the boundary layers.'
		boundaryLoop = None
		boundaryLayer = None
		# First pass: collect the boundary loops of every layer.
		for line in self.lines[self.lineIndex :]:
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			if firstWord == '(</boundaryPerimeter>)':
				boundaryLoop = None
			elif firstWord == '(<boundaryPoint>':
				location = gcodec.getLocationFromSplitLine(None, splitLine)
				if boundaryLoop == None:
					boundaryLoop = []
					boundaryLayer.loops.append(boundaryLoop)
				boundaryLoop.append(location.dropAxis())
			elif firstWord == '(<layer>':
				boundaryLayer = euclidean.LoopLayer(float(splitLine[1]))
				self.boundaryLayers.append(boundaryLayer)
		if len(self.boundaryLayers) < 2:
			return
		# Outset each layer's loops by the inner and outer loop outsets and
		# build horizontal/vertical intersection tables for both.
		for boundaryLayer in self.boundaryLayers:
			boundaryLayer.innerOutsetLoops = intercircle.getInsetSeparateLoopsFromLoops(boundaryLayer.loops, -self.loopInnerOutset)
			boundaryLayer.outerOutsetLoops = intercircle.getInsetSeparateLoopsFromLoops(boundaryLayer.loops, -self.loopOuterOutset)
			boundaryLayer.innerHorizontalTable = self.getHorizontalXIntersectionsTable( boundaryLayer.innerOutsetLoops )
			boundaryLayer.outerHorizontalTable = self.getHorizontalXIntersectionsTable( boundaryLayer.outerOutsetLoops )
			boundaryLayer.innerVerticalTable = self.getHorizontalXIntersectionsTable( euclidean.getDiagonalFlippedLoops( boundaryLayer.innerOutsetLoops ) )
			boundaryLayer.outerVerticalTable = self.getHorizontalXIntersectionsTable( euclidean.getDiagonalFlippedLoops( boundaryLayer.outerOutsetLoops ) )
		# Propagate the outer tables upward (from the layer below) ...
		for boundaryLayerIndex in xrange( len(self.boundaryLayers) - 2, - 1, - 1 ):
			boundaryLayer = self.boundaryLayers[ boundaryLayerIndex ]
			boundaryLayerBelow = self.boundaryLayers[ boundaryLayerIndex + 1 ]
			euclidean.joinXIntersectionsTables( boundaryLayerBelow.outerHorizontalTable, boundaryLayer.outerHorizontalTable )
			euclidean.joinXIntersectionsTables( boundaryLayerBelow.outerVerticalTable, boundaryLayer.outerVerticalTable )
		# ... and the inner tables downward (from the layer above).
		for boundaryLayerIndex in xrange( 1, len(self.boundaryLayers) ):
			boundaryLayer = self.boundaryLayers[ boundaryLayerIndex ]
			boundaryLayerAbove = self.boundaryLayers[ boundaryLayerIndex - 1 ]
			euclidean.joinXIntersectionsTables( boundaryLayerAbove.innerHorizontalTable, boundaryLayer.innerHorizontalTable )
			euclidean.joinXIntersectionsTables( boundaryLayerAbove.innerVerticalTable, boundaryLayer.innerVerticalTable )
		for boundaryLayerIndex in xrange( len(self.boundaryLayers) ):
			self.addSegmentTableLoops(boundaryLayerIndex)
	def parseInitialization(self):
		'Parse gcode initialization and store the parameters.'
		for self.lineIndex in xrange(len(self.lines)):
			line = self.lines[self.lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
			if firstWord == '(</extruderInitialization>)':
				self.distanceFeedRate.addTagBracketedProcedure('mill')
				return
			elif firstWord == '(<perimeterWidth>':
				# Derive all mill widths/outsets from the perimeter width and
				# the repository ratios.
				self.perimeterWidth = float(splitLine[1])
				self.aroundWidth = 0.1 * self.perimeterWidth
				self.halfPerimeterWidth = 0.5 * self.perimeterWidth
				self.millWidth = self.perimeterWidth * self.repository.millWidthOverPerimeterWidth.value
				self.loopInnerOutset = self.halfPerimeterWidth + self.perimeterWidth * self.repository.loopInnerOutsetOverPerimeterWidth.value
				self.loopOuterOutset = self.halfPerimeterWidth + self.perimeterWidth * self.repository.loopOuterOutsetOverPerimeterWidth.value
			self.distanceFeedRate.addLine(line)
	def parseLine(self, line):
		'Parse a gcode line and add it to the mill skein.'
		splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
		if len(splitLine) < 1:
			return
		firstWord = splitLine[0]
		if firstWord == 'G1':
			location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
			if self.isExtruderActive:
				self.average.addValue(location.z)
				if self.oldLocation != None:
					euclidean.addValueSegmentToPixelTable( self.oldLocation.dropAxis(), location.dropAxis(), self.aroundPixelTable, None, self.aroundWidth )
			self.oldLocation = location
		elif firstWord == 'M101':
			self.isExtruderActive = True
		elif firstWord == 'M103':
			self.isExtruderActive = False
		elif firstWord == '(<layer>':
			settings.printProgress(self.layerIndex, 'mill')
			# Reset the per-layer state at the start of every layer.
			self.aroundPixelTable = {}
			self.average.reset()
		elif firstWord == '(</layer>)':
			if len(self.boundaryLayers) > self.layerIndex:
				self.addMillThreads()
			self.layerIndex += 1
		self.distanceFeedRate.addLine(line)
def main():
	'Display the mill dialog, or mill the files given on the command line.'
	arguments = sys.argv[1 :]
	if arguments:
		writeOutput(' '.join(arguments))
	else:
		settings.startMainLoopFromConstructor(getNewRepository())

if __name__ == '__main__':
	main()
| makerbot/ReplicatorG | skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/mill.py | Python | gpl-2.0 | 16,690 |
"""
# Copyright (c) 04 2015 | surya
# 20/04/15 nanang.ask@kubuskotak.com
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# manga.py
"""
import logging
import os
from natsort import natsorted
LOG = logging.getLogger(__name__)
class MangaUtil(object):
    """Filesystem helper that lists and indexes the images of a manga chapter."""

    # Lookup dict of index -> image file name; populated by
    # build_image_lookup_dict(), None until then.
    items = None

    def __init__(self, dirpath=".", manga="", chapter=""):
        # Directory holding the chapter's image files.
        self.path = os.path.join(dirpath, manga, chapter)

    def get_image_names(self):
        """Return the image file names (jpg/png/gif) found under self.path."""
        extens = ('jpg', 'png', 'gif')  # the extensions to search for
        names = []
        for root, dirs, files in os.walk(self.path):
            for name in files:
                # Split the name by '.' & get the last element.
                ext = name.lower().rsplit('.', 1)[-1]
                if ext in extens:
                    names.append(name)
        return names

    def build_image_lookup_dict(self):
        """Build self.items mapping 0-based index -> naturally sorted name."""
        names = natsorted(self.get_image_names())
        self.items = dict(zip(range(len(names)), names))

    def get_keys(self):
        """Return the indexes available in self.items.

        Raises ValueError when no lookup dict was built (self.items is None).
        """
        try:
            return list(self.items.keys())
        except AttributeError:
            # Bug fix: the original joined self.path (a string) with "\n ",
            # interleaving every character; report the path verbatim instead.
            raise ValueError("No Images in archive! \n Archive contents = %s"
                             % self.path)

    def get_item_by_key(self, itemkey):
        """Return (itemkey, file name) for a valid key; raise KeyError otherwise."""
        if itemkey not in self.items:
            raise KeyError("Invalid key. Not in archive!")
        internalpath = self.items[itemkey]
        return itemkey, internalpath

    def __del__(self):
        pass
def parse_chapter(chapter):
    """Extract the trailing chapter number, e.g. 'Chapter-010' -> 10."""
    number = str(chapter).lower().rsplit('-', 1)[-1]
    return int(number)
# -*- coding: utf-8 -*-
import dbm
from urlparse import parse_qs
from user_credentials_dbm import dbm_create, dbm_fetch
from packages.clint import resources
from packages import requests
from packages.requests_oauthlib import OAuth1
# these are the client key and secret needed
# for oauth authentication with the twitter api
client_key = "moNHqPmAoEvS21mpajk6lNXl3"
client_secret = "WXQHh9tykUivPFiVGQlwXhjOqR1S3y6W6z8LojSRCuw3vwYUeT"
# these are the user credentials for oauth authentication
# with the twitter api
user_token = ""
user_secret = ""
def get_token(client_key, client_secret, user_token, user_secret):
oauth = OAuth1(client_key, client_secret)
url = 'https://api.twitter.com/oauth/request_token'
token_requests = requests.post(url, auth=oauth)
print token_requests
credentials = parse_qs(token_requests.content)
user_token = credentials.get('oauth_token')[0]
user_secret = credentials.get('oauth_token_secret')[0]
return user_token, user_secret
def authorize(client_key, client_secret, user_token, user_secret):
url = 'https://api.twitter.com/oauth/authorize?oauth_token=' + user_token
print ''
print 'Copy Paste in your browser this link %s' % url
print (
'Click the authorize button to give TweeтCoммander access to your'
' twitter account information and put the pin code in the prompt'
' below.'
)
print ''
print '+----------------------------------------------+'
verifier = raw_input('Put your PIN code here: ')
return verifier
def get_access(client_key, client_secret, user_token, user_secret, verifier):
    # Exchange the verified request token for access credentials and
    # persist them through the dbm helper.
    url = 'https://api.twitter.com/oauth/access_token'
    oauth = OAuth1(client_key,
                   client_secret,
                   user_token,
                   user_secret,
                   verifier=verifier)
    access_request = requests.post(url=url, auth=oauth)
    credentials = parse_qs(access_request.content)
    dbm_create(credentials.get('oauth_token')[0],
               credentials.get('oauth_token_secret')[0])
def connection(client_key=client_key, client_secret=client_secret):
    # Build an OAuth1 auth object from the stored user credentials.
    token, secret = dbm_fetch()
    return OAuth1(client_key, client_secret, token, secret)
def first_time_oauth(client_key=client_key, client_secret=client_secret,
                     user_token=user_token, user_secret=user_secret):
    # Run the full three-legged OAuth dance for a first-time user:
    # request token -> user authorization (PIN) -> access token.
    print (
        'This is your first time with TweeтCoммander.'
        ' Please follow the instructions below.'
    )
    user_token, user_secret = get_token(client_key, client_secret,
                                        user_token, user_secret)
    pin = authorize(client_key, client_secret, user_token, user_secret)
    get_access(client_key, client_secret, user_token, user_secret, pin)
| raqqun/tweetcommander | oauth.py | Python | gpl-3.0 | 2,900 |
from GPIOLibrary import GPIOProcessor
import time
GP = GPIOProcessor()
# GPIO Assignments
#Din = 27
#A1 = 34 Green
#A2 = 33 White
#A3 = 24 Black
#A4 = 26 Yellow
#PIR = 29
#Ind = 30
# Din: digital input carrying remote-receiver pulses (active low, judging
# by the `Din.getValue() == 0` tests in the main loop below).
Din = GP.getPin27()
Din.input()
# A1..A4: the four stepper-motor coil outputs, driven by the sequence
# table A below.
A1 = GP.getPin34()
A1.out()
A2 = GP.getPin33()
A2.out()
A3 = GP.getPin24()
A3.out()
A4 = GP.getPin26()
A4.out()
# PIR: output toggled on/off by remote command; starts off.
PIR = GP.getPin29()
PIR.out()
PIR.low()
# Ind: indicator output, driven high while the script is in "read" mode.
Ind = GP.getPin30()
Ind.out()
Ind.low()
# Remote Average Pulse
# M is the pulse count per button click; the calibration loop below can
# overwrite it with counter/5 measured over 5 clicks.
M = 800
# Stepper Motor Delay
t = 0.001
# Stepper Motor Sequence (Forward / Reverse)
# A[0] is the forward sequence, A[1] the reverse; each entry holds the
# (A1, A2, A3, A4) output pattern for one step.
A = [[[0,1,0,1],[1,0,0,1],[1,0,1,0],[0,1,1,0]],
     [[0,1,0,1],[0,1,1,0],[1,0,1,0],[1,0,0,1]]]
# Indicators
# FR selects the sequence direction: 0 = forward, 1 = reverse.
FR = 0
# PIR_status tracks whether the PIR output is currently high.
PIR_status = 0
# Number of clicks
# Click counts (in multiples of M pulses) mapped to commands.
n_PIR = 1
n_90 = 2
n_R90 = 3
n_180 = 4
try:
print 'Calibrate? [y/n]'
r = raw_input()
if r == 'y':
while True:
print 'Click button 5 times.'
counter = 0
time.sleep(0.2)
timeout = time.time() + 2.5
while True:
if Din.getValue() == 0:
counter += 1
if time.time() > timeout:
break;
M = counter/5
print M
print 'Retry? [y/n]'
r = raw_input()
if r == 'n':
break
while True:
read = 0
counter = 0
timeout = time.time() + 0.2
# Determine if read mode should be activated
while True:
if Din.getValue() == 0:
counter += 1
if counter > 0.2*M:
read = 1
if time.time() > timeout:
break;
# Enter read mode
if read == 1:
Ind.high()
x = 0
counter = 0
print 'Read:'
stop_time = time.time() + 2
while True:
if Din.getValue() == 0:
counter += 1
if time.time() > stop_time:
break
# Decide what was chosen
Ind.low()
time.sleep(0.5)
if counter < 0.5*M:
print 'No Input'
elif n_PIR*M - 0.5*M < counter < n_PIR*M + 0.5*M:
if PIR_status == 0:
PIR.high()
PIR_status = 1
print 'PIR on'
else:
PIR.low()
PIR_status = 0
print 'PIR off'
elif n_90*M - 0.5*M < counter < n_90*M + 0.5*M:
FR = 0
x = int(90/1.8)
print '90'
elif n_R90*M - 0.5*M < counter < n_R90*M + 0.5*M:
FR = 1
x = int(90/1.8)
print '-90'
elif n_180*M - 0.5*M < counter < n_180*M + 0.5*M:
FR = 0
x = int(180/1.8)
print '180'
else:
clicks = counter/M
print counter
# Sequencing for Stepper Motor
for i in range(0,x):
A1.setValue(A[FR][i%4][0])
time.sleep(t)
A2.setValue(A[FR][i%4][1])
time.sleep(t)
A3.setValue(A[FR][i%4][2])
time.sleep(t)
A4.setValue(A[FR][i%4][3])
time.sleep(t)
finally:
GP.cleanup()
| IOT-410c/IOT-DB410c-Course-3 | Modules/Module_6_Infrared_Sensors/Lesson_3_IR_Remote/IRRemote.py | Python | apache-2.0 | 3,447 |
#!/usr/bin/env python
'''
Utility script for computing source separation metrics
Usage:
./separation_eval.py PATH_TO_REFERENCE_WAVS PATH_TO_ESTIMATED_WAVS
'''
import argparse
import sys
import os
import glob
import os
import numpy as np
import eval_utilities
import mir_eval
def process_arguments():
    '''Build the command-line parser and return the parsed args as a dict.'''
    parser = argparse.ArgumentParser(
        description='mir_eval source separation evaluation')
    # Optional JSON output destination.
    parser.add_argument('-o', dest='output_file', default=None, type=str,
                        action='store',
                        help='Store results in json format')
    # Two required positional directories: reference first, estimate second.
    parser.add_argument('reference_directory', action='store',
                        help='path to directory containing reference source '
                             '.wav files')
    parser.add_argument('estimated_directory', action='store',
                        help='path to directory containing estimated source '
                             '.wav files')
    return vars(parser.parse_args(sys.argv[1:]))
if __name__ == '__main__':
# Get the parameters
parameters = process_arguments()
reference_data = []
estimated_data = []
global_fs = None
reference_glob = os.path.join(parameters['reference_directory'], '*.wav')
# Load in each reference file in the supplied dir
for reference_file in glob.glob(reference_glob):
audio_data, fs = mir_eval.io.load_wav(reference_file)
# Make sure fs is the same for all files
assert (global_fs is None or fs == global_fs)
global_fs = fs
reference_data.append(audio_data)
estimated_glob = os.path.join(parameters['estimated_directory'], '*.wav')
for estimated_file in glob.glob(estimated_glob):
audio_data, fs = mir_eval.io.load_wav(estimated_file)
assert (global_fs is None or fs == global_fs)
global_fs = fs
estimated_data.append(audio_data)
# Turn list of audio data arrays into nsrc x nsample arrays
reference_sources = np.vstack(reference_data)
estimated_sources = np.vstack(estimated_data)
# Compute all the scores
scores = mir_eval.separation.evaluate(reference_sources, estimated_sources)
last_dir = lambda d: os.path.basename(os.path.normpath(d))
print "{} vs. {}".format(last_dir(parameters['reference_directory']),
last_dir(parameters['estimated_directory']))
eval_utilities.print_evaluation(scores)
if parameters['output_file']:
print 'Saving results to: ', parameters['output_file']
eval_utilities.save_results(scores, parameters['output_file'])
| mrgloom/mir_eval | evaluators/separation_eval.py | Python | mit | 2,812 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from xenon.proto import xenon_pb2 as xenon_dot_proto_dot_xenon__pb2
class FileSystemServiceStub(object):
  """XenonFiles represents the Xenon nl.esciencecenter.xenon.filesystems.FileSystem class.
  This interface contains various methods for creating and closing FileSystems, creating Paths and operations on these Paths.
  """
  # NOTE(review): protoc/grpc-generated client stub ("DO NOT EDIT" file) --
  # regenerate from xenon/proto/xenon.proto instead of hand-editing.

  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Adaptor introspection RPCs.
    self.getAdaptorDescriptions = channel.unary_unary(
        '/xenon.FileSystemService/getAdaptorDescriptions',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystemAdaptorDescriptions.FromString,
        )
    self.getAdaptorNames = channel.unary_unary(
        '/xenon.FileSystemService/getAdaptorNames',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorNames.FromString,
        )
    self.getAdaptorDescription = channel.unary_unary(
        '/xenon.FileSystemService/getAdaptorDescription',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystemAdaptorDescription.FromString,
        )
    # FileSystem lifecycle / metadata RPCs.
    self.create = channel.unary_unary(
        '/xenon.FileSystemService/create',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.CreateFileSystemRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
        )
    self.getAdaptorName = channel.unary_unary(
        '/xenon.FileSystemService/getAdaptorName',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.FromString,
        )
    self.getLocation = channel.unary_unary(
        '/xenon.FileSystemService/getLocation',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Location.FromString,
        )
    self.getCredential = channel.unary_unary(
        '/xenon.FileSystemService/getCredential',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetCredentialResponse.FromString,
        )
    self.getProperties = channel.unary_unary(
        '/xenon.FileSystemService/getProperties',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Properties.FromString,
        )
    # Path creation RPCs.
    self.createDirectories = channel.unary_unary(
        '/xenon.FileSystemService/createDirectories',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.createDirectory = channel.unary_unary(
        '/xenon.FileSystemService/createDirectory',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.createFile = channel.unary_unary(
        '/xenon.FileSystemService/createFile',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.createSymbolicLink = channel.unary_unary(
        '/xenon.FileSystemService/createSymbolicLink',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.CreateSymbolicLinkRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    # Copy operation RPCs (copy is asynchronous; cancel/getStatus poll it).
    self.copy = channel.unary_unary(
        '/xenon.FileSystemService/copy',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.CopyRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyOperation.FromString,
        )
    self.cancel = channel.unary_unary(
        '/xenon.FileSystemService/cancel',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.CopyOperationRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.FromString,
        )
    self.getStatus = channel.unary_unary(
        '/xenon.FileSystemService/getStatus',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.CopyOperationRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.FromString,
        )
    self.rename = channel.unary_unary(
        '/xenon.FileSystemService/rename',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.RenameRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.delete = channel.unary_unary(
        '/xenon.FileSystemService/delete',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.DeleteRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.exists = channel.unary_unary(
        '/xenon.FileSystemService/exists',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Is.FromString,
        )
    # Streaming file-content RPCs (read is server-streaming; write/append
    # are client-streaming).
    self.readFromFile = channel.unary_stream(
        '/xenon.FileSystemService/readFromFile',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.ReadFromFileResponse.FromString,
        )
    self.writeToFile = channel.stream_unary(
        '/xenon.FileSystemService/writeToFile',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.WriteToFileRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.appendToFile = channel.stream_unary(
        '/xenon.FileSystemService/appendToFile',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.AppendToFileRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.list = channel.unary_stream(
        '/xenon.FileSystemService/list',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.ListRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.PathAttributes.FromString,
        )
    # Path attribute / working-directory RPCs.
    self.getAttributes = channel.unary_unary(
        '/xenon.FileSystemService/getAttributes',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.PathAttributes.FromString,
        )
    self.getWorkingDirectory = channel.unary_unary(
        '/xenon.FileSystemService/getWorkingDirectory',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Path.FromString,
        )
    self.setWorkingDirectory = channel.unary_unary(
        '/xenon.FileSystemService/setWorkingDirectory',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.setPosixFilePermissions = channel.unary_unary(
        '/xenon.FileSystemService/setPosixFilePermissions',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.SetPosixFilePermissionsRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.readSymbolicLink = channel.unary_unary(
        '/xenon.FileSystemService/readSymbolicLink',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Path.FromString,
        )
    self.getPathSeparator = channel.unary_unary(
        '/xenon.FileSystemService/getPathSeparator',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetPathSeparatorResponse.FromString,
        )
    # Connection state / bookkeeping RPCs.
    self.isOpen = channel.unary_unary(
        '/xenon.FileSystemService/isOpen',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Is.FromString,
        )
    self.close = channel.unary_unary(
        '/xenon.FileSystemService/close',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
        )
    self.waitUntilDone = channel.unary_unary(
        '/xenon.FileSystemService/waitUntilDone',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.WaitUntilDoneRequest.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.FromString,
        )
    self.localFileSystems = channel.unary_unary(
        '/xenon.FileSystemService/localFileSystems',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystems.FromString,
        )
    self.listFileSystems = channel.unary_unary(
        '/xenon.FileSystemService/listFileSystems',
        request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
        response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystems.FromString,
        )
class FileSystemServiceServicer(object):
  """XenonFiles represents the Xenon nl.esciencecenter.xenon.filesystems.FileSystem class.
  This interface contains various methods for creating and closing FileSystems, creating Paths and operations on these Paths.
  """
  # NOTE(review): protoc/grpc-generated server base class ("DO NOT EDIT"
  # file).  Every RPC defaults to UNIMPLEMENTED; subclass and override the
  # methods you actually serve.  Regenerate from xenon/proto/xenon.proto
  # rather than hand-editing.

  def getAdaptorDescriptions(self, request, context):
    """Gives a list of the descriptions of the available adaptors.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getAdaptorNames(self, request, context):
    """Gives a list names of the available adaptors.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getAdaptorDescription(self, request, context):
    """Gives the description of the adaptor with the given name.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def create(self, request, context):
    """Create a new FileSystem using the adaptor that connects to a data store at location using the credentials to get access.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getAdaptorName(self, request, context):
    """Get the name of the adaptor that created this FileSystem.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getLocation(self, request, context):
    """Get the location that this FileSystem is connected to.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getCredential(self, request, context):
    """Get the credential used to create this FileSystem.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getProperties(self, request, context):
    """Get the properties used to create this FileSystem.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def createDirectories(self, request, context):
    """Creates a new directory, including parent directories, failing if the directory already exists.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def createDirectory(self, request, context):
    """Creates a new directory, failing if the directory already exists.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def createFile(self, request, context):
    """Creates a new empty file, failing if the file already exists.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def createSymbolicLink(self, request, context):
    """Creates a new symbolic link, failing if the link already exists
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def copy(self, request, context):
    """Asynchronously Copy an existing source path to a target path on a different file system.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def cancel(self, request, context):
    """Cancel a copy operation.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getStatus(self, request, context):
    """Retrieve the status of an copy.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def rename(self, request, context):
    """Rename an existing source path to a non-existing target path
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def delete(self, request, context):
    """Deletes an existing path.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def exists(self, request, context):
    """Tests if a path exists.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def readFromFile(self, request, context):
    """Open an existing file and return an InputStream to read from this file.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def writeToFile(self, request_iterator, context):
    """Open a file and return an OutputStream to write to this file.
    In Xenon library if request is missing size field then FileSystem.writeToFile(Path file) is used
    else FileSystem.writeToFile(Path path, long size) is used
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def appendToFile(self, request_iterator, context):
    """Open an existing file and return an OutputStream to append data to this file.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def list(self, request, context):
    """List all entries in the directory dir.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getAttributes(self, request, context):
    """Get the PathAttributes of an existing path.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getWorkingDirectory(self, request, context):
    """Get the current working directory of this file system.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def setWorkingDirectory(self, request, context):
    """Set the current working directory of this file system to directory.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def setPosixFilePermissions(self, request, context):
    """Sets the POSIX permissions of a path
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def readSymbolicLink(self, request, context):
    """Reads the target of a symbolic link
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def getPathSeparator(self, request, context):
    """Get the path separator used by this file system.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def isOpen(self, request, context):
    """Return if the connection to the FileSystem is open.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def close(self, request, context):
    """Close this filestem
    Any pending/running copy operations of this filestystem will be terminated
    Will also forget this filesystem
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def waitUntilDone(self, request, context):
    """Wait until a copy operation is done or until a timeout expires.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def localFileSystems(self, request, context):
    """Returns filesystems for all local drives
    Not part of FileSystem class in Xenon library
    In Xenon library available as LocalFileSystemUtils.getLocalFileSystems()
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def listFileSystems(self, request, context):
    """List the created filesystems
    Specific to grpc, not part of Xenon library
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_FileSystemServiceServicer_to_server(servicer, server):
  # NOTE(review): protoc/grpc-generated registration helper ("DO NOT EDIT"
  # file) -- maps each RPC name to the matching servicer method with the
  # correct (de)serializers, then registers the whole service on *server*.
  # Regenerate from xenon/proto/xenon.proto rather than hand-editing.
  rpc_method_handlers = {
      'getAdaptorDescriptions': grpc.unary_unary_rpc_method_handler(
          servicer.getAdaptorDescriptions,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystemAdaptorDescriptions.SerializeToString,
      ),
      'getAdaptorNames': grpc.unary_unary_rpc_method_handler(
          servicer.getAdaptorNames,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorNames.SerializeToString,
      ),
      'getAdaptorDescription': grpc.unary_unary_rpc_method_handler(
          servicer.getAdaptorDescription,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystemAdaptorDescription.SerializeToString,
      ),
      'create': grpc.unary_unary_rpc_method_handler(
          servicer.create,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.CreateFileSystemRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
      ),
      'getAdaptorName': grpc.unary_unary_rpc_method_handler(
          servicer.getAdaptorName,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.SerializeToString,
      ),
      'getLocation': grpc.unary_unary_rpc_method_handler(
          servicer.getLocation,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Location.SerializeToString,
      ),
      'getCredential': grpc.unary_unary_rpc_method_handler(
          servicer.getCredential,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.GetCredentialResponse.SerializeToString,
      ),
      'getProperties': grpc.unary_unary_rpc_method_handler(
          servicer.getProperties,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Properties.SerializeToString,
      ),
      'createDirectories': grpc.unary_unary_rpc_method_handler(
          servicer.createDirectories,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'createDirectory': grpc.unary_unary_rpc_method_handler(
          servicer.createDirectory,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'createFile': grpc.unary_unary_rpc_method_handler(
          servicer.createFile,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'createSymbolicLink': grpc.unary_unary_rpc_method_handler(
          servicer.createSymbolicLink,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.CreateSymbolicLinkRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'copy': grpc.unary_unary_rpc_method_handler(
          servicer.copy,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.CopyOperation.SerializeToString,
      ),
      'cancel': grpc.unary_unary_rpc_method_handler(
          servicer.cancel,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyOperationRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.SerializeToString,
      ),
      'getStatus': grpc.unary_unary_rpc_method_handler(
          servicer.getStatus,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyOperationRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.SerializeToString,
      ),
      'rename': grpc.unary_unary_rpc_method_handler(
          servicer.rename,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.RenameRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'delete': grpc.unary_unary_rpc_method_handler(
          servicer.delete,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.DeleteRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'exists': grpc.unary_unary_rpc_method_handler(
          servicer.exists,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Is.SerializeToString,
      ),
      'readFromFile': grpc.unary_stream_rpc_method_handler(
          servicer.readFromFile,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.ReadFromFileResponse.SerializeToString,
      ),
      'writeToFile': grpc.stream_unary_rpc_method_handler(
          servicer.writeToFile,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.WriteToFileRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'appendToFile': grpc.stream_unary_rpc_method_handler(
          servicer.appendToFile,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.AppendToFileRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'list': grpc.unary_stream_rpc_method_handler(
          servicer.list,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.ListRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.PathAttributes.SerializeToString,
      ),
      'getAttributes': grpc.unary_unary_rpc_method_handler(
          servicer.getAttributes,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.PathAttributes.SerializeToString,
      ),
      'getWorkingDirectory': grpc.unary_unary_rpc_method_handler(
          servicer.getWorkingDirectory,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Path.SerializeToString,
      ),
      'setWorkingDirectory': grpc.unary_unary_rpc_method_handler(
          servicer.setWorkingDirectory,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'setPosixFilePermissions': grpc.unary_unary_rpc_method_handler(
          servicer.setPosixFilePermissions,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.SetPosixFilePermissionsRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'readSymbolicLink': grpc.unary_unary_rpc_method_handler(
          servicer.readSymbolicLink,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Path.SerializeToString,
      ),
      'getPathSeparator': grpc.unary_unary_rpc_method_handler(
          servicer.getPathSeparator,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.GetPathSeparatorResponse.SerializeToString,
      ),
      'isOpen': grpc.unary_unary_rpc_method_handler(
          servicer.isOpen,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Is.SerializeToString,
      ),
      'close': grpc.unary_unary_rpc_method_handler(
          servicer.close,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
      ),
      'waitUntilDone': grpc.unary_unary_rpc_method_handler(
          servicer.waitUntilDone,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.WaitUntilDoneRequest.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.SerializeToString,
      ),
      'localFileSystems': grpc.unary_unary_rpc_method_handler(
          servicer.localFileSystems,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystems.SerializeToString,
      ),
      'listFileSystems': grpc.unary_unary_rpc_method_handler(
          servicer.listFileSystems,
          request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
          response_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystems.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'xenon.FileSystemService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
# NOTE(review): auto-generated gRPC client bindings -- presumably emitted by
# the grpc_python protoc plugin from xenon.proto; regenerate from the proto
# rather than hand-editing (TODO confirm generator/proto source).
class SchedulerServiceStub(object):
    """The Jobs API of Xenon. This interface creates various methods for creating and closing Schedulers, submitting jobs, and retrieving information about schedulers and jobs.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Each attribute below is a callable RPC bound to the channel.
        # unary_unary RPCs take one request message and return one response;
        # submitInteractiveJob is stream_stream (request iterator in,
        # response iterator out).
        self.getAdaptorDescriptions = channel.unary_unary(
            '/xenon.SchedulerService/getAdaptorDescriptions',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAdaptorDescriptions.FromString,
            )
        self.getAdaptorNames = channel.unary_unary(
            '/xenon.SchedulerService/getAdaptorNames',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorNames.FromString,
            )
        self.getAdaptorDescription = channel.unary_unary(
            '/xenon.SchedulerService/getAdaptorDescription',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAdaptorDescription.FromString,
            )
        self.create = channel.unary_unary(
            '/xenon.SchedulerService/create',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.CreateSchedulerRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            )
        self.getAdaptorName = channel.unary_unary(
            '/xenon.SchedulerService/getAdaptorName',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.FromString,
            )
        self.getLocation = channel.unary_unary(
            '/xenon.SchedulerService/getLocation',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Location.FromString,
            )
        self.getCredential = channel.unary_unary(
            '/xenon.SchedulerService/getCredential',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetCredentialResponse.FromString,
            )
        self.getProperties = channel.unary_unary(
            '/xenon.SchedulerService/getProperties',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Properties.FromString,
            )
        self.submitBatchJob = channel.unary_unary(
            '/xenon.SchedulerService/submitBatchJob',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.SubmitBatchJobRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Job.FromString,
            )
        # Bidirectional streaming RPC (see the servicer docstring: first
        # response carries only the job id, later ones carry stdout/stderr).
        self.submitInteractiveJob = channel.stream_stream(
            '/xenon.SchedulerService/submitInteractiveJob',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.SubmitInteractiveJobRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.SubmitInteractiveJobResponse.FromString,
            )
        self.getQueueNames = channel.unary_unary(
            '/xenon.SchedulerService/getQueueNames',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Queues.FromString,
            )
        self.getDefaultQueueName = channel.unary_unary(
            '/xenon.SchedulerService/getDefaultQueueName',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Queue.FromString,
            )
        self.getJobs = channel.unary_unary(
            '/xenon.SchedulerService/getJobs',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAndQueues.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Jobs.FromString,
            )
        self.getJobStatus = channel.unary_unary(
            '/xenon.SchedulerService/getJobStatus',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.JobRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
            )
        self.getJobStatuses = channel.unary_unary(
            '/xenon.SchedulerService/getJobStatuses',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.GetJobStatusesRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetJobStatusesResponse.FromString,
            )
        self.getQueueStatus = channel.unary_unary(
            '/xenon.SchedulerService/getQueueStatus',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.GetQueueStatusRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.QueueStatus.FromString,
            )
        self.getQueueStatuses = channel.unary_unary(
            '/xenon.SchedulerService/getQueueStatuses',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAndQueues.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.QueueStatuses.FromString,
            )
        self.waitUntilDone = channel.unary_unary(
            '/xenon.SchedulerService/waitUntilDone',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.WaitRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
            )
        self.waitUntilRunning = channel.unary_unary(
            '/xenon.SchedulerService/waitUntilRunning',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.WaitRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
            )
        self.isOpen = channel.unary_unary(
            '/xenon.SchedulerService/isOpen',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Is.FromString,
            )
        self.cancelJob = channel.unary_unary(
            '/xenon.SchedulerService/cancelJob',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.JobRequest.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
            )
        self.getDefaultRuntime = channel.unary_unary(
            '/xenon.SchedulerService/getDefaultRuntime',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetDefaultRuntimeResponse.FromString,
            )
        self.getFileSystem = channel.unary_unary(
            '/xenon.SchedulerService/getFileSystem',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
            )
        self.close = channel.unary_unary(
            '/xenon.SchedulerService/close',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
            )
        self.localScheduler = channel.unary_unary(
            '/xenon.SchedulerService/localScheduler',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            )
        self.listSchedulers = channel.unary_unary(
            '/xenon.SchedulerService/listSchedulers',
            request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
            response_deserializer=xenon_dot_proto_dot_xenon__pb2.Schedulers.FromString,
            )
class SchedulerServiceServicer(object):
    """The Jobs API of Xenon. This interface creates various methods for creating and closing Schedulers, submitting jobs, and retrieving information about schedulers and jobs.
    """
    # NOTE(review): auto-generated servicer base class (grpc protoc plugin,
    # presumably from xenon.proto -- regenerate rather than hand-edit).
    # Subclass and override the methods below; every default implementation
    # reports UNIMPLEMENTED to the client and raises NotImplementedError.

    def getAdaptorDescriptions(self, request, context):
        """Gives a list of the descriptions of the available adaptors.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getAdaptorNames(self, request, context):
        """Gives a list names of the available adaptors.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getAdaptorDescription(self, request, context):
        """Gives the description of the adaptor with the given name.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def create(self, request, context):
        """Create a new Scheduler using the adaptor connecting to the location using credentials to get access.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getAdaptorName(self, request, context):
        """Get the name of the adaptor that created this Scheduler.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getLocation(self, request, context):
        """Get the location that this Scheduler is connected to.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getCredential(self, request, context):
        """Get the credential used to create this Scheduler.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getProperties(self, request, context):
        """Get the properties used to create this Scheduler.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def submitBatchJob(self, request, context):
        """Submit a batch job.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def submitInteractiveJob(self, request_iterator, context):
        """Submit an interactive job
        The first response message in the response stream will contain the job identifier and empty stdout and stdout.
        Other response messages will also contain the job identifier and filled stdout and/or stderr.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getQueueNames(self, request, context):
        """Get the queue names supported by this Scheduler.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getDefaultQueueName(self, request, context):
        """Get the name of the default queue.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getJobs(self, request, context):
        """Get all job identifier of jobs currently in (one ore more) queues.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getJobStatus(self, request, context):
        """Get the status of a Job.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getJobStatuses(self, request, context):
        """Get the status of all specified jobs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getQueueStatus(self, request, context):
        """Get the status of the queue.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getQueueStatuses(self, request, context):
        """Get the status of all queues.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def waitUntilDone(self, request, context):
        """Wait until a job is done or until a timeout expires.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def waitUntilRunning(self, request, context):
        """Wait until a job starts running, or until a timeout expires.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def isOpen(self, request, context):
        """Test if the connection of this Scheduler is open.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def cancelJob(self, request, context):
        """Cancel a job
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getDefaultRuntime(self, request, context):
        """Get the default runtime of a job in minutes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getFileSystem(self, request, context):
        """Retrieve the FileSystem used internally by this Scheduler.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def close(self, request, context):
        """Close this Scheduler.
        If scheduler is embedded then any pending/running jobs will be killed
        Will also forget this scheduler
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def localScheduler(self, request, context):
        """Get scheduler on local filesystem with default location, credential and no properties
        Not part of Scheduler class in Xenon library
        In Xenon library available as Scheduler.create("local")
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def listSchedulers(self, request, context):
        """List the created schedulers
        Specific to grpc, not part of Xenon library
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_SchedulerServiceServicer_to_server(servicer, server):
    """Register *servicer*'s SchedulerService RPC handlers on *server*.

    Auto-generated companion to SchedulerServiceServicer: builds one RPC
    method handler per service method (pairing the request deserializer with
    the response serializer) and mounts the whole set under the
    'xenon.SchedulerService' service name.
    """
    rpc_method_handlers = {
        'getAdaptorDescriptions': grpc.unary_unary_rpc_method_handler(
            servicer.getAdaptorDescriptions,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAdaptorDescriptions.SerializeToString,
        ),
        'getAdaptorNames': grpc.unary_unary_rpc_method_handler(
            servicer.getAdaptorNames,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorNames.SerializeToString,
        ),
        'getAdaptorDescription': grpc.unary_unary_rpc_method_handler(
            servicer.getAdaptorDescription,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAdaptorDescription.SerializeToString,
        ),
        'create': grpc.unary_unary_rpc_method_handler(
            servicer.create,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.CreateSchedulerRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
        ),
        'getAdaptorName': grpc.unary_unary_rpc_method_handler(
            servicer.getAdaptorName,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.SerializeToString,
        ),
        'getLocation': grpc.unary_unary_rpc_method_handler(
            servicer.getLocation,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Location.SerializeToString,
        ),
        'getCredential': grpc.unary_unary_rpc_method_handler(
            servicer.getCredential,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.GetCredentialResponse.SerializeToString,
        ),
        'getProperties': grpc.unary_unary_rpc_method_handler(
            servicer.getProperties,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Properties.SerializeToString,
        ),
        'submitBatchJob': grpc.unary_unary_rpc_method_handler(
            servicer.submitBatchJob,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.SubmitBatchJobRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Job.SerializeToString,
        ),
        # The only streaming method of this service (bidirectional).
        'submitInteractiveJob': grpc.stream_stream_rpc_method_handler(
            servicer.submitInteractiveJob,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.SubmitInteractiveJobRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.SubmitInteractiveJobResponse.SerializeToString,
        ),
        'getQueueNames': grpc.unary_unary_rpc_method_handler(
            servicer.getQueueNames,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Queues.SerializeToString,
        ),
        'getDefaultQueueName': grpc.unary_unary_rpc_method_handler(
            servicer.getDefaultQueueName,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Queue.SerializeToString,
        ),
        'getJobs': grpc.unary_unary_rpc_method_handler(
            servicer.getJobs,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAndQueues.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Jobs.SerializeToString,
        ),
        'getJobStatus': grpc.unary_unary_rpc_method_handler(
            servicer.getJobStatus,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.JobRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.SerializeToString,
        ),
        'getJobStatuses': grpc.unary_unary_rpc_method_handler(
            servicer.getJobStatuses,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.GetJobStatusesRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.GetJobStatusesResponse.SerializeToString,
        ),
        'getQueueStatus': grpc.unary_unary_rpc_method_handler(
            servicer.getQueueStatus,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.GetQueueStatusRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.QueueStatus.SerializeToString,
        ),
        'getQueueStatuses': grpc.unary_unary_rpc_method_handler(
            servicer.getQueueStatuses,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAndQueues.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.QueueStatuses.SerializeToString,
        ),
        'waitUntilDone': grpc.unary_unary_rpc_method_handler(
            servicer.waitUntilDone,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.WaitRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.SerializeToString,
        ),
        'waitUntilRunning': grpc.unary_unary_rpc_method_handler(
            servicer.waitUntilRunning,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.WaitRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.SerializeToString,
        ),
        'isOpen': grpc.unary_unary_rpc_method_handler(
            servicer.isOpen,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Is.SerializeToString,
        ),
        'cancelJob': grpc.unary_unary_rpc_method_handler(
            servicer.cancelJob,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.JobRequest.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.SerializeToString,
        ),
        'getDefaultRuntime': grpc.unary_unary_rpc_method_handler(
            servicer.getDefaultRuntime,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.GetDefaultRuntimeResponse.SerializeToString,
        ),
        'getFileSystem': grpc.unary_unary_rpc_method_handler(
            servicer.getFileSystem,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
        ),
        'close': grpc.unary_unary_rpc_method_handler(
            servicer.close,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
        ),
        'localScheduler': grpc.unary_unary_rpc_method_handler(
            servicer.localScheduler,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
        ),
        'listSchedulers': grpc.unary_unary_rpc_method_handler(
            servicer.listSchedulers,
            request_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
            response_serializer=xenon_dot_proto_dot_xenon__pb2.Schedulers.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'xenon.SchedulerService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| NLeSC/pyxenon | xenon/proto/xenon_pb2_grpc.py | Python | apache-2.0 | 52,079 |
from django.urls import path

from . import views

# URL namespace: reverse these as 'tariff_app:<name>'.
app_name = 'tariff_app'

urlpatterns = [
    path('', views.TariffsListView.as_view(), name='home'),
    # views.edit_tarif serves both edit (id captured) and add (no id);
    # 'add/' cannot clash with '<int:tarif_id>/' because the int converter
    # only matches digits.
    path('<int:tarif_id>/', views.edit_tarif, name='edit'),
    path('<int:tarif_id>/users/', views.ServiceUsers.as_view(), name='service_users'),
    path('add/', views.edit_tarif, name='add'),
    path('del/<int:tid>/', views.TariffDeleteView.as_view(), name='del'),
    path('periodic_pays/', views.PeriodicPaysListView.as_view(), name='periodic_pays'),
    # views.periodic_pay likewise handles add (no pay_id) and edit.
    path('periodic_pays/add/', views.periodic_pay, name='periodic_pay_add'),
    path('periodic_pays/<int:pay_id>/', views.periodic_pay, name='periodic_pay_edit')
]
| nerosketch/djing | tariff_app/urls.py | Python | unlicense | 675 |
#!/bin/env python
# NOTE: Python 2 script (print statements); drives real LED-panel hardware.
import sys
import uwhd
import time
import random

# Start up the GPIO for talking to the 32x32 panels
io = uwhd.GPIO()
if not io.Init():
    # GPIO access typically requires root; tell the user how to rerun.
    print 'Try running this as:'
    print ''
    print '$ sudo PYTHONPATH=path/to/build/lib python uwhd-demo.py'
    sys.exit(-1)
print "GPIO inited"

# number of rows of pixels on a display
# (always 32 for the 7.5" displays from SparkFun)
rows = 32
# length of the display chain
# (always 3 for v1 and v2 of the game display)
chained_displays = 3
# number of parallel rows of display chains
# (always 1 for v1 and v2 of the game clock boards)
parallel_rows = 1

# Create the display driver
matrix = uwhd.RGBMatrix(io, rows, chained_displays, parallel_rows)
print "Built RGBMatrix"

# Disabled demo block: fill the panel purple and draw/erase random white
# "twinkle" pixels. Flip the condition to True to try it.
if False:
    print "Make it purple with twinkles!"
    matrix.Fill(128, 0, 255)
    for i in range(0, 10000):
        x = random.randint(0, 96)
        y = random.randint(0, 96)
        if random.getrandbits(1):
            matrix.SetPixel(x, y, 255,255,255)
        else:
            matrix.SetPixel(x, y, 0,0,0)

# Create the Game Display object, which maintains its own version of the
# game state, and renders it onto the external 32x32 panels on the front
# of the clock, as well as the alphanumeric and 7-segment displays on the
# back.
gd = uwhd.GameDisplay(matrix)
print "Built GameDisplay"

# Fire up the Game Display thread, which is responsible for
# continuously drawing the state to the 32x32 boards
gd.Start0()
print "Started GameDisplay"

# Fetch the GameManager so we can edit the game state
mgr = gd.getMgr2()
# Seed a demo game: first half, clock stopped, black 1 - white 4, 135s left.
mgr.setGameStateFirstHalf()
mgr.setGameClockRunning(0)
mgr.setBlackScore(1)
mgr.setWhiteScore(4)
mgr.setGameClock(135)
print "Set up GameModel"

# Bring up the XBee sync server and attach the manager so remote displays
# mirror this game state.
xbee = uwhd.CreateXBeeSyncServer()
print "created xbee sync"
xbee.Init()
print "inited xbee sync"
xbee.setMgr(mgr)
print "added the mgr"

print "starting the game clock:"
mgr.setGameClockRunning(1)

# Poll the model once per second for ~100s and print the live state.
print "probe the mgr for model updates:"
for i in range(0, 100):
    print ""
    print " BlackScore: %d" %(mgr.blackScore(),)
    print " WhiteScore: %d" %(mgr.whiteScore(),)
    print " Time: %ds" %(mgr.gameClock(),)
    print " ClockRunning: %s" %("YES" if mgr.gameClockRunning() else "NO",)
    time.sleep(1)
print "Quitting"
| jroelofs/uwh-display | tools/uwhd-demo-py/uwhd-demo.py | Python | bsd-3-clause | 2,220 |
# Copyright (C) 2018 Camptocamp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo addon manifest for the base_cron_oneshot module.
{
    'name': """Oneshot cron""",
    'summary': """Allows creating of single-use disposable crons.""",
    'category': "Extra Tools",
    'version': "11.0.1.0.0",
    'author': "Camptocamp, "
              "Odoo Community Association (OCA)",
    'website': "https://github.com/OCA/server-tools",
    'license': "AGPL-3",
    # XML data files loaded (in order) on module install/upgrade.
    'data': [
        'data/ir_sequence.xml',
        'data/ir_cron.xml',
        'views/ir_cron.xml',
    ],
}
| brain-tec/server-tools | base_cron_oneshot/__manifest__.py | Python | agpl-3.0 | 530 |
class Particle:
    """
    A single drifting particle.

    Remembers where the particle started and every (time, lat, lon) point
    it has visited since.
    """

    def __init__(self, id, base_time, timestep, start_lat, start_lon):
        self.id = id
        self.base_time = base_time
        self.timestep = timestep
        # Current position; updated by update_position().
        self.curr_lat = start_lat
        self.curr_lon = start_lon
        # The previously calculated grid indices (i, j); used as the initial
        # guess when locating the particle's current cell.
        self.prev_i = None
        self.prev_j = None
        # Interpolation functions used when getting velocities between two
        # points.
        self.interp_f_u = None
        self.interp_f_v = None
        # Visited path as (seconds after base_time, lat, lon) tuples.
        self.path_lat_lon = [(0, start_lat, start_lon)]

    def __str__(self):
        head = "id: {}, base time: {}, timestep: {}, ".format(
            self.id, self.base_time, self.timestep)
        position = "curr lat: {}, curr lon: {}, ".format(
            self.curr_lat, self.curr_lon)
        indices = "prev i: {}, prev j: {}".format(self.prev_i, self.prev_j)
        return head + position + indices

    def update_position(self, grid, disps, times):
        """
        Record a particle move and update the current position.

        *disps* holds accumulated (x, y) displacements measured from the
        current position; *times* holds the matching seconds-after-base_time
        values. Each consecutive step is converted to a new lat/lon via
        *grid* and appended to the path; the particle ends at the final
        point.
        """
        assert len(disps) == len(times)
        assert disps[0][0] == 0 and disps[0][1] == 0
        lat, lon = self.curr_lat, self.curr_lon
        # Walk consecutive pairs: each step is the displacement relative to
        # the previous point rather than to the start.
        for step_idx in range(1, len(disps)):
            step = disps[step_idx] - disps[step_idx - 1]
            lat, lon = grid.lat_lon_plus_displacement(
                lat, lon, step[0], step[1])
            self.path_lat_lon.append((times[step_idx], lat, lon))
        self.curr_lat = lat
        self.curr_lon = lon
| nicjhan/mom-particles | particle.py | Python | gpl-2.0 | 2,027 |
"""
add workbench username to user
Revision ID: 8cf88e6f71d
Revises: 30b4cd1b3ebf
Create Date: 2015-04-08 14:32:01.562070
"""
# revision identifiers, used by Alembic.
revision = '8cf88e6f71d'
down_revision = '30b4cd1b3ebf'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """
    Apply the migration: add the nullable ``workbench_username`` column
    (VARCHAR(255)) to the ``users`` table.

    :return: nothing
    """
    workbench_column = sa.Column(
        'workbench_username', sa.String(length=255), nullable=True)
    op.add_column('users', workbench_column)
def downgrade():
    """
    Downgrade the database
    (drops the ``workbench_username`` column added by upgrade())
    :return: nothing
    """
    op.drop_column('users', 'workbench_username')
| NERC-CEH/jules-jasmin | majic/scripts/versions/8cf88e6f71d_add_workbench_username.py | Python | gpl-2.0 | 636 |
#!/usr/local/bin/python
"""
Web app for displaying survey results
"""
import json
import pandas as pd
from flask import Flask, render_template
from flask_wtf import Form
from wtforms import SelectMultipleField, SubmitField

app = Flask(__name__)
# NOTE(review): hard-coded session secret; load from config/env before
# deploying anywhere public.
app.secret_key = 'hard to guess string'

# Survey responses: one row per (region, question, response) with its raw
# count in 'frequency'.
DF = pd.read_csv('static/data/responses_no_response_rate.csv',
                 names=['region', 'question', 'response', 'frequency'],
                 header=None)
# All distinct region names, used to populate the form choices.
REGIONS = DF.region.unique()
@app.route('/', methods=['GET', 'POST'])
def index():
    """
    Mostly single-page web app, here the index is used to view the results
    given form inputs. The default view is just "ALL", the whole region
    together.

    Renders the bar-chart javascript for the selected regions (capped at
    three) plus the per-region summary texts.
    """
    form = CountyForm()
    # Default to the aggregate "ALL" view when no regions were selected.
    if not form.regions.data:
        regions = ['ALL']
    else:
        regions = form.regions.data
    # don't allow more than three selections
    if len(regions) > 3:
        regions = regions[:3]
    # data dict will be in the proper form for JSON data to be passed to templ
    json_data = make_data_json(regions)
    javascript = render_template('barchart.js', json_data=json_data,
                                 n_regions=len(regions))
    # Use a context manager so the file handle is closed (the original
    # leaked it). The [3:] slice skips the file's 3-byte prefix --
    # presumably a UTF-8 BOM; TODO confirm and switch to encoding='utf-8-sig'.
    with open('static/data/region_summaries.json', 'r') as summaries_file:
        all_region_summaries = json.loads(summaries_file.read()[3:])
    region_summaries = [(r, all_region_summaries[r])
                        for r in regions if r in all_region_summaries]
    return render_template('index.html', javascript=javascript, form=form,
                           region_summaries=region_summaries)
#: Questions in the order requested by L
ORDERED_QUESTIONS = ['9', '8a', '18c', '18d', '18a']

#: Human-readable survey text for each question code above.
#: (Spelling of the source data, e.g. 'lifeteime', is preserved -- these
#: strings are rendered as-is.)
QUESTION_LOOKUP = \
    {'9': 'Indicate which of the following categories best fits your tillage practice',
     '8a': 'I consider myself to be an aggressive adopter of conservation practices',
     '18c': 'Human activities are the primary cause of climate change',
     '18d': 'I will have to make serious changes to my farming operation to adjust to climate change',
     '18a': 'I have observed changes in weather patterns over my lifeteime'}
def make_data_json(regions):
    """
    Build the JSON payload for the bar chart: for each question (in
    ORDERED_QUESTIONS) and each requested region, the share of respondents
    giving each response.
    """
    subset = DF[DF.region.isin(regions)]
    payload = []
    for question_code in ORDERED_QUESTIONS:
        question_df = subset[subset.question == question_code]
        # One entry per region: its name plus (response, fraction) records.
        per_region = []
        for region_name in regions:
            cell = question_df[question_df.region == region_name]
            # Normalise raw counts to fractions of the region's total.
            fractions = cell.frequency / cell.frequency.sum()
            responses = [{'response': answer, 'frequency': share}
                         for answer, share in zip(cell.response, fractions)]
            per_region.append({'region': region_name,
                               'responses': responses})
        payload.append({'question': QUESTION_LOOKUP[question_code],
                        'region_responses': per_region})
    return json.dumps(payload)
def extract_():
    """
    Unimplemented placeholder -- currently does nothing.
    (Original stub docstring read "docstring for extract_".)
    """
    pass
class CountyForm(Form):
    """Dropdown, multi-select form to pick counties/regions to compare."""
    # list() matters: under Python 3, zip() returns a one-shot iterator, so
    # the choices would be exhausted after the first time the field is
    # rendered/validated. list(zip(...)) behaves identically on Python 2.
    regions = SelectMultipleField(u'Regions',
                                  choices=list(zip(REGIONS, REGIONS)))
    submit = SubmitField('Compare Regions')
if __name__ == '__main__':
    # Flask development server only; use a WSGI server (and debug=False)
    # in production.
    app.run(debug=True)
| mtpain/decision_tool | app.py | Python | mit | 3,817 |
import factory
import mock
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.models import User
from django.test import TestCase
from lizard_map import coordinates
from lizard_neerslagradar import models
# Found by calling coordinates.rd_to_wgs84 on 150000 450000
# (longitude, latitude) in WGS84.
TOREN_AMERSFOORT = (5.3143, 52.03830)
# Small area around it: a closed WKT polygon ring of "lon lat" pairs.
AREA_AMERSFOORT = (
    'POLYGON((5.2997 52.0472, 5.3289 52.0472, 5.3289 52.0293, '
    '5.2997 52.0293, 5.2997 52.0472))')
class UserF(factory.Factory):
    """factory_boy factory for django.contrib.auth User objects."""
    FACTORY_FOR = User

    username = 'Remco'
class RegionF(factory.Factory):
    """factory_boy factory for Region objects covering AREA_AMERSFOORT."""
    FACTORY_FOR = models.Region

    name = "Amersfoort"
    geometry = AREA_AMERSFOORT
class TestRegion(TestCase):
    """Tests for lizard_neerslagradar.models.Region: unicode repr,
    per-user extents, google (web-mercator) extents and point lookup.

    NOTE: written for Python 2 (uses the ``unicode`` builtin).
    """

    def test_has_unicode(self):
        self.assertTrue(
            'naam' in unicode(RegionF.build(name='naam')))

    def test_google_extent_works(self):
        region = RegionF.build()
        left, top, right, bottom = region.google_extent()
        # A real area must have nonzero width and height.
        self.assertNotEquals(left, right)
        self.assertNotEquals(top, bottom)

    def test_extent_for_user_none_when_not_logged_in(self):
        self.assertEquals(
            None,
            models.Region.extent_for_user(AnonymousUser()))

    def test_extent_for_user_without_regions_is_none(self):
        user = UserF.create()
        self.assertEquals(
            None,
            models.Region.extent_for_user(user))

    # mock.patch decorators apply bottom-up, hence the reversed order of the
    # mocked_* arguments below.
    @mock.patch(
        'lizard_neerslagradar.projections.coordinate_to_composite_pixel',
        return_value=None)  # No coordinates found
    @mock.patch(
        'lizard_neerslagradar.projections.topleft_of_composite_pixel',
        return_value=(0, 0))  # Dummy return value
    @mock.patch(
        'lizard_neerslagradar.projections.bottomright_of_composite_pixel',
        return_value=(0, 0))  # Dummy return value
    def test_if_extent_outside_composite_uses_corners(
            self, mocked_bottomright, mocked_topleft, mocked_to_composite):
        user = UserF.create()
        region = RegionF.create()
        region.users.add(user)
        with self.settings(COMPOSITE_CELLS=(500, 490)):
            models.Region.extent_for_user(user)
        # topleft and bottomright should have been called with topleft
        # and bottomright pixels of the composite
        mocked_topleft.assert_called_with(
            0, 0, to_projection=coordinates.google_projection)
        mocked_bottomright.assert_called_with(
            499, 489, to_projection=coordinates.google_projection)

    def test_superuser_gets_an_extent_even_without_regions(self):
        user = User.objects.create_superuser(
            'admin',
            'test@example.com',
            'some_password')
        extent = models.Region.extent_for_user(user)
        self.assertNotEquals(extent, None)

    def test_extent_returned(self):
        user = UserF.create()
        region = RegionF.create()
        region.users.add(user)
        extent = models.Region.extent_for_user(user)
        self.assertNotEquals(extent, None)
        # Extents are dicts with string values (template-ready).
        for key in ('left', 'bottom', 'right', 'top'):
            self.assertTrue(key in extent)
            self.assertTrue(isinstance(extent[key], str))

    def test_region_returned_for_point_inside_it(self):
        # TOREN_AMERSFOORT lies inside RegionF's AREA_AMERSFOORT polygon.
        region = RegionF.create()
        regionfound = models.Region.find_by_point(TOREN_AMERSFOORT)
        self.assertEquals(region, regionfound)

    def test_region_not_returned_for_point_outside_it(self):
        RegionF.create()
        regionfound = models.Region.find_by_point((5, 5))
        self.assertEquals(None, regionfound)

    def test_region_returned_for_user(self):
        # Three identical-geometry regions; only region2 belongs to the
        # user, so the user-filtered lookup must return exactly that one.
        RegionF.create(name='amersfoort1')
        region2 = RegionF.create(name='amersfoort2')
        user = UserF.create()
        region2.users.add(user)
        region2.save()
        RegionF.create(name='amersfoort3')
        regionfound = models.Region.find_by_point(TOREN_AMERSFOORT, user=user)
        self.assertEquals(region2, regionfound)
| lizardsystem/lizard-neerslagradar | lizard_neerslagradar/tests/test_models.py | Python | gpl-3.0 | 4,024 |
#!/usr/bin/env python2.7
"""
this file generates the README.md file containing images and a tutorial on
secp256k1 operations.
"""
from grunt import *
# the curve equation, rendered as a latex image in the tutorial text
secp256k1_eq = "y^2 = x^3 + 7"
md_file = "README.md"
import sys
# -m flag: write markdown + images; without it, step through matplotlib
markdown = True if "-m" in sys.argv else False
init_grunt_globals(markdown, md_file)
if markdown:
    import os, errno
    # create the img directory to store the graph and equation images in
    try:
        os.makedirs("img")
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    # clear the md_file, ready for writing again
    try:
        os.remove(md_file)
    except OSError as exception:
        # errno.ENOENT = no such file or directory
        if exception.errno != errno.ENOENT:
            raise
##########decimal_places = 30
# horizontal rule separating the tutorial sections in the markdown output
hr = """
--------------------------------------------------------------------------------
"""
# detect the best form of pretty printing available in this terminal
sympy.init_printing()
# document title
print hr
quick_write(
"""# visual secp256k1
visualise operations on the secp256k1 (bitcoin) elliptic curve
**this project is intended purely for understanding concepts. do not use it for
live crypto applications**
run like so:
    ./main.py -m
to generate this git markdown file and images. run without the `-m` (markdown)
flag to step through the tutorial image by image using matplotlib (enables
zooming) in your shell.
1. [point addition (infinite field)](#1-point-addition-infinite-field)
2. [subtraction and halving (infinite field)](#2-subtraction-and-halving-infinite-field)
3. [point addition (finite field)](#3-point-addition-finite-field) (TODO)
4. [subtraction and halving (finite field)](#4-subtraction-and-halving-finite-field) (TODO)
5. [bitcoin deterministic keys](#5-bitcoin-deterministic-keys) (TODO)
6. [signing a message](#6-signing-a-message) (TODO)
7. [verifying a message signature](#7-verifying-a-message-signature) (TODO)
8. [recovering a public key from a signature](#8-recovering-a-public-key-from-a-signature) (TODO)
9. [cracking a private key](#9-cracking-a-private-key) (TODO)"""
)
if markdown:
    # a notice when running in markdown mode. not printed to markdown file
    print "writing output to %s. graphs and equations stored in img/\n" \
        % md_file
quick_write("the equation of the bitcoin elliptic curve is as follows:")
quick_equation(latex = secp256k1_eq)
quick_write("this equation is called `secp256k1` and looks like this:")
init_plot_ec(x_max = 7)
finalize_plot_ec("secp256k1")
quick_write(
"""### 1. point addition (infinite field)
to add two points on the elliptic curve, just draw a line through them and find
the third intersection with the curve, then mirror this third point about the
`x`-axis. for example, adding point `p` to point `q`:"""
)
# p at x = 5 and q at x = 1, both in the bottom half of the curve
init_plot_ec(x_max = 7)
xp = 5
yp_pos = False
yp = y_ec(xp, yp_pos)
p = (xp, yp)
xq = 1
yq_pos = False
yq = y_ec(xq, yq_pos)
q = (xq, yq)
plot_add(p, q, "p", "q", "p + q", color = "r")
finalize_plot_ec("point_addition1")
quick_write(
"""note that the third intersection with the curve can also lie between the
points being added:"""
)
init_plot_ec(x_max = 7)
xp = 6
yp_pos = False
yp = y_ec(xp, yp_pos)
p = (xp, yp)
xq = -1
yq_pos = True
yq = y_ec(xq, yq_pos)
q = (xq, yq)
plot_add(p, q, "p", "q", "p + q", color = "r")
finalize_plot_ec("point_addition2")
quick_write("try moving point `q` towards point `p` along the curve:")
# three additions with q sliding towards p (x = 0, 1, 4) in one figure
init_plot_ec(x_max = 7, color = "y")
xp = 5
yp_pos = False
yp = y_ec(xp, yp_pos)
p = (xp, yp)
xq1 = 0
yq1_pos = False
yq1 = y_ec(xq1, yq1_pos)
q1 = (xq1, yq1)
plot_add(p, q1, "p", "", "", color = "r")
xq2 = 1
yq2_pos = False
yq2 = y_ec(xq2, yq2_pos)
q2 = (xq2, yq2)
plot_add(p, q2, "", "", "", color = "m")
xq3 = 4
yq3_pos = False
yq3 = y_ec(xq3, yq3_pos)
q3 = (xq3, yq3)
plot_add(p, q3, "", "", "", color = "g")
finalize_plot_ec("point_addition3")
quick_write(
"""clearly as `q` approaches `p`, the line between `q` and `p` approaches the
tangent at `p`. and at `q = p` this line *is* the tangent. so a point can be
added to itself (`p + p`, ie `2p`) by finding the tangent to the curve at that
point and the third intersection with the curve:"""
)
init_plot_ec(x_max = 5)
xp = 2
yp_pos = False
yp = y_ec(xp, yp_pos)
p = (xp, yp)
plot_add(p, p, "p", "", "2p", color = "r")
finalize_plot_ec("point_doubling1")
# you can change this to anything and it will still work (though some values
# will give coordinates of 4p which are too large for matplotlib to compute
# and graph)
xp = 10
yp_pos = True
quick_write(
"""ok, but so what? when you say 'add points on the curve' is this just fancy
mathematical lingo, or does this form of addition work like regular addition?
for example does `p + p + p + p = 2p + 2p` on the curve?
to answer that, lets check with `p` at `x = %s` in the %s half of the curve:"""
% (xp, "top" if yp_pos else "bottom")
)
def plot_4p(xp, yp_pos, labels_on = True):
    """plot p, 2p, 3p and 4p on the curve, drawing the addition lines
    p + p, p + 2p, p + 3p and 2p + 2p (so the last two constructions of
    4p can be compared visually).

    xp -- x coordinate of point p
    yp_pos -- True picks the y value of p in the top half of the curve,
    False the bottom half
    labels_on -- whether to annotate the plotted points

    note: the previous revision declared "global plt" here, but this
    function never assigns (or even reads) plt, so that declaration was
    dead code and has been removed.
    """
    # locate p, then derive 2p, 3p and 4p by repeated point addition
    yp = y_ec(xp, yp_pos)
    p = (xp, yp)
    two_p = add_points(p, p)
    three_p = add_points(p, two_p)
    four_p = add_points(p, three_p)
    # size the plot area so the rightmost of the four points stays in view
    (x2p, y2p) = two_p
    (x3p, y3p) = three_p
    (x4p, y4p) = four_p
    rightmost_x = max(xp, x2p, x3p, x4p)
    init_plot_ec(rightmost_x + 2, color = "y")
    plot_add(p, p, "p", "p", "2p", color = "r", labels_on = labels_on)
    plot_add(p, two_p, "p", "2p", "3p", color = "c", labels_on = labels_on)
    plot_add(p, three_p, "p", "3p", "4p", color = "g", labels_on = labels_on)
    plot_add(two_p, two_p, "2p", "2p", "4p", color = "b", labels_on = labels_on)
plot_4p(xp, yp_pos)
finalize_plot_ec("4p1")
quick_write(
"""notice how the tangent to `2p` and the line through `p` and `3p` both result
in the same intersection with the curve. lets zoom in to check:"""
)
plot_4p(xp, yp_pos, labels_on = False)
plt.axis([-2, 0, -3, 3]) # xmin, xmax, ymin, ymax
finalize_plot_ec("4p1_zoom")
xp = 4
yp_pos = False
quick_write(
"""ok they sure seem to converge on the same point, but maybe `x = 10` is just a
special case? does point addition work for other values of `x`?
lets try `x = %s` in the %s half of the curve:"""
% (xp, "top" if yp_pos else "bottom")
)
plot_4p(xp, yp_pos)
finalize_plot_ec("4p2")
quick_write("so far so good. zooming in:")
plot_4p(xp, yp_pos, labels_on = False)
plt.axis([-0.6, 0.3, -3.5, -1.5]) # xmin, xmax, ymin, ymax
finalize_plot_ec("4p2_zoom")
xp = 3
yp_pos = True
quick_write("""cool. lets do one last check using point `x = %s` in the %s half
of the curve:"""
% (xp, "top" if yp_pos else "bottom")
)
plot_4p(xp, yp_pos)
finalize_plot_ec("4p3")
xp = 10
yp_pos = True
quick_write(
"""well, this point addition on the bitcoin elliptic curve certainly
works in the graphs. but what if the graphs are innaccurate? maybe the point
addition is only approximate and the graphs do not display the inaccuracy...
a more accurate way of testing whether point addition really does work would be
to compute the `x` and `y` coordinates at point `p + p + p + p` and also compute
the `x` and `y` coordinates at point `2p + 2p` and see if they are identical.
lets check for `x = %s` with y in the %s half of the curve:"""
% (xp, "top" if yp_pos else "bottom")
)
# p + p + p + p
yp = y_ec(xp, yp_pos)
p = (xp, yp)
two_p = add_points(p, p)
three_p = add_points(p, two_p)
four_p = add_points(p, three_p)
quick_write(" p + p + p + p = %s" % (four_p, ))
# 2p + 2p
two_p_plus_2p = add_points(two_p, two_p)
quick_write(" 2p + 2p = %s" % (two_p_plus_2p, ))
yp_pos = False
quick_write(
"""cool! clearly they are identical :) however lets check the more
general case where `x` at point `p` is a variable in the %s half of the
curve:"""
% ("top" if yp_pos else "bottom")
)
xp = sympy.symbols("x_p")
yp = y_ec(xp, yp_pos)
p = (xp, yp)
two_p = add_points(p, p)
three_p = add_points(p, two_p)
four_p = add_points(p, three_p)
(x4p, y4p) = four_p
quick_write("at `p + p + p + p`, `x` is computed as:")
quick_equation(
eq = x4p.simplify(),
latex = "x_{(p+p+p+p)} = %s" % sympy.latex(x4p.simplify())
)
quick_write("and `y` is computed as:")
quick_equation(
eq = y4p.simplify(),
latex = "y_{(p+p+p+p)} = %s" % sympy.latex(y4p.simplify())
)
two_p_plus_2p = add_points(two_p, two_p)
(x2p_plus_2p, y2p_plus_2p) = two_p_plus_2p
quick_write("at `2p + 2p`, `x` is computed as:")
quick_equation(
eq = x2p_plus_2p.simplify(),
latex = "x_{(2p+2p)} = %s" % sympy.latex(x2p_plus_2p.simplify())
)
quick_write("and `y` is computed as:")
quick_equation(
eq = y2p_plus_2p.simplify(),
latex = "y_{(2p+2p)} = %s" % sympy.latex(y2p_plus_2p.simplify())
)
quick_write(
"""compare these results and you will see that that they are
identical. this means that addition and multiplication of points on the bitcoin
elliptic curve really does work the same way as regular addition and
multiplication!
%s
### 2. subtraction and halving (infinite field)
just as points can be added together and doubled and on the bitcoin elliptic, so
they can also be subtracted and halved. subtraction is simply the reverse of
addition - ie if we add point `q` to point `p` and arrive at point `r` then
logically if we subtract point `q` from point `r` we should arrive back at `p`:
`p + q = r`, therefore (subtracting `q` from both sides): `p = r - q`. another
way of writing this is `r + (-q) = p`. but what is `-q`? it is simply the
mirroring of point `q` about the `x`-axis:"""
% hr
)
init_plot_ec(x_max = 7)
xp = 5
yp_pos = False
yp = y_ec(xp, yp_pos)
p = (xp, yp)
xq = 1
yq_pos = False
yq = y_ec(xq, yq_pos)
q = (xq, yq)
r = add_points(p, q)
plot_add(p, q, "p", "q", "r", color = "r")
plot_subtract(r, q, "", "-q", "", color = "g")
finalize_plot_ec("point_subtraction1")
quick_write(
"""clearly, subtracting point `q` from point `r` does indeed result in point
`p` - back where we started.
so if subtraction is possible on the bitcoin elliptic curve, then how about
division? well we have already seen how a point can be added to itself - ie a
doubling (`p + p = 2p`), so the converse must also hold true. to get from point
`2p` back to point `p` constitutes a halving operation. but is it possible?
while it is certainly possible to find the tangent to the curve which passes
through a given point, it must be noted that there exist 2 such tangents - one
in the top half of the curve and one in the bottom:"""
)
# works best if you pick a value between cuberoot(-7) and -0.5
x2p = -1.7
y2p_pos = False # 2p is below the x-axis
y2p = y_ec(x2p, y2p_pos)
two_p = (x2p, y2p)
y2q1_pos = False
half_p1 = half_point(two_p, y2q1_pos)
(half_p1_x, half_p1_y) = half_p1
y2q2_pos = True
half_p2 = half_point(two_p, y2q2_pos)
(half_p2_x, half_p2_y) = half_p2
x_max = max(x2p, half_p1_x, half_p2_x)
init_plot_ec(x_max = x_max + 2, color = "m")
plot_add(half_p1, half_p1, "p_1", "", "2p", color = "g")
plot_add(half_p2, half_p2, "p_2", "", "", color = "b")
finalize_plot_ec("point_halving1")
quick_write(
"""this means that it is not possible to conduct a point division
and arrive at a single solution on the bitcoin elliptic curve. note that this
conclusion does not apply to elliptic curves over a finite field, as we will see
later on.
%s
### 3. point addition (finite field)
### 4. subtraction and halving (finite field)
### 5. bitcoin master public keys
### 6. signing a message
### 7. verifying a message signature
### 8. recovering a public key from a signature
### 9. cracking a private key"""
% hr
)
# don't set k too high or it will produce huge numbers that cannot be
# computed and plotted. k = 7 seems to be about the limit for this simple
# script
# k = 7
# xp0 = 10
# yp_pos = True
# output = """
#plot the bitcoin elliptic curve and add point xp = %s (%s y) to itself %s times:
#
#""" % (xp0, "positive" if yp_pos else "negative", k)
# quick_write(output)
#
# # first calculate the rightmost x coordinate for the curve
# yp0 = y_ec(xp0, yp_pos)
# p = []
# p.append((xp0, yp0))
# rightmost_x = xp0 # init
# for i in xrange(1, k + 1):
# p.append(add_points(p[0], p[i - 1]))
# (xpi, ypi) = p[i]
# if xpi > rightmost_x:
# rightmost_x = xpi
#
# init_plot_ec(rightmost_x + 2)
# for i in xrange(1, k + 1):
# # alternate between red and green - makes it easier to distinguish
# # addition lines
# color = "g" if (i % 2) else "r"
# plot_add(p[0], p[i - 1], "p", "" % (), "%sp" % (i + 1), color = color)
# finalize_plot_ec(True if markdown else False, "graph2")
| mulllhausen/visual-secp256k1 | main.py | Python | gpl-2.0 | 12,311 |
##########################################################################
#
# Copyright (c) 2021, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import Gaffer
import GafferSceneUI
# Register Light Editor "attribute" presets for each renderer whose Python
# module is importable, and choose a sensible userDefault as we go: the
# registrations run in order Appleseed -> OSL (3Delight) -> Arnold, so the
# last available renderer wins the userDefault.
if os.environ.get( "GAFFERAPPLESEED_HIDE_UI", "" ) != "1" :
	Gaffer.Metadata.registerValue( GafferSceneUI.LightEditor.Settings, "attribute", "preset:Appleseed", "as:light" )
	# Default to showing Appleseed lights, since that is the renderer we ship with.
	Gaffer.Metadata.registerValue( GafferSceneUI.LightEditor.Settings, "attribute", "userDefault", "as:light" )
with IECore.IgnoredExceptions( ImportError ) :
	# This import appears unused, but it is intentional; it prevents us from
	# adding the OSL lights when 3Delight isn't available.
	import GafferDelight
	import GafferOSL
	# Register each parameter of the standard Maya OSL lights, grouped by
	# its "page" metadata, so the Light Editor can show columns for them.
	shader = GafferOSL.OSLShader()
	for light in [
		"maya/osl/pointLight",
		"maya/osl/spotLight",
		"maya/osl/distantLight",
		"maya/osl/environmentLight"
	] :
		shader.loadShader( light )
		for parameter in shader["parameters"] :
			GafferSceneUI.LightEditor.registerParameter(
				"osl:light", parameter.getName(),
				shader.parameterMetadata( parameter, "page" )
			)
	Gaffer.Metadata.registerValue( GafferSceneUI.LightEditor.Settings, "attribute", "preset:OSL", "osl:light" )
	# If 3Delight is available, then assume it will be used in preference to Appleseed.
	Gaffer.Metadata.registerValue( GafferSceneUI.LightEditor.Settings, "attribute", "userDefault", "osl:light" )
with IECore.IgnoredExceptions( ImportError ) :
	import GafferArnold
	Gaffer.Metadata.registerValue( GafferSceneUI.LightEditor.Settings, "attribute", "preset:Arnold", "ai:light" )
	# If Arnold is available, then assume it is the renderer of choice.
	Gaffer.Metadata.registerValue( GafferSceneUI.LightEditor.Settings, "attribute", "userDefault", "ai:light" )
| hradec/gaffer | startup/gui/lightEditor.py | Python | bsd-3-clause | 3,479 |
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Variable trace objects.
Variable traces indicate the flow of variables and merges their versions for
the SSA (Single State Assignment) form being used in Nuitka.
Variable version can start as:
* Unknown (maybe initialized, maybe not, we cannot know)
* Uninit (definitely not initialized, first version, or after "del" statement)
* Init (definitely initialized, e.g. parameter variables)
* Merge (result of diverged code paths)
"""
from logging import debug
from nuitka.utils import InstanceCounters
class VariableTraceBase:
    """Base class for all variable traces.

    A trace represents one SSA version of one variable inside one owner
    and records how that version is used: a definite usage count,
    potential usages, name usages, releases, and whether the value has
    escaped.  ``previous`` links to the trace(s) this version replaces
    (None, a single trace, or a tuple for merges).
    """
    # We are going to have many instance attributes, pylint: disable=R0902
    @InstanceCounters.counted_init
    def __init__(self, owner, variable, version, previous):
        self.owner = owner
        self.variable = variable
        self.version = version
        # Definite usage indicator.
        self.usage_count = 0
        # Potential usages indicator that an assignment value may be used.
        self.has_potential_usages = False
        # If False, this indicates the trace has no explicit releases.
        self.has_releases = False
        # If False, this indicates, the variable name needs to be assigned.
        self.has_name_usages = False
        # If False, this indicates that the value is not yet escaped.
        self.is_escaped = False
        # Previous trace this is replacing.
        self.previous = previous
    __del__ = InstanceCounters.counted_del()
    def getVariable(self):
        return self.variable
    def getVersion(self):
        return self.version
    def addUsage(self):
        # Record one definite usage of this version's value.
        self.usage_count += 1
    def addPotentialUsage(self):
        self.has_potential_usages = True
    def addRelease(self):
        self.has_releases = True
    def addNameUsage(self):
        # A usage of the variable *name* (e.g. "del" or exec) also counts
        # as a definite usage.
        self.usage_count += 1
        self.has_name_usages = True
    def onValueEscape(self):
        self.is_escaped = True
    def isEscaped(self):
        return self.is_escaped
    def hasDefiniteUsages(self):
        return self.usage_count > 0
    def getDefiniteUsages(self):
        return self.usage_count
    def hasPotentialUsages(self):
        return self.has_potential_usages
    def hasNameUsages(self):
        return self.has_name_usages
    def getPrevious(self):
        return self.previous
    # Trace kind predicates; subclasses override exactly one to return True.
    @staticmethod
    def isAssignTrace():
        return False
    @staticmethod
    def isUninitTrace():
        return False
    @staticmethod
    def isInitTrace():
        return False
    @staticmethod
    def isUnknownTrace():
        return False
    @staticmethod
    def isMergeTrace():
        return False
    def mustHaveValue(self):
        # True only if the variable is certainly initialized at this point.
        # TODO: Temporarily disable far reaching of assumptions, until value
        # escaping can be trusted.
        if self.variable.isModuleVariable() or \
           self.variable.isMaybeLocalVariable() or \
           self.variable.isSharedTechnically() is not False:
            return False
        # Merge traces have this overloaded.
        return self.isInitTrace() or self.isAssignTrace()
    def mustNotHaveValue(self):
        # True only if the variable is certainly uninitialized at this point.
        if self.variable.isModuleVariable() or \
           self.variable.isSharedTechnically() is not False:
            return False
        return self.isUninitTrace()
    def getReplacementNode(self, usage):
        # Virtual method, pylint: disable=R0201,W0613
        return None
    def hasShapeDictionaryExact(self):
        # Virtual method, pylint: disable=R0201
        return False
class VariableTraceUninit(VariableTraceBase):
    """Trace for a variable version known to be uninitialized.

    This is the state before the first assignment, and the state entered
    again after a "del" statement.
    """
    def __init__(self, owner, variable, version, previous):
        VariableTraceBase.__init__(
            self,
            owner    = owner,
            variable = variable,
            version  = version,
            previous = previous
        )
    @staticmethod
    def isUninitTrace():
        return True
    def __repr__(self):
        return "<VariableTraceUninit %s %s>" % (self.variable, self.version)
    def dump(self):
        # Debugging helper, mirrors the dumps of the other trace kinds.
        debug("Trace of %s %d:", self.variable, self.version)
        debug(" Starts out uninitialized")
        if self.usage_count:
            debug(" -> has %s usages" % self.usage_count)
        if self.is_escaped:
            debug(" -> value escapes")
        if self.has_releases:
            debug(" -> has released")
class VariableTraceInit(VariableTraceBase):
    """Trace for a variable version that starts out initialized.

    Used e.g. for parameter variables, which are assigned on entry.
    """
    def __init__(self, owner, variable, version):
        # Nothing precedes an init trace, so "previous" is always None.
        VariableTraceBase.__init__(
            self,
            owner    = owner,
            variable = variable,
            version  = version,
            previous = None
        )
    @staticmethod
    def isInitTrace():
        return True
    def __repr__(self):
        return "<VariableTraceInit %s %s>" % (self.variable, self.version)
    def dump(self):
        # Debugging helper, mirrors the dumps of the other trace kinds.
        debug("Trace of %s %d:", self.variable, self.version)
        debug(" Starts initialized")
        if self.usage_count:
            debug(" -> has %s usages" % self.usage_count)
        if self.is_escaped:
            debug(" -> value escapes")
        if self.has_releases:
            debug(" -> has released")
class VariableTraceUnknown(VariableTraceBase):
    """Trace for a variable version whose state cannot be determined.

    The variable may or may not be initialized here, so usages get
    forwarded to the previous trace as potential usages.
    """
    def __init__(self, owner, variable, version, previous):
        VariableTraceBase.__init__(
            self,
            owner    = owner,
            variable = variable,
            version  = version,
            previous = previous
        )
    @staticmethod
    def isUnknownTrace():
        return True
    def __repr__(self):
        return "<VariableTraceUnknown %s %s>" % (self.variable, self.version)
    def dump(self):
        # Debugging helper, mirrors the dumps of the other trace kinds.
        debug("Trace of %s %d:", self.variable, self.version)
        debug(" Starts unknown")
        if self.usage_count:
            debug(" -> has %s usages" % self.usage_count)
        if self.is_escaped:
            debug(" -> value escapes")
        if self.has_releases:
            debug(" -> has released")
    def addUsage(self):
        # A usage here might also use the previous version's value.
        self.usage_count += 1
        previous = self.previous
        if previous is not None:
            previous.addPotentialUsage()
    def addNameUsage(self):
        # Count the usage locally (which notifies "previous" of a potential
        # usage), then forward the name usage itself as well.  Note that,
        # unlike the base class, this does not set has_name_usages on the
        # unknown trace itself.
        self.addUsage()
        previous = self.previous
        if previous is not None:
            previous.addNameUsage()
    def addPotentialUsage(self):
        # Only propagate on the first notification, avoiding repeated walks
        # over the trace chain.
        if not self.has_potential_usages:
            self.has_potential_usages = True
            previous = self.previous
            if previous is not None:
                previous.addPotentialUsage()
class VariableTraceAssign(VariableTraceBase):
    """Trace for a variable version created by an assignment node."""
    def __init__(self, owner, assign_node, variable, version, previous):
        VariableTraceBase.__init__(
            self,
            owner    = owner,
            variable = variable,
            version  = version,
            previous = previous
        )
        # The assignment statement node that created this version.
        self.assign_node = assign_node
        # Optional factory producing replacement nodes for usages, set
        # later via setReplacementNode by the optimization.
        self.replace_it = None
    @staticmethod
    def isAssignTrace():
        return True
    def __repr__(self):
        return "<VariableTraceAssign %s %s at %s>" % (
            self.variable,
            self.version,
            self.assign_node.getSourceReference().getAsString()
        )
    def dump(self):
        # Debugging helper, mirrors the dumps of the other trace kinds.
        debug("Trace of %s %d:", self.variable, self.version)
        debug(" Starts assigned")
        if self.usage_count:
            debug(" -> has %s usages" % self.usage_count)
        if self.is_escaped:
            debug(" -> value escapes")
        if self.has_releases:
            debug(" -> has released")
    def getAssignNode(self):
        return self.assign_node
    def setReplacementNode(self, replacement):
        self.replace_it = replacement
    def getReplacementNode(self, usage):
        replace_it = self.replace_it
        return None if replace_it is None else replace_it(usage)
    def hasShapeDictionaryExact(self):
        # The assigned value decides the shape of this version.
        return self.assign_node.getAssignSource().hasShapeDictionaryExact()
class VariableTraceMerge(VariableTraceBase):
    """ Merge of two or more traces.

        Happens at the end of conditional blocks. This is "phi" in
        SSA theory. Also used for merging multiple "return", "break" or
        "continue" exits.
    """
    def __init__(self, variable, version, traces):
        # All merged traces share the same owner, take it from the first.
        VariableTraceBase.__init__(
            self,
            owner    = traces[0].owner,
            variable = variable,
            version  = version,
            previous = tuple(traces)
        )
    def __repr__(self):
        return """\
<VariableTraceMerge {variable} {version} of {previous}>""".format(
            variable = self.variable,
            version  = self.version,
            previous = tuple(previous.getVersion() for previous in self.previous)
        )
    @staticmethod
    def isMergeTrace():
        return True
    def dump(self):
        # Debugging helper, mirrors the dumps of the other trace kinds.
        debug(
            "Trace of %s %d:",
            self.variable,
            self.version
        )
        # Bug fix: "self.previous" is a tuple of trace objects, not of
        # strings, so joining it directly raised TypeError; format each
        # merged trace before joining.
        debug(
            " Merge of %s",
            " <-> ".join(str(previous) for previous in self.previous),
        )
    def mustHaveValue(self):
        # True only if every merged branch certainly provides a value.
        # TODO: Temporarily disable far reaching of assumptions, until value
        # escaping can be trusted.
        if self.variable.isModuleVariable() or \
           self.variable.isSharedTechnically() is not False:
            return False
        for previous in self.previous:
            if not previous.isInitTrace() and not previous.isAssignTrace():
                return False
        return True
    def mustNotHaveValue(self):
        # True only if every merged branch is certainly uninitialized.
        if self.variable.isModuleVariable() or \
           self.variable.isSharedTechnically() is not False:
            return False
        for previous in self.previous:
            if not previous.isUninitTrace():
                return False
        return True
    def addUsage(self):
        # A definite usage of the merge is a potential usage of every input.
        self.usage_count += 1
        for previous in self.previous:
            previous.addPotentialUsage()
    def addNameUsage(self):
        # Name usages propagate to every merged input as well.
        self.usage_count += 1
        for previous in self.previous:
            previous.addPotentialUsage()
            previous.addNameUsage()
    def addPotentialUsage(self):
        # Only propagate on the first notification, avoiding repeated walks.
        old = self.has_potential_usages
        if not old:
            self.has_potential_usages = True
            for previous in self.previous:
                previous.addPotentialUsage()
    def hasShapeDictionaryExact(self):
        # Exact dictionary shape only if all merged branches have it.
        for previous in self.previous:
            if not previous.hasShapeDictionaryExact():
                return False
        return True
class VariableTraceLoopMerge(VariableTraceBase):
    """ Merge of loop wrap around with loop start value.

        Happens at the start of loop blocks. This is for loop closed SSA,
        making it clear that the entered value cannot be trusted inside
        the loop.  Starts out with just the single incoming trace, and is
        later updated with all the variable versions at loop continue
        times via addLoopContinueTraces.
    """
    def __init__(self, variable, version, previous):
        VariableTraceBase.__init__(
            self,
            owner    = previous.owner,
            variable = variable,
            version  = version,
            previous = previous
        )
        # Until the loop has been completely traced, every query must be
        # answered pessimistically.
        self.loop_finished = False
        previous.addPotentialUsage()
    @staticmethod
    def isMergeTrace():
        return True
    def hasDefiniteUsages(self):
        # Pessimistic while the loop is still open.
        return not self.loop_finished or self.usage_count > 0
    def hasPotentialUsages(self):
        # Pessimistic while the loop is still open.
        return not self.loop_finished or self.has_potential_usages
    def hasNameUsages(self):
        # Pessimistic while the loop is still open.
        return not self.loop_finished or self.has_name_usages
    def getPrevious(self):
        # Only meaningful once the loop has been completely traced.
        assert self.loop_finished
        return self.previous
    def addLoopContinueTraces(self, continue_traces):
        # Everything feeding back into the loop head is potentially used.
        self.previous.addPotentialUsage()
        for continue_trace in continue_traces:
            continue_trace.addPotentialUsage()
        # From now on, "previous" is a tuple as with normal merges.
        self.previous = (self.previous,) + tuple(continue_traces)
| fluxer/spm | nuitka/nuitka/optimizations/VariableTraces.py | Python | gpl-2.0 | 13,195 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
from snapcraft.internal import deprecations
from . import echo
from . import env
# Old command names that were renamed; using one warns (or shows a formal
# deprecation notice, see below) and dispatches to the replacement.
_CMD_DEPRECATED_REPLACEMENTS = {
    "strip": "prime",
    "upload": "push",
    "history": "list-revisions",
}
# Short aliases that silently resolve to their canonical command names.
_CMD_ALIASES = {
    "registered": "list-registered",
    "keys": "list-keys",
    "revisions": "list-revisions",
    "plugins": "list-plugins",
    "collaborators": "edit-collaborators",
    "extensions": "list-extensions",
}
# Deprecated commands with a documented deprecation-notice id, handled by
# snapcraft.internal.deprecations instead of the generic warning.
_CMD_DEPRECATION_NOTICES = {"history": "dn4"}
class SnapcraftGroup(click.Group):
    """Click command group that resolves deprecated names and aliases,
    and hides commands that should not be advertised."""

    def get_command(self, ctx, cmd_name):
        """Look up *cmd_name*, redirecting deprecated names (with a
        warning or deprecation notice) and resolving aliases."""
        replacement = _CMD_DEPRECATED_REPLACEMENTS.get(cmd_name)
        if replacement is None:
            # Not deprecated: resolve aliases, falling back to the name.
            resolved = _CMD_ALIASES.get(cmd_name, cmd_name)
            return click.Group.get_command(self, ctx, resolved)
        notice = _CMD_DEPRECATION_NOTICES.get(cmd_name)
        if notice:
            deprecations.handle_deprecation_notice(notice)
        else:
            echo.warning(
                "DEPRECATED: Use {!r} instead of {!r}".format(
                    replacement, cmd_name
                )
            )
        return click.Group.get_command(self, ctx, replacement)

    def list_commands(self, ctx):
        """Return the advertised command names, dropping hidden ones."""
        commands = super().list_commands(ctx)
        # Let's keep edit-collaborators hidden until we get the green light
        # from the store.
        commands.remove("edit-collaborators")
        # Inspect is for internal usage: hide it
        commands.remove("inspect")
        build_environment = env.BuilderEnvironmentConfig()
        if build_environment.is_host:
            commands.remove("refresh")
        return commands
| sergiusens/snapcraft | snapcraft/cli/_command_group.py | Python | gpl-3.0 | 2,446 |
#!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# SRCommand: parse SR command-line objects
#
import XenAPI
import sys, errno, syslog
import xs_errors
import xmlrpclib
import SR, VDI, util
import blktap2
import resetvdis
import os
import copy
# Commands that operate on an individual VDI; for these SRCommand
# instantiates (and possibly locks around creating) a VDI object before
# dispatching, see SRCommand._run_locked.
NEEDS_VDI_OBJECT = [
        "vdi_update", "vdi_create", "vdi_delete", "vdi_snapshot", "vdi_clone",
        "vdi_resize", "vdi_resize_online", "vdi_attach", "vdi_detach",
        "vdi_activate", "vdi_deactivate", "vdi_attach_from_config",
        "vdi_generate_config", "vdi_compose",
        "vdi_epoch_begin", "vdi_epoch_end" ]
# don't log the commands that spam the log file too much
NO_LOGGING = {
    "iso": ["sr_scan"],
    "nfs_iso": ["sr_scan"]
}
# Maps a command name to the XenError class raised when it fails;
# commands not listed fall back to "SMGeneral" (see SRCommand.run).
EXCEPTION_TYPE = {
        "sr_scan" : "SRScan",
        "vdi_init" : "VDILoad",
        "vdi_create" : "VDICreate",
        "vdi_delete" : "VDIDelete",
        "vdi_attach" : "VDIUnavailable",
        "vdi_detach" : "VDIUnavailable",
        "vdi_activate" : "VDIUnavailable",
        "vdi_deactivate" : "VDIUnavailable",
        "vdi_resize" : "VDIResize",
        "vdi_resize_online" : "VDIResize",
        "vdi_snapshot" : "VDISnapshot",
        "vdi_clone" : "VDIClone",
}
class SRCommand:
    def __init__(self, driver_info):
        """Create an empty command holder for an SR backend driver.

        driver_info -- the capability/description dictionary the driver
        registers; echoed back verbatim by run_statics for
        sr_get_driver_info.  The remaining fields are filled by parse().
        """
        self.dconf = ''          # device_config struct from the request
        self.type = ''
        self.sr_uuid = ''
        self.cmdname = ''
        self.cmdtype = ''
        self.cmd = None          # XML-RPC method name, e.g. "vdi_attach"
        self.args = None
        self.driver_info = driver_info
    def parse(self):
        """Parse sys.argv[1] as an XML-RPC methodCall into this object.

        Expects exactly one commandline argument: an XML-RPC encoded call
        whose single parameter is a struct.  Populates self.cmd,
        self.params, self.dconf, self.sr_uuid and self.vdi_uuid
        (generating a fresh uuid for vdi_create when none is supplied).
        Raises xs_errors.XenError('BadRequest') on any parse failure.
        """
        if len(sys.argv) <> 2:
            util.SMlog("Failed to parse commandline; wrong number of arguments; argv = %s" % (repr(sys.argv)))
            raise xs_errors.XenError('BadRequest')

        # Debug logging of the actual incoming command from the caller.
        # util.SMlog( "" )
        # util.SMlog( "SM.parse: DEBUG: args = %s,\n%s" % \
        #             ( sys.argv[0], \
        #             util.splitXmlText( util.hideMemberValuesInXmlParams( \
        #             sys.argv[1] ), showContd=True ) ), \
        #             priority=syslog.LOG_DEBUG )

        try:
            params, methodname = xmlrpclib.loads(sys.argv[1])
            self.cmd = methodname
            params = params[0] # expect a single struct
            self.params = params

            # params is a dictionary
            self.dconf = params['device_config']
            if params.has_key('sr_uuid'):
                self.sr_uuid = params['sr_uuid']
            if params.has_key('vdi_uuid'):
                self.vdi_uuid = params['vdi_uuid']
            elif self.cmd == "vdi_create":
                # no uuid supplied for a brand new VDI - generate one here
                self.vdi_uuid = util.gen_uuid ()
        except Exception, e:
            util.SMlog("Failed to parse commandline; exception = %s argv = %s" % (str(e), repr(sys.argv)))
            raise xs_errors.XenError('BadRequest')
    def run_statics(self):
        """Handle commands that need no SR object.

        sr_get_driver_info is answered directly from self.driver_info and
        the process exits; every other command falls through to run().
        """
        if self.params['command'] == 'sr_get_driver_info':
            print util.sr_get_driver_info(self.driver_info)
            sys.exit(0)
def run(self, sr):
try:
return self._run_locked(sr)
except (util.CommandException, util.SMException, XenAPI.Failure), e:
util.logException(self.cmd)
msg = str(e)
if isinstance(e, util.CommandException):
msg = "Command %s failed (%s): %s" % \
(e.cmd, e.reason, os.strerror(abs(e.code)))
excType = EXCEPTION_TYPE.get(self.cmd)
if not excType:
excType = "SMGeneral"
raise xs_errors.XenError(excType, opterr=msg)
except blktap2.TapdiskFailed, e:
util.logException('tapdisk failed exception: %s' % e)
raise xs_errors.XenError('TapdiskFailed',
os.strerror(e.get_error().get_error_code()))
except blktap2.TapdiskExists, e:
util.logException('tapdisk exists exception: %s' % e)
raise xs_errors.XenError('TapdiskAlreadyRunning', e.__str__())
except:
util.logException('generic exception: %s' % self.cmd)
raise
def _run_locked(self, sr):
lockSR = False
lockInitOnly = False
rv = None
e = None
if self.cmd in sr.ops_exclusive:
lockSR = True
elif self.cmd in NEEDS_VDI_OBJECT and "vdi_init" in sr.ops_exclusive:
lockInitOnly = True
target = None
acquired = False
if lockSR or lockInitOnly:
sr.lock.acquire()
acquired = True
try:
try:
if self.cmd in NEEDS_VDI_OBJECT:
target = sr.vdi(self.vdi_uuid)
finally:
if acquired and lockInitOnly:
sr.lock.release()
acquired = False
try:
rv = self._run(sr, target)
except Exception, e:
raise
finally:
if acquired:
sr.lock.release()
try:
sr.cleanup()
except Exception, e1:
msg = 'failed to clean up SR: %s' % e1
if not e:
util.SMlog(msg)
raise e1
else:
util.SMlog('WARNING: %s (error ignored)' % msg)
return rv
def _run(self, sr, target):
dconf_type = sr.dconf.get("type")
if not dconf_type or not NO_LOGGING.get(dconf_type) or \
not self.cmd in NO_LOGGING[dconf_type]:
if 'device_config' in self.params:
util.SMlog("%s %s" % (self.cmd, util.hidePasswdInParams(self.params,'device_config')))
else:
util.SMlog("%s %s" % (self.cmd, repr(self.params)))
caching_params = dict((k, self.params.get(k)) for k in \
[blktap2.VDI.CONF_KEY_ALLOW_CACHING,
blktap2.VDI.CONF_KEY_MODE_ON_BOOT,
blktap2.VDI.CONF_KEY_CACHE_SR,
blktap2.VDI.CONF_KEY_O_DIRECT])
if self.cmd == 'vdi_create':
# These are the fields owned by the backend, passed on the
# commandline:
# LVM SRs store their metadata in XML format. XML does not support
# all unicode characters, so we must check if the label or the
# description contain such characters. We must enforce this
# restriction to other SRs as well (even if they do allow these
# characters) in order to be consistent.
target.label = self.params['args'][1]
target.description = self.params['args'][2]
if not util.isLegalXMLString(target.label) \
or not util.isLegalXMLString(target.description):
raise xs_errors.XenError('IllegalXMLChar', \
opterr = 'The name and/or description you supplied contains one or more unsupported characters. The name and/or description must contain valid XML characters. See http://www.w3.org/TR/2004/REC-xml-20040204/#charsets for more information.')
target.ty = self.params['vdi_type']
target.metadata_of_pool = self.params['args'][3]
target.is_a_snapshot = self.params['args'][4] == "true"
target.snapshot_time = self.params['args'][5]
target.snapshot_of = self.params['args'][6]
target.read_only = self.params['args'][7] == "true"
return target.create(self.params['sr_uuid'], self.vdi_uuid, long(self.params['args'][0]))
elif self.cmd == 'vdi_update':
# Check for invalid XML characters, similar to VDI.create right
# above.
vdi_ref = sr.session.xenapi.VDI.get_by_uuid(self.vdi_uuid)
name_label = sr.session.xenapi.VDI.get_name_label(vdi_ref)
description = sr.session.xenapi.VDI.get_name_description(vdi_ref)
if not util.isLegalXMLString(name_label) \
or not util.isLegalXMLString(description):
raise xs_errors.XenError('IllegalXMLChar', \
opterr = 'The name and/or description you supplied contains one or more unsupported characters. The name and/or description must contain valid XML characters. See http://www.w3.org/TR/2004/REC-xml-20040204/#charsets for more information.')
return target.update(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_introduce':
target = sr.vdi(self.params['new_uuid'])
return target.introduce(self.params['sr_uuid'], self.params['new_uuid'])
elif self.cmd == 'vdi_delete':
return target.delete(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_attach':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
writable = self.params['args'][0] == 'true'
return target.attach(self.params['sr_uuid'], self.vdi_uuid, writable, caching_params = caching_params)
elif self.cmd == 'vdi_detach':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
return target.detach(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_snapshot':
return target.snapshot(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_clone':
return target.clone(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_resize':
return target.resize(self.params['sr_uuid'], self.vdi_uuid, long(self.params['args'][0]))
elif self.cmd == 'vdi_resize_online':
return target.resize_online(self.params['sr_uuid'], self.vdi_uuid, long(self.params['args'][0]))
elif self.cmd == 'vdi_activate':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
writable = self.params['args'][0] == 'true'
return target.activate(self.params['sr_uuid'], self.vdi_uuid,
writable, caching_params)
elif self.cmd == 'vdi_deactivate':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
return target.deactivate(self.params['sr_uuid'], self.vdi_uuid,
caching_params)
elif self.cmd == 'vdi_epoch_begin':
if caching_params.get(blktap2.VDI.CONF_KEY_MODE_ON_BOOT) != "reset":
return
if not "VDI_RESET_ON_BOOT/2" in self.driver_info['capabilities']:
raise xs_errors.XenError('Unimplemented')
return target.reset_leaf(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_epoch_end':
return
elif self.cmd == 'vdi_generate_config':
return target.generate_config(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_compose':
vdi1_uuid = sr.session.xenapi.VDI.get_uuid(self.params['args'][0])
return target.compose(self.params['sr_uuid'], vdi1_uuid, self.vdi_uuid)
elif self.cmd == 'vdi_attach_from_config':
try:
if self.params.has_key('allocation') \
and self.params['allocation'] == 'xlvhd':
os.environ['THIN_STATE_FILE_ATTACH'] = "true"
target.sr._write_vginfo(self.params['sr_uuid'])
ret = target.attach_from_config(self.params['sr_uuid'], self.vdi_uuid)
if not target.sr.driver_config.get("ATTACH_FROM_CONFIG_WITH_TAPDISK"):
return ret
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
return target.attach(self.params['sr_uuid'], self.vdi_uuid, True, True)
finally:
os.unsetenv('THIN_STATE_FILE_ATTACH')
elif self.cmd == 'sr_create':
return sr.create(self.params['sr_uuid'], long(self.params['args'][0]))
elif self.cmd == 'sr_delete':
return sr.delete(self.params['sr_uuid'])
elif self.cmd == 'sr_update':
return sr.update(self.params['sr_uuid'])
elif self.cmd == 'sr_probe':
txt = sr.probe()
util.SMlog( "sr_probe result: %s" % util.splitXmlText( txt, showContd=True ) )
# return the XML document as a string
return xmlrpclib.dumps((txt,), "", True)
elif self.cmd == 'sr_attach':
is_master = False
if sr.dconf.get("SRmaster") == "true":
is_master = True
resetvdis.reset_sr(sr.session, util.get_this_host(),
self.params['sr_uuid'], is_master)
if is_master:
# Schedule a scan only when attaching on the SRmaster
util.set_dirty(sr.session, self.params["sr_ref"])
return sr.attach(self.params['sr_uuid'])
elif self.cmd == 'sr_detach':
return sr.detach(self.params['sr_uuid'])
elif self.cmd == 'sr_content_type':
return sr.content_type(self.params['sr_uuid'])
elif self.cmd == 'sr_scan':
return sr.scan(self.params['sr_uuid'])
else:
util.SMlog("Unknown command: %s" % self.cmd)
raise xs_errors.XenError('BadRequest')
def run(driver, driver_info):
    """Convenience entry point: parse the command line, instantiate the
    given SR *driver* and execute the requested command.

    Prints the XML-RPC result (or a nil response) on success; SRException
    failures are serialized back to xapi via toxml(), anything else is
    re-raised. Always exits the process with status 0 on the normal path.
    """
    cmd = SRCommand(driver_info)
    try:
        cmd.parse()
        cmd.run_statics()
        sr = driver(cmd, cmd.sr_uuid)
        sr.direct = True
        ret = cmd.run(sr)

        # 'is None' (identity), not '== None': a command may legitimately
        # return an object whose __eq__ is loose
        if ret is None:
            print(util.return_nil())
        else:
            print(ret)
    except (Exception, SR.SRException) as e:
        # Best-effort logging: never let a logging problem hide the error
        try:
            util.logException(driver_info['name'])
        except KeyError:
            util.SMlog('driver_info does not contain a \'name\' key.')
        except:
            pass

        # If exception is of type SR.SRException,
        # pass to xapi, else re-raise.
        if isinstance(e, SR.SRException):
            print(e.toxml())
        else:
            raise

    sys.exit(0)
| pritha-srivastava/sm | drivers/SRCommand.py | Python | lgpl-2.1 | 14,893 |
"""
Support for Keene Electronics IR-IP devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/remote.kira/
"""
import logging
import functools as ft
import homeassistant.components.remote as remote
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
STATE_UNKNOWN,
CONF_DEVICE,
CONF_NAME)
# Component domain and the hass.data sub-key under which the kira component
# stores its module objects (read back in setup_platform below).
DOMAIN = 'kira'

_LOGGER = logging.getLogger(__name__)

CONF_REMOTE = "remote"
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Kira remote platform from discovery info."""
    # Only discovery-driven setup is supported; plain YAML config is ignored.
    if not discovery_info:
        return True
    module_name = discovery_info.get(CONF_NAME)
    device_name = discovery_info.get(CONF_DEVICE)
    kira_module = hass.data[DOMAIN][CONF_REMOTE][module_name]
    add_devices([KiraRemote(device_name, kira_module)])
    return True
class KiraRemote(Entity):
    """Representation of a Kira IR module used to transmit remote codes."""

    def __init__(self, name, kira):
        """Initialize KiraRemote class."""
        _LOGGER.debug("KiraRemote device init started for: %s", name)
        self._name = name
        self._state = STATE_UNKNOWN
        self._kira = kira

    @property
    def name(self):
        """Return the Kira device's name."""
        return self._name

    @property
    def device_state_attributes(self):
        """Add platform specific attributes."""
        return {}

    @property
    def is_on(self):
        """Return True. Power state doesn't apply to this device."""
        return True

    def update(self):
        """No-op."""

    def send_command(self, **kwargs):
        """Send a command to one device."""
        command = kwargs.get(remote.ATTR_COMMAND)
        device = kwargs.get(remote.ATTR_DEVICE)
        _LOGGER.info("Sending Command: %s to %s", command, device)
        self._kira.sendCode((command, device))

    def async_send_command(self, **kwargs):
        """Send a command to a device.

        This method must be run in the event loop and returns a coroutine.
        """
        send = ft.partial(self.send_command, **kwargs)
        return self.hass.loop.run_in_executor(None, send)
| shaftoe/home-assistant | homeassistant/components/remote/kira.py | Python | apache-2.0 | 2,129 |
from django import forms
from mozdns.forms import BaseForm
from mozdns.nameserver.models import Nameserver
from mozdns.address_record.models import AddressRecord
from core.registration.static.models import StaticReg
class NameserverForm(BaseForm):
    """Add/edit form for Nameserver records.

    The two glue foreign keys (addr_glue / sreg_glue) are excluded from the
    auto-generated fields; when the bound instance actually requires glue,
    ``__init__`` builds one combined 'glue' choice field whose values are
    prefixed with 'addr_' or 'sreg_' to record which model each choice
    came from.
    """
    class Meta:
        model = Nameserver
        exclude = ('addr_glue', 'sreg_glue')
        fields = ('domain', 'server', 'ttl', 'description', 'views')
        widgets = {'views': forms.CheckboxSelectMultiple}

    def __init__(self, *args, **kwargs):
        super(NameserverForm, self).__init__(*args, **kwargs)
        # Present domains alphabetically; choices are (pk, name) pairs so
        # sort on the display name.
        self.fields['domain'].choices = sorted(
            self.fields['domain'].choices, key=lambda d: d[1]
        )
        if not self.instance:
            return
        if not self.instance.glue:
            # If it doesn't have glue, it doesn't need it.
            return
        # Candidate glue records: address records and static registrations
        # sharing the current glue's label and domain.
        addr_glue = AddressRecord.objects.filter(
            label=self.instance.glue.label,
            domain=self.instance.glue.domain)
        sreg_glue = StaticReg.objects.filter(
            label=self.instance.glue.label,
            domain=self.instance.glue.domain)
        glue_choices = []
        for glue in addr_glue:
            glue_choices.append(("addr_{0}".format(glue.pk), str(glue)))
        for glue in sreg_glue:
            glue_choices.append(("sreg_{0}".format(glue.pk), str(glue)))
        # Pre-select whichever glue record the instance currently points at.
        # NOTE(review): 'initial' is unbound if glue is neither type —
        # presumably impossible given the model, but worth confirming.
        if isinstance(self.instance.glue, AddressRecord):
            initial = "addr_{0}".format(self.instance.glue.pk)
        elif isinstance(self.instance.glue, StaticReg):
            initial = "sreg_{0}".format(self.instance.glue.pk)
        self.fields['glue'] = forms.ChoiceField(choices=glue_choices,
                                                initial=initial)
class NSDelegated(forms.Form):
    # Plain (non-model) form collecting a server name and its IP address;
    # presumably used when creating a nameserver in a delegated domain —
    # confirm against the consuming view.
    server = forms.CharField()
    server_ip_address = forms.CharField()
| mozilla/inventory | mozdns/nameserver/forms.py | Python | bsd-3-clause | 1,846 |
"""Support for BSH Home Connect appliances."""
from datetime import timedelta
import logging
from requests import HTTPError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import Throttle
from . import api, config_flow
from .const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
_LOGGER = logging.getLogger(__name__)

# Minimum interval between full device refreshes (enforced by @Throttle on
# update_all_devices below).
SCAN_INTERVAL = timedelta(minutes=1)

# YAML schema: OAuth client credentials for the Home Connect developer API.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_CLIENT_ID): cv.string,
                vol.Required(CONF_CLIENT_SECRET): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Entity platforms forwarded for each config entry.
PLATFORMS = [Platform.BINARY_SENSOR, Platform.LIGHT, Platform.SENSOR, Platform.SWITCH]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up Home Connect component.

    Initializes the per-entry storage dict and, when YAML credentials are
    present, registers them as a local OAuth2 implementation so the config
    flow can use them. Always returns True.
    """
    hass.data[DOMAIN] = {}
    if DOMAIN not in config:
        return True
    config_flow.OAuth2FlowHandler.async_register_implementation(
        hass,
        config_entry_oauth2_flow.LocalOAuth2Implementation(
            hass,
            DOMAIN,
            config[DOMAIN][CONF_CLIENT_ID],
            config[DOMAIN][CONF_CLIENT_SECRET],
            OAUTH2_AUTHORIZE,
            OAUTH2_TOKEN,
        ),
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Home Connect from a config entry.

    Resolves the OAuth2 implementation for the entry, wraps it in
    api.ConfigEntryAuth, performs an initial device refresh and then
    forwards setup to all entity PLATFORMS.
    """
    implementation = (
        await config_entry_oauth2_flow.async_get_config_entry_implementation(
            hass, entry
        )
    )
    hc_api = api.ConfigEntryAuth(hass, entry, implementation)
    hass.data[DOMAIN][entry.entry_id] = hc_api
    # Populate devices before the platforms are set up
    await update_all_devices(hass, entry)
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry.

    Unloads all platforms first and only drops the stored API object when
    every platform unloaded successfully.
    """
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
@Throttle(SCAN_INTERVAL)
async def update_all_devices(hass, entry):
    """Update all the devices.

    Throttled to at most one run per SCAN_INTERVAL. The homeconnect client
    is blocking, so both the device listing and each device's initialize()
    are pushed to the executor. HTTP failures are logged, not raised.
    """
    data = hass.data[DOMAIN]
    hc_api = data[entry.entry_id]
    try:
        await hass.async_add_executor_job(hc_api.get_devices)
        for device_dict in hc_api.devices:
            await hass.async_add_executor_job(device_dict["device"].initialize)
    except HTTPError as err:
        _LOGGER.warning("Cannot update devices: %s", err.response.status_code)
| home-assistant/home-assistant | homeassistant/components/home_connect/__init__.py | Python | apache-2.0 | 2,825 |
#############
# Count number of strings in list that are the same
#
#
#############
from collections import defaultdict
import os
import numpy as np
import cPickle as pickle
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Python 2 script (cPickle, dict.items() indexing): loads pickled Indian
# radiosonde soundings and plots each station's position with its sounding
# count on a cylindrical Basemap.
data, in_header, np_soundings = pickle.load(open('india_radiosonde_aug_sep2011.p', 'rb'))

# Tally soundings per station name (in_header[8] holds one name per sounding)
d = defaultdict(int)
for q, location in enumerate(in_header[8]):
    d[location] += 1

# Count distinct stations to pre-size the per-station lists below
lc = 0
for i2, c2 in enumerate(d):
    lc = lc + 1
st_lat_c = [0] * lc
st_lon_c = [0] * lc
st_nam_c = [0] * lc
st_cnt_c = [0] * lc
st_namcnt_c = [0] * lc

# For each station, find a matching header row and record its lat/lon,
# title-cased name, count, and a "Name: count" label for plotting.
# NOTE(review): every matching row overwrites slot i, so the values kept
# are those of the *last* matching sounding — confirm this is intended.
for i, c in enumerate(d):
    for p, l in enumerate(in_header[8]):
        if c in l:
            st_lat_c[i] = float(in_header[5][p])
            st_lon_c[i] = float(in_header[6][p])
            st_nam_c[i] = (d.items()[i][0]).title()
            st_cnt_c[i] = str(d.items()[i][1])
            st_namcnt_c[i] = st_nam_c[i] + ': ' + st_cnt_c[i]
stt = st_nam_c, st_lat_c, st_lon_c, st_cnt_c
st_name_count = np.array(stt)
# d.items()[location][0], d.items()[location][1]
# PLOT TIME AND DATE vS FREQ ALL STATIONS
# create figure and axes instances
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# create polar stereographic Basemap instance.
# m = Basemap(projection='ste',lon_0=lon_mid,lat_0=lat_mid,lat_ts=lat_mid,\
m = Basemap(projection='cyl',
            llcrnrlat=-10., urcrnrlat=30.,
            llcrnrlon=60., urcrnrlon=105.,
            rsphere=6371200., resolution='l', area_thresh=10000)
# draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines()
# draw parallels.
parallels = np.arange(0., 90, 10.)
m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=10)
# draw meridians
meridians = np.arange(0., 360., 10.)
m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=10)
# Project station lon/lat into map coordinates and scatter-plot them
x, y = m(st_lon_c, st_lat_c)
m.scatter(x, y, 3, marker='o', color='red')
# Annotate each station marker with "Name: count"
for i, j, s in zip(x, y, st_namcnt_c):
    plt.text(i, j, s, fontsize=7)
plt.title('Position and number of soundings in August and September 2011')
plt.show()
| peterwilletts24/Python-Scripts | plot_scripts/Radiosonde/plot_radiosonde_map_loc_no.py | Python | mit | 1,971 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Zato
from zato.common import PUB_SUB
from zato.server.service import AsIs, Int, UTC
from zato.server.service.internal import AdminService, AdminSIO, GetListAdminSIO
# ################################################################################################################################
class _SourceTypeAware(AdminService):
    """Shared base: resolves the pub/sub API callable to use for a given
    action and message source type."""
    ZATO_DONT_DEPLOY = True

    # action -> {message source type ID -> name of the self.pubsub method}
    source_type_func = {
        'get_list': {
            PUB_SUB.MESSAGE_SOURCE.TOPIC.id: 'get_topic_message_list',
            PUB_SUB.MESSAGE_SOURCE.CONSUMER_QUEUE.id: 'get_consumer_queue_message_list',
        },
        'delete': {
            PUB_SUB.MESSAGE_SOURCE.TOPIC.id: 'delete_from_topic',
            PUB_SUB.MESSAGE_SOURCE.CONSUMER_QUEUE.id: 'delete_from_consumer_queue',
        },
    }

    def get_pubsub_api_func(self, action, source_type):
        """Return the bound self.pubsub method registered for
        (action, source_type)."""
        func_name = self.source_type_func[action][source_type]
        return getattr(self.pubsub, func_name)
class GetList(_SourceTypeAware):
    """ Returns a list of messages from a topic or consumer queue.
    """
    class SimpleIO(GetListAdminSIO):
        request_elem = 'zato_pubsub_message_get_list_request'
        response_elem = 'zato_pubsub_message_get_list_response'
        input_required = ('cluster_id', 'source_type', 'source_name')
        output_required = (AsIs('msg_id'), 'topic', 'mime_type', Int('priority'), Int('expiration'),
            UTC('creation_time_utc'), UTC('expire_at_utc'), 'producer')

    def get_data(self):
        # Resolve the topic- or queue-specific listing call, then serialize
        # each message lazily.
        func = self.get_pubsub_api_func('get_list', self.request.input.source_type)
        for item in func(self.request.input.source_name):
            yield item.to_dict()

    def handle(self):
        self.response.payload[:] = self.get_data()
# ################################################################################################################################
class Get(_SourceTypeAware):
    """ Returns basic information regarding a message from a topic or a consumer queue.
    """
    class SimpleIO(AdminSIO):
        request_elem = 'zato_pubsub_message_get_request'
        response_elem = 'zato_pubsub_message_get_response'
        input_required = ('cluster_id', AsIs('msg_id'))
        output_required = ('topic', 'producer', 'priority', 'mime_type', 'expiration',
            UTC('creation_time_utc'), UTC('expire_at_utc'))
        output_optional = ('payload',)

    def handle(self):
        # Messages are addressable directly by ID, regardless of source type
        self.response.payload = self.pubsub.get_message(self.request.input.msg_id)
# ################################################################################################################################
class Delete(_SourceTypeAware):
    """ Irrevocably deletes a message from a producer's topic or a consumer's queue.
    """
    class SimpleIO(AdminSIO):
        request_elem = 'zato_pubsub_message_delete_request'
        response_elem = 'zato_pubsub_message_delete_response'
        input_required = ('cluster_id', AsIs('msg_id'), 'source_name', 'source_type')

    def handle(self):
        # Resolve the topic- or queue-specific delete call for this source
        func = self.get_pubsub_api_func('delete', self.request.input.source_type)
        func(self.request.input.source_name, self.request.input.msg_id)
# ################################################################################################################################
| alirizakeles/zato | code/zato-server/src/zato/server/service/internal/pubsub/message.py | Python | gpl-3.0 | 3,493 |
"""Code Wars kata
https://www.codewars.com/kata/binary-tree-compare
"""
# return True if the two binary trees rooted and a and b are equal in value and structure
# return False otherwise
def compare(a, b):
    """Return True iff binary trees *a* and *b* are equal in both value and
    structure, False otherwise.

    Nodes expose ``val``, ``left`` and ``right``; a child slot may hold a
    sub-node, a plain int leaf, or None.

    Fixes over the previous version:
    * a node with a value compared against a node whose value is None
      wrongly returned True (the ``return True`` on that branch);
    * two None-valued nodes were declared equal without comparing their
      children, ignoring structure.
    """
    # A missing tree only matches another missing tree.
    if a is None or b is None:
        return a is b
    # Node values must match (None == None falls through to the children).
    if a.val != b.val:
        return False

    def _child_equal(x, y):
        # Children of different kinds (node vs int vs None) never match.
        if type(x) != type(y):
            return False
        # Leaf payloads (plain values) compare directly.
        if x is None or isinstance(x, int):
            return x == y
        # Both are sub-nodes: recurse.
        return compare(x, y)

    if _child_equal(a.left, b.left) and _child_equal(a.right, b.right):
        return True
    else:
        return False
| Bl41r/code-katas | binary-tree-compare.py | Python | mit | 1,380 |
# Copyright 2020 Timothy Trippel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Baseline experiment configuration. Individual experiment definitions are
# expected to override these keys; "not-set" values are placeholders that
# must be supplied per run. Flag-style keys use 0/1 ints.
CONFIG_DICT = {
    "experiment_name": "not-set",
    "toplevel": "lock",
    "soc": "other",
    "version": "HEAD",
    "tb_type": "cpp",
    "tb": "not-set",
    "fuzzer": "afl-term-on-crash",
    # Which components get instrumented (DUT / testbench / Verilator runtime)
    "instrument_dut": 1,
    "instrument_tb": 0,
    "instrument_vltrt": 0,
    "manual": 0,
    "run_on_gcp": 1,
    "hdl_gen_params": {},
    "model_params": {},
    "fuzzer_params": {
        "interactive_mode": 1,
        "timeout_ms": None,       # None: no per-run timeout
        "memory_limit_mb": None,  # None: no memory cap
        "num_instances": 1,
        "mode": "s",
        "duration_mins": 1440,    # 24 hours
    },
}
| googleinterns/hw-fuzzing | experiment_scripts/lock_config_dict.py | Python | apache-2.0 | 1,144 |
import random
from random import sample
from traceback import format_exc
import re
from navmazing import NavigateToSibling, NavigateToAttribute
from widgetastic.utils import VersionPick, Version
from widgetastic.widget import Text, View, TextInput
from widgetastic_patternfly import (
SelectorDropdown, Dropdown, BootstrapSelect, Input, Button, Tab
)
from wrapanapi.utils import eval_strings
from cfme import exceptions
from cfme.base.credential import TokenCredential
from cfme.base.login import BaseLoggedInPage
from cfme.common import TagPageView, PolicyProfileAssignable
from cfme.common.provider import BaseProvider, DefaultEndpoint, DefaultEndpointForm
from cfme.common.provider_views import (
BeforeFillMixin, ContainerProviderAddView, ContainerProvidersView,
ContainerProviderEditView, ContainerProviderEditViewUpdated, ProvidersView,
ContainerProviderAddViewUpdated, ProviderSideBar,
ProviderDetailsToolBar, ProviderDetailsView, ProviderToolBar)
from cfme.utils import version
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.browser import browser
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.varmeth import variable
from cfme.utils.wait import wait_for
from widgetastic_manageiq import (
SummaryTable, BreadCrumb, Accordion, ManageIQTree
)
class ContainersProviderDefaultEndpoint(DefaultEndpoint):
    """Represents Containers Provider default endpoint"""
    credential_class = TokenCredential

    @property
    def view_value_mapping(self):
        # Values filled into the 'Default' endpoint tab of the Add/Edit form
        out = {'hostname': self.hostname,
               'password': self.token,
               'api_port': self.api_port,
               'sec_protocol': self.sec_protocol}
        # Custom-CA mode needs the CA certificate fetched from the master
        # node over SSH (get_ca_cert is provided by the concrete endpoint)
        if self.sec_protocol.lower() == 'ssl trusting custom ca' and hasattr(self, 'get_ca_cert'):
            out['trusted_ca_certificates'] = self.get_ca_cert(
                {"username": self.ssh_creds.principal,
                 "password": self.ssh_creds.secret,
                 "hostname": self.master_hostname})
        # The confirm-password box was removed in 5.9
        out['confirm_password'] = version.pick({
            version.LOWEST: self.token,
            '5.9': None})
        return out
class ContainersProviderEndpointsForm(View):
    """
    represents default Containers Provider endpoint form in UI (Add/Edit dialogs)
    """
    @View.nested
    class default(Tab, DefaultEndpointForm, BeforeFillMixin):  # NOQA
        TAB_NAME = 'Default'
        sec_protocol = BootstrapSelect('default_security_protocol')
        trusted_ca_certificates = TextInput('default_tls_ca_certs')
        api_port = Input('default_api_port')

    @View.nested
    class metrics(Tab, BeforeFillMixin):  # NOQA
        # The tab and its widget locators were renamed from Hawkular to
        # Metrics in 5.9, hence the VersionPicks throughout.
        TAB_NAME = VersionPick({
            Version.lowest(): 'Hawkular',
            '5.9': 'Metrics'
        })
        sec_protocol = VersionPick({
            Version.lowest(): BootstrapSelect(id='hawkular_security_protocol'),
            '5.9': BootstrapSelect(id='metrics_security_protocol')
        })
        trusted_ca_certificates = VersionPick({
            Version.lowest(): TextInput('hawkular_tls_ca_certs'),
            '5.9': TextInput('metrics_tls_ca_certs')
        })
        hostname = VersionPick({
            Version.lowest(): Input('hawkular_hostname'),
            '5.9': Input('metrics_hostname')
        })
        api_port = VersionPick({
            Version.lowest(): Input('hawkular_api_port'),
            '5.9': Input('metrics_api_port')
        })
        validate = Button('Validate')

    @View.nested
    class alerts(Tab, BeforeFillMixin):  # NOQA
        TAB_NAME = 'Alerts'
        sec_protocol = BootstrapSelect(id='prometheus_alerts_security_protocol')
        trusted_ca_certificates = TextInput('prometheus_alerts_tls_ca_certs')
        hostname = Input('prometheus_alerts_hostname')
        api_port = Input('prometheus_alerts_api_port')
        validate = Button('Validate')
class LoggingableView(View):
    monitor = Dropdown('Monitoring')

    def get_logging_url(self):
        """Open the provider's 'External Logging' (Kibana) page in a new
        browser window, capture its URL and return it, then close that
        window and switch back to the appliance window.

        Raises RuntimeError when no new window opens, or (via wait_for's
        fail_func) when Kibana never shows up in the title/content.
        """
        def report_kibana_failure():
            raise RuntimeError("Kibana not found in the window title or content")

        browser_instance = browser()
        # Snapshot existing windows so the newly opened one can be identified
        all_windows_before = browser_instance.window_handles
        appliance_window = browser_instance.current_window_handle
        self.monitor.item_select('External Logging')
        all_windows_after = browser_instance.window_handles
        new_windows = set(all_windows_after) - set(all_windows_before)
        if not new_windows:
            raise RuntimeError("No logging window was open!")
        logging_window = new_windows.pop()
        browser_instance.switch_to_window(logging_window)
        logging_url = browser_instance.current_url
        # Poll title + page source until "kibana" appears (max 60s)
        wait_for(lambda: "kibana" in
                 browser_instance.title.lower() + " " +
                 browser_instance.page_source.lower(),
                 fail_func=report_kibana_failure, num_sec=60, delay=5)
        browser_instance.close()
        browser_instance.switch_to_window(appliance_window)
        return logging_url
class ContainerProviderDetailsView(ProviderDetailsView, LoggingableView):
    """
    Container Details page
    """
    @property
    def is_displayed(self):
        # Displayed only on the Compute -> Containers -> Providers branch
        return (super(ContainerProviderDetailsView, self).is_displayed and
                self.navigation.currently_selected == ['Compute', 'Containers', 'Providers'])
class ContainersProvider(BaseProvider, Pretty, PolicyProfileAssignable):
    """Base entity for container providers (e.g. OpenShift) in the UI.

    The ``num_*`` methods come in pairs via @variable: a 'db' variant that
    counts rows in the appliance database and a 'ui' variant that reads the
    Relationships table on the provider's Details page, so tests can compare
    the two.
    """
    PLURAL = 'Providers'
    provider_types = {}
    in_version = ('5.5', version.LATEST)
    category = "container"
    pretty_attrs = [
        'name',
        'key',
        'zone',
        'metrics_type',
        'alerts_type']
    STATS_TO_MATCH = [
        'num_project',
        'num_service',
        'num_replication_controller',
        'num_pod',
        'num_node',
        'num_image_registry',
        'num_container']
    # TODO add 'num_volume'
    string_name = "Containers"
    detail_page_suffix = 'provider_detail'
    edit_page_suffix = 'provider_edit_detail'
    quad_name = None
    db_types = ["ContainerManager"]
    endpoints_form = ContainersProviderEndpointsForm
    all_view = ContainerProvidersView
    details_view = ContainerProviderDetailsView
    refresh_text = 'Refresh items and relationships'

    def __init__(
            self,
            name=None,
            key=None,
            zone=None,
            metrics_type=None,
            alerts_type=None,
            endpoints=None,
            provider_data=None,
            appliance=None):
        Navigatable.__init__(self, appliance=appliance)
        self.name = name
        self.key = key
        self.zone = zone
        self.endpoints = endpoints
        self.provider_data = provider_data
        self.metrics_type = metrics_type
        self.alerts_type = alerts_type

    @property
    def view_value_mapping(self):
        # Top-level (non-endpoint) values filled into the Add/Edit form
        mapping = {
            'name': self.name,
            'prov_type': self.type,
            'zone': self.zone
        }
        return mapping

    @variable(alias='db')
    def num_project(self):
        return self._num_db_generic('container_projects')

    @num_project.variant('ui')
    def num_project_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Projects"))

    @variable(alias='db')
    def num_service(self):
        return self._num_db_generic('container_services')

    @num_service.variant('ui')
    def num_service_ui(self):
        # The Relationships row was renamed in 5.7
        if self.appliance.version < "5.7":
            name = "Services"
        else:
            name = "Container Services"
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of(name))

    @variable(alias='db')
    def num_replication_controller(self):
        return self._num_db_generic('container_replicators')

    @num_replication_controller.variant('ui')
    def num_replication_controller_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Replicators"))

    @variable(alias='db')
    def num_container_group(self):
        return self._num_db_generic('container_groups')

    @num_container_group.variant('ui')
    def num_container_group_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Pods"))

    @variable(alias='db')
    def num_pod(self):
        # potato tomato ("pod" and "container group" are the same thing)
        return self.num_container_group()

    @num_pod.variant('ui')
    def num_pod_ui(self):
        # potato tomato
        return self.num_container_group(method='ui')

    @variable(alias='db')
    def num_node(self):
        return self._num_db_generic('container_nodes')

    @num_node.variant('ui')
    def num_node_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Nodes"))

    @variable(alias='db')
    def num_container(self):
        # Containers are linked to providers through container definitions and then through pods
        # (the intermediate container_definitions table was dropped in 5.9)
        query = version.pick({
            version.LOWEST: "SELECT count(*) "
            "FROM ext_management_systems, container_groups, container_definitions, containers "
            "WHERE containers.container_definition_id=container_definitions.id "
            "AND container_definitions.container_group_id=container_groups.id "
            "AND container_groups.ems_id=ext_management_systems.id "
            "AND ext_management_systems.name='{}'".format(self.name),
            '5.9': "SELECT count(*) "
            "FROM ext_management_systems, container_groups, containers "
            "WHERE containers.container_group_id=container_groups.id "
            "AND container_groups.ems_id=ext_management_systems.id "
            "AND ext_management_systems.name='{}'".format(self.name)
        })
        res = self.appliance.db.client.engine.execute(query)
        return int(res.first()[0])

    @num_container.variant('ui')
    def num_container_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Containers"))

    @variable(alias='db')
    def num_image(self):
        return self._num_db_generic('container_images')

    @num_image.variant('ui')
    def num_image_ui(self):
        # The Relationships row was renamed in 5.7
        if self.appliance.version < "5.7":
            name = "Images"
        else:
            name = "Container Images"
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of(name))

    @variable(alias='db')
    def num_image_registry(self):
        return self._num_db_generic('container_image_registries')

    @num_image_registry.variant('ui')
    def num_image_registry_ui(self):
        view = navigate_to(self, "Details")
        return int(view.entities.summary("Relationships").get_text_of("Image Registries"))

    def pods_per_ready_status(self):
        """Grabbing the Container Statuses Summary of the pods from API"""
        # TODO: Add later this logic to wrapanapi
        entities = self.mgmt.api.get('pod')[1]['items']
        out = {}
        for entity_j in entities:
            # Map pod name -> {condition type: parsed boolean-ish status}
            out[entity_j['metadata']['name']] = {
                condition['type']: eval_strings([condition['status']]).pop()
                for condition in entity_j['status'].get('conditions', [])
            }
        return out
@navigator.register(ContainersProvider, 'All')
class All(CFMENavigateStep):
    """Navigate to the list of all container providers."""
    VIEW = ContainerProvidersView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    def step(self):
        self.prerequisite_view.navigation.select('Compute', 'Containers', 'Providers')

    def resetter(self):
        # Reset view and selection
        self.view.toolbar.view_selector.select("Grid View")
        self.view.paginator.reset_selection()
@navigator.register(ContainersProvider, 'Add')
class Add(CFMENavigateStep):
    """Navigate to the Add Containers Provider form (layout and menu item
    text both changed in 5.9, hence the VersionPicks)."""
    def container_provider_view_class(self):
        return VersionPick({
            Version.lowest(): ContainerProviderAddView,
            '5.9': ContainerProviderAddViewUpdated
        })

    @property
    def VIEW(self):  # noqa
        # Resolved lazily because it depends on the appliance version
        return self.container_provider_view_class().pick(self.obj.appliance.version)

    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select(
            VersionPick({
                Version.lowest(): 'Add Existing Containers Provider',
                '5.9': 'Add a new Containers Provider'
            }).pick(self.obj.appliance.version))
@navigator.register(ContainersProvider, 'Details')
class Details(CFMENavigateStep):
    """Navigate to a Containers Provider's Details page."""
    VIEW = ContainerProviderDetailsView
    prerequisite = NavigateToSibling('All')

    def step(self):
        # Click the provider's entity in the list, paging through if needed.
        self.prerequisite_view.entities.get_entity(name=self.obj.name,
                                                   surf_pages=True).click()

    def resetter(self):
        # Details assertions expect the Summary view layout.
        self.view.toolbar.view_selector.select("Summary View")
@navigator.register(ContainersProvider, 'Edit')
class Edit(CFMENavigateStep):
    """Navigate to the Edit form for a provider selected in the All list."""

    def container_provider_edit_view_class(self):
        # The edit form was reworked in 5.9; pick the view matching the appliance.
        return VersionPick({
            Version.lowest(): ContainerProviderEditView,
            '5.9': ContainerProviderEditViewUpdated
        })

    @property
    def VIEW(self):  # noqa
        return self.container_provider_edit_view_class().pick(self.obj.appliance.version)

    prerequisite = NavigateToSibling('All')

    def step(self):
        # Check (select) the provider's row, then use the Configuration menu.
        self.prerequisite_view.entities.get_entity(name=self.obj.name,
                                                   surf_pages=True).check()
        self.prerequisite_view.toolbar.configuration.item_select(
            'Edit Selected Containers Provider')
@navigator.register(ContainersProvider, 'EditFromDetails')
class EditFromDetails(CFMENavigateStep):
    """Navigate to the Edit form from the provider's Details page."""
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Edit this Containers Provider')
@navigator.register(ContainersProvider, 'EditTags')
class EditTags(CFMENavigateStep):
    """Navigate to the Edit Tags page for a provider starting from the All list."""
    VIEW = TagPageView
    prerequisite = NavigateToSibling('All')

    def step(self):
        # NOTE(review): this click()s the entity (which opens Details) rather
        # than check()ing the row as Edit does -- confirm the intended flow.
        self.prerequisite_view.entities.get_entity(name=self.obj.name,
                                                   surf_pages=True).click()
        self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
@navigator.register(ContainersProvider, 'EditTagsFromDetails')
class EditTagsFromDetails(CFMENavigateStep):
    """Navigate to the Edit Tags page from the provider's Details page."""
    VIEW = TagPageView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
@navigator.register(ContainersProvider, 'TimelinesFromDetails')
class TimelinesFromDetails(CFMENavigateStep):
    """Navigate to the provider's Timelines page from Details."""
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.monitoring.item_select('Timelines')
@navigator.register(ContainersProvider, 'TopologyFromDetails')
class TopologyFromDetails(CFMENavigateStep):
    """Switch to the Topology view from the provider's Details page."""
    prerequisite = NavigateToSibling('Details')

    def step(self):
        # TODO: implement topology view
        self.prerequisite_view.toolbar.view_selector.select("Topology View")
class AdHocMetricsView(BaseLoggedInPage):
    """View for the provider's Ad hoc Metrics page."""
    filter_dropdown = SelectorDropdown('uib-tooltip', 'Filter by')
    filter_result_header = Text('h5.ng-binding')
    apply_btn = Button("Apply Filters")

    # Remembers the filter chosen via set_filter().
    selected_filter = None

    @property
    def is_displayed(self):
        # NOTE(review): always False, so navigation cannot positively assert
        # this view is shown -- presumably a placeholder; confirm.
        return False

    def wait_for_filter_option_to_load(self):
        # Wait until the dropdown has been populated with at least one option.
        wait_for(lambda: bool(self.filter_dropdown.items), delay=5, num_sec=60)

    def wait_for_results_to_load(self):
        # The header text starts with the result count; wait for it to be non-zero.
        wait_for(lambda: bool(int(self.filter_result_header.text.split()[0])),
                 delay=5, num_sec=60)

    def apply_filter(self):
        self.apply_btn.click()

    def set_filter(self, desired_filter):
        # Record the choice so tests can assert against it later.
        self.selected_filter = desired_filter
        self.filter_dropdown.fill_with(desired_filter)

    def get_random_filter(self):
        return str(random.choice(self.filter_dropdown.items))

    def get_total_results_count(self):
        # First whitespace-separated token of the header is the count.
        return int(self.filter_result_header.text.split()[0])
@navigator.register(ContainersProvider, 'AdHoc')
class AdHocMain(CFMENavigateStep):
    """Navigate to the provider's Ad hoc Metrics page from Details."""
    VIEW = AdHocMetricsView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.monitoring.item_select('Ad hoc Metrics')
class ContainerObjectAllBaseView(ProvidersView):
    """Base class for container object All views.

    SUMMARY_TEXT (the expected page title text) should be defined in each child.
    """
    summary = Text('//div[@id="main-content"]//h1')
    policy = Dropdown('Policy')
    download = Dropdown('Download')
    toolbar = View.nested(ProviderToolBar)

    @property
    def table(self):
        # Convenience alias for the entities table.
        return self.entities.elements

    @property
    def is_displayed(self):
        # We use 'in' for this condition since when we use search it'll include (Names with "...")
        return self.SUMMARY_TEXT in self.summary.text
class ContainerObjectDetailsEntities(View):
    """The summary tables shown on a container object's Details page."""
    properties = SummaryTable(title="Properties")
    status = SummaryTable(title="Status")
    relationships = SummaryTable(title="Relationships")
    overview = SummaryTable(title="Overview")
    smart_management = SummaryTable(title="Smart Management")
    labels = SummaryTable(title="Labels")
class ContainerObjectDetailsBaseView(BaseLoggedInPage, LoggingableView):
    """Base class for container object Details views."""
    title = Text('//div[@id="main-content"]//h1')
    breadcrumb = BreadCrumb(locator='//ol[@class="breadcrumb"]')
    toolbar = View.nested(ProviderDetailsToolBar)
    entities = View.nested(ContainerObjectDetailsEntities)

    @View.nested
    class sidebar(ProviderSideBar):  # noqa

        @View.nested
        class properties(Accordion):  # noqa
            tree = ManageIQTree()

        @View.nested
        class relationships(Accordion):  # noqa
            tree = ManageIQTree()

    @property
    def is_displayed(self):
        return (
            self.title.is_displayed and
            self.breadcrumb.is_displayed and
            # We use 'in' for this condition because when we use search the
            # text will include (Names with "...")
            '{} (Summary)'.format(self.context['object'].name) in self.breadcrumb.active_location
        )
# Common methods:
class ContainersTestItem(object):
    """A generic test item, especially used for parametrized functions.

    Bundles the container class under test with its Polarion test case ID
    plus any extra attributes a test wants to carry along.
    """
    __test__ = False  # prevent pytest from collecting this class as a test

    def __init__(self, obj, polarion_id, **additional_attrs):
        """Args:
            obj: The container object in this test (e.g. Image).
            polarion_id: The Polarion test case ID.
            **additional_attrs: Arbitrary extra attributes stored on the item.
        """
        self.obj = obj
        self.polarion_id = polarion_id
        for name, value in additional_attrs.items():
            # setattr() is the idiomatic spelling of self.__setattr__(...).
            setattr(self, name, value)

    def pretty_id(self):
        """Return a readable id like ``Image (CMP-1234)``."""
        return '{} ({})'.format(
            getattr(self.obj, '__name__', str(self.obj)),
            self.polarion_id)

    @classmethod
    def get_pretty_id(cls, obj):
        """Since sometimes the test object is wrapped within markers,
        it's difficult to get it inside the args tree;
        hence we use this to find the item and call its pretty_id function.

        Args:
            obj: Either a ContainersTestItem or a marker that includes one.

        Returns:
            str pretty id, or None if no ContainersTestItem was found.
        """
        if isinstance(obj, cls):
            return obj.pretty_id()
        elif hasattr(obj, 'args') and hasattr(obj, '__iter__'):
            for arg in obj.args:
                pretty_id = cls.get_pretty_id(arg)
                if pretty_id:
                    return pretty_id
        # Explicitly signal "nothing found" (was an implicit None before).
        return None
class LoadDetailsMixin(object):
    """Embed load details functionality for objects -
    required for some classes like PolicyProfileAssignable"""

    def load_details(self, refresh=False):
        # Navigate to the object's Details page; optionally force a browser
        # refresh so stale widgets are re-read.
        view = navigate_to(self, 'Details')
        if refresh:
            view.browser.refresh()
class Labelable(object):
    """Mixin providing label management, delegating to ``self.mgmt``."""

    # Label name/value parts may only contain alphanumerics, '_' and '.'.
    _LABEL_NAMEVAL_PATTERN = re.compile(r'^[A-Za-z0-9_.]+$')

    def get_labels(self):
        """Return the labels currently set on this object."""
        return self.mgmt.list_labels()

    def set_label(self, name, value):
        """Set a label on the object instance.

        Args:
            name: the name of the label
            value: the value of the label

        Returns:
            self.mgmt.set_label return value.
        """
        for part, content in (('name', name), ('value', value)):
            assert self._LABEL_NAMEVAL_PATTERN.match(content), \
                '{} part ({}) must match the regex pattern {}'.format(
                    part, content, self._LABEL_NAMEVAL_PATTERN.pattern)
        return self.mgmt.set_label(name, value)

    def remove_label(self, name, silent_failure=False):
        """Remove a label by name.

        Args:
            name: name of label
            silent_failure: whether to raise an error or not in case of failure.

        Returns: ``bool`` pass or fail

        Raises:
            :py:class:`LabelNotFoundException`.
        """
        try:
            self.mgmt.delete_label(name)
        except Exception:  # TODO: add appropriate exception in wrapanapi
            failure_signature = format_exc()
            if not silent_failure:
                raise exceptions.LabelNotFoundException(failure_signature)
            logger.warning(failure_signature)
            return False
        return True
def navigate_and_get_rows(provider, obj, count, silent_failure=False):
    """Get <count> random rows from the obj list table;
    if <count> is greater than the number of rows, return all the rows.

    Args:
        provider: containers provider (unused; kept for interface compatibility)
        obj: the containers object
        count: number of random rows to return
        silent_failure: If True and no records found for obj, return an
            empty list instead of raising an exception.

    Returns: list of rows
    """
    view = navigate_to(obj, 'All')
    view.toolbar.view_selector.list_button.click()
    # any() instead of truthiness of filter(): on Python 3 filter() returns a
    # lazy iterator that is always truthy, which would break this check.
    no_records = any('No Records Found.' in msg.text for msg in view.flash.messages)
    if no_records and silent_failure:
        return []
    view.paginator.set_items_per_page(1000)
    rows = list(view.table.rows())
    if not rows:
        return []
    return sample(rows, min(count, len(rows)))
def refresh_and_navigate(*args, **kwargs):
    """Navigate to a view, then reload the page in the browser.

    Useful when we are already on the destination page and want a fresh load.
    """
    destination = navigate_to(*args, **kwargs)
    destination.browser.refresh()
    return destination
class GetRandomInstancesMixin(object):
    """Mixin adding random sampling over a collection's ``all()`` listing."""

    def get_random_instances(self, count=1):
        """Return up to *count* randomly chosen instances of the object."""
        instances = self.all()
        sample_size = min(count, len(instances))
        return random.sample(instances, sample_size)
| mfalesni/cfme_tests | cfme/containers/provider/__init__.py | Python | gpl-2.0 | 23,222 |
import traceback

# Module-level registry of every Trace recorded via catch().
TRACE_STACK = []
class Trace(object):
    """An exception paired with the stack that was active when it was caught."""

    def __init__(self, exception, stack=None):
        """Wrap *exception*; capture the current stack unless one is supplied."""
        if not isinstance(exception, Exception):
            raise ValueError("Expected an Exception object as first argument")
        # A falsy stack (None or empty) means: extract the live stack now.
        stack = stack or traceback.extract_stack()
        # pop off current frame and initial catch frame
        #stack.pop()
        #stack.pop()
        # TODO: try to grab exception if it's not passed in explicitly
        self._exception = exception
        self._stack = stack

    @property
    def exception(self):
        """The wrapped exception object."""
        return self._exception

    @property
    def stack(self):
        """The stack (list of frame summaries) recorded for the exception."""
        return self._stack

    def __str__(self):
        formatted = traceback.format_list(self.stack)
        formatted += traceback.format_exception_only(
            type(self.exception), self.exception)
        return ''.join(formatted).strip()

    def __repr__(self):
        exc_type_name = str(type(self.exception)).replace('exceptions.', '')
        return '<Trace (%s)>' % (exc_type_name,)
def catch(e):
    """Record exception *e* (with the current stack) on the global TRACE_STACK."""
    trace = Trace(e)
    TRACE_STACK.append(trace)
def dump(exception_type=None, lineno=None, module=None):
    # NOTE(review): the filter arguments are accepted but currently ignored --
    # every recorded trace is returned regardless. TODO: implement the
    # filtering (or drop the parameters).
    return TRACE_STACK
def clear():
    """Discard every recorded trace.

    The original body did ``del TRACE_STACK`` followed by a rebind, which made
    TRACE_STACK a *local* name and therefore raised UnboundLocalError on the
    ``del``. Declaring it global restores the intended module-level reset.
    """
    global TRACE_STACK
    TRACE_STACK = []
if __name__ == '__main__':
    # Smoke test: generate 20 ZeroDivisionErrors, record each with catch(),
    # then print the first recorded trace. (Python 2 syntax throughout.)
    import random
    for i in range(20):
        try:
            random.randint(0,5) / 0  # always raises ZeroDivisionError
        except Exception, e:
            catch(e)
    print str(dump()[0])
| mvanveen/catcher | catcher.py | Python | mit | 1,373 |
# pylint: disable=unused-import
"""
Python APIs exposed by the bulk_email app to other in-process apps.
"""
# Public Bulk Email Functions
from __future__ import absolute_import
from bulk_email.models_api import (
is_bulk_email_enabled_for_course,
is_bulk_email_feature_enabled,
is_user_opted_out_for_course
)
def get_emails_enabled(user, course_id):
    """
    Get whether or not emails are enabled in the context of a course.

    Arguments:
        user: the user object for which we want to check whether emails are enabled
        course_id (string): the course id of the course

    Returns:
        (bool): True if the bulk email feature is enabled for the course and the
            user has not opted out of it; False if the user has opted out.
        None: if the bulk email feature is disabled for the course.
    """
    if not is_bulk_email_feature_enabled(course_id=course_id):
        return None
    return not is_user_opted_out_for_course(user=user, course_id=course_id)
| ESOedX/edx-platform | lms/djangoapps/bulk_email/api.py | Python | agpl-3.0 | 899 |
'''
Generates a visual diff of all pending changes in the local SVN checkout.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import optparse
import os
import re
import shutil
import tempfile
# modules declared within this same directory
import svn
USAGE_STRING = 'Usage: %s [options]'
HELP_STRING = '''
Generates a visual diff of all pending changes in the local SVN checkout.
This includes a list of all files that have been added, deleted, or modified
(as far as SVN knows about). For any image modifications, pixel diffs will
be generated.
'''
TRUNK_PATH = os.path.join(os.path.dirname(__file__), os.pardir)
OPTION_DEST_DIR = '--dest-dir'
# default DEST_DIR is determined at runtime
OPTION_PATH_TO_SKDIFF = '--path-to-skdiff'
# default PATH_TO_SKDIFF is determined at runtime
def RunCommand(command):
    """Run a shell command, raising an Exception if it exits nonzero.

    @param command the command as a single string
    """
    # print() with a single argument works identically on Python 2 and 3.
    print('running command [%s]...' % command)
    retval = os.system(command)
    # Compare with != rather than 'is not': identity comparison against an
    # int literal only worked by CPython small-int caching accident.
    if retval != 0:
        raise Exception('command [%s] failed' % command)
def FindPathToSkDiff(user_set_path=None):
    """Return the path to an existing skdiff binary.

    @param user_set_path if None, search some likely build output locations;
           otherwise, only check at this exact path
    @raise Exception if no skdiff binary can be found
    """
    if user_set_path is not None:
        if not os.path.isfile(user_set_path):
            raise Exception('unable to find skdiff at user-set path %s' %
                            user_set_path)
        return user_set_path
    trunk_path = os.path.join(os.path.dirname(__file__), os.pardir)
    candidates = [os.path.join(trunk_path, 'out', config, 'skdiff')
                  for config in ('Release', 'Debug')]
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise Exception('cannot find skdiff in paths %s; maybe you need to '
                    'specify the %s option or build skdiff?' % (
                        candidates, OPTION_PATH_TO_SKDIFF))
def SvnDiff(path_to_skdiff, dest_dir):
    """Generates a visual diff of all pending changes in the local SVN checkout.

    @param path_to_skdiff path to the skdiff tool (auto-detected if None)
    @param dest_dir existing directory within which to write results; a
           temporary directory is created if None
    """
    # Validate parameters, filling in default values if necessary and possible.
    path_to_skdiff = FindPathToSkDiff(path_to_skdiff)
    if not dest_dir:
        dest_dir = tempfile.mkdtemp()

    # Prepare temporary directories.
    modified_flattened_dir = os.path.join(dest_dir, 'modified_flattened')
    original_flattened_dir = os.path.join(dest_dir, 'original_flattened')
    diff_dir = os.path.join(dest_dir, 'diffs')
    # 'work_dir' rather than 'dir': don't shadow the builtin dir().
    for work_dir in [modified_flattened_dir, original_flattened_dir, diff_dir]:
        shutil.rmtree(work_dir, ignore_errors=True)
        os.mkdir(work_dir)

    # Get a list of all locally modified (including added/deleted) files,
    # descending subdirectories.
    svn_repo = svn.Svn('.')
    modified_file_paths = svn_repo.GetFilesWithStatus(
        svn.STATUS_ADDED | svn.STATUS_DELETED | svn.STATUS_MODIFIED)

    # For each modified file:
    # 1. copy its current contents into modified_flattened_dir
    # 2. copy its original contents into original_flattened_dir
    for modified_file_path in modified_file_paths:
        # Flatten subdirectory paths so all files share a single directory.
        dest_filename = re.sub(os.sep, '__', modified_file_path)
        # If the file had STATUS_DELETED, it won't exist anymore...
        if os.path.isfile(modified_file_path):
            shutil.copyfile(modified_file_path,
                            os.path.join(modified_flattened_dir, dest_filename))
        svn_repo.ExportBaseVersionOfFile(
            modified_file_path,
            os.path.join(original_flattened_dir, dest_filename))

    # Run skdiff: compare original_flattened_dir against modified_flattened_dir
    RunCommand('%s %s %s %s' % (path_to_skdiff, original_flattened_dir,
                                modified_flattened_dir, diff_dir))
    # print() with a single argument works identically on Python 2 and 3.
    print('\nskdiff results are ready in file://%s/index.html' % diff_dir)
def RaiseUsageException():
    """Raise an Exception whose message shows usage and points at --help."""
    message = '%s\nRun with --help for more detail.' % (
        USAGE_STRING % __file__)
    raise Exception(message)
def Main(options, args):
    """Allow other scripts to call this script with fake command-line args.
    """
    # This tool takes no positional arguments; any present is a usage error.
    if len(args) != 0:
        RaiseUsageException()
    SvnDiff(path_to_skdiff=options.path_to_skdiff, dest_dir=options.dest_dir)
if __name__ == '__main__':
    # Build the command-line interface and dispatch to Main().
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option(OPTION_DEST_DIR,
                      action='store', type='string', default=None,
                      help='existing directory within which to write results; '
                      'if not set, will create a temporary directory which '
                      'will remain in place after this script completes')
    parser.add_option(OPTION_PATH_TO_SKDIFF,
                      action='store', type='string', default=None,
                      help='path to already-built skdiff tool; if not set, '
                      'will search for it in typical directories near this '
                      'script')
    (options, args) = parser.parse_args()
    Main(options, args)
| Gateworks/skia | tools/svndiff.py | Python | bsd-3-clause | 5,474 |
"""Make the custom certificate and private key files used by test_ssl
and friends."""
import os
import shutil
import sys
import tempfile
from subprocess import *
req_template = """
[req]
distinguished_name = req_distinguished_name
x509_extensions = req_x509_extensions
prompt = no
[req_distinguished_name]
C = XY
L = Castle Anthrax
O = Python Software Foundation
CN = {hostname}
[req_x509_extensions]
subjectAltName = @san
[san]
DNS.1 = {hostname}
{extra_san}
[dir_sect]
C = XY
L = Castle Anthrax
O = Python Software Foundation
CN = dirname example
[princ_name]
realm = EXP:0, GeneralString:KERBEROS.REALM
principal_name = EXP:1, SEQUENCE:principal_seq
[principal_seq]
name_type = EXP:0, INTEGER:1
name_string = EXP:1, SEQUENCE:principals
[principals]
princ1 = GeneralString:username
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = cadir
database = $dir/index.txt
crlnumber = $dir/crl.txt
default_md = sha256
default_days = 3600
default_crl_days = 3600
certificate = pycacert.pem
private_key = pycakey.pem
serial = $dir/serial
RANDFILE = $dir/.rand
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ v3_ca ]
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer
basicConstraints = CA:true
"""
here = os.path.abspath(os.path.dirname(__file__))
def make_cert_key(hostname, sign=False, extra_san='',
                  ext='req_x509_extensions_full', key='rsa:3072'):
    # Create a (cert_pem, key_pem) pair for *hostname* by shelling out to
    # openssl. If sign is True the CSR is signed by the CA in `cadir`
    # (see make_ca); otherwise a self-signed certificate is produced.
    # NOTE(review): the `ext` and `key` parameters are accepted but never
    # used below -- the key size is hard-coded to rsa:1024. TODO confirm
    # whether they should be wired into the openssl arguments.
    print("creating cert for " + hostname)
    # Three temp files: request config, certificate output, key output.
    tempnames = []
    for i in range(3):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            tempnames.append(f.name)
    req_file, cert_file, key_file = tempnames
    try:
        req = req_template.format(hostname=hostname, extra_san=extra_san)
        with open(req_file, 'w') as f:
            f.write(req)
        args = ['req', '-new', '-days', '3650', '-nodes',
                '-newkey', 'rsa:1024', '-keyout', key_file,
                '-config', req_file]
        if sign:
            # Signed flow: write the CSR to a fourth temp file, then have the
            # CA sign it into cert_file.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                tempnames.append(f.name)
                reqfile = f.name
            args += ['-out', reqfile ]
        else:
            # Self-signed flow: emit the certificate directly.
            args += ['-x509', '-out', cert_file ]
        check_call(['openssl'] + args)
        if sign:
            args = ['ca', '-config', req_file, '-out', cert_file, '-outdir', 'cadir',
                    '-policy', 'policy_anything', '-batch', '-infiles', reqfile ]
            check_call(['openssl'] + args)
        with open(cert_file, 'r') as f:
            cert = f.read()
        with open(key_file, 'r') as f:
            key = f.read()
        return cert, key
    finally:
        # Always remove the temp files, even if openssl failed.
        for name in tempnames:
            os.remove(name)
# Working directory used by the throwaway CA created in make_ca().
TMP_CADIR = 'cadir'

def unmake_ca():
    # Delete the temporary CA directory created by make_ca().
    shutil.rmtree(TMP_CADIR)
def make_ca():
    # Build a throwaway self-signed CA (key pycakey.pem, cert pycacert.pem)
    # plus the bookkeeping files the `openssl ca` command needs, and an
    # initial (empty) CRL in revocation.crl.
    os.mkdir(TMP_CADIR)
    with open(os.path.join('cadir','index.txt'),'a+') as f:
        pass # empty file
    with open(os.path.join('cadir','crl.txt'),'a+') as f:
        f.write("00")
    with open(os.path.join('cadir','index.txt.attr'),'w+') as f:
        f.write('unique_subject = no')
    with tempfile.NamedTemporaryFile("w") as t:
        # The CA reuses the request template, with the CA's own hostname.
        t.write(req_template.format(hostname='our-ca-server', extra_san=''))
        t.flush()
        with tempfile.NamedTemporaryFile() as f:
            args = ['req', '-new', '-days', '3650', '-extensions', 'v3_ca', '-nodes',
                    '-newkey', 'rsa:3072', '-keyout', 'pycakey.pem',
                    '-out', f.name,
                    '-subj', '/C=XY/L=Castle Anthrax/O=Python Software Foundation CA/CN=our-ca-server']
            check_call(['openssl'] + args)
            # Self-sign the CA's own CSR into pycacert.pem.
            args = ['ca', '-config', t.name, '-create_serial',
                    '-out', 'pycacert.pem', '-batch', '-outdir', TMP_CADIR,
                    '-keyfile', 'pycakey.pem', '-days', '3650',
                    '-selfsign', '-extensions', 'v3_ca', '-infiles', f.name ]
            check_call(['openssl'] + args)
            # Generate an (empty) certificate revocation list.
            args = ['ca', '-config', t.name, '-gencrl', '-out', 'revocation.crl']
            check_call(['openssl'] + args)
if __name__ == '__main__':
    os.chdir(here)
    # Basic self-signed localhost cert + key, in separate and combined files.
    cert, key = make_cert_key('localhost')
    with open('ssl_cert.pem', 'w') as f:
        f.write(cert)
    with open('ssl_key.pem', 'w') as f:
        f.write(key)
    print("password protecting ssl_key.pem in ssl_key.passwd.pem")
    check_call(['openssl','rsa','-in','ssl_key.pem','-out','ssl_key.passwd.pem','-des3','-passout','pass:somepass'])
    check_call(['openssl','rsa','-in','ssl_key.pem','-out','keycert.passwd.pem','-des3','-passout','pass:somepass'])
    with open('keycert.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    with open('keycert.passwd.pem', 'a+') as f:
        f.write(cert)
    # For certificate matching tests
    make_ca()
    cert, key = make_cert_key('fakehostname')
    with open('keycert2.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    # CA-signed variants of the two hostnames above.
    cert, key = make_cert_key('localhost', True)
    with open('keycert3.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    cert, key = make_cert_key('fakehostname', True)
    with open('keycert4.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    # Certificate exercising every supported subjectAltName entry type.
    extra_san = [
        'otherName.1 = 1.2.3.4;UTF8:some other identifier',
        'otherName.2 = 1.3.6.1.5.2.2;SEQUENCE:princ_name',
        'email.1 = user@example.org',
        'DNS.2 = www.example.org',
        # GEN_X400
        'dirName.1 = dir_sect',
        # GEN_EDIPARTY
        'URI.1 = https://www.python.org/',
        'IP.1 = 127.0.0.1',
        'IP.2 = ::1',
        'RID.1 = 1.2.3.4.5',
    ]
    cert, key = make_cert_key('allsans', extra_san='\n'.join(extra_san))
    with open('allsans.pem', 'w') as f:
        f.write(key)
        f.write(cert)
    unmake_ca()
    print("\n\nPlease change the values in test_ssl.py, test_parse_cert function related to notAfter,notBefore and serialNumber")
    check_call(['openssl','x509','-in','keycert.pem','-dates','-serial','-noout'])
| slozier/ironpython2 | Src/StdLib/Lib/test/make_ssl_certs.py | Python | apache-2.0 | 6,854 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.