repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ANTsX/ANTsPy | tests/test_viz.py | Python | apache-2.0 | 3,081 | 0.01915 | """
Test ants.learn module
nptest.assert_allclose
self.assertEqual
self.assertTrue
"""
import os
import unittest
from common import run_tests
from tempfile import mktemp
import numpy as np
import numpy.testing as nptest
import ants
class TestModule_surface(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_surf_example(self):
ch2i = ants.image_read( ants.get_ants_data("ch2") )
ch2seg = ants.threshold_image( ch2i, "Otsu", 3 )
wm = ants.threshold_image( ch2seg, 3, 3 )
wm2 = wm.smooth_image( 1 ).threshold_image( 0.5, 1e15 )
kimg = ants.weingarten_image_curvature( ch2i, 1.5 ).smooth_image( 1 )
wmz = wm2.iMath("MD",3)
rp = [(90,180,90), (90,180,270), (90,180,180)]
filename = mktemp(suffix='.png')
ants.surf( x=wm2, y=[kimg], z=[wmz],
inflation_factor=255, overlay_limits=(-0.3,0.3), verbose = True,
rotation_params = rp, filename=filename)
class TestModule_volume(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_vol_example(self):
ch2i = ants.image_read( ants.get_ants_data("mni") )
ch2seg = ants | .threshold_image( ch2i, "Otsu", 3 )
wm = ants.threshold_image( ch2seg, 3, 3 )
kimg = ants.weingarten_image_curvature( | ch2i, 1.5 ).smooth_image( 1 )
rp = [(90,180,90), (90,180,270), (90,180,180)]
filename = mktemp(suffix='.png')
result = ants.vol( wm, [kimg], quantlimits=(0.01,0.99), filename=filename)
class TestModule_render_surface_function(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_render_surface_function_example(self):
mni = ants.image_read(ants.get_ants_data('mni'))
mnia = ants.image_read(ants.get_ants_data('mnia'))
filename = mktemp(suffix='.html')
ants.render_surface_function(mni, mnia, alphasurf=0.1, auto_open=False, filename=filename)
class TestModule_plot(unittest.TestCase):
def setUp(self):
img2d = ants.image_read(ants.get_ants_data('r16'))
img3d = ants.image_read(ants.get_ants_data('mni'))
self.imgs = [img2d, img3d]
def tearDown(self):
pass
def test_plot_example(self):
for img in self.imgs:
ants.plot(img)
class TestModule_create_tiled_mosaic(unittest.TestCase):
def setUp(self):
img2d = ants.image_read(ants.get_ants_data('r16'))
img3d = ants.image_read(ants.get_ants_data('mni'))
self.imgs = [img2d, img3d]
def tearDown(self):
pass
def test_example(self):
img = ants.image_read(ants.get_ants_data('ch2')).resample_image((3,3,3))
p = ants.create_tiled_mosaic(img)
# test with output
outfile = mktemp(suffix='.png')
p = ants.create_tiled_mosaic(img, output=outfile)
# rgb is not none
rgb = img.clone()
p = ants.create_tiled_mosaic(img, rgb=rgb)
if __name__ == '__main__':
run_tests() |
nashve/mythbox | resources/lib/IMDbPY/bin/get_character.py | Python | gpl-2.0 | 1,570 | 0.001274 | #!/usr/bin/env python
"""
get_character.py
Usage: get_character "characterID"
Show some info about the character with the given characterID (e.g. '0000001'
for "Jesse James", using 'http' or 'mobile').
Notice that characterID, using 'sql', are not the same IDs used on the web.
"""
import sys
# Import the IMDbPY package.
try:
import imdb
except ImportError:
print 'You bad boy! You need to install the IMDbPY package!'
sys.exit(1)
if len(sys.argv) != 2:
print 'Only one argument is required:'
print ' %s "characterID" | ' % sys.argv[0]
sys.exit(2)
characterID = sys.argv[1]
i = imdb.IMDb()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
try:
# Get a character object with the data about the character identified by
# the given characterID.
character = i.get_character(characterID)
except imdb.IMDbError, e:
print "Probably you're not connected to Internet. Complete error report:"
print e
sys.exit(3)
if not | character:
print 'It seems that there\'s no character with characterID "%s"' % characterID
sys.exit(4)
# XXX: this is the easier way to print the main info about a character;
# calling the summary() method of a character object will returns a string
# with the main information about the character.
# Obviously it's not really meaningful if you want to know how
# to access the data stored in a character object, so look below; the
# commented lines show some ways to retrieve information from a
# character object.
print character.summary().encode(out_encoding, 'replace')
|
waynesun09/tp-libvirt | libvirt/tests/src/virsh_cmd/monitor/virsh_domblkstat.py | Python | gpl-2.0 | 2,304 | 0 | from autotest.client.shared import error
from virttest import virsh
from virttest import libvirt_xml
from virttest import utils_libvirtd
def run(test, params, env):
"""
Test command: virsh domblkstat.
The command get device block stats for a running domain.
1.Prepare test environment.
2.When the libvirtd == "off", stop the libvirtd service.
3.Perform virsh domblkstat operation.
4.Recover test environment.
5.Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
domid = vm.get_id()
domuuid = vm.get_uuid()
blklist = libvirt_xml.VMXML.get_disk_blk(vm_name)
if blklist is None:
raise error.TestFail("Cannot find disk in %s" % vm_name)
# Select a block device from disks
blk = blklist[0]
libvirtd = params.get("libvirtd", "on")
vm_ref = params.get("domblkstat_vm_ref")
options = params.get("domblkstat_option", "")
status_error = params.get("status_error", "no")
if params.get("domblkinfo_dev") == "no":
blk = ""
if vm_ref == "id":
vm_ref = domid
elif vm_ref == "uuid":
vm_ref = domuuid
elif vm_ref == "hex_id":
vm_ref = hex(int(domid))
elif vm_ref.find("invalid") != -1:
vm_ref = params.get(vm_ref)
elif vm_ref == "name":
vm_ref = "%s %s" % (vm_name, params.get("domblkstat_extra"))
option_list = options.split(" ")
for option in option_list:
if virsh.has_command_help_match("domblkstat", option) is None:
raise error.TestNAError("The current libvirt doesn't support"
" '%s' option" % option)
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
result = virsh.domblkstat(vm_ref, blk, options, ignore_status=True)
status = result.exit_status
output = result.stdout.str | ip()
err = result.stderr.strip()
# recover libvirtd service start
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
# check status_error
if status_err | or == "yes":
if status == 0 or err == "":
raise error.TestFail("Run successfully with wrong command!")
elif status_error == "no":
if status != 0 or output == "":
raise error.TestFail("Run failed with right command")
|
vasiliykochergin/euca2ools | euca2ools/commands/autoscaling/describemetriccollectiontypes.py | Python | bsd-2-clause | 1,989 | 0 | # Copyright 2013 Eucalyptus Sys | tems, Inc.
#
# Redistribution and us | e of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.autoscaling import AutoScalingRequest
from requestbuilder.mixins import TabifyingMixin
class DescribeMetricCollectionTypes(AutoScalingRequest, TabifyingMixin):
DESCRIPTION = 'Describe auto-scaling metrics and granularities'
LIST_TAGS = ['Metrics', 'Granularities']
def print_result(self, result):
for metric in result.get('Metrics', []):
print self.tabify(('METRIC-COLLECTION-TYPE', metric.get('Metric')))
for granularity in result.get('Granularities', []):
print self.tabify(('METRIC-GRANULARITY-TYPE',
granularity.get('Granularity')))
|
alexbruy/QGIS | python/plugins/processing/algs/gdal/GdalUtils.py | Python | gpl-2.0 | 6,616 | 0.001663 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
import platform
from osgeo import gdal
from qgis.PyQt.QtCore import QSettings
from qgis.core import QgsApplication, QgsVectorFileWriter
from processing.core.ProcessingLog import ProcessingLog
from processing.core.SilentProgress import SilentProgress
try:
from osgeo import gdal
gdalAvailable = True
except:
gdalAvailable = False
class GdalUtils:
supportedRasters = None
@staticmethod
def runGdal(commands, progress=None):
if progress is None:
progress = SilentProgress()
envval = os.getenv('PATH')
# We need to give some extra hints to get things picked up on OS X
isDarwin = False
try:
isDarwin = platform.system() == 'Darwin'
except IOError: # https://travis-ci.org/m-kuhn/QGIS#L1493-L1526
pass
if isDarwin and os.path.isfile(os.path.join(QgsApplication.prefixPath(), "bin", "gdalinfo")):
# Looks like there's a bundled gdal. Let's use it.
os.environ['PATH'] = "{}{}{}".format(os.path.join(QgsApplication.prefixPath(), "bin"), os.pathsep, envval)
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(QgsApplication.prefixPath(), "lib")
else:
# Other platforms should use default gdal finder codepath
settings = QSettings()
path = settings.value('/GdalTools/gdalPath', '')
if not path.lower() in envval.lower().split(os.pathsep):
envval += '{}{}'.format(os.pathsep, path)
os.putenv('PATH', envval)
fused_command = ' '.join([unicode(c) for c in commands])
progress.setInfo('GDAL command:')
progress.setCommand(fused_command)
progress.setInfo('GDAL command output:')
success = False
retry_count = 0
while success == False:
loglines = []
loglines.append('GDAL execution console output')
try:
proc = subprocess.Popen(
fused_command,
shell=True,
stdout=subprocess.PIPE,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
for line in proc:
progress.setConsoleInfo(line)
loglines.append(line)
success = True
except IOError as e:
if retry_count < 5:
retry_count += 1
else:
raise IOError(e.message + u'\nTried 5 times without success. Last iteration stopped after reading {} line(s).\nLast line(s):\n{}'.format(len(loglines), u'\n'.join(loglines[-10:])))
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
GdalUtils.consoleOutput = lo | glines
@staticmethod
def getConsoleOutput():
return GdalUtils.consoleOutput
@staticmethod
def getSupportedRasters():
if not gdalAvailable:
return {}
if GdalUtils.supportedRasters is not None:
return GdalUtils.support | edRasters
if gdal.GetDriverCount() == 0:
gdal.AllRegister()
GdalUtils.supportedRasters = {}
GdalUtils.supportedRasters['GTiff'] = ['tif']
for i in range(gdal.GetDriverCount()):
driver = gdal.GetDriver(i)
if driver is None:
continue
shortName = driver.ShortName
metadata = driver.GetMetadata()
#===================================================================
# if gdal.DCAP_CREATE not in metadata \
# or metadata[gdal.DCAP_CREATE] != 'YES':
# continue
#===================================================================
if gdal.DMD_EXTENSION in metadata:
extensions = metadata[gdal.DMD_EXTENSION].split('/')
if extensions:
GdalUtils.supportedRasters[shortName] = extensions
return GdalUtils.supportedRasters
@staticmethod
def getSupportedRasterExtensions():
allexts = ['tif']
for exts in GdalUtils.getSupportedRasters().values():
for ext in exts:
if ext not in allexts and ext != '':
allexts.append(ext)
return allexts
@staticmethod
def getVectorDriverFromFileName(filename):
ext = os.path.splitext(filename)[1]
if ext == '':
return 'ESRI Shapefile'
formats = QgsVectorFileWriter.supportedFiltersAndFormats()
for k, v in formats.iteritems():
if ext in k:
return v
return 'ESRI Shapefile'
@staticmethod
def getFormatShortNameFromFilename(filename):
ext = filename[filename.rfind('.') + 1:]
supported = GdalUtils.getSupportedRasters()
for name in supported.keys():
exts = supported[name]
if ext in exts:
return name
return 'GTiff'
@staticmethod
def escapeAndJoin(strList):
joined = ''
for s in strList:
if s[0] != '-' and ' ' in s:
escaped = '"' + s.replace('\\', '\\\\').replace('"', '\\"') \
+ '"'
else:
escaped = s
joined += escaped + ' '
return joined.strip()
@staticmethod
def version():
return int(gdal.VersionInfo('VERSION_NUM'))
|
rpm5/createrepo_c | acceptance_tests/tests/base.py | Python | gpl-2.0 | 12,537 | 0.001276 | import os
import re
import time
import shutil
import pprint
import filecmp
import os.path
import tempfile
import unittest
import threading
import subprocess
from .fixtures import PACKAGESDIR
OUTPUTDIR = None
OUTPUTDIR_LOCK = threading.Lock()
def get_outputdir():
global OUTPUTDIR
if OUTPUTDIR:
return OUTPUTDIR
OUTPUTDIR_LOCK.acquire()
if not OUTPUTDIR:
prefix = time.strftime("./testresults_%Y%m%d_%H%M%S_")
OUTPUTDIR = tempfile.mkdtemp(prefix=prefix, dir="./")
OUTPUTDIR_LOCK.release()
return OUTPUTDIR
class _Result(object):
def __str__(self):
return pprint.pformat(self.__dict__)
class CrResult(_Result):
def __init__(self):
self.rc = None # Return code
self.out = None # stdout + stderr
self.dir = None # Directory that was processed
self.prog = None # Program name
self.cmd = None # Complete command
self.outdir = None # Output directory
self.logfile = None # Log file where the out was logged
class RepoDiffResult(_Result):
def __init__(self):
self.rc = None
self.out = None
self.repo1 = None
self.repo2 = None
self.cmd = None # Complete command
self.logfile = None # Log file where the out was logged
class RepoSanityCheckResult(_Result):
def __init__(self):
self.rc = None
self.out = None
self.repo = None
self.cmd = None
self.logfile = None
class BaseTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
self | .outdir = get_outputdir()
self.tcdir = os.path.join(self.outdir, self.__class__.__name__)
if not os.path.exists(self.tcdir):
os.mkdir(s | elf.tcdir)
if self.__class__.__doc__:
description_fn = os.path.join(self.tcdir, "description")
open(description_fn, "w").write(self.__class__.__doc__+'\n')
unittest.TestCase.__init__(self, *args, **kwargs)
self.tdir = None # Test dir for the current test
self.indir = None # Input dir for the current test
self._currentResult = None # Result of the current test
# Prevent use of a first line from test docstring as its name in output
self.shortDescription_orig = self.shortDescription
self.shortDescription = self._shortDescription
self.main_cwd = os.getcwd()
def _shortDescription(self):
return ".".join(self.id().split('.')[-2:])
def run(self, result=None):
# Hacky
self._currentResult = result # remember result for use in tearDown
unittest.TestCase.run(self, result)
def setUp(self):
os.chdir(self.main_cwd) # In case of TimedOutException in Nose test... the tearDown is not called :-/
caller = self.id().split(".", 3)[-1]
self.tdir = os.path.abspath(os.path.join(self.tcdir, caller))
os.mkdir(self.tdir)
self.indir = os.path.join(self.tdir, "input")
os.mkdir(self.indir)
description = self.shortDescription_orig()
if description:
fn = os.path.join(self.tdir, "description")
open(fn, "w").write(description+'\n')
#self.log = # TODO
os.chdir(self.tdir)
self.setup()
def setup(self):
pass
def tearDown(self):
if self.tdir and self._currentResult:
if not len(self._currentResult.errors) + len(self._currentResult.failures):
self.set_success()
self.teardown()
os.chdir(self.main_cwd)
def teardown(self):
pass
def runcmd(self, cmd, logfile=None, workdir=None, stdin_data=None):
"""Stolen from the kobo library.
Author of the original function is dmach@redhat.com"""
# TODO: Add time how long the command takes
if type(cmd) in (list, tuple):
import pipes
cmd = " ".join(pipes.quote(i) for i in cmd)
if logfile is not None:
already_exists = False
if os.path.exists(logfile):
already_exists = True
logfile = open(logfile, "a")
if already_exists:
logfile.write("\n{0}\n Another run\n{0}\n".format('='*79))
logfile.write("cd %s\n" % os.getcwd())
for var in ("PATH", "PYTHONPATH", "LD_LIBRARY_PATH"):
logfile.write('export %s="%s"\n' % (var, os.environ.get(var,"")))
logfile.write("\n")
logfile.write(cmd+"\n")
logfile.write("\n+%s+\n\n" % ('-'*77))
stdin = None
if stdin_data is not None:
stdin = subprocess.PIPE
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=workdir)
if stdin_data is not None:
class StdinThread(threading.Thread):
def run(self):
proc.stdin.write(stdin_data)
proc.stdin.close()
stdin_thread = StdinThread()
stdin_thread.daemon = True
stdin_thread.start()
output = ""
while True:
lines = proc.stdout.readline().decode('utf-8')
if lines == "":
break
if logfile:
logfile.write(lines)
output += lines
proc.wait()
if logfile:
logfile.write("\n+%s+\n\n" % ('-'*77))
logfile.write("RC: %s (%s)\n" % (proc.returncode, "Success" if not proc.returncode else "Failure"))
logfile.close()
if stdin_data is not None:
stdin_thread.join()
return proc.returncode, output
def set_success(self):
"""Create a SUCCESS file in directory of the current test"""
fn = os.path.join(self.tdir, "SUCCESS")
open(fn, "w")
def copy_pkg(self, name, dst):
"""Copy package from testdata into specified destination"""
src = os.path.join(PACKAGESDIR, name)
shutil.copy(src, dst)
return os.path.join(dst, name)
def indir_addpkg(self, name):
"""Add package into input dir for the current test"""
src = os.path.join(PACKAGESDIR, name)
return self.copy_pkg(src, self.indir)
def indir_makedirs(self, path):
"""Make a directory in input dir of the current test"""
path = path.lstrip('/')
final_path = os.path.join(self.indir, path)
os.makedirs(final_path)
return final_path
def indir_mkfile(self, name, content=""):
"""Create a file in input dir of the current test"""
fn = os.path.join(self.indir, name)
with open(fn, "w") as f:
f.write(content)
return fn
def tdir_makedirs(self, path):
"""Make a directory in test dir of the current test"""
path = path.lstrip('/')
final_path = os.path.join(self.tdir, path)
os.makedirs(final_path)
return final_path
def run_prog(self, prog, dir, args=None, outdir=None):
res = CrResult()
res.dir = dir
res.prog = prog
res.outdir = outdir
res.logfile = os.path.join(self.tdir, "out_%s" % res.prog)
res.cmd = "%(prog)s --verbose %(args)s %(dir)s" % {
"prog": res.prog,
"dir": res.dir,
"args": args or "",
}
res.rc, res.out = self.runcmd(res.cmd, logfile=res.logfile)
return res
def run_cr(self, dir, args=None, c=False, outdir=None):
"""Run createrepo and return CrResult object with results
:returns: Result of the createrepo run
:rtype: CrResult
"""
prog = "createrepo_c" if c else "createrepo"
if not outdir:
outdir = os.path.join(self.tdir, prog)
else:
outdir = os.path.join(self.tdir, outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
args = " -o %s %s" % (outdir, args if args else "")
res = self.run_prog(prog, dir, args, outdir)
return res
def run_sqlr(self, dir, args=None):
""""""
res = |
markokr/cc | cc/daemon/logtail.py | Python | bsd-2-clause | 12,194 | 0.009923 | #! /usr/bin/env python
"""
Logfile tailer for rotated log files.
Supports 2 operating modes: classic, rotated.
Assumes that:
. All log files reside in the same directory.
. We can find last log file by sorting the file list alphabetically.
In classic mode:
. When log is switched, the tailer continues tailing from the next file.
. When the tailer is restarted, it continues tailing from saved position.
In rotated mode:
. When log is switched, the tailer continues tailing from reopened file.
"""
from __future__ import with_statement
import cStringIO
import glob
import os
import re
import sys
import time
import skytools
import cc.util
from cc.daemon import CCDaemon
from cc.message import is_msg_req_valid
from cc.reqs import LogtailMessage
class LogfileTailer (CCDaemon):
""" Logfile tailer for rotated log files """
log = skytools.getLogger ('d:LogfileTailer')
BUF_MINBYTES = 64 * 1024
PROBESLEFT = 2 # number of retries after old log EOF and new log spotted
def reload (self):
super(LogfileTailer, self).reload()
self.op_mode = self.cf.get ('operation-mode', '')
if self.op_mode not in (None, '', 'classic', 'rotated'):
self.log.error ("unknown operation-mode: %s", self.op_mode)
self.file_mode = self.cf.get ('file-mode', '')
if self.file_mode not in (None, '', 'text', 'binary'):
self.log.error ("unknown file-mode: %s", self.file_mode)
self.logdir = self.cf.getfile ('logdir')
if self.op_mode in (None, '', 'classic'):
self.logmask = self.cf.get ('logmask')
elif self.op_mode == 'rotated':
self.logname = self.cf.get ('logname')
if re.search ('\?|\*', self.logname):
self.log.error ("wildcards in logname not supported: %s", self.logname)
self.logmask = self.logname
self.compression = self.cf.get ('compression', '')
if self.compression not in (None, '', 'none', 'gzip', 'bzip2'):
self.log.error ("unknown compression: %s", self.compression)
self.compression_level = self.cf.getint ('compression-level', '')
self.msg_suffix = self.cf.get ('msg-suffix', '')
if self.msg_suffix and not is_msg_req_valid (self.msg_suffix):
self.log.error ("invalid msg-suffix: %s", self.msg_suffix)
self.msg_suffix = None
self.use_blob = self.cf.getbool ('use-blob', True)
self.lag_maxbytes = cc.util.hsize_to_bytes (self.cf.get ('lag-max-bytes', '0'))
self.reverse_sort = False
self.buf_maxbytes = cc.util.hsize_to_bytes (self.cf.get ('buffer-bytes', '0'))
self.buf_maxlines = self.cf.getint ('buffer-lines', -1)
self.buf_maxdelay = 1.0
# compensate for our config class weakness
if self.buf_maxbytes <= 0: self.buf_maxbytes = None
if self.buf_maxlines < 0: self.buf_maxlines = None
# set defaults if nothing found in config
if self.buf_maxbytes is None and self.buf_maxlines is None:
self.buf_maxbytes = 1024 * 1024
if self.compression not in (None, '', 'none'):
if self.buf_maxbytes < self.BUF_MINBYTES:
self.log.info ("buffer-bytes too low, adjusting: %i -> %i", self.buf_maxbytes, self.BUF_MINBYTES)
self.buf_maxbytes = self.BUF_MINBYTES
def startup (self):
super(LogfileTailer, self).startup()
self.logfile = None # full path
self.logf = None # file object
self.logfpos = None # tell()
self.probesleft = self.PROBESLEFT
self.first = True
self.tailed_files = 0
self.tailed_bytes = 0
self.buffer = cStringIO.StringIO()
self.buflines = 0
self.bufseek = None
self.saved_fpos = None
self.save_file = None
self.logf_dev = self.logf_ino = None
sfn = self.get_save_filename()
try:
with open (sfn, "r") as f:
s = f.readline().split('\t', 1)
try:
self.logfile = s[1].strip()
self.saved_fpos = int(s[0])
self.log.info ("found saved state for %s", self.logfile)
except:
self.logfile = self.saved_fpos = None
if self.op_mode == 'rotated':
self.log.info ("cannot use saved state in this operation mode")
self.logfile = self.saved_fpos = None
lag = self.count_lag_bytes()
if lag is not None:
self.log.info ("currently | lagging %i bytes behind", lag)
if lag > self.lag_maxbytes:
self.log.warning | ("lag too big, skipping")
self.logfile = self.saved_fpos = None
else:
self.log.warning ("cannot determine lag, skipping")
self.logfile = self.saved_fpos = None
except IOError:
pass
self.save_file = open (sfn, "a")
def count_lag_bytes (self):
files = self.get_all_filenames()
if self.logfile not in files or self.saved_fpos is None:
return None
lag = 0
while True:
fn = files.pop()
st = os.stat(fn)
lag += st.st_size
if (fn == self.logfile):
break
lag -= self.saved_fpos
assert lag >= 0
return lag
def get_all_filenames (self):
""" Return sorted list of all log file names """
lfni = glob.iglob (os.path.join (self.logdir, self.logmask))
lfns = sorted (lfni, reverse = self.reverse_sort)
return lfns
def get_last_filename (self):
""" Return the name of latest log file """
files = self.get_all_filenames()
if files:
return files[-1]
return None
def get_next_filename (self):
""" Return the name of "next" log file """
files = self.get_all_filenames()
if not files:
return None
try:
i = files.index (self.logfile)
if not self.first:
fn = files[i+1]
else:
fn = files[i]
except ValueError:
fn = files[-1]
except IndexError:
fn = files[i]
return fn
def get_save_filename (self):
""" Return the name of save file """
return os.path.splitext(self.pidfile)[0] + ".save"
def save_file_pos (self):
self.save_file.truncate (0)
self.save_file.write ("%i\t%s" % (self.bufseek, self.logfile))
self.log.debug ("saved offset %i for %s", self.bufseek, self.logfile)
def is_new_file_available (self):
if self.op_mode in (None, '', 'classic'):
return (self.logfile != self.get_next_filename())
elif self.op_mode == 'rotated':
st = os.stat (self.logfile)
return (st.st_dev != self.logf_dev or st.st_ino != self.logf_ino)
else:
raise ValueError ("unsupported mode of operation")
def try_open_file (self, name):
""" Try open log file; sleep a bit if unavailable. """
if name:
assert self.buffer.tell() == 0
try:
self.logf = open (name, 'rb')
self.logfile = name
self.logfpos = 0
self.bufseek = 0
self.send_stats() # better do it async me think (?)
self.log.info ("Tailing %s", self.logfile)
self.stat_inc ('tailed_files')
self.tailed_files += 1
self.probesleft = self.PROBESLEFT
st = os.fstat (self.logf.fileno())
self.logf_dev, self.logf_ino = st.st_dev, st.st_ino
except IOError, e:
self.log.info ("%s", e)
time.sleep (0.2)
else:
self.log.debug ("no logfile available, waiting")
time.sleep (0.2)
def tail (self):
""" Keep reading from log file (line by line), switch to next file if current file is exhausted.
"""
while not self.last_sigint:
if not self.logf:
|
eoyilmaz/anima | tests/arnold/test_base85.py | Python | mit | 5,868 | 0.001022 | # -*- coding: utf-8 -*-
from anima.render.arnold import base85
import unittest
import struct
class Base85TestCase(unittest.TestCase):
"""tests the base85 module
"""
def setup(self):
"""setup the test
"""
pass
def test_arnold_b85_encode_is_working_properly(self):
"""testing if arnold_b85_encode is working properly
"""
raw_data = [
struct.pack('f', 2),
struct.pack('f', 3.484236717224121),
]
encoded_data = [
'8TFfd',
'8^RH(',
]
for i in range(len(raw_data)):
self.assertEqual(
encoded_data[i],
base85.arnold_b85_encode(raw_data[i])
)
def test_arnold_b85_encode_packs_zeros_properly(self):
"""testing if arnold_b85_encode is packing zeros properly
"""
raw_data = [
struct.pack('f', 0.0),
struct.pack('ffff', 0.0, 0.0, 3.484236717224121, 0.0)
]
encoded_data = [
'z',
'zz8^RH(z'
]
for i in range(len(raw_data)):
self.assertEqual(
encoded_data[i],
base85.arnold_b85_encode(raw_data[i])
)
def test_arnold_b85_encode_packs_ones_properly(self):
"""testing if arnold_b85_encode is packing ones properly
"""
raw_data = [
struct.pack('f', 1.0),
struct.pack('ffff', 1.0, 1.0, 3.484236717224121, 1.0)
]
encoded_data = [
'y',
'yy8^RH(y'
]
for i in range(len(raw_data)):
self.assertEqual(
encoded_data[i],
base85.arnold_b85_encode(raw_data[i])
)
def test_arnold_b85_decode_is_working_properly(self):
"""testing if arnold_b85_decode is working properly
"""
raw_data = [
struct.pack('f', 2),
struct.pack('f', 3.484236717224121),
]
encoded_data = [
'8TFfd',
'8^RH(',
]
for i in range(len(raw_data)):
self.assertEqual(
raw_data[i],
base85.arnold_b85_decode(encoded_data[i])
)
def test_arnold_b85_decode_unpacks_zeros_properly(self):
"" | "testing if arnold_b85_decode is unpacking zeros properly
"""
raw_data = [
struct.pack('f', 0.0),
struct.pack('ffff', 0.0, 0.0, 3.484236717224121, 0.0)
]
encoded_data = [
'z',
'zz8^RH(z'
]
for i in range(len(raw_d | ata)):
self.assertEqual(
raw_data[i],
base85.arnold_b85_decode(encoded_data[i])
)
def test_arnold_b85_decode_unpacks_ones_properly(self):
"""testing if arnold_b85_decode is unpacking zeros properly
"""
raw_data = [
struct.pack('f', 1.0),
struct.pack('ffff', 1.0, 1.0, 3.484236717224121, 1.0)
]
encoded_data = [
'y',
'yy8^RH(y'
]
for i in range(len(raw_data)):
self.assertEqual(
raw_data[i],
base85.arnold_b85_decode(encoded_data[i])
)
def test_arnold_b85_encoding_real_world_data(self):
"""testing encoding with some real world data
"""
# b85UINT
raw_data = [0, 1, 9, 8, 1, 2, 10, 9, 2, 3, 11, 10, 3, 4, 12, 11, 4, 5,
13, 12, 5, 6, 14, 13, 6, 7, 15, 14]
encoded_data = "&UOP6&psb:'7Bt>'Rg1B'n6CF(4ZUJ(P)gN"
data_format = '%sB' % len(raw_data)
self.assertEqual(
encoded_data,
base85.arnold_b85_encode(struct.pack(data_format, *raw_data))
)
self.assertEqual(
raw_data,
list(struct.unpack('%sB' % len(raw_data),
base85.arnold_b85_decode(encoded_data)))
)
# b85POINT2
raw_data = [0, 0.75, 0.0625, 0.75, 0.125, 0.75, 0.1875, 0.75, 0.25,
0.75, 0.3125, 0.75, 0.375, 0.75, 0.4375, 0.75, 0, 1,
0.0625, 1, 0.125, 1, 0.1875, 1, 0.25, 1, 0.3125, 1, 0.375,
1, 0.4375, 1]
encoded_data = "z8?r5N7e-P78?r5N7reTb8?r5N8$W,M8?r5N8+HY88?r5N8.koX8" \
"?r5N82:0x8?r5N85]GC8?r5Nzy7e-P7y7reTby8$W,My8+HY8y8." \
"koXy82:0xy85]GCy"
data_format = '%sf' % len(raw_data)
self.assertEqual(
encoded_data,
base85.arnold_b85_encode(struct.pack(data_format, *raw_data))
)
self.assertEqual(
raw_data,
list(struct.unpack('%sf' % len(raw_data),
base85.arnold_b85_decode(encoded_data)))
)
# b85POINT
raw_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
47]
encoded_data = "zy8TFfd8[8>O8b)k:8eM,Z8hpC%8l>YE8oaoe8qI%u8s0108tl<@" \
"8vSGP8x:R`9$v]p9&]i+9(Dt;9)8OC9*,*K9*tZS9+h5[9,[ec9-" \
"O@k9.Bps9/6L&90*'.90rW691f2>92YbF93M=N94@mV954H^96'x" \
"f96L;j96pSn97?kr97d.v983G%98W_)99&w-99K:199oR59:>j99" \
":c-=9;2EA9;V]E9<%uI9<J8M"
data_format = '%sf' % len(raw_data)
self.assertEqual(
encoded_data,
base85.arnold_b85_encode(struct.pack(data_format, *raw_data))
)
self.assertEqual(
raw_data,
list(struct.unpack('%sf' % len(raw_data),
base85.arnold_b85_decode(encoded_data)))
)
|
samuelpulfer/icinga-flashlight | bin/blinkdingsdo.py | Python | mit | 5,168 | 0.033902 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, time, socket, logging, signal
import RPi.GPIO as GPIO
from threading import Thread
from daemon import D | aemon
def blinkdingsdo():
| HOST = '' # Symbolic name meaning all available interfaces
PORT = 8888 # Arbitrary non-privileged port
logfile = '/var/log/blinkdingdo.log'
logging.basicConfig(filename=logfile,format='%(asctime)s %(name)s %(levelname)s:%(message)s' ,level=logging.DEBUG)
mylog = logging.getLogger("default")
mylog.info("Beginn Log")
#Function for handling connections. This will be used to create threads
def clientthread(conn, addr):
#Sending message to connected client
conn.send('Wilkommen auf dem Server.\nFuer Hilfe bitte schreiend im Kreis herumrennen oder \'help\' eingeben.\n') #send only takes string
#infinite loop so that function do not terminate and thread do not end.
while True:
#Receiving from client
data = conn.recv(1024).strip()
if data == 'help':
reply = '''\
Blinkdingsdo v0.1
blink_on Schaltet das Blinklicht an.
blink_off Schaltet das Blinklicht aus.
alert Löst einen Cordlessalarm aus.
weather Zeigt den aktuellen Wetterbericht für ihre Region an.
quit Beendet die Verbindung.
'''
elif data == 'blink_on':
# hier sollte das Blinken angeschaltet werden
mylog.info('blink_on von ' + addr[0] + ':' + str(addr[1]))
try:
GPIO.remove_event_detect(12)
GPIO.output(8, GPIO.HIGH)
time.sleep(2)
GPIO.add_event_detect(12, GPIO.FALLING, callback= switchoff2, bouncetime=200)
except Exception as e:
mylog.info(str(e))
reply = 'Blinklicht eingeschaltet\n'
elif data == 'blink_off':
# hier sollte das Blinklicht ausgeschaltet werden
mylog.info('blink_off von ' + addr[0] + ':' + str(addr[1]))
GPIO.output(8, GPIO.LOW)
reply = 'Blinklicht ausgeschaltet\n'
elif data == 'alert':
# hier sollte der Alarm ausgelöst werden
alertthread = Thread(target=alert, args=(1,))
alertthread.start()
reply = 'Alarm ausgelöst\n'
elif data == 'weather':
reply = 'Seriously ????????????????????????\n'
elif data == 'quit':
conn.sendall('ByeBye\n')
break
else:
reply = 'Sie chönts afacht nöd\n'
conn.sendall(reply)
mylog.warning('Disconnected with ' + addr[0] + ':' + str(addr[1]))
conn.close()
def alert(x):
GPIO.output(10, GPIO.HIGH)
mylog.info('alarm ausgeloest')
time.sleep(2)
GPIO.output(10, GPIO.LOW)
def switchoff(x):
while True:
GPIO.wait_for_edge(12, GPIO.FALLING, bouncetime=200)
mylog.info('Switch betaetigt')
GPIO.output(8, GPIO.LOW)
def switchoff2(channel):
mylog.info('Switch betaetigt')
GPIO.output(8, GPIO.LOW)
def handler(signum, frame):
mylog.info('Programm wird beendet')
try:
s.close()
mylog.info('Socket geschlossen')
GPIO.remove_event_detect(12)
GPIO.output(8, GPIO.LOW)
GPIO.output(10, GPIO.LOW)
mylog.info('GPIOs zurueckgesetzt')
except Exception as e:
mylog.info(str(e))
mylog.info("Ende Log")
logging.shutdown()
self.delpid()
sys.exit(0)
mylog.info('Beginn initialisierung')
# RPi.GPIO Layout verwenden (wie Pin-Nummern)
GPIO.setmode(GPIO.BOARD)
# Pins auf Output setzen
GPIO.setup(8, GPIO.OUT)
GPIO.setup(10, GPIO.OUT)
# Pins auf Input setzen und PullUp aktivieren
GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Outputs auf Low setzen
GPIO.output(8, GPIO.LOW)
GPIO.output(10, GPIO.LOW)
mylog.info('Initialisierung abgeschlossen')
#signal.signal(signal.SIGTERM, handler)
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, handler)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mylog.info('Socket created')
#Bind socket to local host and port
try:
s.bind((HOST, PORT))
except socket.error as msg:
mylog.error('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
sys.exit()
mylog.info('Socket bind complete')
#Start listening on socket
s.listen(10)
mylog.info('Socket now listening')
#thread01 = Thread(target=switchoff, args=(1,))
#thread01.start()
GPIO.add_event_detect(12, GPIO.FALLING, callback= switchoff2, bouncetime=200)
# Loop
while True:
#wait to accept a connection - blocking call
conn, addr = s.accept()
mylog.info('Connected with ' + addr[0] + ':' + str(addr[1]))
#start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.
x1 = Thread(target=clientthread, args=(conn, addr,))
x1.start()
#########################################################################
# Klasse ueberschreiben
class MyDaemon(Daemon):
def run(self):
blinkdingsdo()
#########################################################################
# Kommandozeilenparameter abfangen
if __name__ == "__main__":
daemon = MyDaemon('/tmp/daemon-example.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
lmcro/webserver | admin/wizards/flcache.py | Python | gpl-2.0 | 7,353 | 0.01292 | # -*- coding: utf-8 -*-
#
# Cherokee-admin's Common Static wizard
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import re
import CTK
import Wizard
from util import *
from configured import *
NOTE_WELCOME_H1 = N_("Welcome to the Cache wizard")
NOTE_WELCOME_P1 = N_("This wizard adds a rule to configure the cache mechanism shipped with Cherokee.")
NOTE_WELCOME_P2 = N_("It will boost the performance of your virtual server by caching content, and thus optimizing subsequent requests of the same Web resource.")
NOTE_POLICY = N_("What do you want the server to cache?")
NOTE_AUTH = N_("Enable HTTP's PURGE method, so cache object can be purged remotely.")
NOTE_ENC_EXTS = N_("Which extensions you want the Cache to store?")
NOTE_ADMIN_USER = N_("Administration user name")
NOTE_ADMIN_PASSWORD = N_("Administration user's password")
PREFIX = 'tmp!wizard!flcache'
URL_APPLY = r'/wizard/vserver/flcache/apply'
POLICIES = [
('dynamic', N_('Cacheable Dynamic Responses')),
('encoded', N_('Encoded responses of static files'))
]
ENCODED_EXTS_DEFAULT = "js,css,html,htm,xml"
ENCODED_EXTS_CONF = """
%(rule_pre)s!match!final = 0
%(rule_pre)s!match = and
%(rule_pre)s!match!left = extensions
%(rule_pre)s!match!left!check_local_file = 0
%(rule_pre)s!match!left!extensions = %(extensions | )s
%(rule_pre)s!match!right = header
%(rule_pre)s!match!right!complete = 0
%(rule_pre)s!match!right!header = Accept-Encoding
%(rule_pre)s!match!right!type = provided
%(rule_pre)s!flcache = allow
%(rule_pre)s!flcache!policy = all_but_forbidden
%(rule_pre)s!encoder!gzip = allow
"""
GLOBAL_CACHE_CONF = """
%(rule_pre)s | !match!final = 0
%(rule_pre)s!match = directory
%(rule_pre)s!match!directory = /
%(rule_pre)s!flcache = allow
%(rule_pre)s!flcache!policy = explicitly_allowed
"""
PURGE_METHOD_CONF = """
%(rule_pre)s!match!final = 1
%(rule_pre)s!match = method
%(rule_pre)s!match!method = purge
%(rule_pre)s!handler = admin
%(rule_pre)s!auth = authlist
%(rule_pre)s!auth!methods = digest
%(rule_pre)s!auth!realm = Front-Line Cache
%(rule_pre)s!auth!list!1!password = %(password)s
%(rule_pre)s!auth!list!1!user = %(username)s
"""
class Commit:
def Commit_Rule (self):
vsrv_num = CTK.cfg.get_val ('%s!vsrv_num'%(PREFIX))
tipe = CTK.cfg.get_val ('%s!policy' %(PREFIX))
purge = int (CTK.cfg.get_val('%s!purge'%(PREFIX), "0"))
vsrv_pre = 'vserver!%s' %(vsrv_num)
# Next rule
x, rule_pre = cfg_vsrv_rule_get_next (vsrv_pre)
if tipe == 'encoded':
# Encoded content
extensions = CTK.cfg.get_val ('%s!encoded_exts'%(PREFIX))
config = ENCODED_EXTS_CONF % (locals())
else:
# General caching
config = GLOBAL_CACHE_CONF % (locals())
# Apply the config
CTK.cfg.apply_chunk (config)
CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
# Purge
if purge:
x, rule_pre = cfg_vsrv_rule_get_next (vsrv_pre)
username = CTK.cfg.get_val ('%s!admin!username'%(PREFIX))
password = CTK.cfg.get_val ('%s!admin!password'%(PREFIX))
config = PURGE_METHOD_CONF % (locals())
# Apply the config
CTK.cfg.apply_chunk (config)
CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
del (CTK.cfg[PREFIX])
return CTK.cfg_reply_ajax_ok()
def __call__ (self):
if CTK.post.pop('final'):
CTK.cfg_apply_post()
return self.Commit_Rule()
return CTK.cfg_apply_post()
class Create:
def __call__ (self):
cont = CTK.Container()
# Basic Funcionality
combo_type = CTK.ComboCfg ('%s!policy'%(PREFIX), trans_options(POLICIES))
table = CTK.PropsTable()
table.Add (_('Type of Caching'), combo_type, _(NOTE_POLICY))
encoded_table = CTK.PropsTable ()
encoded_table.Add (_('Extensions'), CTK.TextCfg ('%s!encoded_exts'%(PREFIX), False, {'value': ENCODED_EXTS_DEFAULT}), _(NOTE_ENC_EXTS))
encoded_box = CTK.Box ({'style':'display:none;'})
encoded_box += encoded_table
combo_type.bind ('change',
"if ($(this).val() == 'dynamic') {%s} else {%s}"
%(encoded_box.JS_to_hide(), encoded_box.JS_to_show()))
submit = CTK.Submitter (URL_APPLY)
submit += table
submit += encoded_box
cont += CTK.RawHTML ("<h2>%s</h2>" %(_("Basic Functionality")))
cont += submit
# PURGE
check_auth = CTK.CheckCfgText ('%s!purge'%(PREFIX), False, _("Enable"))
table = CTK.PropsTable()
table.Add (_('Enable PURGE requests'), check_auth, _(NOTE_AUTH))
admin_table = CTK.PropsTable()
admin_table.Add (_('Admin Username'), CTK.TextCfg ('%s!admin!username'%(PREFIX), False, {'class':'noauto'}), _(NOTE_ADMIN_USER))
admin_table.Add (_('Admin Password'), CTK.TextCfg ('%s!admin!password'%(PREFIX), False, {'class':'noauto'}), _(NOTE_ADMIN_PASSWORD))
admin_box = CTK.Box ({'style':'display:none;'})
admin_box += admin_table
check_auth.bind ('change',
"if ($(this).find(':checked').size() <= 0) {%s} else {%s}"
%(admin_box.JS_to_hide(), admin_box.JS_to_show()))
submit = CTK.Submitter (URL_APPLY)
submit += table
submit += admin_box
cont += CTK.RawHTML ("<h2>%s</h2>" %(_("Purge of Cache Objects")))
cont += submit
# Global Submit
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('final', '1')
cont += submit
cont += CTK.DruidButtonsPanel_PrevCreate_Auto()
return cont.Render().toStr()
class Welcome:
def __call__ (self):
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_WELCOME_H1)))
cont += Wizard.Icon ('flcache', {'class': 'wizard-descr'})
box = CTK.Box ({'class': 'wizard-welcome'})
box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P1)))
box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P2)))
cont += box
# Send the VServer num
tmp = re.findall (r'^/wizard/vserver/(\d+)/', CTK.request.url)
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('%s!vsrv_num'%(PREFIX), tmp[0])
cont += submit
cont += CTK.DruidButtonsPanel_Next_Auto()
return cont.Render().toStr()
# Rule
CTK.publish ('^/wizard/vserver/(\d+)/flcache$', Welcome)
CTK.publish ('^/wizard/vserver/(\d+)/flcache/2', Create)
CTK.publish (r'^%s'%(URL_APPLY), Commit, method="POST")
|
zirou30/python_student | 65.py | Python | gpl-3.0 | 191 | 0.010695 | ''' |
hacking = 'Pentest'
hacking.lower() == 'pentest'
Out[19]: True
Retornou True pq a função built-in do python lower(),
converteu toda a string atribuída | em hacking para
minúscula.
''' |
alanmcruickshank/superset-dev | superset/models/annotations.py | Python | apache-2.0 | 1,544 | 0 | """a collection of Annotation-related models"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask_appbuilder import Model
from sqlalchemy import (
Column, DateTime, ForeignKey, Index, Integer, String, Text,
)
from sqlalchemy.orm import relationship
from superset.models.helpers import AuditMixinNullable
class AnnotationLayer(Model, AuditMixinNullable):
"""A logical namespace for a set of annotations"""
__tablename__ = 'annotation_layer'
id = Column(Integer, primary_ | key=True)
name = Column(String(250))
descr = Column(Text)
def __repr__(self):
return self.name
class Annotation(Model, AuditMixinNullable):
"""Time-related annotation"""
__tablename__ = 'annotation'
id = Column(Integer, primary_key=True)
start_dttm = Column(DateTime)
end_dttm = Column(DateTime)
layer_id = Column(Integer, ForeignKey('annotation_layer.id'))
short_descr = Column(String(500))
long_ | descr = Column(Text)
layer = relationship(
AnnotationLayer,
backref='annotation')
__table_args__ = (
Index('ti_dag_state', layer_id, start_dttm, end_dttm),
)
@property
def data(self):
return {
'start_dttm': self.start_dttm,
'end_dttm': self.end_dttm,
'short_descr': self.short_descr,
'long_descr': self.long_descr,
'layer': self.layer.name if self.layer else None,
}
|
prairiesariea/wsync | src/syncwrap/Helper.py | Python | gpl-3.0 | 1,723 | 0.005223 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2015 Prairies Ariea
#
# This file is part of Files Sync.
#
# Files Sync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later versi | on.
#
# Files Sync is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See th | e
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Files Sync. If not, see <http://www.gnu.org/licenses/>.
#
##
import inspect
import sys
import os
import re
class Helper:
def lineno(self):
return inspect.currentframe().f_back.f_lineno
@staticmethod
def os_path_relpath(*tArgs, **dArgs):
relativePath = os.path.relpath(*tArgs, **dArgs)
pivotPythonVersion = (2,7)
thisPythonVersion = (sys.version_info[0],sys.version_info[1])
if thisPythonVersion[0] <= pivotPythonVersion[0] \
and thisPythonVersion[1] < pivotPythonVersion[1]:
sys.stderr.write("WARNING:%s:%s: " % (__file__, Helper().lineno())
+ "Running with this Python version %s" \
% str(sys.version_info)
+ " is deprecated.\n")
relativePath = re.sub('^(\.\./)*', '', relativePath)
#relativePath = relativePath.replace('../', '')
return relativePath
###
##
#
if __name__ == '__main__':
pass
|
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/scipy/spatial/kdtree.py | Python | agpl-3.0 | 37,132 | 0.005521 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y | : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p | == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(np.float)
self.mins = np.minimum(maxes,mins).astype(np.float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split :
Input.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
|
robbiet480/home-assistant | homeassistant/components/isy994/light.py | Python | apache-2.0 | 4,198 | 0.000238 | """Support for ISY994 lights."""
from typing import Callable, Dict
from pyisy.constants import ISY_VALUE_UNKNOWN
from homeassistant.components.light import (
DOMAIN as LIGHT,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
CONF_RESTORE_LIGHT_STATE,
DOMAIN as ISY994_DOMAIN,
ISY994_NODES,
)
from .entity import ISYNodeEntity
from .helpers import migrate_old_unique_ids
from .services import async_setup_device_services, async_setup_light_services
ATTR_LAST_BRIGHTNESS = "last_brightness"
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 light platform."""
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
isy_options = entry.options
restore_light_state = isy_options.get(CONF_RESTORE_LIGHT_STATE, False)
devices = []
for node in hass_isy_data[ISY994_NODES][LIGHT]:
devices.append(ISYLightEntity(node, restore_light_state))
await migrate_old_unique_ids(hass, LIGHT, devices)
async_add_entities(devices)
async_setup_device_services(hass)
async_setup_light_services(hass)
class ISYLightEntity(ISYNodeEntity, LightEntity, RestoreEntity):
"""Representation of an ISY994 light device."""
def __init__(self, node, restore_light_state) -> None:
"""Initialize the ISY994 light device."""
super().__init__(node)
self._last_brightness = None
self._restore_light_state = restore_light_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 light is on."""
if self._node.status == ISY_VALUE_UNKNOWN:
return False
return int(self._node.status) != 0
@property
def brightness(self) -> float:
"""Get the brightness of the ISY994 light."""
if self._node.status == ISY_VALUE_UNKNOWN:
return None
return int(self._node.status)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 light device."""
self._last_brightness = self.brightness
if not self._node.turn_off():
_LOGGER.debug("Unable to turn off light")
def on_update(self, event: object) -> None:
"""Save brightness in the update event from the ISY994 Node."""
if self._node.status not in (0, ISY_VALUE_UNKNOWN):
self._last_brightness = self._node.status
super().on_update(event)
# pylint: disable=arguments-differ
def turn_on(self, brightness=None, **kwargs) -> None:
"""Send the turn on command to the ISY994 light device."""
if self._restore_light_state and brightness is None and self._last_brightness:
brightness = self._last_brightness
if not self._node.turn_on(val=brightness):
_LOGGER.debug("Unable to turn on l | ight")
@property
def device_state_attributes(self) -> Dict:
"""Return the light attributes."""
attribs = super().device_state_attributes
attribs[ATTR_LAST_BRIGHTNESS] = self._last_brightness
return attribs
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_added_to_hass(self) -> None:
| """Restore last_brightness on restart."""
await super().async_added_to_hass()
self._last_brightness = self.brightness or 255
last_state = await self.async_get_last_state()
if not last_state:
return
if (
ATTR_LAST_BRIGHTNESS in last_state.attributes
and last_state.attributes[ATTR_LAST_BRIGHTNESS]
):
self._last_brightness = last_state.attributes[ATTR_LAST_BRIGHTNESS]
def set_on_level(self, value):
"""Set the ON Level for a device."""
self._node.set_on_level(value)
def set_ramp_rate(self, value):
"""Set the Ramp Rate for a device."""
self._node.set_ramp_rate(value)
|
gnocchixyz/gnocchi | gnocchi/storage/s3.py | Python | apache-2.0 | 9,452 | 0 | # -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
import tenacity
from gnocchi import carbonara
from gnocchi.common import s3
from gnocchi import storage
from gnocchi import utils
boto3 = s3.boto3
botocore = s3.botocore
OPTS = [
cfg.StrOpt('s3_endpoint_url',
help='S3 endpoint URL'),
cfg.StrOpt('s3_region_name',
default=os.getenv("AWS_DEFAULT_REGION"),
help='S3 region name'),
cfg.StrOpt('s3_access_key_id',
secret=True,
default=os.getenv("AWS_ACCESS_KEY_ID"),
help='S3 access key id'),
cfg.StrOpt('s3_secret_access_key',
secret=True,
default=os.getenv("AWS_SECRET_ACCESS_KEY"),
help='S3 secret access key'),
cfg.StrOpt('s3_bucket_prefix',
# Max bucket length is 63 and we use "-" as separator
# 63 - 1 - len(uuid) = 26
max_length=26,
default='gnocchi',
help='Prefix to namespace metric bucket.'),
cfg.FloatOpt('s3_check_consistency_timeout',
min=0,
default=60,
help="Maximum time to wait checking data consistency when "
"writing to S3. Set to 0 to disable data consistency "
"validation."),
cfg.IntOpt('s3_max_pool_connections',
min=1,
default=50,
help="The maximum number of connections to keep in a "
"connection pool."),
]
def retry_if_operationaborted(exception):
return (isinstance(exception, botocore.exceptions.ClientError)
and exception.response['Error'].get('Code') == "OperationAborted")
class S3Storage(storage.StorageDriver):
WRITE_FULL = True
_consistency_wait = tenacity.wait_exponential(multiplier=0.1)
def __init__(self, conf):
super(S3Storage, self).__init__(conf)
self.s3, self._region_name, self._bucket_prefix = (
s3.get_connection(conf)
)
self._bucket_name = '%s-aggregates' % self._bucket_prefix
if conf.s3_check_consistency_timeout > 0:
self._consistency_stop = tenacity.stop_after_delay(
conf.s3_check_consistency_timeout)
else:
self._consistency_stop = None
def __str__(self):
return "%s: %s" % (self.__class__.__name__, self._bucket_name)
def upgrade(self):
super(S3Storage, self).upgrade()
try:
s3.create_bucket(self.s3, self._bucket_name, self._region_name)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') != "BucketAlreadyExists":
raise
@staticmethod
def _object_name(split_key, aggregation, | version=3):
name = '%s_%s_%s' % (
aggregation,
utils.timespan_total_seconds(split_key.sampling),
split_key,
)
return name + '_v%s' % version if version else name
@staticmethod
def _prefix(metric):
return | str(metric.id) + '/'
def _put_object_safe(self, Bucket, Key, Body):
put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body)
if self._consistency_stop:
def _head():
return self.s3.head_object(Bucket=Bucket,
Key=Key, IfMatch=put['ETag'])
tenacity.Retrying(
retry=tenacity.retry_if_result(
lambda r: r['ETag'] != put['ETag']),
wait=self._consistency_wait,
stop=self._consistency_stop)(_head)
def _store_metric_splits_unbatched(self, metric, key, aggregation, data,
offset, version):
self._put_object_safe(
Bucket=self._bucket_name,
Key=self._prefix(metric) + self._object_name(
key, aggregation.method, version),
Body=data)
def _delete_metric_splits_unbatched(self, metric, key, aggregation,
version=3):
self.s3.delete_object(
Bucket=self._bucket_name,
Key=self._prefix(metric) + self._object_name(
key, aggregation.method, version))
def _delete_metric(self, metric):
bucket = self._bucket_name
response = {}
while response.get('IsTruncated', True):
if 'NextContinuationToken' in response:
kwargs = {
'ContinuationToken': response['NextContinuationToken']
}
else:
kwargs = {}
try:
response = self.s3.list_objects_v2(
Bucket=bucket, Prefix=self._prefix(metric), **kwargs)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == "NoSuchKey":
# Maybe it never has been created (no measure)
return
raise
s3.bulk_delete(self.s3, bucket,
[c['Key'] for c in response.get('Contents', ())])
def _get_splits_unbatched(self, metric, key, aggregation, version=3):
try:
response = self.s3.get_object(
Bucket=self._bucket_name,
Key=self._prefix(metric) + self._object_name(
key, aggregation.method, version))
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == 'NoSuchKey':
return
raise
return response['Body'].read()
def _metric_exists_p(self, metric, version):
unaggkey = self._build_unaggregated_timeserie_path(metric, version)
try:
self.s3.head_object(Bucket=self._bucket_name, Key=unaggkey)
except botocore.exceptions.ClientError as e:
if e.response['Error'].get('Code') == "404":
return False
raise
return True
def _list_split_keys_unbatched(self, metric, aggregations, version=3):
bucket = self._bucket_name
keys = {}
for aggregation in aggregations:
keys[aggregation] = set()
response = {}
while response.get('IsTruncated', True):
if 'NextContinuationToken' in response:
kwargs = {
'ContinuationToken': response['NextContinuationToken']
}
else:
kwargs = {}
response = self.s3.list_objects_v2(
Bucket=bucket,
Prefix=self._prefix(metric) + '%s_%s' % (
aggregation.method,
utils.timespan_total_seconds(
aggregation.granularity),
),
**kwargs)
# If response is empty then check that the metric exists
contents = response.get('Contents', ())
if not contents and not self._metric_exists_p(metric, version):
raise storage.MetricDoesNotExist(metric)
for f in contents:
try:
if (self._version_check(f['Key'], version)):
meta = f['Key'].split('_')
keys[aggregation].add(carbonara.SplitKey(
utils.to_timestamp(meta[2]),
sampling=aggregation.granularity))
except (ValueError, IndexE |
ezequielpereira/Time-Line | libs/wx/tools/Editra/src/syntax/_postscript.py | Python | gpl-3.0 | 9,149 | 0.002514 | ###############################################################################
# Name: postscript.py #
# Purpose: Define Postscript syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
########################################################## | #####################
"""
FILE: postscript.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for PostScript. (case sensitive)
@todo: l3 keywords and ghostscript
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _postscript.py 63834 2010-04-03 06:04:33Z CJP $"
__revision__ = "$Revision: 63834 $"
#---------------- | -------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# PS Level 1 Operators
PS_L1 = (0, "$error = == FontDirectory StandardEncoding UserObjects abs add "
"aload anchorsearch and arc arcn arcto array ashow astore atan "
"awidthshow begin bind bitshift bytesavailable cachestatus ceiling "
"charpath clear cleardictstack cleartomark clip clippath closefile "
"closepath concat concatmatrix copy copypage cos count "
"countdictstack countexecstack counttomark currentcmykcolor "
"currentcolorspace currentdash currentdict currentfile currentflat "
"currentfont currentgray currenthsbcolor currentlinecap "
"currentlinejoin currentlinewidth currentmatrix currentmiterlimit "
"currentpagedevice currentpoint currentrgbcolor currentscreen "
"currenttransfer cvi cvlit cvn cvr cvrs cvs cvx def defaultmatrix "
"definefont dict dictstack div dtransform dup echo end eoclip "
"eofill eq erasepage errordict exch exec execstack executeonly "
"executive exit exp false file fill findfont flattenpath floor "
"flush flushfile for forall ge get getinterval grestore "
"grestoreall gsave gt idetmatrix idiv idtransform if ifelse image "
"imagemask index initclip initgraphics initmatrix inustroke "
"invertmatrix itransform known kshow le length lineto ln load log "
"loop lt makefont mark matrix maxlength mod moveto mul ne neg "
"newpath noaccess nor not null nulldevice or pathbbox pathforall "
"pop print prompt pstack put putinterval quit rand rcheck rcurveto "
"read readhexstring readline readonly readstring rectstroke repeat "
"resetfile restore reversepath rlineto rmoveto roll rotate round "
"rrand run save scale scalefont search setblackgeneration "
"setcachedevice setcachelimit setcharwidth setcolorscreen "
"setcolortransfer setdash setflat setfont setgray sethsbcolor "
"setlinecap setlinejoin setlinewidth setmatrix setmiterlimit "
"setpagedevice setrgbcolor setscreen settransfer setvmthreshold "
"show showpage sin sqrt srand stack start status statusdict stop "
"stopped store string stringwidth stroke strokepath sub systemdict "
"token token transform translate true truncate type ueofill "
"undefineresource userdict usertime version vmstatus wcheck where "
"widthshow write writehexstring writestring xcheck xor")
# PS Level 2 Operators
PS_L2 = (1, "GlobalFontDirectory ISOLatin1Encoding SharedFontDirectory "
"UserObject arct colorimage cshow currentblackgeneration "
"currentcacheparams currentcmykcolor currentcolor "
"currentcolorrendering currentcolorscreen currentcolorspace "
"currentcolortransfer currentdevparams currentglobal currentgstate "
"currenthalftone currentobjectformat currentoverprint "
"currentpacking currentpagedevice currentshared "
"currentstrokeadjust currentsystemparams currentundercolorremoval "
"currentuserparams defineresource defineuserobject deletefile "
"execform execuserobject filenameforall fileposition filter "
"findencoding findresource gcheck globaldict glyphshow gstate "
"ineofill infill instroke inueofill inufill inustroke "
"languagelevel makepattern packedarray printobject product "
"realtime rectclip rectfill rectstroke renamefile resourceforall "
"resourcestatus revision rootfont scheck selectfont serialnumber "
"setbbox setblackgeneration setcachedevice2 setcacheparams "
"setcmykcolor setcolor setcolorrendering setcolorscreen "
"setcolorspace setcolortranfer setdevparams setfileposition "
"setglobal setgstate sethalftone setobjectformat setoverprint "
"setpacking setpagedevice setpattern setshared setstrokeadjust "
"setsystemparams setucacheparams setundercolorremoval "
"setuserparams setvmthreshold shareddict startjob uappend ucache "
"ucachestatus ueofill ufill undef undefinefont undefineresource "
"undefineuserobject upath ustroke ustrokepath vmreclaim "
"writeobject xshow xyshow yshow")
# PS 3 Operators
PS_L3 = (2, "cliprestore clipsave composefont currentsmoothness "
"findcolorrendering setsmoothness shfill")
# RIP-specific operators
RIP_OP = (3, ".begintransparencygroup .begintransparencymask .bytestring "
".charboxpath .currentaccuratecurves .currentblendmode "
".currentcurvejoin .currentdashadapt .currentdotlength "
".currentfilladjust2 .currentlimitclamp .currentopacityalpha "
".currentoverprintmode .currentrasterop .currentshapealpha "
".currentsourcetransparent .currenttextknockout "
".currenttexturetransparent .dashpath .dicttomark "
".discardtransparencygroup .discardtransparencymask "
".endtransparencygroup .endtransparencymask .execn .filename "
".filename .fileposition .forceput .forceundef .forgetsave "
".getbitsrect .getdevice .inittransparencymask .knownget "
".locksafe .makeoperator .namestring .oserrno .oserrorstring "
".peekstring .rectappend .runandhide .setaccuratecurves ."
"setblendmode .setcurvejoin .setdashadapt .setdebug "
".setdefaultmatrix .setdotlength .setfilladjust2 .setlimitclamp "
".setmaxlength .setopacityalpha .setoverprintmode .setrasterop "
".setsafe .setshapealpha .setsourcetransparent .settextknockout "
".settexturetransparent .stringbreak .stringmatch .tempfile "
".type1decrypt .type1encrypt .type1execchar .unread arccos arcsin "
"copydevice copyscanlines currentdevice finddevice findlibfile "
"findprotodevice flushpage getdeviceprops getenv makeimagedevice "
"makewordimagedevice max min putdeviceprops setdevice")
# User Defined Operators
USER_DEF = (4, "")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_PS_DEFAULT, 'default_style'),
(stc.STC_PS_BADSTRINGCHAR, 'unknown_style'),
(stc.STC_PS_BASE85STRING, 'string_style'),
(stc.STC_PS_COMMENT, 'comment_style'),
(stc.STC_PS_DSC_COMMENT, 'comment_style'),
(stc.STC_PS_DSC_VALUE, 'comment_style'), # STYLE ME
(stc.STC_PS_HEXSTRING, 'number_style'),
(stc.STC_PS_IMMEVAL, 'comment_style'), # STYLE ME
(stc.STC_PS_KEYWORD, 'class_style'),
(stc.STC_PS_LITERAL, 'scalar2_style'),
(stc.STC_PS_NAME, 'keyword_style'),
(stc.STC_PS_NUMBER, 'number_style'),
(stc.STC_PS_PAREN_ARRAY, 'default_style'), # STYLE ME
(stc.STC_ |
plotly/plotly.py | packages/python/plotly/plotly/validators/scatterpolar/marker/colorbar/_showexponent.py | Python | mit | 554 | 0 | impor | t _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showexponent",
parent_name="scatterpolar.marker.colorbar",
**kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["all", "first", "la | st", "none"]),
**kwargs
)
|
nuclear-wizard/moose | tutorials/darcy_thermo_mech/step07_adaptivity/problems/step7abc.py | Python | lgpl-2.1 | 3,210 | 0.011838 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import argparse
import vtk
import chigger
EDGE_COLOR = [0.5]*3
def frames():
"""Render frames"""
camera = vtk.vtkCamera()
camera.SetViewUp(0.0000000000, 1.0000000000, 0.0000000000)
camera.SetPosition(0.1520000000, 0.0128500000, 0.1198046424)
camera.SetFocalPoint(0.1520000000, 0.0128500000, 0.0000000000)
reader_a = chigger.exodus.ExodusReader('step7a_coarse_out.e')
reader_b = chigger.exodus.ExodusReader('step7b_fine_out.e')
reader_c = chigger.exodus.ExodusReader('step7c_adapt_out.e')
temp_a = chigger.exodus.ExodusResult(reader_a, camera=camera, variable='temperature',
viewport=[0,0,0.9,0.333],
edges=True, edge_color=EDGE_COLOR,
range=[300, 350], cmap='viridis')
temp_b = chigger.exodus.ExodusResult(reader_b, camera=camera, variable='temperature',
viewport=[0,0.333,0.9,0.666],
edges=True, edge_color=EDGE_COLOR,
range=[300, 350], cmap='viridis')
temp_c = chigger.exodus.ExodusResult(reader_c, camera=camera, variable='temperature',
viewport=[0,0.666,0.9,1],
edges=True, edge_color=EDGE_COLOR,
| range=[300, 350], cmap='viridis')
cbar = chigger.exodus.ExodusColorBar(temp_a,
viewport=[0.9,0,1,1],
length=0.8,
width=0.3,
colorbar_origin=[0.1, 0.1])
| cbar.setOptions('primary', title='Temperature (K)', font_size=28, font_color=[0,0,0])
time = chigger.annotations.TextAnnotation(position=[0.45,0.3], font_size=48, text_color=[0,0,0],
viewport=[0,0,1,1],
justification='center')
window = chigger.RenderWindow(temp_a, temp_b, temp_c, cbar, time, size=[1920, 1080],
background=[1,1,1],
motion_factor=0.24)
for i, t in enumerate(reader_a.getTimes()):
reader_a.setOptions(timestep=i)
reader_b.setOptions(timestep=i)
reader_c.setOptions(timestep=i)
time.setOptions(text='Time = {:.1f} sec.'.format(t))
filename = 'output/step07abc_{:05d}.png'.format(i)
window.write(filename)
window.start()
def movie():
chigger.utils.img2mov('output/step07abc_*.png', 'step07abc_result.mp4',
duration=20, num_threads=6)
if __name__ == '__main__':
if not os.path.isdir('output'):
os.mkdir('output')
frames()
movie()
|
BigEgg/LeetCode | Python/LeetCode/_051_100/_058_LengthOfLastWord.py | Python | mit | 190 | 0.005263 | class Solution:
| def lengthOfLastWord(self, s: str) -> int:
if not s:
retur | n 0
l = s.split()
if not l:
return 0
return len(l[-1])
|
joshbohde/scikit-learn | sklearn/linear_model/logistic.py | Python | bsd-3-clause | 4,271 | 0.001639 | import numpy as np
from ..base import ClassifierMixin
from ..linear_model.base import CoefSelectTransformerMixin
from ..svm.base import BaseLibLinear
from ..svm import liblinear
class LogisticRegression(BaseLibLinear, ClassifierMixin,
CoefSelectTransformerMixin):
"""
Logistic Regression.
Implements L1 and L2 regularized logistic regression.
Parameters
----------
penalty : string, 'l1' or 'l2'
Used to specify the norm used in the penalization
dual : boolean
Dual or primal formulation. Dual formulation is only
implemented for l2 penalty.
C : float
Specifies the strength of the regularization. The smaller it is
the bigger in the regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added the decision function
intercept_scaling : float, default: 1
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
tol: float, optional
tolerance for stopping criteria
Attributes
----------
`coef_` : array, shape = [n_classes-1, n_features]
Coefficient of the features in the decision function.
`intercept_` : array, shape = [n_classes-1]
intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
See also
--------
LinearSVC
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1):
super(LogisticRegression, self).__init__(penalty=penalty,
dual=dual, loss='lr', tol=tol, C=C,
fit_intercept=fit_intercept, intercept_scaling=intercept_scaling)
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered by arithmetical
order.
"""
X = np.asanyarray(X, dtype=np.float64, order='C')
probas = liblinear.predict_prob_wrap(X, self.raw_coef_,
self._get_solver_type(),
self.tol, self.C,
self.class_weight_label,
self.class_weight, self.label_,
| self._get_bias())
return probas[:, np.argsort(self.label_)]
def predict_log_proba(self, X):
"""
Log of Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X : array-like, shape = [n_samples, n_classes]
Returns th | e log-probabilities of the sample for each class in
the model, where classes are ordered by arithmetical
order.
"""
return np.log(self.predict_proba(X))
|
OxPython/Python_bytes_to_string | src/bytes_to_string.py | Python | epl-1.0 | 545 | 0.009208 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Jul 16, 2014
@author: anroco
In Python, how to convert bytes to string?
En Python, | ¿Cómo convertir bytes a string?
'''
#create a bytes object
b = b'El ni\xc3\xb1o come camar\xc3\xb3n'
print(b)
#return an encoded version of 'b' as a string object. Default encoding
#is 'utf-8'.
s = b.decode()
print(type(s))
print(s)
#create a bytes object encoded using 'cp855 | '
b = b'\xd8\xe1\xb7\xeb\xa8\xe5 \xd2\xb7\xe1'
#return a string using decode 'cp855'
s = b.decode('cp855')
print(s)
|
cloudevents/sdk-python | cloudevents/sdk/event/v03.py | Python | apache-2.0 | 4,218 | 0 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cloudevents.sdk.event import base, opt
class Event(base.BaseEvent):
_ce_required_fields = {"id", "source", "type", "specversion"}
_ce_optional_fields = {
"datacontentencoding",
"datacontenttype",
"schemaurl",
"subject",
"time",
}
def __init__(self):
self.ce__specversion = opt.Option("specversion", "0.3", True)
self.ce__id = opt.Option("id", None, True)
self.ce__source = opt.Option("source", None, True)
self.ce__type = opt.Option("type", None, True)
self.ce__datacontenttype = opt.Option("datacontenttype", None, False)
self.ce__datacontentencoding = opt.Option(
"datacontentencoding", None, False
)
self.ce__subject = opt.Option("subject", None, False)
self.ce__time = opt.Option("time", None, False)
self.ce__schemaurl = opt.Option("schemaurl", None, False)
self.ce__data = opt.Option("data", None, False)
self.ce__extensions = opt.Option("extensions", dict(), False)
def CloudEventVersion(self) -> str:
return self.ce__specversion.get()
def EventType(self) -> str:
return self.ce__type.get()
def Source(self) -> str:
return self.ce__source.get()
def EventID(self) -> str:
return self.ce__id.get()
def EventTime(self) -> str:
return self.ce__time.get()
def Subject(self) -> str:
return self.ce__subject.get()
def SchemaURL(self) -> str:
return self.ce__schemaurl.get()
def Data(self) -> object:
return self.ce__data.get()
def Extensions(self) -> dict:
return self.ce__extensions.get()
def ContentType(self) -> str:
return self.ce__datacontenttype.get()
def ContentEncoding(self) -> str:
return self.ce__datacontentencoding.get()
def SetEventType(self, eventType: str) -> base.BaseEvent:
self.Set("type", eventType)
return self
def SetSource(self, source: str) -> base.BaseEvent:
self.Set("source", source)
return self
def SetEventID(self, eventID: str) -> base.BaseEvent:
self.Set("id", eventID)
return self
def SetEventTime(self, eventTime: str) -> base.BaseEvent:
self.Set("time", eventTime)
return self
def SetSubject(self, subject: str) -> base.BaseEvent:
self.Set("subject", subject)
return self
def SetSchemaURL(self, schemaURL: str) -> base.BaseEvent:
self.Set("schemaurl", schemaURL)
return self
def SetData(self, data: object) -> base.BaseEvent:
self.Set("data", data)
return self
def SetExtensions(self, extensions: dict) -> base.BaseEve | nt:
self.Set("extensions", extensions)
return self
def SetContentType(self, contentType: str) -> base.BaseEven | t:
self.Set("datacontenttype", contentType)
return self
def SetContentEncoding(self, contentEncoding: str) -> base.BaseEvent:
self.Set("datacontentencoding", contentEncoding)
return self
@property
def datacontentencoding(self):
return self.ContentEncoding()
@datacontentencoding.setter
def datacontentencoding(self, value: str):
self.SetContentEncoding(value)
@property
def subject(self) -> str:
return self.Subject()
@subject.setter
def subject(self, value: str):
self.SetSubject(value)
@property
def schema_url(self) -> str:
return self.SchemaURL()
@schema_url.setter
def schema_url(self, value: str):
self.SetSchemaURL(value)
|
yakxxx/memek | crawler/api_client.py | Python | mit | 2,755 | 0.013067 | #coding: utf-8
import requests as req
from conf import *
import logging
import re
from datetime import datetime
class Api(object):
def get_promoted(self, page=1):
promoted = self._get('links/promoted', api_params = self._build_api_params({'page': page}))
self._clear_promoted(promoted)
return promoted
def get_comments(self, link_id):
comments = self._get('link/comments/'+link_id, api_params = self._build_api_params())
self._clear_comments(comments)
return comments
def _get(self, url, method_params=[], api_params={}):
full_url = API_URL + url + '/' + self._make_method_params_str(method_params) \
+ '/' + self._make_api_params_str(api_params)
ret = req.get(full_url)
if not self._is_resp_ok(ret):
logging.error('BAD RESPONSE: full_url: ' + full_url + "\n method_params: " + unicode(method_params) \
+ "\n api_params: " + unicode(api_params)+ "\n response: "+ unicode(ret.content))
raise ApiError('Bad Response')
return ret.json
def _make_method_params_str(self, method_params):
if method_params:
return '/'.join([unicode(param) for param in method_params])
else:
return ''
def _make_api_params_str(self, api_params):
return ','.join([unicode(k)+','+unicode(v) for k,v in api_params.items()])
def _is_resp_ok(self, resp):
if resp.status_code == 200:
if type(resp.json) == dict:
return not resp.json.get('error', False)
else:
return True
else:
return False
def _build_api_params(self, custom_params = {}):
default = { 'appkey' : APP_KEY,
'output' : 'clear'
}
default.update(custom_params)
return default
@classmethod
def parse_link_id_from_url(cls, url):
match = re.search(r'/(link|ramka)/([0-9]*)/', url)
if match:
return match.group(2)
else:
raise WrongData('url not containing link_id in proper format, %s' % url)
def _clear_promoted(self, promoted):
for p in promoted:
p['date'] = datetime.strptime(p['date'], '%Y-% | m-%d %H:%M:%S')
p['article_id'] = p['id']
del p['id']
return promoted
def _clear_comments(self, comments):
for c in comments:
c['date'] = datetime.strptime(c['date'], '%Y-%m-%d %H:%M:%S')
c['comment_id'] = c['id']
del c['id']
return comments
clas | s ApiError(Exception):
pass
class WrongData(Exception):
pass |
vasylbo/aiohttp | tests/test_streams.py | Python | apache-2.0 | 21,126 | 0 | """Tests for streams.py"""
import asyncio
import unittest
from unittest import mock
from aiohttp import streams
from aiohttp import test_utils
class TestStreamReader(unittest.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def _make_one(self, *args, **kwargs):
return streams.StreamReader(loop=self.loop, *args, **kwargs)
def test_create_waiter(self):
stream = self._make_one()
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, stream._create_waiter, 'test')
@mock.patch('aiohttp.streams.asyncio')
def test_ctor_global_loop(self, m_asyncio):
stream = streams.StreamReader()
self.assertIs(stream._loop, m_asyncio.get_event_loop.return_value)
def test_at_eof(self):
stream = self._make_one()
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_wait_eof(self):
stream = self._make_one()
wait_task = asyncio.Task(stream.wait_eof(), loop=self.loop)
def cb():
yield from asyncio.sleep(0.1, loop=self.loop)
stream.feed_eof()
asyncio.Task(cb(), loop=self.loop)
self.loop.run_until_complete(wait_task)
self.assertTrue(stream.is_eof())
self.assertIsNone(stream._eof_waiter)
def test_wait_eof_eof(self):
stream = self._make_one()
stream.feed_eof()
wait_task = asyncio.Task(stream.wait_eof(), loop=self.loop)
self.loop.run_until_complete(wait_task)
self.assertTrue(stream.is_eof())
def test_feed_empty_data(self):
stream = self._make_one()
stream.feed_data(b'')
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_feed_nonempty_data(self):
stream = self._make_one()
stream.feed_data(self.DATA)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
def test_read_zero(self):
# Read zero bytes.
stream = self._make_one()
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0)) |
self.assertEqual(b'', data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
def test_read(self):
# Read bytes.
stream = self._make_one()
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb( | ):
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = self._make_one()
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line2', data)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = self._make_one()
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
data = self.loop.run_until_complete(stream.read())
self.assertIs(data, streams.EOF_MARKER)
@mock.patch('aiohttp.streams.internal_logger')
def test_read_eof_infinit(self, internal_logger):
# Read bytes.
stream = self._make_one()
stream.feed_eof()
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.assertTrue(internal_logger.warning.called)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = self._make_one()
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_read_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = self._make_one()
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b' chunk4', data)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = self._make_one(limit=3)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'line2\n', data)
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = self._make_one(limit=7)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
stream = self._make_one(limit=7)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'chunk3\n', data)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = self._make_one()
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
stream.feed_eof()
data = self.loop.run_u |
climberwb/video-api | src/comments/serializers.py | Python | mit | 2,962 | 0.01418 | from django.contrib.auth import get_user_model
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework import routers, serializers, viewsets, permissions
from rest_framework_jwt.authe | ntication import JSONWebTokenAuthentication
from rest_framework.reverse import reverse
from .models import Comment
# from accounts.models import MyUser
User = get_user_model()
class CommentVideoUrlHyperlinkedIdentityField(serializers.HyperlinkedIdentityField):
def get_url(self, obj,view_name,request,format):
kwargs = {
"cat_slug":obj.video.category.slug,
"vi | d_slug":obj.video.slug
}
# print(reverse(view_name,kwargs=kwargs))
return reverse(view_name,kwargs=kwargs,request=request,format=format)
class CommentUpdateSerializer(serializers.ModelSerializer):
user = serializers.CharField(source='user.username',read_only=True)
class Meta:
model = Comment
fields = [
'id',
'user',
'text'
]
class CommentCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = [
'text',
'user',
'video',
'parent'
]
class ChildCommentSerializer(serializers.HyperlinkedModelSerializer):
# user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
user = serializers.CharField(source='user.username',read_only=True)
class Meta:
model = Comment
fields = [
'id',
"user",
'text'
]
class CommentSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField("comment_detail_api",lookup_field="pk")
# user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
video = CommentVideoUrlHyperlinkedIdentityField("video_detail_api")
user = serializers.CharField(source='user.username',read_only=True)
children = serializers.SerializerMethodField(read_only=True)
def get_children(self,instance):
# queryset = instance.get_children()
queryset = Comment.objects.filter(parent__pk =instance.pk)
serializer = ChildCommentSerializer(queryset,context={"request":instance}, many=True)
return serializer.data
class Meta:
model = Comment
fields = [
"url",
'id',
"children",
# "parent",
"user",
'video',
'text'
]
class CommentViewSet(viewsets.ModelViewSet):
authentication_classes = [SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication]
permission_classes = [permissions.IsAuthenticated,]
queryset = Comment.objects.all()
serializer_class = CommentSerializer
|
credativ/pulp | server/test/unit/server/managers/repo/test_sync.py | Python | gpl-2.0 | 28,058 | 0.002281 | import datetime
import signal
import mock
from .... import base
from pulp.common import dateutils, constants
from pulp.common.tags import resource_tag, RESOURCE_REPOSITORY_TYPE, action_tag
from pulp.devel import mock_plugins
from pulp.plugins.model import SyncReport
from pulp.server.async import tasks
from pulp.server.async.tasks import TaskResult
from pulp.server.db.model.repository import Repo, RepoImporter, RepoSyncResult
from pulp.server.exceptions import PulpExecutionException, InvalidValue
import pulp.server.managers.factory as manager_factory
import pulp.server.managers.repo.cud as repo_manager
import pulp.server.managers.repo.importer as repo_importer_manager
import pulp.server.managers.repo.publish as repo_publish_manager
import pulp.server.managers.repo.sync as repo_sync_manager
class MockRepoPublishManager:
# Last call state
repo_id = None
base_progress_report = None
# Call behavior
raise_error = False
def validate_config(self, repo_data, distributor_config):
return True
def auto_publish_for_repo(self, repo_id, base_progress_report):
MockRepoPublishManager.repo_id = repo_id
MockRepoPublishManager.base_progress_report = base_progress_report
if M | ockRepoPublishManager.raise_error:
raise repo_publish_manager.PulpExecutionException(repo_i | d)
@classmethod
def reset(cls):
MockRepoPublishManager.repo_id = None
MockRepoPublishManager.raise_error = False
class RepoSyncManagerTests(base.PulpServerTests):
def setUp(self):
super(RepoSyncManagerTests, self).setUp()
mock_plugins.install()
# Create the manager instances for testing
self.repo_manager = repo_manager.RepoManager()
self.importer_manager = repo_importer_manager.RepoImporterManager()
self.sync_manager = repo_sync_manager.RepoSyncManager()
def tearDown(self):
super(RepoSyncManagerTests, self).tearDown()
mock_plugins.reset()
# Reset the manager factory
manager_factory.reset()
def clean(self):
super(RepoSyncManagerTests, self).clean()
Repo.get_collection().remove()
RepoImporter.get_collection().remove()
RepoSyncResult.get_collection().remove()
# Reset the state of the mock's tracker variables
MockRepoPublishManager.reset()
@mock.patch('pulp.server.managers.repo.sync.sync.apply_async_with_reservation')
def test_queue_sync(self, mock_sync_task):
repo_id = 'foo'
overrides = {'baz': 1}
self.sync_manager.queue_sync_with_auto_publish(repo_id, overrides)
kwargs = {
'repo_id': repo_id,
'sync_config_override': overrides
}
tags = [resource_tag(RESOURCE_REPOSITORY_TYPE, repo_id), action_tag('sync')]
mock_sync_task.assert_called_with(RESOURCE_REPOSITORY_TYPE, repo_id, tags=tags,
kwargs=kwargs)
    @mock.patch('pulp.server.managers.repo._common.get_working_directory',
                return_value="/var/cache/pulp/mock_worker/mock_task_id")
    @mock.patch('pulp.server.managers.repo.publish.RepoPublishManager.queue_publish')
    @mock.patch('pulp.server.managers.repo.publish.RepoPublishManager.auto_distributors')
    @mock.patch('pulp.server.managers.event.fire.EventFireManager.fire_repo_sync_started')
    @mock.patch('pulp.server.managers.event.fire.EventFireManager.fire_repo_sync_finished')
    def test_sync(self, mock_finished, mock_started, mock_auto_distributors, mock_queue_publish,
                  mock_get_working_directory):
        """
        Tests sync under normal conditions where everything is configured
        correctly. No importer config is specified.
        """
        # Setup
        sync_config = {'bruce': 'hulk', 'tony': 'ironman'}
        self.repo_manager.create_repo('repo-1')
        self.importer_manager.set_importer('repo-1', 'mock-importer', sync_config)
        # One auto-publish distributor whose queued publish returns a known task id.
        mock_auto_distributors.return_value = [{'id': 'my_distributor'}]
        mock_queue_publish.return_value.task_id = 'abc123'

        # Test
        report = self.sync_manager.sync('repo-1', sync_config_override=None)

        # Verify
        repo = Repo.get_collection().find_one({'id': 'repo-1'})
        repo_importer = RepoImporter.get_collection().find_one({'repo_id': 'repo-1',
                                                                'id': 'mock-importer'})

        # Database: last_sync must be recorded as a recent timestamp
        self.assertTrue(repo_importer['last_sync'] is not None)
        self.assertTrue(assert_last_sync_time(repo_importer['last_sync']))

        # Call into the Importer: positional args are (repo, conduit, call_config)
        sync_args = mock_plugins.MOCK_IMPORTER.sync_repo.call_args[0]
        self.assertEqual(repo['id'], sync_args[0].id)
        self.assertTrue(sync_args[1] is not None)
        self.assertEqual({}, sync_args[2].plugin_config)
        self.assertEqual(sync_config, sync_args[2].repo_plugin_config)
        self.assertEqual({}, sync_args[2].override_config)

        # History Entry
        history = list(RepoSyncResult.get_collection().find({'repo_id': 'repo-1'}))
        self.assertEqual(1, len(history))
        self.assertEqual('repo-1', history[0]['repo_id'])
        self.assertEqual(RepoSyncResult.RESULT_SUCCESS, history[0]['result'])
        self.assertEqual('mock-importer', history[0]['importer_id'])
        self.assertEqual('mock-importer', history[0]['importer_type_id'])
        self.assertTrue(history[0]['started'] is not None)
        self.assertTrue(history[0]['completed'] is not None)

        # Counts come from the mock importer's canned SyncReport.
        self.assertEqual(10, history[0]['added_count'])
        self.assertEqual(1, history[0]['removed_count'])
        self.assertTrue(history[0]['summary'] is not None)
        self.assertTrue(history[0]['details'] is not None)

        self.assertTrue(history[0]['error_message'] is None)
        self.assertTrue(history[0]['exception'] is None)
        self.assertTrue(history[0]['traceback'] is None)

        # Lifecycle events fired exactly once each, keyed by the repo id
        self.assertEqual(1, mock_started.call_count)
        self.assertEqual('repo-1', mock_started.call_args[0][0])
        self.assertEqual(1, mock_finished.call_count)
        self.assertEqual('repo-1', mock_finished.call_args[0][0]['repo_id'])

        # auto publish tests
        mock_auto_distributors.assert_called_once_with('repo-1')
        mock_queue_publish.assert_called_once_with('repo-1', 'my_distributor')
        self.assertTrue(isinstance(report, TaskResult))
        self.assertEqual(report.spawned_tasks, [{'task_id': 'abc123'}])
    @mock.patch('pulp.server.managers.repo._common.get_working_directory',
                return_value="/var/cache/pulp/mock_worker/mock_task_id")
    def test_sync_with_graceful_fail(self, mock_get_working_directory):
        """A SyncReport flagged unsuccessful raises and still records a
        failed entry in the sync history."""
        # Setup
        sync_config = {'bruce': 'hulk', 'tony': 'ironman'}
        self.repo_manager.create_repo('repo-1')
        self.importer_manager.set_importer('repo-1', 'mock-importer', sync_config)
        # Importer reports a graceful (non-exception) failure: success=False.
        mock_plugins.MOCK_IMPORTER.sync_repo.return_value = SyncReport(
            False, 10, 5, 1, 'Summary of the sync', 'Details of the sync')

        # Test
        self.assertRaises(PulpExecutionException, self.sync_manager.sync, 'repo-1')

        # Verify: the failure is persisted in the sync history
        history = list(RepoSyncResult.get_collection().find({'repo_id': 'repo-1'}))
        self.assertEqual(1, len(history))
        self.assertEqual('repo-1', history[0]['repo_id'])
        self.assertEqual(RepoSyncResult.RESULT_FAILED, history[0]['result'])
        self.assertEqual('mock-importer', history[0]['importer_id'])
        self.assertEqual('mock-importer', history[0]['importer_type_id'])
        self.assertTrue(history[0]['started'] is not None)
        self.assertTrue(history[0]['completed'] is not None)

        # Cleanup
        mock_plugins.reset()
@mock.patch('pulp.server.managers.repo._common.get_working_directory',
return_value="/var/cache/pulp/mock_worker/mock_task_id")
def test_sync_with_sync_config_override(self, mock_get_working_directory):
"""
Tests a sync when passing in an individual config of override options.
"""
# Setup
importer_config = {'thor': 'thor'}
|
ubc/compair | compair/models/lti_models/lti_user.py | Python | gpl-3.0 | 9,485 | 0.004323 | # sqlalchemy
import re
from sqlalchemy import func, select, and_, or_
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy_enum34 import EnumType
from flask import current_app
from . import *
from compair.core import db, display_name_generator
class LTIUser(DefaultTableMixin, UUIDMixin, WriteTrackingMixin):
__tablename__ = 'lti_user'
# table columns
lti_consumer_id = db.Column(db.Integer, db.ForeignKey("lti_consumer.id", ondelete="CASCADE"),
nullable=False)
user_id = db.Column(db.String(191), nullable=False)
lis_person_name_given = db.Column(db.String(255), nullable=True)
lis_person_name_family = db.Column(db.String(255), nullable=True)
lis_person_name_full = db.Column(db.String(255), nullable=True)
lis_person_contact_email_primary = db.Column(db.String(255), nullable=True)
global_unique_identifier = db.Column(db.String(255), nullable=True)
compair_user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"),
nullable=True)
system_role = db.Column(EnumType(SystemRole), nullable=False)
student_number = db.Column(db.String(255), nullable=True)
lis_person_sourcedid = db.Column(db.String(255), nullable=True)
# relationships
# compair_user via User Model
# lti_consumer via LTIConsumer Model
lti_memberships = db.relationship("LTIMembership", backref="lti_user", lazy="dynamic")
lti_user_resource_links = db.relationship("LTIUserResourceLink", backref="lti_user", lazy="dynamic")
# hybrid and other functions
lti_consumer_uuid = association_proxy('lti_consumer', 'uuid')
oauth_consumer_key = association_proxy('lti_consumer', 'oauth_consumer_key')
compair_user_uuid = association_proxy('compair_user', 'uuid')
def is_linked_to_user(self):
return self.compair_user_id != None
    def generate_or_link_user_account(self):
        """Link this LTI user to the ComPAIR account sharing its global
        unique identifier, creating a fresh account when none exists.

        Runs only when no link exists yet and a global unique identifier
        is present; commits the session at the end.
        """
        from . import SystemRole, User

        if self.compair_user_id == None and self.global_unique_identifier:
            self.compair_user = User.query \
                .filter_by(global_unique_identifier=self.global_unique_identifier) \
                .one_or_none()

            if not self.compair_user:
                # No matching account: create one with no login credentials
                # (the user authenticates via LTI launches).
                self.compair_user = User(
                    username=None,
                    password=None,
                    system_role=self.system_role,
                    firstname=self.lis_person_name_given,
                    lastname=self.lis_person_name_family,
                    email=self.lis_person_contact_email_primary,
                    global_unique_identifier=self.global_unique_identifier
                )

                if self.compair_user.system_role == SystemRole.student:
                    self.compair_user.student_number = self.student_number

                # instructors can have their display names set to their full name by default
                if self.compair_user.system_role != SystemRole.student and self.compair_user.fullname != None:
                    self.compair_user.displayname = self.compair_user.fullname
                else:
                    self.compair_user.displayname = display_name_generator(self.compair_user.system_role.value)

            db.session.commit()
@classmethod
def get_by_lti_consumer_id_and_user_id(cls, lti_consumer_id, user_id):
return LTIUser.query \
.filter_by(
lti_consumer_id=lti_consumer_id,
user_id=user_id
) \
.one_or_none()
@classmethod
def get_by_tool_provider(cls, lti_consumer, tool_provider):
from . import SystemRole
if tool_provider.user_id == None:
return None
lti_user = LTIUser.get_by_lti_consumer_id_and_user_id(
lti_consumer.id, tool_provider.user_id)
if not lti_user:
lti_user = LTIUser(
lti_consumer_id=lti_consumer.id,
user_id=tool_provider.user_id,
system_role=SystemRole.instructor \
if tool_provider.roles and any(
role.lower().find("instructor") >= 0 or
role.lower().find("faculty") >= 0 or
role.lower().find("staff") >= 0
for role in tool_provider.roles
) \
else SystemRole.student
)
db.session.add(lti_user)
lti_user.lis_person_name_given = tool_provider.lis_person_name_given
lti_user.lis_person_name_family = tool_provider.lis_person_name_family
lti_user.lis_person_name_full = tool_provider.lis_person_name_full
lti_user.handle_fullname_with_missing_first_and_last_name()
lti_user.lis_person_contact_email_primary = tool_provider.lis_person_contact_email_primary
lti_user.lis_person_sourcedid = tool_provider.lis_person_sourcedid
if lti_consumer.global_unique_identifier_param and lti_consumer.global_unique_identifier_param in tool_provider.launch_params:
lti_user.global_unique_identifier = tool_provider.launch_params[lti_consumer.global_unique_identifier_param]
if lti_consumer.custom_param_regex_sanitizer and lti_consumer.global_unique_identifier_param.startswith('custom_'):
regex = re.compile(lti_consumer.custom_param_regex_sanitizer)
lti_user.global_unique_identifier = regex.sub('', lti_user.global_unique_identifier)
if lti_user.global_unique_identifier == '':
lti_user.global_unique_identifier = None
else:
lti_user.global_unique_identifier = None
if lti_consumer.student_number_param and lti_consumer.student_number_param in tool_provider.launch_params:
lti_user.student_number = tool_provider.launch_params[lti_consumer.student_number_param]
if lti_consumer.custom_param_regex_sanitizer and lti_consumer.student_number_param.startswith('custom_'):
regex = re.compile(lti_consumer.custom_param_regex_sanitizer)
lti_user.student_number = regex.sub('', lti_user.student_number)
if lti_user.student_number == '':
lti_user.student_number = None
else:
lti_user.student_number = None
if not lti_user.is_linked_to_user() and lti_user.global_unique_identifier:
lti_user.generate_or_link_user_account()
db.session.commit()
return lti_user
@classmethod
def get_by_uuid_or_404(cls, model_uuid, joinedloads=[], title=None, message=None):
if not title:
title = "LTI User Unavailable"
if not message:
message = "Sorry, this LTI user was deleted or is no longer accessible."
return super(cls, cls).get_by_uuid_or_404(model_uuid, joinedloads, title, message)
# relationships
    def update_user_profile(self):
        """Overwrite a linked student's profile fields from the LTI launch
        data whenever app config forbids students from editing them."""
        if self.compair_user and self.compair_user.system_role == SystemRole.student:
            # overwrite first/last name if student not allowed to change it
            if not current_app.config.get('ALLOW_STUDENT_CHANGE_NAME'):
                self.compair_user.firstname = self.lis_person_name_given
                self.compair_user.lastname = self.lis_person_name_family

            # overwrite email if student not allowed to change it
            if not current_app.config.get('ALLOW_STUDENT_CHANGE_EMAIL'):
                self.compair_user.email = self.lis_person_contact_email_primary

            # overwrite student number if student not allowed to change it and lti_consumer has a student_number_param
            if not current_app.config.get('ALLOW_STUDENT_CHANGE_STUDENT_NUMBER') and self.lti_consumer.student_number_param:
                self.compair_user.student_number = self.student_number
def upgrade_system_role(self):
# upgrade system role is needed
if self.compair_user:
if self.compair_user.system_role == SystemRole.student and self.system_role in [SystemRole.instructor, SystemRole.sys_admin]:
self.compair_user.system_r |
m00nlight/hackerrank | algorithm/contests/Ad-Infinitum-10/A.py | Python | gpl-2.0 | 357 | 0.014006 | from __future__ import division
from | math import sqrt
def solve(s1, s2, q, L):
    """Return the time for the diagonal gap to close.

    The remaining length along the square's diagonal (full diagonal
    L*sqrt(2) minus sqrt(2*q)) is divided by the relative speed of the
    two endpoints, |s2 - s1|.
    """
    remaining = L * sqrt(2) - sqrt(2.0 * q)
    closing_speed = abs(s2 - s1)
    return remaining / closing_speed
if __name__ == '__main__':
    # Python 2 script: read side length L and the two speeds, then answer
    # each of the Q queries with the closing time to four decimal places.
    L, s1, s2 = map(int, raw_input().strip().split())
    Q = int(raw_input())
    for _ in range(Q):
        q = int(raw_input())
        print '%.4lf' % solve(s1, s2, q, L)
vileopratama/vitech | src/addons/auth_crypt/auth_crypt.py | Python | mit | 3,959 | 0.002273 | import logging
from passlib.context import CryptContext
import openerp
from openerp.osv import fields, osv
from openerp.addons.base.res import res_users
res_users.USER_PRIVATE_FIELDS.append('password_crypt')
_logger = logging.getLogger(__name__)
default_crypt_context = CryptContext(
# kdf which can be verified by the context. The default encryption kdf is
# the first of the list
['pbkdf2_sha512', 'md5_crypt'],
# deprecated algorithms are still verified as usual, but ``needs_update``
# will indicate that the stored hash should be replaced by a more recent
# algorithm. Passlib 1.6 supports an `auto` value which deprecates any
# algorithm but the default, but Ubuntu LTS only provides 1.5 so far.
deprecated=['md5_crypt'],
)
class res_users(osv.osv):
    """Extend res.users to store passwords hashed via passlib."""
    _inherit = "res.users"

    def init(self, cr):
        """On module install/update, hash any plaintext passwords still stored."""
        _logger.info("Hashing passwords, may be slow for databases with many users...")
        cr.execute("SELECT id, password FROM res_users"
                   " WHERE password IS NOT NULL"
                   " AND password != ''")
        for uid, pwd in cr.fetchall():
            self._set_password(cr, openerp.SUPERUSER_ID, uid, pwd)

    def set_pw(self, cr, uid, id, name, value, args, context):
        """Function-field inverse: encrypt and store a newly written password."""
        if value:
            self._set_password(cr, uid, id, value, context=context)
            self.invalidate_cache(cr, uid, context=context)

    def get_pw( self, cr, uid, ids, name, args, context ):
        """Function-field getter: map user id -> stored password column value."""
        cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
        return dict(cr.fetchall())

    _columns = {
        'password': fields.function(get_pw, fnct_inv=set_pw, type='char', string='Password', invisible=True, store=True),
        'password_crypt': fields.char(string='Encrypted Password', invisible=True, copy=False),
    }

    def check_credentials(self, cr, uid, password):
        """Validate ``password`` for ``uid``, migrating legacy plaintext and
        deprecated hash schemes to the current kdf on successful login."""
        # convert to base_crypt if needed
        cr.execute('SELECT password, password_crypt FROM res_users WHERE id=%s AND active', (uid,))
        encrypted = None
        if cr.rowcount:
            stored, encrypted = cr.fetchone()
            if stored and not encrypted:
                # Plaintext on file: hash it now and clear the plaintext column.
                self._set_password(cr, uid, uid, stored)
                self.invalidate_cache(cr, uid)
        try:
            return super(res_users, self).check_credentials(cr, uid, password)
        except openerp.exceptions.AccessDenied:
            if encrypted:
                valid_pass, replacement = self._crypt_context(cr, uid, uid)\
                        .verify_and_update(password, encrypted)
                if replacement is not None:
                    # Hash used a deprecated scheme; re-store with the default kdf.
                    self._set_encrypted_password(cr, uid, uid, replacement)
                if valid_pass:
                    return
            raise

    def _set_password(self, cr, uid, id, password, context=None):
        """ Encrypts then stores the provided plaintext password for the user
        ``id``
        """
        encrypted = self._crypt_context(cr, uid, id, context=context).encrypt(password)
        self._set_encrypted_password(cr, uid, id, encrypted, context=context)

    def _set_encrypted_password(self, cr, uid, id, encrypted, context=None):
        """ Store the provided encrypted password to the database, and clears
        any plaintext password

        :param uid: id of the current user
        :param id: id of the user on which the password should be set
        """
        cr.execute(
            "UPDATE res_users SET password='', password_crypt=%s WHERE id=%s",
            (encrypted, id))

    def _crypt_context(self, cr, uid, id, context=None):
        """ Passlib CryptContext instance used to encrypt and verify
        passwords. Can be overridden if technical, legal or political matters
        require different kdfs than the provided default.

        Requires a CryptContext as deprecation and upgrade notices are used
        internally
        """
        return default_crypt_context
|
piller-imre/exprail-python | exprail/token.py | Python | mit | 319 | 0 | """
Token class definition
"""
class Token(object):
    """A lexical token: a read-only (type, value) pair."""

    def __init__(self, type, value):
        # Both fields live in one tuple; `type` deliberately shadows the
        # builtin to preserve the original constructor signature.
        self._fields = (type, value)

    @property
    def type(self):
        """Token category (read-only)."""
        return self._fields[0]

    @property
    def value(self):
        """Token payload (read-only)."""
        return self._fields[1]
|
UASLab/ImageAnalysis | scripts/explore/annotations.py | Python | mit | 11,987 | 0.004004 | import csv
import json
import numpy as np
import os
import pathlib
import navpy
from direct.showbase.ShowBase import ShowBase
from panda3d.core import CardMaker, LPoint3, NodePath, Texture, TransparencyAttrib
from direct.gui.DirectGui import *
from tkinter import *
class Annotations():
    def __init__(self, render, surface, proj, ned_ref, tk_root):
        """Manage annotation markers for a project scene.

        :param render: panda3d render node the marker cards attach to
        :param surface: elevation surface used for ground-height lookups
        :param proj: the project (source of project_dir / analysis_dir)
        :param ned_ref: [lat, lon, alt] reference point for NED conversions
        :param tk_root: Tk root window used to spawn edit dialogs
        """
        self.render = render
        self.surface = surface
        self.proj = proj
        self.ned_ref = ned_ref
        self.tk_root = tk_root
        self.icon = loader.loadTexture('explore/marker-icon-2x.png')
        self.view_size = 100
        self.id_prefix = "<edit me>"
        # next_id advances as markers with explicit ids are loaded/created
        self.next_id = 0
        self.markers = []
        self.nodes = []
        self.load()
    def ned2lla(self, n, e, d):
        """Convert local NED coordinates to [lat, lon, alt] using the
        project's NED reference point."""
        lla = navpy.ned2lla( [n, e, d],
                             self.ned_ref[0],
                             self.ned_ref[1],
                             self.ned_ref[2] )
        # print(n, e, d, lla)
        return lla
def add_marker(self, ned, comment, id):
marker = { "ned": ned, "comment": comment, "id": id }
self.markers.append(marker)
    def add_marker_dict(self, m):
        """Add a marker from a dict with lat_deg/lon_deg/alt_m/comment keys,
        estimating the surface elevation when the stored altitude is ~0."""
        ned = navpy.lla2ned(m['lat_deg'], m['lon_deg'], m['alt_m'],
                            self.ned_ref[0], self.ned_ref[1], self.ned_ref[2])
        if m['alt_m'] < 1.0:
            # estimate surface elevation if needed
            pos = np.array([ned[1], ned[0]])  # x, y order
            norm = np.linalg.norm(pos)
            if norm > 0:
                v = pos / norm
                # walk towards the center 1m at a time until we get onto
                # the interpolation surface
                while True:
                    z = self.surface.get_elevation(pos[0], pos[1])
                    print("pos:", pos, "v:", v, "z:", z)
                    if z < -1.0:
                        ned[2] = z
                        break
                    elif np.linalg.norm(pos) < 5:
                        # getting too close to the (ned) ned reference pt, failed
                        break
                    else:
                        pos -= v
                print("ned updated:", ned)
        # Honor a stored id (keeping next_id ahead of it); otherwise allocate
        # the next sequential id.
        if 'id' in m:
            id = m['id']
            if id >= self.next_id:
                self.next_id = id + 1
        else:
            id = self.next_id
            self.next_id += 1
        self.add_marker(ned, m['comment'], id)
def load(self):
oldfile = os.path.join(self.proj.project_dir, 'annotations.json')
file = os.path.join(self.proj.analysis_dir, 'annotations.json')
if os.path.exists(oldfile):
print("Moving annotations file to new location...")
os.rename(oldfile, file)
oldcsv = os.path.join(self.proj.project_dir, 'annotations.csv')
newcsv = os.path.join(self.proj.analysis_dir, 'annotations.csv')
if os.path.exists(oldcsv):
os.rename(oldcsv, newcsv)
if os.path.exists(file):
print('Loading saved annotations:', file)
f = open(file, 'r')
root = json.load(f)
if type(root) is dict:
if 'id_prefix' in root:
self.id_prefix = root['id_prefix']
if 'markers' in root:
lla_list = root['markers']
else:
lla_list = root
f.close()
for m in lla_list:
if type(m) is dict:
#print("m is dict")
self.add_marker_dict( m )
elif type(m) is list:
#print("m is list")
ned = navpy.lla2ned(m[0], m[1], m[2],
self.ned_ref[0],
self.ned_ref[1],
self.ned_ref[2])
# print(m, ned)
ned[2] = self.surface.get_elevation(ned[1], ned[0])
if len(m) == 3:
self.add_marker( ned, "" )
else:
self.add_marker( ned, m[3] )
else:
print('No annotations file found.')
    def save_kml(self):
        """Write annotations.kml: one placemark per marker plus the convex
        hull of the optimized camera positions as the survey-area outline."""
        import simplekml
        import scipy
        kml = simplekml.Kml()
        # markers
        for m in self.markers:
            ned = m['ned']
            lla = self.ned2lla( ned[0], ned[1], ned[2] )
            name = "%s%03d" % (self.id_prefix, m['id'])
            kml.newpoint(name=name,
                         coords=[(lla[1], lla[0], lla[2])],
                         description=m['comment'])  # lon, lat, elevation
        # area
        points = []
        for i in self.proj.image_list:
            ned, ypr, quat = i.get_camera_pose(opt=True)
            lla = self.ned2lla( ned[0], ned[1], ned[2] )
            points.append([lla[1], lla[0]])
        hull = scipy.spatial.ConvexHull(points)
        poly = hull.points[hull.vertices].tolist()
        poly.append(poly[0])  # close the loop
        ls = kml.newlinestring(name=pathlib.Path(self.proj.project_dir).name,
                               coords=poly)
        ls.style.linestyle.color = simplekml.Color.blue
        kmlname = os.path.join(self.proj.analysis_dir, 'annotations.kml')
        print('Saving annotations.kml:', kmlname)
        kml.save(kmlname)
    def save(self):
        """Persist markers to KML, the JSON annotations file (lat/lon/alt
        converted from NED), and a flat CSV export."""
        self.save_kml()
        filename = os.path.join(self.proj.analysis_dir, 'annotations.json')
        print('Saving annotations:', filename)
        lla_list = []
        for m in self.markers:
            ned = m['ned']
            lla = self.ned2lla( ned[0], ned[1], ned[2] )
            jm = { 'lat_deg': lla[0],
                   'lon_deg': lla[1],
                   'alt_m': float("%.2f" % (lla[2])),
                   'comment': m['comment'],
                   'id': m['id'] }
            lla_list.append(jm)
        f = open(filename, 'w')
        root = { 'id_prefix': self.id_prefix,
                 'markers': lla_list }
        json.dump(root, f, indent=4)
        f.close()

        # write out simple csv version
        filename = os.path.join(self.proj.analysis_dir, 'annotations.csv')
        with open(filename, 'w') as f:
            fieldnames = ['id', 'lat_deg', 'lon_deg', 'alt_m', 'comment']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for jm in lla_list:
                tmp = dict(jm)  # copy so the prefixed id doesn't leak into JSON data
                tmp['id'] = "%s%03d" % (self.id_prefix, jm['id'])
                writer.writerow(tmp)
def edit(self, id, ned, comment="", exists=False):
lla = self.ned2lla(ned[0], ned[1], ned[2])
new = Toplevel(self.tk_root)
self.edit_result = "cancel"
e = None
def on_ok():
new.quit()
new.withdraw()
print('comment:', e.get())
self.edit_result = "ok"
def on_del():
new.quit()
new.withdraw()
print('comment:', e.get())
self.edit_result = "delete"
def on_cancel():
print("on cancel")
new.quit()
new.withdraw()
new.protocol("WM_DELETE_WINDOW", on_cancel)
if not exists:
new.title("New marker")
f = Frame(new)
f.pack(side=TOP, fill=X)
w = Label(f, text="ID: ")
w.pack(side=LEFT)
ep = Entry(f)
ep.insert(0, self.id_prefix)
ep.pack(side=LEFT)
w = Label(f, text=" %03d" % self.next_id)
w.pack(side=LEFT)
else:
new.title("Edit marker")
f = Frame(new)
f.pack(side=TOP, fill=X)
w = Label(f, text="ID: ")
w.pack(side=LEFT)
ep = Entry(f)
ep.insert(0, self.id_prefix)
ep.pack(side=LEFT)
w = Label(f, text=" %03d" % id)
w.pack(side=LEFT)
f = Frame(new)
f.pack(side=TOP, fill=X)
w = Label(f, text="Lat: %.8f" % lla | [0])
w.pack(side=LEFT)
f = Frame(new)
| f.pack(side=TOP, f |
chen0031/rekall | rekall-core/rekall/plugins/overlays/windows/windows.py | Python | gpl-2.0 | 5,249 | 0.000953 | # Rekall Memory Forensics
# Copyright (C) 2012 Michael Cohen <scudette@users.sourceforge.net>
# Copyright (c) 2008 Volatile Systems
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# pylint: disable=protected-access
from rekall import obj
from rekall.plugins.overlays.windows import common
from rekall.plugins.overlays.windows import heap
from rekall.plugins.overlays.windows import pe_vtypes
from rekall.plugins.overlays.windows import xp
from rekall.plugins.overlays.windows import vista
from rekall.plugins.overlays.windows import win7
from rekall.plugins.overlays.windows import win8
from rekall.plugins.overlays.windows import crashdump
from rekall.plugins.overlays.windows import undocumented
class Ntoskrnl(pe_vtypes.BasicPEProfile):
    """A profile for Windows."""

    @classmethod
    def GuessVersion(cls, profile):
        """Guess the windows version of a profile.

        Returns the version as a float (e.g. 6.1) and stores the
        version/major/minor metadata on the profile.
        """
        # If the version is provided, then just use it.
        try:
            major, minor = profile.metadatas("major", "minor")
            version = major + minor / 10.0
            profile.set_metadata("version", version)

            return version
        except TypeError:
            pass

        # Rekall is moving away from having features keyed by version, rather we
        # use the profile to dictate the algorithms to use. In future we will
        # remove all requirement to know the windows version, but for now we
        # just guess the version based on structures which are known to exist in
        # the profile.
        version = 5.2

        # Windows XP did not use a BalancedRoot for VADs.
        # (Identity comparisons with None per PEP 8 E711; get_obj_offset
        # returns None when the member is absent.)
        if profile.get_obj_offset("_MM_AVL_TABLE", "BalancedRoot") is None:
            version = 5.1

        # Windows 7 introduces TypeIndex into the object header.
        if profile.get_obj_offset("_OBJECT_HEADER", "TypeIndex") is not None:
            # Windows 10 introduces a cookie for object types.
            if profile.get_constant("ObHeaderCookie"):
                version = 10.0

            elif profile._EPROCESS().m(
                    "VadRoot.BalancedRoot").obj_type == "_MMADDRESS_NODE":
                version = 6.1

            elif profile._EPROCESS().m("VadRoot").obj_type == "_MM_AVL_TABLE":
                # Windows 8 uses _MM_AVL_NODE as the VAD traversor struct.
                version = 6.2

            elif profile._EPROCESS().m("VadRoot").obj_type == "_RTL_AVL_TREE":
                # Windows 8.1 and on uses _RTL_AVL_TREE
                version = 6.3

            else:
                raise RuntimeError("Unknown windows version")

        profile.set_metadata("version", version)
        major, minor = divmod(version, 1)
        profile.set_metadata("minor", int(minor * 10))
        profile.set_metadata("major", major)

        return version

    @classmethod
    def Initialize(cls, profile):
        """Install windows-specific overlays and version-specific support."""
        super(Ntoskrnl, cls).Initialize(profile)

        # Add undocumented types.
        profile.add_enums(**undocumented.ENUMS)
        if profile.metadata("arch") == "AMD64":
            profile.add_overlay(undocumented.AMD64)

        elif profile.metadata("arch") == "I386":
            profile.add_overlay(undocumented.I386)

            # Detect if this is a PAE system. PAE systems have 64 bit PTEs:
            if profile.get_obj_size("_MMPTE") == 8:
                profile.set_metadata("pae", True)

        # Install the base windows support.
        common.InitializeWindowsProfile(profile)
        crashdump.InstallKDDebuggerProfile(profile)
        heap.InitializeHeapProfile(profile)

        # Get the windows version of this profile.
        version = cls.GuessVersion(profile)
        if 6.2 <= version <= 10:
            win8.InitializeWindows8Profile(profile)

        elif version == 6.1:
            win7.InitializeWindows7Profile(profile)

        elif version == 6.0:
            vista.InitializeVistaProfile(profile)

        elif 5.1 <= version <= 5.2:
            xp.InitializeXPProfile(profile)

    def GetImageBase(self):
        """Return the kernel image base, lazily read from session state."""
        if not self.image_base:
            self.image_base = obj.Pointer.integer_to_address(
                self.session.GetParameter("kernel_base"))

        return self.image_base
class Ntkrnlmp(Ntoskrnl):
    """Alias for the windows kernel class (matches the ntkrnlmp image name)."""


class Ntkrnlpa(Ntoskrnl):
    """Alias for the windows kernel class (matches the ntkrnlpa image name)."""


class Ntkrpamp(Ntoskrnl):
    """Alias for the windows kernel class (matches the ntkrpamp image name)."""


class Nt(Ntoskrnl):
    """Alias for the windows kernel class."""
|
griffinfoster/pulsar-polarization-sims | scripts/plotLineValVsTint.py | Python | mit | 5,743 | 0.028034 | #!/usr/bin/env python
"""
"""
import os,sys
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import pylab as p
import cPickle as pkl
from scipy import interpolate
matplotlib.rc('xtick',labelsize=25)
matplotlib.rc('ytick',labelsize=25)
modeTitle=['Total Intensity','Invariant Interval','Matrix Template Matching']
fs=27 #fontsize
import numpy
def smooth(x, window_len=31, window='hanning'):
    """Smooth a 1-D signal by convolving it with a scaled window.

    The signal is extended with mirrored copies at both ends so the window
    can be applied right up to the edges; the returned array is therefore
    len(x) + window_len - 1 samples long, not len(x).

    Parameters:
        x: 1-D numpy array.
        window_len: size of the smoothing window; values < 3 return x as-is.
        window: one of 'flat' (moving average), 'hanning', 'hamming',
            'bartlett', 'blackman'.

    Raises:
        ValueError: for non-1-D input, input shorter than the window, or an
            unknown window name.
    """
    # raise X("msg") instead of the Python-2-only `raise X, "msg"` statements.
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        # Typo fix: "is on of" -> "is one of".
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Mirror the ends of the signal so the convolution is defined at the edges.
    s = numpy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of using eval().
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='valid')
    return y
if __name__ == "__main__":
    # Command-line driver (Python 2): select a subset of the reduction
    # dictionary and plot timing rms vs. fraction of the reference
    # integration time, one curve per polarization-leakage value.
    from optparse import OptionParser
    o = OptionParser()
    o.set_usage('%prog [options] [pklReduceDict.py DICT]')
    o.set_description(__doc__)
    o.add_option('-c','--cal',dest='calMode',default='cal',
        help='cal mode to use: cal or uncal, default: cal')
    o.add_option('-m','--mode',dest='mode',default='rms',
        help='Data mode: rms, chi2, sigma ; default: rms')
    o.add_option('-r','--rms', dest='rmsMode', default=0, type='int',
        help='Set RMS mode, 0: total intesity, 1: invariant interval, 2: matrix template matching. default: 0')
    o.add_option('-l','--leak',dest='leak', action='store_true',
        help='Plot in terms of polarization leakage instead of IXR')
    o.add_option('-s', '--savefig', dest='savefig', default=None,
        help='Save figure in a format based on name extension')
    o.add_option('-S','--show',dest='show', action='store_true',
        help='Show the plot')
    o.add_option('--dJ',dest='djval',default=5.0,type='float',
        help='Polarization calibration error value to use, default: 5.0')
    o.add_option('--info',dest='info',action='store_true',
        help='Print parameter information in the dictionary and exit')
    opts, args = o.parse_args(sys.argv[1:])

    print 'Loading PKL file'
    reduceDict=pkl.load(open(args[0]))
    normSNR=1000. #SNR to normalize other values against
    if opts.info:
        # --info: dump the unique parameter values found in the dict and exit
        snrs=[]
        deltaJs=[]
        ixrs=[]
        for key,val in reduceDict.iteritems():
            snrs.append(key[1])
            deltaJs.append(key[2]*100.)
            ixrs.append(10.*np.log10(1./(key[3]**2)))
        snrs=np.array(snrs)
        deltaJs=np.array(deltaJs)
        ixrs=np.array(ixrs)
        print 'SNR:', np.unique(snrs)
        print 'delta J (\%):',np.unique(deltaJs)
        print 'IXR (dB):', np.unique(ixrs)
        exit()

    dJVal=opts.djval

    ixrdbs=[]
    polLeakdbs=[]
    deltaJs=[]
    simVals=[]
    snrs=[]
    nobs=[]
    print 'Selecting subset'
    # dict keys look like (rmsMode, snr, deltaJ, polLeak, calMode)
    for key,val in reduceDict.iteritems():
        if key[0]==opts.rmsMode and key[4].startswith(opts.calMode) and key[2]*100.==dJVal: #RMS, CAL, dJ mode selection
            #data product slection
            #val keys: ['rms', 'chi2', 'avgSigma', 'obsMJD', 'nobs', 'expMJD', 'sigmas']
            deltaJ=key[2]*100.
            polLeakdb=10.*np.log10((key[3]**2))
            ixrdb=10.*np.log10(1./(key[3]**2))
            snrs.append(key[1])
            if np.isnan(val['rms']) or np.isnan(val['avgSigma']) or val['rms']<0.:
                print 'Warning: some selected values are NaN',key
                continue #skip any simulations which returned NaN
            ixrdbs.append(ixrdb)
            deltaJs.append(deltaJ)
            polLeakdbs.append(polLeakdb)
            if opts.mode.startswith('rms'): simVals.append(val['rms'])
            elif opts.mode.startswith('chi'): simVals.append(val['chi2'])
            elif opts.mode.startswith('sigma'): simVals.append(val['avgSigma'])
            nobs.append(val['nobs'])

    simVals=np.array(simVals)
    polLeakdbs=np.array(polLeakdbs)
    # express each SNR as a fraction of the reference integration time
    # (integration time scales with SNR^2)
    snrs=(np.array(snrs)/normSNR)**2.

    fig=p.figure()
    ax=fig.add_subplot(1,1,1)
    fig.set_size_inches(9.,5.)
    polLeakVals=np.unique(polLeakdbs)
    cNorm=np.min(polLeakVals)
    for pVal in polLeakVals:
        # one curve per leakage value, colored blue (low) to red (high)
        idx=np.argwhere(polLeakdbs==pVal)
        subSnrs=snrs[idx]
        subSimVals=simVals[idx]
        sIdx=np.argsort(subSnrs[:,0])
        rgb=(np.exp(pVal/10.),0.,1.-np.exp(pVal/10.))
        #slightly hardcoded plots and labels
        #if pVal > -0.0001: labelStr='%0.f dB'%(-1.*pVal) #-0 to 0
        if pVal > -0.1: continue
        elif (pVal > -5.) or (pVal < -16. and pVal > -30) or pVal < -31.: continue #skip these lines
        else: labelStr='%0.f dB'%(pVal)
        midPoint=int(subSnrs.shape[0]*.33)
        p.plot(subSnrs[sIdx,0],subSimVals[sIdx,0],color=rgb)
        p.text(subSnrs[sIdx,0][midPoint],0.8*subSimVals[sIdx,0][midPoint],labelStr,fontsize=18)

    #lines of constant time (5, 1, .5 us)
    p.hlines([5,1,.5],np.min(snrs),np.max(snrs),linestyles=['dashed','dashdot','dotted'])

    #slightly hardcoded plot limits
    #print np.min(snrs),np.max(snrs)
    #print np.min(simVals),np.max(simVals)
    #p.xlim(np.min(snrs),np.max(snrs))
    p.xlim(np.min(snrs),0.1)
    p.ylim(0.2,30)
    #p.ylim(np.min(simVals)-0.5.,100)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel(r'fraction of reference $\tau_{int}$', fontsize=23)
    ax.set_ylabel('rms $(\mu s)$', fontsize=23)
    if opts.savefig is None: p.show()
    else: p.savefig(opts.savefig, bbox_inches='tight')
|
jolyonb/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_semantics.py | Python | agpl-3.0 | 19,838 | 0.003982 | """
Tests of modulestore semantics: How do the interfaces methods of ModuleStore relate to each other?
"""
from __future__ import absolute_import
import itertools
from collections import namedtuple
import ddt
from mock import patch
from xblock.core import XBlock, XBlockAside
from xblock.fields import Scope, String
from xblock.runtime import DictKeyValueStore, KvsFieldData
from xblock.test.tools import TestRuntime
from xmodule.course_module import CourseSummary
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.utils import SPLIT_MODULESTORE_SETUP, MongoModulestoreBuilder, PureModulestoreTestCase
DETACHED_BLOCK_TYPES = dict(XBlock.load_tagged_classes('detached'))
# These tests won't work with courses, since they're creating blocks inside courses
TESTABLE_BLOCK_TYPES = set(DIRECT_ONLY_CATEGORIES)
TESTABLE_BLOCK_TYPES.discard('course')
TestField = namedtuple('TestField', ['field_name', 'initial', 'updated'])
class AsideTest(XBlockAside):
    """
    Test xblock aside class
    """
    # Content-scoped field used to verify aside data persists with its block.
    content = String(default="content", scope=Scope.content)
@ddt.ddt
class DirectOnlyCategorySemantics(PureModulestoreTestCase):
"""
Verify the behavior of Direct Only items
blocks intended to store snippets of course content.
"""
__test__ = False
DATA_FIELDS = {
'about': TestField('data', '<div>test data</div>', '<div>different test data</div>'),
'chapter': TestField('is_entrance_exam', True, False),
'sequential': TestField('is_entrance_exam', True, False),
'static_tab': TestField('data', '<div>test data</div>', '<div>different test data</div>'),
'course_info': TestField('data', '<div>test data</div>', '<div>different test data</div>'),
}
ASIDE_DATA_FIELD = TestField('content', '<div>aside test data</div>', '<div>aside different test data</div>')
    def setUp(self):
        """Create a fresh course in the modulestore under test for each test case."""
        super(DirectOnlyCategorySemantics, self).setUp()
        self.course = CourseFactory.create(
            org='test_org',
            number='999',
            run='test_run',
            display_name='My Test Course',
            modulestore=self.store
        )
def assertBlockDoesntExist(self, block_usage_key, draft=None):
"""
Verify that loading ``block_usage_key`` raises an ItemNotFoundError.
Arguments:
block_usage_key: The xblock to check.
draft (optional): If omitted, verify both published and draft branches.
If True, verify only the draft branch. If False, verify only the
published branch.
"""
if draft is None or draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
with self.assertRaises(ItemNotFoundError):
self.store.get_item(block_usage_key)
if draft is None or not draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
with self.assertRaises(ItemNotFoundError):
self.store.get_item(block_usage_key)
def assertBlockHasContent(self, block_usage_key, field_name, content,
aside_field_name=None, aside_content=None, draft=None):
"""
Assert that the block ``block_usage_key`` has the value ``content`` for ``field_name``
when it is loaded.
Arguments:
block_usage_key: The xblock to check.
field_name (string): The name of the field to check.
content: The value to assert is in the field.
aside_field_name (string): The name of the field to check (in connected xblock aside)
aside_content: The value to assert is in the xblock aside field.
draft (optional): If omitted, verify both published and draft branches.
If True, verify only the draft branch. If False, verify only the
published branch.
"""
if draft is None or not draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
target_block = self.store.get_item(
block_usage_key,
)
self.assertEquals(content, target_block.fields[field_name].read_from(target_block))
if aside_field_name and aside_content:
aside = self._get_aside(target_block)
self.assertIsNotNone(aside)
self.assertEquals(aside_content, aside.fields[aside_field_name].read_from(aside))
if draft is None or draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
target_block = self.store.get_item(
block_usage_key,
)
self.assertEquals(content, target_block.fields[field_name].read_from(target_block))
if aside_field_name and aside_content:
aside = self._get_aside(target_block)
self.assertIsNotNone(aside)
self.assertEquals(aside_content, aside.fields[aside_field_name].read_from(aside))
def assertParentOf(self, parent_usage_key, child_usage_key, draft=None):
"""
Assert that the block ``parent_usage_key`` has ``child_usage_key`` listed
as one of its ``.children``.
Arguments:
parent_usage_key: The xblock to check as a parent.
child_usage_key: The xblock to check as a child.
draft (optional): If omitted, verify both published and draft branches.
If True, verify only the draft branch. If False, verify only the
published branch.
"""
if draft is None or not draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
parent_block = self.store.get_item(
parent_usage_key,
)
self.assertIn(child_usage_key, parent_block.children)
if draft is None or draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
parent_block = self.store.get_item(
parent_usage_key,
)
self.assertIn(child_usage_key, parent_block.children)
def assertNotParentOf(self, parent_usage_key, child_usage_key, draft=None):
"""
Assert that the block ``parent_usage_key`` does not have ``child_usage_key`` listed
as one of its ``.children``.
| Arguments:
parent_usage_key: The xblock to check as a parent.
child_usage_key: The xblock to check as a child.
draft (optional): If omitted, verify both published and draft branches.
If True, verify only the draft branch. If | False, verify only the
published branch.
"""
if draft is None or not draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
parent_block = self.store.get_item(
parent_usage_key,
)
self.assertNotIn(child_usage_key, parent_block.children)
if draft is None or draft:
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
parent_block = self.store.get_item(
parent_usage_key,
)
self.assertNotIn(child_usage_key, parent_block.children)
def assertCoursePointsToBlock(self, block_usage_key, draft=None):
"""
Assert that the context course for the test has ``block_usage_key`` listed
as one of its ``.children``.
Arguments:
block_usage_key: The xblock to check.
draft (optional): If omitted, verify both published and draft branches.
If True, verify only the draft branch. If False, verify only the
published branch.
"""
self.assertParentOf( |
rogerhu/django | django/db/models/aggregates.py | Python | bsd-3-clause | 2,701 | 0.00037 | """
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
def refs_aggregate(lookup_parts, aggregates):
    """
    A little helper method to check if the lookup_parts contains references
    to the given aggregates set. Because the LOOKUP_SEP is contained in the
    default annotation names we must check each prefix of the lookup_parts
    for match.
    """
    prefixes = (
        LOOKUP_SEP.join(lookup_parts[:end])
        for end in range(len(lookup_parts) + 1)
    )
    return any(prefix in aggregates for prefix in prefixes)
class Aggregate(object):
    """
    Default Aggregate definition.
    """
    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
          aggregate definition.

        Also utilizes the class variables:
        * name, the identifier for this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup "price" with name "Sum" -> "price__sum"
        return '%s__%s' % (self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query.

        This method is used to convert the generic Aggregate definition into a
        backend-specific definition.

        * query is the backend-specific query instance to which the aggregate
          is to be added.
        * col is a column reference describing the subject field of the
          aggregate. It can be an alias, or a tuple describing a table and
          column name.
        * source is the underlying field or aggregate definition for the
          column reference. If the aggregate is not an ordinal or computed
          type, this reference is used to determine the coerced output type
          of the aggregate.
        * is_summary is a boolean that is set True if the aggregate is a
          summary value rather than an annotation.
        """
        backend_class = getattr(query.aggregates_module, self.name)
        query.aggregates[alias] = backend_class(
            col, source=source, is_summary=is_summary, **self.extra)
class Avg(Aggregate):
    """SQL AVG() aggregate."""
    name = 'Avg'
class Count(Aggregate):
    """SQL COUNT() aggregate."""
    name = 'Count'
class Max(Aggregate):
    """SQL MAX() aggregate."""
    name = 'Max'
class Min(Aggregate):
    """SQL MIN() aggregate."""
    name = 'Min'
class StdDev(Aggregate):
    """Standard-deviation aggregate."""
    name = 'StdDev'
class Sum(Aggregate):
    """SQL SUM() aggregate."""
    name = 'Sum'
class Variance(Aggregate):
    """Variance aggregate."""
    name = 'Variance'
|
iakovleva/allauth-login-page | account/urls.py | Python | mit | 760 | 0 | import importlib
from django.conf.urls import url, include
import allauth.socialaccount.providers as allauth_providers
from . import views as accounts_views
def get_provider_urls(provider):
    """
    Return the ``urlpatterns`` exported by an allauth provider's ``urls``
    module, or an empty list when the provider ships no URL module.
    """
    try:
        urls = importlib.import_module(provider.package + '.urls')
    except ImportError:
        # Provider has no urls module: nothing to mount.
        return []
    else:
        return getattr(urls, 'urlpatterns', [])
# Site-level auth URLs plus whatever URL patterns each registered allauth
# provider exports.
urlpatterns = [
    url(r'^login/$', accounts_views.login, name='accounts_login'),
    url(r'^logout/$', accounts_views.logout, name='accounts_logout'),
    url(r'^settings/$', accounts_views.settings, name='accounts_settings'),
    url(r'', include('allauth.socialaccount.urls')),
]
for provider in allauth_providers.registry.get_list():
    urlpatterns += get_provider_urls(provider)
|
usc-isi-i2/WEDC | spark_dependencies/python_lib/nose2/plugins/failfast.py | Python | apache-2.0 | 615 | 0 | """
Stop the test run after the first error or failure.
This plugin implements :func:`testOutcome` and sets
``event.result.shouldStop`` if it sees an outcome with exc_info that
is not expected.
"""
from nose2 import events
__unittest = True
class FailFast(events.Plugin):
    """Stop the test run after error or failure"""
    # -F / --fail-fast on the command line enables this plugin.
    commandLineSwitch = (
        'F', 'fail-fast', 'Stop the test run after the first error or failure')

    def testOutcome(self, event):
        """Stop on unexpected error or failure"""
        # An outcome carrying exc_info that was not an expected failure means
        # the whole run should halt.
        if event.exc_info and not event.expected:
            event.result.shouldStop = True
|
LuoZijun/solidity-sc2-replay-reader | bzip2.py | Python | gpl-3.0 | 17,390 | 0.009383 | #!/usr/bin/env python
# coding: utf-8
from __future__ import division
import sys, os, time, platform, math
from struct import unpack, pack
"""
生成测试数据:
rm test.txt test.txt.bz2
echo 'hello, world(世界)!!!!' > test.txt
bzip2 -z test.txt
解压数据:
python bzip2.py
参考资料:
https://en.wikipedia.org/wiki/Bzip2
注:
bzip2 并没有 RFC 文档,
所以你只有参考维基百科上面的 简洁介绍 以及 其它语言的现成代码来参考 。。。
"""
class BufferReader(object):
    """Thin struct-unpacking wrapper over a binary file-like object.

    ``endian`` is a struct byte-order prefix ("<", ">", "!", ...); every
    read_* method reads ``length`` values and returns the unpacked tuple.
    """
    def __init__(self, file, endian="<"):
        # Anything with a .read() method is acceptable (file, StringIO, ...).
        assert(hasattr(file, 'read'))
        self.file = file
        self.endian = endian
    def read_u8(self, length=1):
        # unsigned char
        return unpack(self.endian + "%dB" % length, self.file.read(1*length))
    def read_u16(self, length=1):
        # unsigned short
        return unpack(self.endian + "%dH" % length, self.file.read(2*length))
    def read_u32(self, length=1):
        # unsigned int
        return unpack(self.endian + "%dI" % length, self.file.read(4*length))
    def read_usize(self, length=1):
        # unsigned long -- word size depends on the host architecture
        if platform.architecture()[0] == '64bit':
            words = 8
        elif platform.architecture()[0] == '32bit':
            words = 4
        elif platform.architecture()[0] == '16bit':
            words = 2
        else:
            raise ValueError('Ooops...')
        return unpack(self.endian + "%dL" % length, self.file.read(words*length))
    def read_u64(self, length=1):
        # unsigned long long
        return unpack(self.endian + "%dQ" % length, self.file.read(8*length))
    def read_i8(self, length=1):
        # signed char
        return unpack(self.endian + "%db" % length, self.file.read(1*length))
    def read_i16(self, length=1):
        # short
        return unpack(self.endian + "%dh" % length, self.file.read(2*length))
    def read_i32(self, length=1):
        # int
        return unpack(self.endian + "%di" % length, self.file.read(4*length))
    def read_isize(self, length=1):
        # long -- word size depends on the host architecture
        if platform.architecture()[0] == '64bit':
            words = 8
        elif platform.architecture()[0] == '32bit':
            words = 4
        elif platform.architecture()[0] == '16bit':
            words = 2
        else:
            raise ValueError('Ooops...')
        return unpack(self.endian + "%dl" % length, self.file.read(words*length))
    def read_i64(self, length=1):
        # long long
        return unpack(self.endian + "%dq" % length, self.file.read(8*length))
    def read_f32(self, length=1):
        # float
        return unpack(self.endian + "%df" % length, self.file.read(4*length))
    def read_f64(self, length=1):
        # double
        return unpack(self.endian + "%dd" % length, self.file.read(8*length))
    def read_bit(self, length=8):
        """Read length//8 bytes and return their bits as a tuple of 0/1.

        Bit ordering depends on ``self.endian``; big-endian ("<"/">"/"!")
        reverses per byte and/or over the whole run.  NOTE(review): the
        interaction of the two reversals is subtle -- verify against a known
        bitstream before relying on a specific bit order.
        """
        assert(length%8 == 0)
        base = 2
        _bytes = self.read_byte(length=length//8)
        bits = []
        for n in _bytes:
            # Decompose each byte into its binary digits, LSB first.
            _bits = []
            while n != 0:
                m = n % base
                n = n // base
                _bits.append(m)
            # Pad to a full 8 bits.
            for n in range(8-len(_bits)):
                _bits.append(0)
            if self.endian == '>' or self.endian == '!':
                _bits.reverse()
            bits.extend(_bits)
        if self.endian == '<':
            bits.reverse()
        # while bits[0] == 0:
        # bits = bits[1:]
        return tuple(bits)
    def read_byte(self, length=1):
        # Alias for read_u8.
        return self.read_u8(length=length)
    def read_string(self, length):
        return str(self.file.read(length))
    def seek(self, pos):
        return self.file.seek(pos)
class HuffmanLength:
    """One (code, bit-length) entry of a Huffman table.

    ``symbol`` and ``reverse_symbol`` are filled in later by
    HuffmanTable.populate_huffman_symbols().
    NOTE(review): Python 2 only -- backquote repr, __cmp__ and cmp() do not
    exist in Python 3; __repr__ also reads reverse_symbol, which is unset
    until populate_huffman_symbols() runs.
    """
    def __init__(self, code, bits = 0):
        self.code = code
        self.bits = bits
        self.symbol = None
    def __repr__(self):
        return `(self.code, self.bits, self.symbol, self.reverse_symbol)`
    def __cmp__(self, other):
        # Sort by bit length first, then by code value (canonical order).
        if self.bits == other.bits:
            return cmp(self.code, other.code)
        else:
            return cmp(self.bits, other.bits)
def reverse_bits(v, n):
    """Return the low ``n`` bits of ``v`` in reversed order."""
    result = 0
    for src in range(n):
        if (v >> src) & 1:
            result |= 1 << (n - 1 - src)
    return result
def reverse_bytes(v, n):
    """Return the low ``n`` bits of ``v`` with byte order reversed.

    ``n`` must be a multiple of 8.
    """
    result = 0
    for shift in range(0, n, 8):
        # The byte extracted first (LSB) ends up most significant.
        result = (result << 8) | ((v >> shift) & 0xFF)
    return result
class HuffmanTable:
def __init__(self, bootstrap):
l = []
start, bits = bootstrap[0]
for finish, endbits in bootstrap[1:]:
if bits:
for code in range(start, finish):
l.append(HuffmanLength(code, bits))
start, bits = finish, endbits
if endbits == -1:
break
l.sort()
self.table = l
def populate_huffman_symbols(self):
bits, symbol = -1, -1
for x in self.table:
symbol += 1
if x.bits != bits:
symbol <<= (x.bits - bits)
bits = x.bits
x.symbol = symbol
x.reverse_symbol = reverse_bits(symbol, bits)
#print printbits(x.symbol, bits), printbits(x.reverse_symbol, bits)
def ta | bles_by_bits(self):
d = {}
for x in self.table:
try:
d[x.bits].append(x)
except:
d[x.bits] = [x]
pass
def min_max_bits(self):
self.min_bits, self.max_bits = 16, -1
fo | r x in self.table:
if x.bits < self.min_bits: self.min_bits = x.bits
if x.bits > self.max_bits: self.max_bits = x.bits
def _find_symbol(self, bits, symbol, table):
for h in table:
if h.bits == bits and h.reverse_symbol == symbol:
#print "found, processing", h.code
return h.code
return -1
def find_next_symbol(self, field, reversed = True):
cached_length = -1
cached = None
for x in self.table:
if cached_length != x.bits:
cached = field.snoopbits(x.bits)
cached_length = x.bits
if (reversed and x.reverse_symbol == cached) or (not reversed and x.symbol == cached):
field.readbits(x.bits)
print "found symbol", hex(cached), "of len", cached_length, "mapping to", hex(x.code)
return x.code
raise "unfound symbol, even after end of table @ " + `field.tell()`
for bits in range(self.min_bits, self.max_bits + 1):
#print printbits(field.snoopbits(bits),bits)
r = self._find_symbol(bits, field.snoopbits(bits), self.table)
if 0 <= r:
field.readbits(bits)
return r
elif bits == self.max_bits:
raise "unfound symbol, even after max_bits"
class OrderedHuffmanTable(HuffmanTable):
    """HuffmanTable built from a flat list of per-code bit lengths (DEFLATE style)."""
    def __init__(self, lengths):
        # Pair each code index with its bit length and append an (l, -1)
        # terminator span.  NOTE(review): Python 2 only -- relies on
        # map(None, ...) zipping and list-returning map().
        l = len(lengths)
        z = map(None, range(l), lengths) + [(l, -1)]
        print "lengths to spans:", z
        HuffmanTable.__init__(self, z)
def code_length_orders(i):
    """Order in which DEFLATE transmits its code-length-code lengths."""
    transmission_order = (16, 17, 18, 0, 8, 7, 9, 6, 10, 5,
                          11, 4, 12, 3, 13, 2, 14, 1, 15)
    return transmission_order[i]
def distance_base(i):
    """Base match distance for DEFLATE distance code ``i``."""
    bases = (1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
             257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193,
             12289, 16385, 24577)
    return bases[i]
def length_base(i):
    """Base match length for DEFLATE length code ``i`` (codes 257..285)."""
    bases = (3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35,
             43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258)
    return bases[i - 257]
def extra_distance_bits(n):
    """Number of extra bits following DEFLATE distance code ``n`` (0..29).

    Raises:
        ValueError: if ``n`` is outside the legal distance-code range.
    """
    if 0 <= n <= 1:
        return 0
    elif 2 <= n <= 29:
        return (n >> 1) - 1
    else:
        # Raising a bare string is a Python 2 relic (and a TypeError in
        # Python 3); raise a real exception instead.
        raise ValueError("illegal distance code")
def extra_length_bits(n):
    """Number of extra bits following DEFLATE length code ``n`` (257..285).

    Raises:
        ValueError: if ``n`` is outside the legal length-code range.
    """
    if 257 <= n <= 260 or n == 285:
        return 0
    elif 261 <= n <= 284:
        return ((n-257) >> 2) - 1
    else:
        # Raising a bare string is a Python 2 relic (and a TypeError in
        # Python 3); raise a real exception instead.
        raise ValueError("illegal length code")
def move_to_front(l, c):
    """Splice the element at index ``c`` to the front of list ``l``, in place."""
    chosen = l[c:c + 1]
    preceding = l[0:c]
    following = l[c + 1:]
    l[:] = chosen + preceding + following
def bwt_transform(L):
# Semi-inefficient way to get the character counts
F = ''.join(sorted(L))
base = map(F.find,map(chr,range(256)))
pointers = [-1] * len(L)
for symbol, i in map(None, map(ord,L), xrange(len(L))):
pointers[base[symbol]] = i
base[symbol |
cg31/tensorflow | tensorflow/tensorboard/scripts/generate_testdata.py | Python | apache-2.0 | 7,184 | 0.012945 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate some standard test data for debugging TensorBoard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import math
import os
import os.path
import random
import shutil
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Command-line flags: where to write the data, and whether an existing
# target may be removed first.
tf.flags.DEFINE_string("target", None, """The directoy where serialized data
will be written""")
tf.flags.DEFINE_boolean("overwrite", False, """Whether to remove and overwrite
TARGET if it already exists.""")
FLAGS = tf.flags.FLAGS
# Hardcode a start time and reseed so script always generates the same data.
_start_time = 0
random.seed(0)
def _MakeHistogramBuckets():
v = 1E-12
buckets = []
neg_buckets = []
while v < 1E20:
buckets.append(v)
neg_buckets.append(-v)
v *= 1.1
# Should include DBL_MAX, but won't bother for test data.
return neg_buckets[::-1] + [0] + buckets
def _MakeHistogram(values):
  """Convert values into a histogram proto using logic from histogram.cc."""
  limits = _MakeHistogramBuckets()
  counts = [0] * len(limits)
  # Count how many values fall into each bucket.
  for v in values:
    idx = bisect.bisect_left(limits, v)
    counts[idx] += 1
  # Keep only non-empty buckets, as (upper_limit, count) pairs.
  limit_counts = [(limits[i], counts[i]) for i in xrange(len(limits))
                  if counts[i]]
  bucket_limit = [lc[0] for lc in limit_counts]
  bucket = [lc[1] for lc in limit_counts]
  sum_sq = sum(v * v for v in values)
  # Note: min()/max() below raise on an empty ``values`` sequence.
  return tf.HistogramProto(min=min(values),
                           max=max(values),
                           num=len(values),
                           sum=sum(values),
                           sum_squares=sum_sq,
                           bucket_limit=bucket_limit,
                           bucket=bucket)
def WriteScalarSeries(writer, tag, f, n=5):
  """Write a series of scalar events to writer, using f to create values.

  Args:
    writer: SummaryWriter-like object the events are added to.
    tag: Summary tag shared by every value in the series.
    f: Callable mapping the step index to the scalar value.
    n: Number of events to emit.
  """
  step = 0
  wall_time = _start_time
  for i in xrange(n):
    v = f(i)
    value = tf.Summary.Value(tag=tag, simple_value=v)
    summary = tf.Summary(value=[value])
    event = tf.Event(wall_time=wall_time, step=step, summary=summary)
    writer.add_event(event)
    step += 1
    wall_time += 10
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):
  """Write a sequence of normally distributed histograms to writer."""
  step = 0
  wall_time = _start_time
  for mean, stddev in mu_sigma_tuples:
    samples = [random.normalvariate(mean, stddev) for _ in xrange(n)]
    histogram = _MakeHistogram(samples)
    histo_value = tf.Summary.Value(tag=tag, histo=histogram)
    summary = tf.Summary(value=[histo_value])
    writer.add_event(tf.Event(wall_time=wall_time, step=step, summary=summary))
    step += 10
    wall_time += 100
def WriteImageSeries(writer, tag, n_images=1):
  """Write a few dummy images to writer."""
  step = 0
  session = tf.Session()
  # Graph: a 1x4x4x3 uint8 placeholder fed with random pixels each step.
  p = tf.placeholder("uint8", (1, 4, 4, 3))
  s = tf.image_summary(tag, p)
  for _ in xrange(n_images):
    im = np.random.random_integers(0, 255, (1, 4, 4, 3))
    summ = session.run(s, feed_dict={p: im})
    writer.add_summary(summ, step)
    step += 20
  session.close()
def WriteAudioSeries(writer, tag, n_audio=1):
  """Write a few dummy audio clips to writer."""
  step = 0
  session = tf.Session()
  # Sine-wave parameters: one clip per run, stereo, half a second at 4 kHz.
  min_frequency_hz = 440
  max_frequency_hz = 880
  sample_rate = 4000
  duration_frames = sample_rate * 0.5  # 0.5 seconds.
  frequencies_per_run = 1
  num_channels = 2
  p = tf.placeholder("float32", (frequencies_per_run, duration_frames,
                                 num_channels))
  s = tf.audio_summary(tag, p, sample_rate)
  for _ in xrange(n_audio):
    # Generate a different frequency for each channel to show stereo works.
    frequencies = np.random.random_integers(
        min_frequency_hz, max_frequency_hz,
        size=(frequencies_per_run, num_channels))
    tiled_frequencies = np.tile(frequencies, (1, duration_frames))
    tiled_increments = np.tile(
        np.arange(0, duration_frames), (num_channels, 1)).T.reshape(
            1, duration_frames * num_channels)
    # Per-sample sine values for each channel's frequency.
    tones = np.sin(2.0 * np.pi * tiled_frequencies * tiled_increments /
                   sample_rate)
    tones = tones.reshape(frequencies_per_run, duration_frames, num_channels)
    summ = session.run(s, feed_dict={p: tones})
    writer.add_summary(summ, step)
    step += 20
  session.close()
def GenerateTestData(path):
  """Generates the test data directory."""
  # run1: scalars, histograms, two image tags and one audio tag.
  run1_path = os.path.join(path, "run1")
  os.makedirs(run1_path)
  writer1 = tf.train.SummaryWriter(run1_path)
  WriteScalarSeries(writer1, "foo/square", lambda x: x * x)
  WriteScalarSeries(writer1, "bar/square", lambda x: x * x)
  WriteScalarSeries(writer1, "foo/sin", math.sin)
  WriteScalarSeries(writer1, "foo/cos", math.cos)
  WriteHistogramSeries(writer1, "histo1", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1],
                                           [1, 1]])
  WriteImageSeries(writer1, "im1")
  WriteImageSeries(writer1, "im2")
  WriteAudioSeries(writer1, "au1")

  # run2: overlapping tags with different values, plus run2-only tags.
  run2_path = os.path.join(path, "run2")
  os.makedirs(run2_path)
  writer2 = tf.train.SummaryWriter(run2_path)
  WriteScalarSeries(writer2, "foo/square", lambda x: x * x * 2)
  WriteScalarSeries(writer2, "bar/square", lambda x: x * x * 3)
  WriteScalarSeries(writer2, "foo/cos", lambda x: math.cos(x) * 2)
  WriteHistogramSeries(writer2, "histo1", [[0, 2], [0.3, 2], [0.5, 2], [0.7, 2],
                                           [1, 2]])
  WriteHistogramSeries(writer2, "histo2", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1],
                                           [1, 1]])
  WriteImageSeries(writer2, "im1")
  WriteAudioSeries(writer2, "au2")

  # Two small graphs so each run has a GraphDef (run2's adds node "c").
  graph_def = tf.GraphDef()
  node1 = graph_def.node.add()
  node1.name = "a"
  node1.op = "matmul"
  node2 = graph_def.node.add()
  node2.name = "b"
  node2.op = "matmul"
  node2.input.extend(["a:0"])
  writer1.add_graph(graph_def)
  node3 = graph_def.node.add()
  node3.name = "c"
  node3.op = "matmul"
  node3.input.extend(["a:0", "b:0"])
  writer2.add_graph(graph_def)
  writer1.close()
  writer2.close()
def main(unused_argv=None):
  """Validate flags, clear any existing target, and generate the test data."""
  target = FLAGS.target
  if not target:
    print("The --target flag is required.")
    return -1
  target_exists = os.path.exists(target)
  if target_exists and not FLAGS.overwrite:
    print("Refusing to overwrite target %s without --overwrite" % target)
    return -2
  if target_exists:
    if os.path.isdir(target):
      shutil.rmtree(target)
    else:
      os.remove(target)
  GenerateTestData(target)
# Run via tf.app so flags are parsed before main() executes.
if __name__ == "__main__":
  tf.app.run()
|
timthelion/FreeCAD | src/Mod/Path/PathScripts/PathFromShape.py | Python | lgpl-2.1 | 4,466 | 0.015674 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Dan Falck <ddfalck@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
''' Used to make GCode from FreeCAD shapes - Wires and Edges/Curves '''
import FreeCAD,FreeCADGui,Path,PathGui
from PathScripts import PathProject
from PySide import QtCore,QtGui
# Qt translation handling: fall back to the 3-argument translate() when the
# QApplication build has no UnicodeUTF8 encoding attribute.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def translate(context, text, disambig=None):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def translate(context, text, disambig=None):
        return QtGui.QApplication.translate(context, text, disambig)
#TODO make the shape parametric
class FromShape:
    """Document-object proxy holding the base Shape a Path is generated from."""
    def __init__(self,obj):
        obj.addProperty("App::PropertyLink","Base","Shape","The base Shape of this toolpath")
        obj.Proxy = self
    def __getstate__(self):
        # Nothing to serialize beyond the document properties.
        return None
    def __setstate__(self,state):
        return None
    def execute(self,obj):
        # Recompute hook -- intentionally a no-op.
        pass
class _ViewProviderFromShape:
    """View provider supplying the tree icon for FromShape objects."""
    def __init__(self,vobj): #mandatory
        vobj.Proxy = self
    def attach(self, vobj):
        # Keep a reference to the underlying document object.
        self.Object = vobj.Object
    def __getstate__(self): #mandatory
        return None
    def __setstate__(self,state): #mandatory
        return None
    def getIcon(self): #optional
        return ":/icons/Path-Shape.svg"
class CommandFromShape:
    """GUI command creating a Path::FeatureShape from the selected Part object."""
    def GetResources(self):
        return {'Pixmap' : 'Path-Shape',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_FromShape","Path from a Shape"),
                'Accel': "P, S",
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_FromShape","Creates a Path from a wire/curve")}

    def IsActive(self):
        # Only available while a document is open ("is not None" idiom).
        return FreeCAD.ActiveDocument is not None

    def Activated(self):
        # check that the selection contains exactly what we want: exactly one
        # Part-based object.  Both failure modes share one message, so the two
        # original duplicated guards are merged into a single short-circuiting
        # condition.
        selection = FreeCADGui.Selection.getSelection()
        if len(selection) != 1 or not selection[0].isDerivedFrom("Part::Feature"):
            FreeCAD.Console.PrintError(translate("Path_FromShape","Please select exactly one Part-based object\n"))
            return
        FreeCAD.ActiveDocument.openTransaction(translate("Path_FromShape","Create path from shape"))
        FreeCADGui.addModule("PathScripts.PathUtils")
        FreeCADGui.doCommand("obj = FreeCAD.activeDocument().addObject('Path::FeatureShape','PathShape')")
        FreeCADGui.doCommand("obj.Shape = FreeCAD.activeDocument()."+selection[0].Name+".Shape")
        FreeCADGui.doCommand('PathScripts.PathUtils.addToProject(obj)')
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCAD.ActiveDocument.recompute()
# Only register the command when running with the GUI (not in console mode).
if FreeCAD.GuiUp:
    # register the FreeCAD command
    FreeCADGui.addCommand('Path_FromShape',CommandFromShape())
|
zang-cloud/zang-python | docs/examples/sip_domains_example.py | Python | mit | 2,859 | 0 | from zang.exceptions.zang_exception import ZangException
from zang.configuration.configuration import Configuration
from zang.connectors.connector_factory import ConnectorFactory
from zang.domain.enums.http_method import HttpMethod
from docs.examples.credetnials import sid, authToken
# Smoke-test walkthrough of every SIP-domain endpoint; each call prints the
# result (or attribute of it) on success, or the ZangException it raised.
url = 'https://api.zang.io/v2'
configuration = Configuration(sid, authToken, url=url)
sipDomainsConnector = ConnectorFactory(configuration).sipDomainsConnector
# view domain
try:
    domain = sipDomainsConnector.viewDomain('TestDomainSid')
    print(domain)
except ZangException as ze:
    print(ze)
# list domains
try:
    domains = sipDomainsConnector.listDomains()
    print(domains.total)
except ZangException as ze:
    print(ze)
# create domain
try:
    domain = sipDomainsConnector.createDomain(
        domainName='mydomain.com',
        friendlyName='MyDomain',
        voiceUrl='VoiceUrl',
        voiceMethod=HttpMethod.POST,
        voiceFallbackUrl='VoiceFallbackUrl',
        voiceFallbackMethod=HttpMethod.GET)
    print(domain.sid)
except ZangException as ze:
    print(ze)
# update domain
try:
    domain = sipDomainsConnector.updateDomain(
        'TestDomainSid',
        friendlyName='MyDomain3',
        voiceUrl='VoiceUrl2',
        voiceMethod=HttpMethod.POST,)
    print(domain.voiceUrl)
except ZangException as ze:
    print(ze)
# delete domain
try:
    domain = sipDomainsConnector.deleteDomain('TestDomainSid')
    print(domain.sid)
except ZangException as ze:
    print(ze)
# list mapped credentials lists
try:
    credentialsLists = sipDomainsConnector.listMappedCredentialsLists(
        'TestDomainSid')
    print(credentialsLists.total)
except ZangException as ze:
    print(ze)
# map credentials list
try:
    credentialsList = sipDomainsConnector.mapCredentialsLists(
        'TestDomainSid', 'TestCredentialsListSid')
    print(credentialsList.credentialsCount)
except ZangException as ze:
    print(ze)
# delete mapped credentials list
try:
    credentialsList = sipDomainsConnector.deleteMappedCredentialsList(
        'TestDomainSid', 'TestCredentialsListSid')
    print(credentialsList.friendlyName)
except ZangException as ze:
    print(ze)
# list mapped ip access control lists
try:
    aclLists = sipDomainsConnector.listMappedIpAccessControlLists(
        'TestDomainSid')
    print(aclLists.total)
except ZangException as ze:
    print(ze)
# map ip access control list
try:
    aclList = sipDomainsConnector.mapIpAccessControlList(
        'TestDomainSid', 'TestIpAccessControlListSid')
    print(aclList.credentialsCount)
except ZangException as ze:
    print(ze)
# delete mapped ip access control list
try:
    aclList = sipDomainsConnector.deleteMappedIpAccessControlList(
        'TestDomainSid', 'TestIpAccessControlListSid')
    print(aclList.friendlyName)
except ZangException as ze:
    print(ze)
|
drvinceknight/gt | test_main.py | Python | mit | 1,347 | 0.005197 | """
Tests for main.py
"""
import pathlib
import main
def test_get_id():
    """A chapter notebook's id is its numeric filename prefix."""
    chapter = pathlib.Path("./nbs/chapters/00-Introduction-to-the-course.ipynb")
    assert main.get_id(chapter) == "00"
def test_get_id_with_no_id():
    """A notebook without a numeric prefix gets a lowercased name id."""
    other = pathlib.Path("./nbs/other/Assessment.ipynb")
    assert main.get_id(other) == "assessment"
def test_get_name():
    """The display name drops the numeric prefix and replaces hyphens."""
    chapter = pathlib.Path("./nbs/chapters/00-Introduction-to-the-course.ipynb")
    assert main.get_name(chapter) == "Introduction to the course"
def test_get_with_no_id():
    """The display name of an unprefixed notebook is just its stem."""
    path = pathlib.Path("./nbs/other/Assessment.ipynb")
    assert main.get_name(path) == "Assessment"
def test_convert_html():
    """convert_html returns a 2-tuple whose first element is the HTML string."""
    html_output = main.convert_html(pathlib.Path("./nbs/other/Assessment.ipynb"))
    assert type(html_output) is tuple
    assert len(html_output) == 2
    assert type(html_output[0]) is str
def test_render_template():
    """Rendered content pages embed the notebook html, the site root and the id."""
    path = pathlib.Path("./nbs/other/Assessment.ipynb")
    path_id = main.get_id(path)
    nb, _ = main.convert_html(path)
    nb = nb.replace("{{root}}", main.ROOT)
    html = main.render_template("content.html", {"nb": nb,
                                                 "root": main.ROOT,
                                                 "id": path_id,})
    assert type(html) is str
    assert main.ROOT in html
    assert path_id in html
    assert nb in html
njpatel/avant-window-navigator | plugins/Rhythmbox/artdisplay-awn/AmazonCoverArtSearch.py | Python | gpl-2.0 | 7,998 | 0.038139 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2006 - Gareth Murphy, Martin Szulecki
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
from xml.dom import minidom
import re
import locale
import urllib
import rhythmdb
from Loader import Loader
# Amazon Web Services developer token sent with every ECS request.
LICENSE_KEY = "18C3VZN9HCECM5G3HQG2"
# Locale used when the system locale is not in the supported table.
DEFAULT_LOCALE = "en_US"
# Amazon associate tag credited on the queries.
ASSOCIATE = "webservices-20"
# Anonymous attribute container used by the XML unmarshaller.
class Bag: pass
class AmazonCoverArtSearch (object):
	def __init__ (self, loader):
		"""Set up idle search state, the async URL loader, and the locale table."""
		self.searching = False
		self.cancel = False
		self.loader = loader
		# locale id -> (Amazon site suffix, webservice host)
		self._supportedLocales = {
			"en_US" : ("us", "xml.amazon.com"),
			"en_GB" : ("uk", "xml-eu.amazon.com"),
			"de" : ("de", "xml-eu.amazon.com"),
			"ja" : ("jp", "xml.amazon.co.jp")
		}
		self.db = None
		self.entry = None
def __get_locale (self):
default = locale.getdefaultlocale ()
lc_id = DEFAULT_LOCALE
if default[0] is not None:
if self._supportedLocales.has_key (default[0]):
lc_id = default[0]
lc_host = self._supportedLocales[lc_id][1]
lc_name = self._supportedLocales[lc_id][0]
return ((lc_host, lc_name))
	def search (self, db, entry, on_search_completed_callback, *args):
		"""Start an asynchronous cover search for the given rhythmdb entry.

		Builds a prioritized keyword list from the entry's artist/album tags
		and kicks off the first query via search_next()."""
		self.searching = True
		self.cancel = False
		self.db = db
		self.entry = entry
		self.on_search_completed_callback = on_search_completed_callback
		self.args = args
		self.keywords = []
		st_artist = db.entry_get (entry, rhythmdb.PROP_ARTIST)
		st_album = db.entry_get (entry, rhythmdb.PROP_ALBUM)
		# Tidy up
		# Replace quote characters
		# don't replace single quote: could be important punctuation
		for char in ["\""]:
			st_artist = st_artist.replace (char, '')
			st_album = st_album.replace (char, '')
		self.st_album = st_album
		self.st_artist = st_artist
		# Remove variants of Disc/CD [1-9] from album title before search
		for exp in ["\([Dd]isc *[1-9]+\)", "\([Cc][Dd] *[1-9]+\)"]:
			p = re.compile (exp)
			st_album = p.sub ('', st_album)
		st_album_no_vol = st_album
		# Also strip "(Vol. N)"-style suffixes for a looser fallback query.
		for exp in ["\(*[Vv]ol.*[1-9]+\)*"]:
			p = re.compile (exp)
			st_album_no_vol = p.sub ('', st_album_no_vol)
		self.st_album_no_vol = st_album_no_vol
		# Save current search's entry properties
		self.search_album = st_album
		self.search_artist = st_artist
		self.search_album_no_vol = st_album_no_vol
		# TODO: Improve to decrease wrong cover downloads, maybe add severity?
		# Assemble list of search keywords (and thus search queries)
		if st_album == "Unknown":
			self.keywords.append ("%s Best of" % (st_artist))
			self.keywords.append ("%s Greatest Hits" % (st_artist))
			self.keywords.append ("%s Essential" % (st_artist))
			self.keywords.append ("%s Collection" % (st_artist))
			self.keywords.append ("%s" % (st_artist))
		elif st_artist == "Unknown":
			self.keywords.append ("%s" % (st_album))
			if st_album_no_vol != st_artist:
				self.keywords.append ("%s" % (st_album_no_vol))
			self.keywords.append ("Various %s" % (st_album))
		else:
			if st_album != st_artist:
				self.keywords.append ("%s %s" % (st_artist, st_album))
				if st_album_no_vol != st_album:
					self.keywords.append ("%s %s" % (st_artist, st_album_no_vol))
				if (st_album != "Unknown"):
					self.keywords.append ("Various %s" % (st_album))
			self.keywords.append ("%s" % (st_artist))
		# Initiate asynchronous search
		self.search_next ();
def __build_url (self, keyword):
(lc_host, lc_name) = self.__get_locale ()
url = "http://" + lc_host + "/onca/xml3?f=xml"
url += "&t=%s" % ASSOCIATE
url += "&dev-t=%s" % LICENSE_KEY
url += "&type=%s" % 'lite'
url += "&locale=%s" % lc_name
url += "&mode=%s" % 'music'
url += "&%s=%s" % ('KeywordSearch', urllib.quote (keyword))
return url
def search_next (self):
self.searching = True
if len (self.keywords)==0:
keyword = None
else:
keyword = self.keywords.pop (0)
if keyword is None:
# No keywords left to search -> no results
self.on_search_completed (None)
ret = False
else:
# Retrieve search for keyword
url = self.__build_url (keyword.strip ())
self.loader.get_url (url, self.on_search_response)
ret = True
return ret
def __unmarshal (self, element):
rc = Bag ()
if isinstance (element, minidom.Element) and (element.tagName == 'Details'):
rc.URL = element.attributes["url"].value
childElements = [e for e in element.childNodes if isinstance (e, minidom.Element)]
if childElements:
for child in childElements:
key = child.tagName
if hasattr (rc, key):
if type (getattr (rc, key)) <> type ([]):
setattr (rc, key, [getattr (rc, key)])
setattr (rc, key, getattr (rc, key) + [self.__unmarshal (child)])
elif isinstance(child, minidom.Element) and (child.tagName == 'Details'):
setattr (rc,key,[self.__unmarshal(child)])
else:
setattr (rc, key, self.__unmarshal(child))
else:
rc = "".join ([e.data for e in element.childNodes if isinstance (e, minidom.Text)])
if element.tagName == 'SalesRank':
rc = rc.replace ('.', '')
rc = rc.replace (',', '')
rc = int (rc)
return rc
def on_search_response (self, result_data):
if result_data is None:
self.search_next()
return
try:
xmldoc = minidom.parseString (result_data)
except:
self.search_next()
return
data = self.__unmarshal (xmldoc).ProductInfo
if hasattr(data, 'ErrorMsg'):
# Search was unsuccessful, try next keyword
self.search_next ()
else:
# We got some search results
self.on_search_results (data.Details)
def on_search_results (self, results):
self.on_search_completed (results)
def on_search_completed (self, result):
self.on_search_completed_callback (self, self.entry, result, *self.args)
self.searching = False
def __tidy_up_string (self, s):
# Lowercase
s = s.lower ()
# Strip
s = s.strip ()
# TODO: Convert accented to unaccented (fixes matching Salomé vs Salome)
s = s.replace (" - ", " ")
s = s.replace (": ", " ")
s = s.replace (" & ", " and ")
return s
def get_best_match (self, search_results):
# Default to "no match", our results must match our criteria
best_match = None
try:
if self.search_album != "Unknown":
album_check = self.__tidy_up_string (self.search_album)
for item in search_results:
# Check for album name in ProductName
product_name = self.__tidy_up_string (item.ProductName)
if product_name == album_check:
# Found exact album, can not get better than that
best_match = item
break
# If we already found a best_match, just keep checking for exact one
elif (best_match is None) and (product_name.find (album_check) != -1):
best_match = item
# If we still have no definite hit, use first result where artist matches
if (self.search_album == "Unknown" and self.search_artist != "Unknown"):
artist_check = self.__tidy_up_string (self.search_artist)
if | best_match is None:
# Check if artist appears in the Artists list
hit = False
for item in search_results:
if type (item.Artists.Artist) <> type ([]):
artists = [item.Artists.Artist]
else:
artists = item.Artists.Artist
for artist in artists:
artist = self.__tidy_up_string (artist)
if artist.find (artist_check) != | -1:
best_match = item
hit = True
break
if hit:
break
return best_match
except TypeError:
return None
|
SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydev_ipython/inputhooktk.py | Python | bsd-3-clause | 771 | 0.007782 | # encoding: utf-8
# Unlike what IPython does, we need to have an explicit inputhook because tkinter handles
# input hook in the C Source code
#-----------------------------------------------------------------------------
# Imports
#------------------------------------ | -----------------------------------------
from pydev_ipython.inputhook import stdin_ready
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
TCL_DONT_WAIT = 1 << 1
def create_inputhook_tk(app):
def inputhook_tk():
while app.dooneevent(TCL_DONT_WAIT) == 1:
if stdin_ready():
break
return 0
re | turn inputhook_tk
|
CERNDocumentServer/cds-videos | cds/version.py | Python | gpl-2.0 | 1,027 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015, 2018 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundatio | n, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immu | nities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CDS version."""
__version__ = "1.0.29"
|
tovmeod/anaf | anaf/infrastructure/tests.py | Python | bsd-3-clause | 22,223 | 0.00171 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from anaf.core.models import Group, Perspective, ModuleSetting
from models import Item, ItemValue, ItemField, ItemType, ItemStatus, ItemServicing
class InfrastructureModelsTest(TestCase):
"""Infrastructure models tests"""
def test_model_item_field(self):
"""Test item field model"""
obj = ItemField(name='test', label='test', field_type='text')
obj.save()
self.assertEquals('test', obj.name)
self.assertNotEquals(obj.id, None)
obj.delete()
def test_model_item_type(self):
"Test item type model"
obj = ItemType(name='test')
obj.save()
self.assertEquals('test', obj.name)
self.assertNotEquals(obj.id, None)
obj.delete()
def test_model_item_status(self):
"Test item status model"
obj = ItemStatus(name='test')
obj.save()
self.assertEquals('test', obj.name)
self.assertNotEquals(obj.id, None)
obj.delete()
def test_model_item(self):
"Test item model"
type = ItemType(name='test')
type.save()
status = ItemStatus(name='test')
status.save()
obj = Item(name='test', item_type=type, status=status)
obj.save()
self.assertEquals('test', obj.name)
self.assertNotEquals(obj.id, None)
obj.delete()
def test_model_item_value(self):
"Test item value model"
status = ItemStatus(name='test')
status.save()
type = ItemType(name='test')
type.save()
item = Item(name='test', item_type=type, status=status)
item.save()
field = ItemField(name='test', label='test', field_type='text')
field.save()
obj = ItemValue(value='test', field=field, item=item)
obj.save()
self.assertEquals('test', obj.value)
self.assertNotEquals(obj.id, None)
obj.delete()
def test_model_item_servicing(self):
"Test item servicing model"
obj = ItemServicing(name='test')
obj.save()
self.assertEquals('test', obj.name)
self.assertNotEquals(obj.id, None)
obj.delete()
class InfrastructureViewsTest(TestCase):
username = "test"
password = "password"
def setUp(self):
self.group, created = Group.objects.get_or_create(name='test')
self.user, created = DjangoUser.objects.get_or_create(username=self.username, is_staff=True)
self.user.set_password(self.password)
self.user.save()
perspective, created = Perspective.objects.get_or_create(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
self.type = ItemType(name='test')
self.type.set_default_user()
self.type.save()
self.status = ItemStatus(name='test')
self.status.set_default_user()
self.status.save()
self.field = ItemField(
name='test', label='test', field_type='text')
self.field.set_default_user()
self.field.save()
self.item = Item(
name='test', item_type=self.type, status=self.status)
self.item.set_default_user()
self.item.save()
self.value = ItemValue(field=self.field, item=self.item)
self.value.save()
self.servicing = ItemServicing(name='test')
self.servicing.set_default_user()
self.servicing.save()
######################################
# Testing views when user is logged in
######################################
def test_index_login(self):
"Test index page with login at /infrastructu | re/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('infrastructure'))
self.assertEquals(response.status | _code, 200)
def test_index_infrastructure_login(self):
"Test index page with login at /infrastructure/index/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('infrastructure_index'))
self.assertEquals(response.status_code, 200)
def test_infrastructure_index_owned(self):
"Test index page with login at /infrastructure/owned/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('infrastructure_index_owned'))
self.assertEquals(response.status_code, 200)
# Type
def test_infrastructure_type_add(self):
"Test index page with login at /infrastructure/type/add/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('infrastructure_type_add'))
self.assertEquals(response.status_code, 200)
def test_infrastructure_type_view(self):
"Test index page with login at /infrastructure/type/view/<type_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('infrastructure_type_view', args=[self.type.id]))
self.assertEquals(response.status_code, 200)
def test_infrastructure_type_edit(self):
"Test index page with login at /infrastructure/type/edit/<type_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('infrastructure_type_edit', args=[self.type.id]))
self.assertEquals(response.status_code, 200)
def test_infrastructure_type_delete(self):
"Test index page with login at /infrastructure/type/delete/<type_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('infrastructure_type_delete', args=[self.type.id]))
self.assertEquals(response.status_code, 200)
# Field
def test_infrastructure_field_add(self):
"Test index page with login at /infrastructure/field/add/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('infrastructure_field_add'))
self.assertEquals(response.status_code, 200)
def test_infrastructure_field_view(self):
"Test index page with login at /infrastructure/field/view/<field_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('infrastructure_field_view', args=[self.field.id]))
self.assertEquals(response.status_code, 200)
def test_infrastructure_field_edit(self):
"Test index page with login at /infrastructure/field/edit/<field_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('infrastructure_field_edit', args=[self.field.id]))
|
yeyanchao/calibre | src/calibre/gui2/preferences/saving.py | Python | gpl-3.0 | 1,820 | 0.006593 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.preferences import ConfigWidgetBase, test_widget, \
AbortCommit
from calibre.gui2.preferences.saving_ui import Ui_Form
from calibre.utils.config import ConfigProxy
from calibre.library.save_to_disk import config
from calibre.gui2 import gprefs
class ConfigWidget(ConfigWidgetBase, Ui_Form):
def genesis(self, gui):
self.gui = gui
self.proxy = ConfigProxy(config())
r = self.register
for x in ('asciiize', 'update_metadata', 'save_cover', 'write_opf',
'replace_whitespace', 'to_lowercase', 'formats', 'timefmt'):
r(x, self.proxy)
r('show_files_after_save', gprefs)
self.save_template.changed_signal.connect(self.changed | _signal.emit)
def initialize(self):
ConfigWidgetBase.initialize(self)
self.save_template.blockSignals(True)
self.save_template.initialize('save_to_disk', self.proxy['template'],
self.proxy.help('template'))
self.save_template.blockSignals(False)
def restore_defaults(self):
ConfigWidgetBase.restore_defaults(self)
self.save_templat | e.set_value(self.proxy.defaults['template'])
def commit(self):
if not self.save_template.validate():
raise AbortCommit('abort')
self.save_template.save_settings(self.proxy, 'template')
return ConfigWidgetBase.commit(self)
def refresh_gui(self, gui):
gui.iactions['Save To Disk'].reread_prefs()
if __name__ == '__main__':
from PyQt4.Qt import QApplication
app = QApplication([])
test_widget('Import/Export', 'Saving')
|
google/nitroml | nitroml/automl/metalearning/metalearner/component_test.py | Python | apache-2.0 | 2,460 | 0.002439 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Tests for nitroml.automl.metalearning.metalearner.component."""
from absl.testing import absltest
from nitroml.automl.metalearning import artifacts
from nitroml.automl.metalearning.metalearner.component import MetaLearner
from tfx.types import channel_utils
from tfx.types import standard_artifacts
class ComponentTest(absltest.TestCase):
def setUp(self):
super(ComponentTest, self).setUp()
num_train = 5
self.meta_train_data = {}
for ix in range(num_train):
self.meta_train_data[f'hparams_train_{ix}'] = channel_utils.as_channel(
[standard_artifacts.HyperParameters()])
self.meta_train_data[
f'meta_train_features_{ix}'] = channel_utils.as_channel(
[artifacts.MetaFeatures()])
self.custom_config = {'some': 'thing', 'some other': 1, 'thing': 2}
def testConstructWithMajorityVoting(self):
metalearner = MetaLearner(
algorithm='majority_voting',
custom_config=self.custom_config,
**self.meta_train_data)
self.assertEqual(artifacts.KCandidateHyperParameters.TYPE_NAME,
metalearner.outputs['output_hyperparameters'].type_name)
self.assertEqual(standard_artifacts.Model.TYPE_NAME,
metalearner.outputs['metamodel'].type_name)
def testConstructWithNearestNeighbor(self):
metalearner = MetaLearner(
algorithm='nearest_neighbor',
custom_config=self.custom_config,
**self.meta | _tra | in_data)
self.assertEqual(artifacts.KCandidateHyperParameters.TYPE_NAME,
metalearner.outputs['output_hyperparameters'].type_name)
self.assertEqual(standard_artifacts.Model.TYPE_NAME,
metalearner.outputs['metamodel'].type_name)
if __name__ == '__main__':
absltest.main()
|
Gebesa-Dev/Addons-gebesa | account_invoice_replace/models/account_invoice_replace.py | Python | agpl-3.0 | 12,245 | 0.000572 | # -*- coding: utf-8 -*-
# Copyright 2018, Esther Cisneros
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from datetime import timedelta
from openerp import _, api, fields, models
from openerp.exceptions import UserError
class AccountInvoice(models.Model):
_name = 'account.invoice'
_inherit = 'account.invoice'
invoice_replaced = fields.Many2one(
'account.invoice',
string=_("Invoice that replaces"),
)
date_cancelled = fields.Date(
string=_("Cancellation Date"),
)
number_cancel = fields.Char(
string=("Nombre de la factura cancelada"),
)
@api.multi
def action_cancel(self):
for inv in self:
if inv.id == inv.invoice_replaced.id:
raise UserError(_("Please select an invoice to substitute different to the invoice to be canceled"))
inv.date_cancelled = fields.Date.today()
inv.number_cancel = inv.number
return super(AccountInvoice, self).action_cancel()
@api.model
def send_email_invoice_canceled(self):
limit_date = timedelta(days=1)
date_today_ = fields.Date.today()
dd = fields.Datetime.from_string(date_today_)
date_cancel = dd - limit_date
inv_ids = self.search([
('state', '=', ['cance | l']),
('company_id', '=', 1),
('type', '=', 'out_invoice'),
('date_cancelled', '=', date_cancel)])
table = ''
remp_date = ''
remp_rep = ''
for inve in inv_ids:
if not inve.date_cancelled:
remp_date = '---'
else:
remp_date = inve.date_cancelled
if not inve.invoice_rep | laced:
remp_rep = '---'
else:
remp_rep = inve.invoice_replaced.number
table += """
<tr><td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td align="right" style="border-bottom: 1px solid silver;">
%s</td></tr>
""" % (remp_date, inve.partner_id.name, inve.number_cancel, remp_rep)
mail_obj = self.env['mail.mail']
body_mail = u"""
<div summary="o_mail_notification" style="padding:0px; width:700px;
margin:0 auto; background: #FFFFFF repeat top /100%%; color:#77777
7">
<table cellspacing="0" cellpadding="0" style="width:700px;
border-collapse:collapse; background:inherit; color:inherit">
<tbody><tr>
<td valign="center" width="270" style="padding:5px 10px
5px 5px;font-size: 18px">
<p>Las siguientes facturas ya fueron canceladas</p>
</td>
<td valign="center" align="right" width="270"
style="padding:5px 15px 5px 10px; font-size: 12px;">
<p>
<strong>Sent by</strong>
<a href="http://erp.portalgebesa.com" style="text-
decoration:none; color: #a24689;">
<strong>%s</strong>
</a>
<strong>using</strong>
<a href="https://www.odoo.com" style="text-
decoration:none; color: #a24689;"><strong>Odoo
</strong></a>
</p>
</td>
</tr>
</tbody></table>
</div>
<div style="padding:0px; width:700px; margin:0 auto; background:
#FFFFFF repeat top /100%%; color:#777777">
<table cellspacing="0" cellpadding="0" style="vertical-align:
top; padding:0px; border-collapse:collapse; background:inherit;
color:inherit">
<tbody><tr>
<td valign="top" style="width:700px; padding:5px 10px
5px 5px; ">
<div>
<hr width="100%%" style="background-color:
rgb(204,204,204);border:medium none;clear:both;
display:block;font-size:0px;min-height:1px;
line-height:0;margin:15px auto;padding:0">
</div>
</td>
</tr></tbody>
</table>
</div>
<div style="padding:0px; width:700px; margin:0 auto; background:
#FFFFFF repeat top /100%%;color:#777777">
<table style="border-collapse:collapse; margin: 0 auto; width:
700px; background:inherit; color:inherit">
<tbody><tr>
<th width="16%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Fecha de Cancelacion</strong></th>
<th width="54%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Cliente</strong></th>
<th width="15%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Factura Cancelada</strong></th>
<th width="15%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Factura que Sustituye</strong></th>
</tr>
%s
</tbody>
</table>
</div>
""" % (self.env.user.company_id.name, table)
mail = mail_obj.create({
'subject': 'Facturas Canceladas',
'email_to': 'sergio.hernandez@gebesa.com,pedro.acosta@gebesa.com,andrea.mejia@gebesa.com,monica.sanchez@gebesa.com,jesus.castrellon@gebesa.com,christiansen.duenez@gebesa.com,esmeralda.gutierrez@gebesa.com,sistemas@gebesa.com',
'headers': "{'Return-Path': u'odoo@gebesa.com'}",
'body_html': body_mail,
'auto_delete': True,
'message_type': 'comment',
'model': 'account.invoice',
#'res_id': inv_ids[0].id,
})
mail.send()
@api.model
def send_email_invoice_canceled_tgalbo(self):
limit_date = timedelta(days=1)
date_today_ = fields.Date.today()
dd = fields.Datetime.from_string(date_today_)
date_cancel = dd - limit_date
inv_ids = self.search([
('state', '=', ['cancel']),
('company_id', '=', 4),
('type', '=', 'out_invoice'),
('date_cancelled', '=', date_cancel)])
table = ''
remp_date = ''
remp_rep = ''
for inve in inv_ids:
if not inve.date_cancelled:
remp_date = '---'
else:
remp_date = inve.date_cancelled
if not inve.invoice_replaced:
remp_rep = '---'
else:
remp_rep = inve.invoice_replaced.number
table += """
<tr><td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td align="right" style="border-bottom: 1px solid silver;">
%s</td></tr>
""" % (remp_date, inve.partner_id.name, inve.number_cancel, remp_rep)
mail_obj = self.env['mail.mail']
body_mail = u"""
<div summary="o_mail_notification" style="padding:0px; width:700px;
margin:0 auto; background: #FFFFFF repeat top /100%%; color:#77777
7">
|
angr/cle | cle/backends/elf/relocation/__init__.py | Python | bsd-2-clause | 1,417 | 0.002823 | import os
import logging
import importlib
import archinfo
from collections import defaultdict
from ...relocation import Relocation
ALL_RELOCATIONS = defaultdict(dict)
complaint_log = set()
path = os.path.dirname(os.path.abspath(__file__))
l = logging.getLogger(name=__name__)
def load_relocations():
for filename in os.listdir(path):
if not filename.endswith('.py'):
continue
if filename == '__init__.py':
continue
l.debug('Importing ELF relocation module: %s', filename[:-3])
module = importlib.import_module('.%s' % filename[:-3], 'cle.backends.elf.relocation')
try:
arch_name = module.ar | ch
except AttributeError:
continue
for item_name in dir(module):
if item_name not in archinfo.defines:
continue
item = getattr(module, item_name)
if not isinstance(item, type) or not issubclass(item, Relocation):
continue
ALL_RELOCATIONS[arch_name][archinfo.defines[item_name]] = item
def get_relocation(arch, r_type):
| if r_type == 0:
return None
try:
return ALL_RELOCATIONS[arch][r_type]
except KeyError:
if (arch, r_type) not in complaint_log:
complaint_log.add((arch, r_type))
l.warning("Unknown reloc %d on %s", r_type, arch)
return None
load_relocations()
|
espressif/esp-idf | tools/test_apps/build_system/ldgen_test/check_placements.py | Python | apache-2.0 | 2,847 | 0.00281 | #!/usr/bin/env python
#
# Copyright 2020 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Check placements in this test app for main
# specified in main/linker.lf
import argparse
import subprocess
from pyparsing import LineEnd, LineStart, Literal, Optional, Word, alphanums, hexnums
argparser = argparse.ArgumentParser()
argparser.add_argument('objdump')
argparser.add_argument('elf')
args = argparser.parse_args()
contents = subprocess.check_output([args.objdump, '-t', args.elf]).decode()
def check_location(symbol, expected):
pattern = (LineStart() + Word(hexnums).setResultsName('address')
+ Optional(Word(alphanums, exact=1))
+ Optional(Word(alphanums,exact=1))
+ Word(alphanums + '._*').setResultsName('actual')
+ Word(hexnums)
+ Literal(symbol)
+ LineEnd())
try:
results = pattern.searchString(contents)[0]
except IndexError:
raise Exception("check placement fail: '%s' was not found" % (symbol))
if results.actual != expected:
raise Exception("check placement fail: '%s' was placed in '%s', not in '%s'" % (symbol, results.actual, expected))
print("check placement pass: '%s' was successfully placed in '%s'" % (symbol, results.actual))
return int(results.address, 16)
# src1:func1 (noflash) - explicit mapping for func2 using 'rtc' scheme
# should have been dropped since it is unreferenced.
func1 = check_location('func1', '.iram0.text')
sym1_start = check_location('_sym1_start', '*ABS*')
sym1_end = check_location('_sym1_end', '*ABS*')
assert func1 >= sym1_start, 'check placement f | ail: func1 comes before __sym1_start'
assert func1 < sym1_end, 'check placement fail: func1 comes after __sym1_end'
assert sym1_start % 9 == 0, '_sym1_start is not aligned as specified in linker fragment'
assert sym1_end % 12 == 0, '_sym1_end is not aligned as specified in linker fragment'
print('check placement pass: _sym1_start < func1 < __sym1_end and alignments checked')
# src1:func2 (rtc) - explicit mapping for func2 using 'rtc' scheme
check_lo | cation('func2', '.rtc.text')
# src1 (default) - only func3 in src1 remains that has not been
# mapped using a different scheme
check_location('func3', '.flash.text')
check_location('func4', '.iram0.text')
|
vivsh/django-ginger | ginger/conf/urls.py | Python | mit | 580 | 0.001724 |
from django.conf.urls import include, url
from ginger.views import utils
__all__ = ('include', 'url', 'scan', 'scan_to_include')
def scan(module, predicate=None):
view_classes = utils.find_views(module, predicate=predicate)
urls = []
for view in view_classes:
if hasattr(view, 'as_urls'):
urls.extend(view.as_urls())
else:
| urls.append(view.as_url())
pattern = urls
return pattern
def scan_to | _include(module, predicate=None, app_name=None, namespace=None):
return scan(module, predicate), app_name, namespace
|
seeminglee/pyglet64 | tests/test.py | Python | bsd-3-clause | 18,095 | 0.002487 | #!/usr/bin/env python
'''Test framework for pyglet. Reads details of components and capabilities
from a requirements document, runs the appropriate unit tests.
Overview
--------
First, some definitions:
Test case:
A single test, implemented by a Python module in the tests/ directory.
Tests can be interactive (requiring the user to pass or fail them) or
non-interactive (the test passes or fails itself).
Section:
A list of test cases to be run in a specified order. Sections can
also contain other sections to an arbitrary level.
Capability:
A capability is a tag that can be applied to a test-case, which specifies
a particular instance of the test. The tester can select which
capabilities are present on their system; and only test cases matching
those capabilities will be run.
There are platform capabilities "WIN", "OSX" and "X11", which are
automatically selected by default.
The "DEVELOPER" capability is used to mark test cases which test a feature
under active development.
The "GENERIC" capability signifies that the test case is equivalent under
all platforms, and is selected by default.
Other capabilities can be specified and selected as needed. For example,
we may wish to use an "NVIDIA" or "ATI" capability to specialise a
test-case for a particular video card make.
Some tests generate regression images if enabled, so you will only
need to run through the interactive procedure once. During
subsequent runs the image shown on screen will be compared with the
regression images and passed automatically if they match. There are
command line options for enabling this feature.
By default regression images are saved in tests/regression/images/
Running tests
-------------
The test procedure is interactive (this is necessary to facilitate the
many GUI-related tests, which cannot be completely automated). With no
command-line arguments, all test cases in all sections will be run::
python tests/test.py
Before each test, a description of the test will be printed, including
some information of what you should look for, and what interactivity
is provided (including how to stop the test). Press ENTER to begin
the test.
When the test is complete, assuming there were no detectable errors
(for example, failed assertions or an exception), you will be asked
to enter a [P]ass or [F]ail. You should Fail the test if the behaviour
was not as described, and enter a short reason.
Details of each test session are logged for future use.
Command-line options:
--plan=
Specify the test plan file (defaults to tests/plan.txt)
--test-root=
Specify the top-level directory to look for unit tests in (defaults
to test/)
--capabilities=
Specify the capabilities to select, comma separated. By default this
only includes your operating system capability (X11, WIN or OSX) and
GENERIC.
--log-level=
Specify the minimum log level to write (defaults to 10: info)
--log-file=
Specify log file to write to (defaults to "pyglet.%d.log")
--regression-capture
Save regression images to disk. Use this only if the tests have
already been shown to pass.
--regression-check
Look for a regression image on disk instead of prompting the user for
passage. If a regression image is found, it is compared with the test
case using the tolerance specified below. Recommended only for
developers.
--regression-tolerance=
Specify the tolerance when comparing a regression image. A value of
2, for example, means each sample component must be +/- 2 units
of the regression image. Tolerance of 0 means images must be identical,
tolerance of 256 means images will always match (if correct dimensions).
Defaults to 2.
--regression-path=
Specify the directory to store and look for regression images.
Defaults to tests/regression/images/
--developer
Selects the DEVELOPER capability.
--no-interactive=
Don't write descriptions or prompt for confirmation; just run each
test in succcession.
After the command line options, you can specify a list of sections or test
cases to run.
Examples
--------
python tests/test.py --capabilities=GENERIC,NVIDIA,WIN window
Runs all tests in the window section with the given capabilities.
python tests/test.py --no-interactive FULLSCREEN_TOGGLE
Test just the FULLSCREEN_TOGGLE test case without prompting for input (useful
for development).
python tests/image/PIL_RGBA_SAVE.py
Run a single test outside of the test harness. Handy for development; it
is equivalent to specifying --no-interactive.
Writing tests
-------------
Add the test case to the appropriate section in the test plan (plan.txt).
Create one unit test script per test case. For example, the test for
window.FULLSCREEN_TOGGLE is located at::
tests/window/FULLSCREEN_TOGGLE.py
The test file must contain:
- A module docstring describing what the test does and what the user should
look for.
- One or more subclasses of unittest.TestCase.
- No other module-level code, except perhaps an if __name__ == '__main__'
condition for running tests stand-alone.
- Optionally, the attribute "__noninteractive = True" to specify that
the test is not interactive; doesn't require user intervention.
During development, test cases should be marked with DEVELOPER. Once finished
add the WIN, OSX and X11 capabilities, or GENERIC if it's platform
independent.
Writing regression tests
------------------------
Your test case should subclass tests.regression.ImageRegressionTestCase
instead of unitttest.TestCase. At the point where the buffer (window
image) should be checked/saved, call self.capture_regression_image().
If this method returns True, you can exit straight away (regression
test passed), otherwise continue running interactively (regression image
was captured, wait for user confirmation). You can call
capture_regression_image() several times; only the final image will be
used.
'''
__docformat__ = 'restructuredtext'
__version__ = | '$Id: $'
import array
import logging
import os
import optparse
import re
import sys
import time
import unittest
# So we can find test | s.regression and ensure local pyglet copy is tested.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import tests.regression
import pyglet.image
regressions_path = os.path.join(os.path.dirname(__file__),
'regression', 'images')
class TestCase(object):
def __init__(self, name):
self.name = name
self.short_name = name.split('.')[-1]
self.capabilities = set()
def get_module_filename(self, root=''):
path = os.path.join(*self.name.split('.'))
return '%s.py' % os.path.join(root, path)
def get_module(self, root=''):
name = 'tests.%s' % self.name
module = __import__(name)
for c in name.split('.')[1:]:
module = getattr(module, c)
return module
def get_regression_image_filename(self):
return os.path.join(regressions_path, '%s.png' % self.name)
def test(self, options):
if not options.capabilities.intersection(self.capabilities):
return
options.log.info('Testing %s.', self)
if options.pretend:
return
module = None
try:
module = self.get_module(options.test_root)
except IOError:
options.log.warning('No test exists for %s', self)
except Exception:
options.log.exception('Cannot load test for %s', self)
if not module:
return
module_interactive = options.interactive
if hasattr(module, '__noninteractive') and \
getattr(module, '__noninteractive'):
module_interactive = False
if options.regression_check and \
os.path.exists(self.get_regression_image_filename()):
result = RegressionCheckTestResult(
self, options.regression_tolerance)
module_interactive = False
elif options.regression_capture:
result = RegressionCaptureTestResu |
bunbun/ruffus | ruffus/cmdline.py | Python | mit | 31,898 | 0.002759 | ################################################################################
#
#
# cmd_line_helper.py
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
import sys
from .ruffus_utility import CHECKSUM_REGENERATE
from . import proxy_logger
from . import task
import logging.handlers
import logging
"""
********************************************
:mod:`ruffus.cmdline` -- Overview
********************************************
.. moduleauthor:: Leo Goodstadt <ruffus@llew.org.uk>
#
# Using argparse (new in python v 2.7)
#
from ruffus import *
parser = cmdline.get_argparse( description='WHAT DOES THIS PIPELINE DO?')
parser.add_argument("--input_file")
options = parser.parse_args()
# logger which can be passed to ruffus tasks
logger, logger_mutex = cmdline.setup_logging (__name__, options.log_file, options.verbose)
#_____________________________________________________________________________________
# pipelined functions go here
#_____________________________________________________________________________________
cmdline.run (options)
#
# Using optparse (new in python v 2.6)
#
from ruffus import *
parser = cmdline.get_optgparse(version="%prog 1.0", usage = "\n\n %prog [options]")
parser.add_option("-c", "--custom", dest="custom", action="count")
(options, remaining_args) = parser.parse_args()
# logger which can be passed to ruffus tasks
logger, logger_mutex = cmdline.setup_logging ("this_program", options.log_file, options.verbose)
#_____________________________________________________________________________________
# pipelined functions go here
#_____________________________________________________________________________________
cmdline.run (options)
"""
#
# print options
#
flowchart_formats = ["svg", "svgz", "png",
"jpg", "psd", "tif", "eps", "pdf", "dot"]
# "jpeg", "gif", "plain", "ps", "wbmp", "canon",
# "cmap", "cmapx", "cmapx_np", "fig", "gd", "gd2",
# "gv", "imap", "imap_np", "ismap", "jpe", "plain-ext",
# "ps2", "tk", "vml", "vmlz", "vrml", "x11", "xdot", "xlib"
# Replace last comma with " and". Mad funky, unreadable reverse replace code: couldn't resist!
flowchart_formats_str = ", ".join(["%r" % ss for ss in flowchart_formats])[
::-1].replace(" ,", ", or "[::-1], 1)[::-1]
# _________________________________________________________________________________________
# get_argparse
# _________________________________________________________________________________________
def get_argparse(*args, **args_dict):
"""
Set up argparse
to allow for ruffus specific options:
--verbose
--version
--log_file
-t, --target_tasks
-j, --jobs
-n, --just_print
--flowchart
--touch_files_only
--recreate_database
--checksum_file_name
--key_legend_in_graph
--draw_graph_horizontally
--flowchart_format
--forced_tasks
Optionally specify ignored_args = ["verbose", "recreate_database",...]
listing names which will not be added as valid options on the command line
Optionally specify version = "%(prog)s version 1.234"
"""
import argparse
# version and ignored_args are for append_to_argparse
orig_args_dict = dict(args_dict)
if "version" in args_dict:
del args_dict["version"]
if "ignored_args" in args_dict:
del args_dict["ignored_args"]
parser = argparse.ArgumentParser(*args, **args_dict)
return append_to_argparse(parser, **orig_args_dict)
# _________________________________________________________________________________________
# append_to_argparse
# _________________________________________________________________________________________
def append_to_argparse(parser, **args_dict):
"""
Common options:
--verbose
--version
--log_file
"""
if "version" in args_dict:
prog_version = args_dict["version"]
else:
prog_version = "%(prog)s 1.0"
#
# ignored_args contains a list of options which will *not* be added
#
if "ignored_args" in args_dict:
if isinstance(args_dict["ignored_args"], str):
ignored_args = set([args_dict["ignored_args"]])
else:
try:
ignored_args = set(args_dict["ignored_args"])
except:
raise Exception(
"Error: expected ignored_args = ['list_of', 'option_names']")
else:
ignored_args = set()
common_options = parser.add_argument_group('Common options')
if "verbose" not in ignored_args:
common_options.add_argument(
'--verbose', "-v", const="+", default=[], nargs='?',
action="append",
help="Print more verbose messages for each additional verbose level.")
if "version" not in ignored_args:
common_options.add_argument(
'--version', action='version', version=prog_version)
if "log_file" not in ignored_args:
common_options.add_argument("-L", "--log_file", metavar="FILE", type=str,
help="Name and path of log file")
#
# pipeline
#
pipeline_options = parser.add_argument_group('pipeline arguments')
if "target_tasks" not in ignored_args:
pipeline_options.add_argument("-T", "--target_tasks", action="append",
| metavar="JOBNAME", type=str,
| help="Target task(s) of pipeline.", default=[])
if "jobs" not in ignored_args:
pipeline_options.add_argument("-j", "--jobs", default=1, metavar="N", type=int,
help="Allow N jobs (commands) to run simultaneously.")
if "use_threads" not in ignored_args:
pipeline_options.add_argument("--use_threads", action="store_true",
help="Use multiple threads rather than processes. Needs --jobs N with N > 1")
if "just_print" not in ignored_args:
pipeline_options.add_argument("-n", "--just_print", action="store_true",
help="Don't actually run any commands; just print the pipeline.")
if "touch_files_only" not in ignored_args:
pipeline_options.add_argument("--touch_files_only", action="store_true",
help="Don't actually run any commands; just 'touch' the output for each task to make them appear up to date.")
if "recreate_database" not in ignored_args:
pipeline_options.add_argument("--recreate_database", action="store_true",
help="Don't actually run any commands; just recreate the checksum database.")
if "checksum_file_name" not in ignored_args:
pipeline_options.add_argument("--checksum_file_na |
bcoe/Adventures-in-Document-Thumbnailing | setup.py | Python | mit | 533 | 0.015009 | #!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup, find_packages
setup(
name="tomthumb" | ,
version="0.0.1",
description="A handy document thumbnailing | tool.",
author="Benjamin Coe",
author_email="bencoe@gmail.com",
url="https://github.com/bcoe/Adventures-in-Document-Thumbnailing",
packages = find_packages(),
install_requires = [
'PIL'
],
entry_points = {
'console_scripts': [
'tomthumb = tomthumb.__main__:main'
]
}
) |
cepheidxa/python | prime.py | Python | gpl-3.0 | 3,162 | 0.007278 | #!/usr/bin/python3
from ctypes import *
import random
import logging
import log
logger = logging.getLogger('test.prime')
__all__ = ['isPrime', 'getRandPrime', 'inv']
libgmp = CDLL('libgmp.so.10')
class _mpz_t(Structure):
_fields_ = [('_mp_alloc', c_int), ('_mp_size', c_int), ('_mp_d', POINTER(c_ulong))]
def _mpz_init_set_str(p, s, base):
libgmp.__gmpz_init_set_str(pointer(p), s, base)
def _mpz_probab_prime_p(p, reps):
return libgmp.__gmpz_probab_prime_p(pointer(p), reps)
def isPrime(n):
"""Test if n is a prime."""
if type(n) != int:
raise ValueError('type error, n should be int type')
v = _mpz_t()
_mpz_init_set_str(v, bytes(str(n), encoding = 'ascii'), 10)
if _mpz_probab_prime_p(v, 50) == 0:
return False
else:
return True
def getRandPrime(bits = 1024):
"""Get a random prime."""
r = random.SystemRandom()
v = r.getrandbits(bits)
while not isPrime(v):
v = r.getrandbits(bits)
return v
def inv(v, p):
v = v % p
if v < 0:
v = v + p
if v == 0:
raise ValueError('v should not be zero in inv')
elif v == 1:
return 1
pp, vv = p, v
a, b = divmod(pp, vv)
l1=[1,0]
l2 = [0, 1]
l3 = list(map(lambda x: x[0] - a * x[1], zip(l1, l2)))
w | hile b != 1:
pp, vv = vv, b
a, b = divmod(pp, vv)
l1 = l2
l2 = l3
l3 = list(map(lambda x: x[0] - a * x[1], zip(l1, l2)))
ret = l3[1]
ret = ret % p
if ret < 0:
ret = ret + p
return ret
def expmod(x, n, p):
b = n
t = x % p
ret = 1
while b != 0:
if b & 0x1 != 0:
r | et *= t
ret %= p
t = t * t % p
b >>= 1
return ret
def isQR(a, p):
"""is a a quadratic residule mod p, p must be a prime
if p is prime, and (a, p) = 1, then
a is a a quadratic residule of p is eaual to
a ** ((p-1)/2) = 1 mod p
a is not a a quadratic residule of p is eaual to
a ** ((p-1)/2) = -1 mod p
"""
if not isPrime(p):
raise ValueError('p must be prime in sqrtmod in module {0}'.format(__name__))
return expmod(a, (p - 1)>>1, p) == 1
def sqrtmod(a, p):
"""Get x where x^2 = a mod p, p must be a prime
Cipolla's algorithm, refered to Wikipedia
"""
if not isPrime(p):
raise ValueError('p must be prime in sqrtmod in module {0}'.format(__name__))
if isQR(a, p) == False:
return None
r = random.SystemRandom()
while True:
b = r.randint(1, p-1)
if b * b % p == a % p:
return b
if isQR((b * b - a) % p, p) == False:
break
#define y, y**2 = 5, get solution in Fp(y) filed
t = [b, 1]
y2 = (b * b - a) % p
q = (p + 1) >> 1
ret = [1, 0]
while q != 0:
if q & 0x01 != 0:
ret = [(ret[0] * t[0] + y2 * ret[1] * t[1]) % p, (ret[0] * t[1] + ret[1] * t[0]) % p]
t = [(t[0] * t[0] + y2 * t[1] * t[1]) % p, 2 * t[0] * t[1] % p]
q >>= 1
if ret[1]:
logger.error('sqrt({0}, {1}), b = {2}, ret=({3}, {4})'.format(a, p, b, ret[0], ret[1]))
return ret[0]
|
vicky2135/lucious | oscar/bin/pilprint.py | Python | bsd-3-clause | 2,624 | 0.001524 | #!/usr/local/django-oscar/oscar/bin/python2.7
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image fil | es")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show avai | lable drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
anhstudios/swganh | data/scripts/templates/object/building/poi/shared_tatooine_evil_nomad_small2.py | Python | mit | 457 | 0.045952 | #### NOTICE: THIS F | ILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTA | TION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_tatooine_evil_nomad_small2.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
simply-jos/birth-of-the-toolkit | REScripts/TSCBViewer/viewer.py | Python | mit | 2,829 | 0.000353 | import pygame
import math
import os
import sys
import struct
from Util import BinaryReader, Camera
from BoTWHeightmap import Heightmap, Config
bgcolor = (0, 0, 0)
size = [1024, 768]
camera = Camera(size)
camera.SetLodLevel(32.0)
# Initialize pygame
pygame.init()
screen = pygame.display.set_mode(size)
# Run a loop until done rendering
done = False
clock = pygame.time.Clock()
# Create a new binary reader
filename = sys.argv[1]
f = open(filename, 'rb')
breader = BinaryReader(f, ">")
# Create a new heightmap
heightmap = Heightmap(breader)
currentLodLevel = 0
while not done:
clock.tick(30)
pygame.event.pump()
# move the camera
speed = 20
zoomSpeed = 1
keyTransform = {
pygame.K_w: (0, -1, 0),
pygame.K_a: (-1, 0, 0),
pygame.K_s: (0, 1, 0),
pygame.K_d: (1, 0, 0),
pygame.K_q: (0, 0, -0.05),
pygame.K_e: (0, 0, 0.05)
}
keysDown = pygame.key.get_pressed()
for k in keyTransform:
if keysDown[k]:
t = keyTransform[k]
camera.Translate((t[0] * speed, t[1] * speed))
camera.ZoomDelta(t[2] * zoomSpeed)
keyLodLevel = {
pygame.K_1: 32.0 / pow(2, 0),
pygame.K_2: 32.0 / pow(2, 1),
pygame.K_3: 32.0 / pow(2, 2),
pygame.K_4: 32.0 / pow(2, 3),
pygame.K_5: 32.0 / pow(2, 4),
pygame.K_6: 32.0 / pow(2, 5),
pygame.K_7: 32.0 / pow(2, 6),
pygame.K_8: 32.0 / pow(2, 7),
pygame.K_9: 32.0 / pow(2, 8),
pygame.K_0: "ALL"
}
for k in keyLodLevel:
if keysDown[k]:
l = keyLodLevel[k]
camera.SetLodLevel(l)
for ev in pygame.ev | ent.get():
if ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_t:
Config.draw_textures = not Config.draw_textures
elif ev.key == pygame.K_p:
Config.draw_coverage = not Config.draw_coverage
elif ev.key | == pygame.K_g:
Config.draw_grid = not Config.draw_grid
elif ev.key == pygame.K_o:
Config.draw_overdraw = not Config.draw_overdraw
elif ev.key == pygame.K_b:
bgcolor = (0, (0, 255)[bgcolor[1] == 0], 0)
elif ev.key == pygame.K_m:
Config.NextTexType()
print Config.draw_texType
elif ev.key == pygame.K_c:
Config.NextChannel()
print Config.draw_channel
elif ev.key == pygame.K_v:
Config.disable_alpha = not Config.disable_alpha
elif ev.type == pygame.MOUSEBUTTONDOWN:
clickCoords = pygame.mouse.get_pos()
heightmap.unk1Collection.CheckClick(camera, clickCoords)
screen.fill(bgcolor)
heightmap.Draw(screen, camera)
pygame.display.flip()
f.close()
|
ProgVal/Limnoria-test | plugins/Alias/test.py | Python | bsd-3-clause | 7,992 | 0.001878 | # -*- coding: utf8 -*-
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import supybot.plugin as plugin
import supybot.registry as registry
from supybot.utils.minisix import u
from . import plugin as Alias
class FunctionsTest(SupyTestCase):
def testFindBiggestDollar(self):
self.assertEqual(Alias.findBiggestDollar(''), 0)
self.assertEqual(Alias.findBiggestDollar('foo'), 0)
self.assertEqual(Alias.findBiggestDollar('$0'), 0)
self.assertEqual(Alias.findBiggestDollar('$1'), 1)
self.assertEqual(Alias.findBiggestDollar('$2'), 2)
self.assertEqual(Alias.findBiggestDollar('$2 $10'), 10)
self.assertEqual(Alias.findBiggestDollar('$3'), 3)
self.assertEqual(Alias.findBiggestDollar('$3 $2 $1'), 3)
self.assertEqual(Alias.findBiggestDollar('foo bar $1'), 1)
self.assertEqual(Alias.findBiggestDollar('foo $2 $1'), 2)
self.assertEqual(Alias.findBiggestDollar('foo $0 $1'), 1)
self.assertEqual(Alias.findBiggestDollar('foo $1 $3'), 3)
self.assertEqual(Alias.findBiggestDollar('$10 bar $1'), 10)
class AliasTestCase(ChannelPluginTestCase):
plugins = ('Alias', 'Filter', 'Utilities', 'Format', 'Reply')
def testNoAliasWithNestedCommandName(self):
self.assertError('alias add foo "[bar] baz"')
def testDoesNotOverwriteCommands(self):
# We don't have dispatcher commands anymore
#self.assertError('alias add alias "echo foo bar baz"')
self.assertError('alias add add "echo foo bar baz"')
self.assertError('alias add remove "echo foo bar baz"')
self.assertError('alias add lock "echo foo bar baz"')
self.assertError('alias add unlock "echo foo bar baz"')
def testAliasHelp(self):
self.assertNotError('alias add slashdot foo')
self.assertRegexp('help slashdot', "Alias for .*foo")
self.assertNotError('alias add nonascii echo éé')
self.assertRegexp('help nonascii', "Alias for .*echo éé")
def testRemove(self):
self.assertNotError('alias add foo echo bar')
self.assertResponse('foo', 'bar')
self.assertNotError('alias remove foo')
self.assertError('foo')
def testDollars(self):
self.assertNotError('alias add rot26 "rot13 [rot13 $1]"')
self.assertResponse('rot26 foobar', 'foobar')
def testMoreDollars(self):
self.assertNotError('alias add rev "echo $3 $2 $1"')
self.assertResponse('rev foo bar baz', 'baz bar foo')
def testAllArgs(self):
self.assertNotError('alias add swap "echo $2 $1 $*"')
self.assertResponse('swap 1 2 3 4 5', '2 1 3 4 5')
self.assertError('alias add foo "echo $1 @1 $*"')
self.assertNotError('alias add moo echo $1 $*')
self.assertError('moo')
self.assertResponse('moo foo', 'foo')
self.assertResponse('moo foo bar', 'foo bar')
def testChannel(self):
self.assertNotError('alias add channel echo $channel')
self.assertResponse('alias channel', self.channel)
def testNick(self):
self.assertNotError('alias add sendingnick "rot13 [rot13 $nick]"')
self.assertResponse('sendingnick', self.nick)
def testAddRemoveAlias(self):
cb = self.irc.getCallback('Alias')
cb.addAlias(self.irc, 'foobar', 'echo sbbone', lock=True)
self.assertResponse('foobar', 'sbbone')
self.assertRaises(Alias.AliasError, cb.removeAlias, 'foobar')
cb.removeAlias('foobar', evenIfLocked=True)
self.failIf('foobar' in cb.aliases)
self.assertError('foobar')
self.assertRegexp('alias add abc\x07 ignore', 'Error.*Invalid')
def testOptionalArgs(self):
self.assertNotError('alias add myrepr "repr @1"')
self.assertResponse('myrepr foo', '"foo"')
self.assertResponse('myrepr ""', '""')
def testNoExtraSpaces(self):
self.assertNotError('alias add foo "action takes $1\'s money"')
self.assertResponse('foo bar', '\x01ACTION takes bar\'s money\x01')
def testNoExtraQuotes(self):
self.assertNotError('alias add myre "echo s/$1/$2/g"')
self.assertResponse('myre foo bar', 's/foo/bar/g')
def testUnicode(self):
self.assertNotError(u('alias add \u200b echo foo'))
self.assertResponse(u('\u200b'), 'foo')
self.assertNotError('alias add café echo bar')
self.assertResponse('café', 'bar')
def testSimpleAliasWithoutArgsImpliesDollarStar(self):
self.assertNotError('alias add exo echo')
self.assertResponse('exo foo bar baz', 'foo bar baz')
class EscapedAliasTestCase(ChannelPluginTestCase):
plugins = ('Alias', 'Utilities')
def setUp(self):
registry._cache.update(
{'supybot.plugins.Alias.escapedaliases.a1a3dfoobar': 'echo baz',
'supybot.plugins.Alias.escapedaliases.a1a3dfoobar.locked': 'False'})
super(EscapedAliasTestCase, self).setUp()
def testReadDatabase(self):
self.assertResponse('foo.bar', 'baz')
def testAdd(self):
self.assertNotError('alias add spam.egg echo hi')
self.assertResponse('spam.egg', 'hi')
self.assertNotError('alias add spam|egg echo hey')
self.assertResponse('spam|egg', 'hey')
self.assertNotError('alias remove spam.egg')
self.assertError('spam.egg')
self.assertNotError('spam|egg')
self.assertNotError('alias remove spam|egg')
self.assertError('spam.egg')
| self.assertError('spam|egg')
def testWriteDatabase(self):
self.assertNotError('alias add fooo.spam echo egg')
self.assertResponse('fooo.spam', 'egg')
self.failUnless(hasattr(conf.supybot.plugins.Alias.escapedaliases,
'a1a4dfooospam'))
| self.assertEqual(conf.supybot.plugins.Alias.escapedaliases.a1a4dfooospam(),
'echo egg')
self.assertNotError('alias add foo.spam.egg echo supybot')
self.assertResponse('foo.spam.egg', 'supybot')
self.failUnless(hasattr(conf.supybot.plugins.Alias.escapedaliases,
'a2a3d8dfoospamegg'))
self.assertEqual(conf.supybot.plugins.Alias.escapedaliases.a2a3d8dfoospamegg(),
'echo supybot')
self.assertEqual(Alias.unescapeAlias('a2a3d8dfoospamegg'),
'foo.spam.egg')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
GarySparrow/mFlaskWeb | app/main/views.py | Python | mit | 9,886 | 0 | from flask import render_template, redirect, url_for, abort, flash, request,\
current_app, make_response
from flask_login import login_required, current_user
from flask_sqlalchemy import get_debug_queries
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm,\
CommentForm
from .. import db
from ..models import Permission, Role, User, Post, Comment
from ..decorators import admin_required, permission_required
import os
@main.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.parameters, query.duration,
query.context))
return response
@main.route('/shutdown')
def server_shutdown():
if not current_app.testing:
abort(404)
shutdown = request.environ.get('werkzeug.server.shutdown')
if not shutdown:
abort(500)
shutdown()
return 'Shutting down...'
@main.route('/', methods=['GET', 'POST'])
def index():
form = PostForm()
if current_user.can(Permission.WRITE_ARTICLES) and \
form.validate_on_submit():
post = Post(body=form.body.data,
author=current_user._get_current_object())
db.session.add(post)
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
show_followed = False
if current_user.is_authenticated:
show_followed = bool(request.cookies.get('show_followed', ''))
if show_followed:
query = current_user.followed_posts
else:
query = Post.query
pagination = query.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('index.html', form=form, posts=posts,
show_followed=show_followed, pagination=pagination)
@main.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('user.html', user=user, posts=posts,
pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
current_user.location = form.location.data
current_user.about_me = form.about_me.data
db.session.add(current_user)
flash('Your profile has been updated.')
return redirect(url_for('.user', username=current_user.username))
form.name.data = current_user.name
form.location.data = current_user.location
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
| user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user. | name = form.name.data
user.location = form.location.data
user.about_me = form.about_me.data
db.session.add(user)
flash('The profile has been updated.')
return redirect(url_for('.user', username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
form.location.data = user.location
form.about_me.data = user.about_me
return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
post = Post.query.get_or_404(id)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(body=form.body.data,
post=post,
author=current_user._get_current_object())
db.session.add(comment)
flash('Your comment has been published.')
return redirect(url_for('.post', id=post.id, page=-1))
page = request.args.get('page', 1, type=int)
if page == -1:
page = (post.comments.count() - 1) // \
current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('post.html', posts=[post], form=form,
comments=comments, pagination=pagination)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
post = Post.query.get_or_404(id)
if current_user != post.author and \
not current_user.can(Permission.ADMINISTER):
abort(403)
form = PostForm()
if form.validate_on_submit():
post.body = form.body.data
db.session.add(post)
flash('The post has been updated.')
return redirect(url_for('.post', id=post.id))
form.body.data = post.body
return render_template('edit_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if current_user.is_following(user):
flash('You are already following this user.')
return redirect(url_for('.user', username=username))
current_user.follow(user)
flash('You are now following %s.' % username)
return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if not current_user.is_following(user):
flash('You are not following this user.')
return redirect(url_for('.user', username=username))
current_user.unfollow(user)
flash('You are not following %s anymore.' % username)
return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followers.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.follower, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followers of",
endpoint='.followers', pagination=pagination,
follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followed.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.followed, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, tit |
YuepengGuo/backtrader | backtrader/indicators/__init__.py | Python | gpl-3.0 | 2,021 | 0 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://w | ww.gnu.o | rg/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader import Indicator
from backtrader.functions import *
# The modules below should/must define __all__ with the Indicator objects
# of prepend an "_" (underscore) to private classes/variables
from .basicops import *
# base for moving averages
from .mabase import *
# moving averages (so envelope and oscillators can be auto-generated)
from .sma import *
from .ema import *
from .smma import *
from .wma import *
from .dema import *
from .kama import *
from .zlema import *
# depends on moving averages
from .deviation import *
# depend on basicops, moving averages and deviations
from .atr import *
from .aroon import *
from .bollinger import *
from .cci import *
from .crossover import *
from .dpo import *
from .directionalmove import *
from .envelope import *
from .macd import *
from .momentum import *
from .oscillator import *
from .prettygoodoscillator import *
from .priceoscillator import *
from .rsi import *
from .stochastic import *
from .trix import *
from .williams import *
|
arthurdarcet/aiohttp | tests/test_multipart.py | Python | apache-2.0 | 40,036 | 0 | import asyncio
import io
import json
import zlib
from unittest import mock
import pytest
import aiohttp
from aiohttp import payload
from aiohttp.hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE,
)
from aiohttp.helpers import parse_mimetype
from aiohttp.multipart import MultipartResponseWrapper
from aiohttp.streams import DEFAULT_LIMIT as stream_reader_default_limit
from aiohttp.streams import StreamReader
from aiohttp.test_utils import make_mocked_coro
BOUNDARY = b'--:'
@pytest.fixture
def buf():
    # Fresh output buffer; the mocked stream fixture appends written bytes here.
    return bytearray()
@pytest.fixture
def stream(buf):
    # Mock writer whose async write() records every chunk into ``buf``.
    writer = mock.Mock()
    async def write(chunk):
        buf.extend(chunk)
    # side_effect makes writer.write(...) awaitable and capture its argument.
    writer.write.side_effect = write
    return writer
@pytest.fixture
def writer():
    # MultipartWriter under test, with a fixed ':' boundary (see BOUNDARY).
    return aiohttp.MultipartWriter(boundary=':')
class Response:
    """Minimal response stand-in: exposes only ``headers`` and ``content``."""

    def __init__(self, headers, content):
        # The multipart reader only ever inspects these two attributes.
        self.headers, self.content = headers, content
class Stream:
def __init__(self, content):
self.content = io.BytesIO(content)
async def read(self, size=None):
return self.content.read(size)
def at_eof(self):
return self.content.tell() == len(self.content.getbuffer())
async def readline(self):
return self.content.readline()
def unread_data(self, data):
self.content = io.BytesIO(data + self.content.read())
class StreamWithShortenRead(Stream):
    """Stream variant whose first sized read returns only half the request.

    Used to exercise short-read handling in the body-part reader.
    """

    def __init__(self, content):
        # Tracks whether the next sized read() is the first one.
        self._first = True
        super().__init__(content)

    async def read(self, size=None):
        """Delegate to Stream.read, halving *size* on the first sized call."""
        if self._first and size is not None:
            self._first = False
            size //= 2
        return await super().read(size)
class TestMultipartResponseWrapper:
    """Tests for MultipartResponseWrapper with fully mocked resp and stream."""
    def test_at_eof(self) -> None:
        # at_eof() must delegate to the wrapped response's content stream.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.at_eof()
        assert wrapper.resp.content.at_eof.called
    async def test_next(self) -> None:
        # While the stream is not at EOF, next() reads from it.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.stream.next = make_mocked_coro(b'')
        wrapper.stream.at_eof.return_value = False
        await wrapper.next()
        assert wrapper.stream.next.called
    async def test_release(self) -> None:
        # release() must release the wrapped response.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.resp.release = make_mocked_coro(None)
        await wrapper.release()
        assert wrapper.resp.release.called
    async def test_release_when_stream_at_eof(self) -> None:
        # Hitting EOF during next() must implicitly release the response too.
        wrapper = MultipartResponseWrapper(mock.Mock(),
                                           mock.Mock())
        wrapper.resp.release = make_mocked_coro(None)
        wrapper.stream.next = make_mocked_coro(b'')
        wrapper.stream.at_eof.return_value = True
        await wrapper.next()
        assert wrapper.stream.next.called
        assert wrapper.resp.release.called
class TestPartReader:
    async def test_next(self) -> None:
        # A single body part followed by the boundary is returned whole,
        # and the reader is at EOF afterwards.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
        result = await obj.next()
        assert b'Hello, world!' == result
        assert obj.at_eof()
async def test_next_next(self) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
result = await obj.next()
assert b'Hello, world!' == result
ass | ert obj.at_eof()
result = await obj.next()
assert result is None
async def test_read(self) -> None:
| obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
result = await obj.read()
assert b'Hello, world!' == result
assert obj.at_eof()
async def test_read_chunk_at_eof(self) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'--:'))
obj._at_eof = True
result = await obj.read_chunk()
assert b'' == result
    async def test_read_chunk_without_content_length(self) -> None:
        # Without a Content-Length header the part is consumed chunk by
        # chunk up to the boundary; the final read yields b''.
        obj = aiohttp.BodyPartReader(
            BOUNDARY, {}, Stream(b'Hello, world!\r\n--:'))
        c1 = await obj.read_chunk(8)
        c2 = await obj.read_chunk(8)
        c3 = await obj.read_chunk(8)
        assert c1 + c2 == b'Hello, world!'
        assert c3 == b''
async def test_read_incomplete_chunk(self) -> None:
loop = asyncio.get_event_loop()
stream = Stream(b'')
def prepare(data):
f = loop.create_future()
f.set_result(data)
return f
with mock.patch.object(stream, 'read', side_effect=[
prepare(b'Hello, '),
prepare(b'World'),
prepare(b'!\r\n--:'),
prepare(b'')
]):
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
c1 = await obj.read_chunk(8)
assert c1 == b'Hello, '
c2 = await obj.read_chunk(8)
assert c2 == b'World'
c3 = await obj.read_chunk(8)
assert c3 == b'!'
async def test_read_all_at_once(self) -> None:
stream = Stream(b'Hello, World!\r\n--:--\r\n')
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream)
result = await obj.read_chunk()
assert b'Hello, World!' == result
result = await obj.read_chunk()
assert b'' == result
assert obj.at_eof()
async def test_read_incomplete_body_chunked(self) -> None:
stream = Stream(b'Hello, World!\r\n-')
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream)
result = b''
with pytest.raises(AssertionError):
for _ in range(4):
result += await obj.read_chunk(7)
assert b'Hello, World!\r\n-' == result
async def test_read_boundary_with_incomplete_chunk(self) -> None:
loop = asyncio.get_event_loop()
stream = Stream(b'')
def prepare(data):
f = loop.create_future()
f.set_result(data)
return f
with mock.patch.object(stream, 'read', side_effect=[
prepare(b'Hello, World'),
prepare(b'!\r\n'),
prepare(b'--:'),
prepare(b'')
]):
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
c1 = await obj.read_chunk(12)
assert c1 == b'Hello, World'
c2 = await obj.read_chunk(8)
assert c2 == b'!'
c3 = await obj.read_chunk(8)
assert c3 == b''
async def test_multi_read_chunk(self) -> None:
stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream)
result = await obj.read_chunk(8)
assert b'Hello,' == result
result = await obj.read_chunk(8)
assert b'' == result
assert obj.at_eof()
async def test_read_chunk_properly_counts_read_bytes(self) -> None:
expected = b'.' * 10
size = len(expected)
obj = aiohttp.BodyPartReader(
BOUNDARY, {'CONTENT-LENGTH': size},
StreamWithShortenRead(expected + b'\r\n--:--'))
result = bytearray()
while True:
chunk = await obj.read_chunk()
if not chunk:
break
result.extend(chunk)
assert size == len(result)
assert b'.' * size == result
assert obj.at_eof()
async def test_read_does_not_read_boundary(self) -> None:
stream = Stream(b'Hello, world!\r\n--:')
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, stream)
result = await obj.read()
assert b'Hello, world!' == result
assert b'--:' == (await stream.read())
async def test_multiread(self) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY, {}, Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--'))
result = await obj.read()
assert b'Hello,' == result
result = await obj.read()
assert b'' == result
assert obj.at_eof()
async def test_read_multil |
flexiant/xen | tools/python/xen/util/xsm/acm/acm.py | Python | gpl-2.0 | 56,344 | 0.00378 | #===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 International Business Machines Corp.
# Author: Reiner Sailer
# Author: Bryan D. Payne <bdpayne@us.ibm.com>
# Author: Stefan Berger <stefanb@us.ibm.com>
#============================================================================
import commands
import logging
import os, string, re
import threading
import struct
import stat
import base64
from xen.xend import sxp
from xen.xend import XendConstants
from xen.xend import XendOptions
from xen.xend.XendLogging import log
from xen.xend.XendError import VmError
from xen.util import dictio, xsconstants, auxbin, xpopen
from xen.xend.XendConstants import *
#global directories and tools for security management
install_policy_dir_prefix = auxbin.xen_configdir() + "/acm-security/policies"
security_dir_prefix = XendOptions.instance().get_xend_security_path()
policy_dir_prefix = security_dir_prefix + "/policies"
res_label_filename = policy_dir_prefix + "/resource_labels"
boot_filename = | "/boot/grub/menu.lst"
altboot_filename = "/boot/grub/grub.conf"
xensec_tool = "/usr/sbin/xensec_tool"
#global patterns for map file
#police_reference_tagname = "POLICYREFERENCENAME"
primary_entry_re = re.compile("\s*PRIMARY\s+.*", re.IGNORECASE)
secondary_entry_re = re.compile("\s*SECONDARY\s+.*", re.IGNORECASE)
label_template_re = re.compile(".*security_label_template.xml", re.IGNORECASE)
mapping_filename_re = re.compile | (".*\.map", re.IGNORECASE)
policy_reference_entry_re = re.compile("\s*POLICYREFERENCENAME\s+.*", re.IGNORECASE)
vm_label_re = re.compile("\s*LABEL->SSID\s.+[VM|ANY]\s+.*", re.IGNORECASE)
res_label_re = re.compile("\s*LABEL->SSID\s+RES\s+.*", re.IGNORECASE)
all_label_re = re.compile("\s*LABEL->SSID\s+.*", re.IGNORECASE)
access_control_re = re.compile("\s*access_control\s*=", re.IGNORECASE)
#global patterns for boot configuration file
xen_title_re = re.compile("\s*title\s+XEN", re.IGNORECASE)
any_title_re = re.compile("\s*title\s", re.IGNORECASE)
xen_kernel_re = re.compile("\s*kernel.*xen.*\.gz", re.IGNORECASE)
kernel_ver_re = re.compile("\s*module.*vmlinuz", re.IGNORECASE)
any_module_re = re.compile("\s*module\s", re.IGNORECASE)
empty_line_re = re.compile("^\s*$")
# NOTE(review): "[chwall|ste|chwall_ste]" is a character CLASS, not an
# alternation -- it matches any single character out of that set ('c', 'h',
# '|', ...).  If the intent was to match the policy names, it should read
# "(chwall|ste|chwall_ste)".  Left unchanged here to preserve behaviour.
binary_name_re = re.compile(".*[chwall|ste|chwall_ste].*\.bin", re.IGNORECASE)
policy_name_re = re.compile(".*[chwall|ste|chwall_ste].*", re.IGNORECASE)
#decision hooks known to the hypervisor
ACMHOOK_sharing = 1
ACMHOOK_authorization = 2
ACMHOOK_conflictset = 3
#other global variables
NULL_SSIDREF = 0
#general Rlock for map files; only one lock for all mapfiles
__mapfile_lock = threading.RLock()
__resfile_lock = threading.RLock()
log = logging.getLogger("xend.util.security")
#Functions exported through XML-RPC
xmlrpc_exports = [
'on',
'set_resource_label',
'get_resource_label',
'list_labels',
'get_labeled_resources',
'set_policy',
'reset_policy',
'get_policy',
'activate_policy',
'rm_bootpolicy',
'get_xstype',
'get_domain_label',
'set_domain_label'
]
# Our own exception definition. It is masked (pass) if raised and
# whoever raises this exception must provide error information.
class XSMError(Exception):
    """ACM security error carrying a caller-supplied description.

    Whoever raises this exception must provide the error information
    in ``value``.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Render the stored error information, repr-quoted.
        return repr(self.value)
def err(msg):
    """Abort with an ACM error: raise XSMError carrying *msg*."""
    raise XSMError(msg)
active_policy = None
def mapfile_lock():
    # Acquire the single shared RLock guarding all policy .map files.
    __mapfile_lock.acquire()
def mapfile_unlock():
    # Release the map-file RLock acquired via mapfile_lock().
    __mapfile_lock.release()
def resfile_lock():
    # Acquire the RLock guarding the resource-label file.
    __resfile_lock.acquire()
def resfile_unlock():
    # Release the resource-label RLock acquired via resfile_lock().
    __resfile_lock.release()
def refresh_security_policy():
    """Refresh the module-global ``active_policy`` string.

    Sets 'INACCESSIBLE' when the hypervisor privcmd interface cannot be
    opened read/write, 'INACTIVE' otherwise.
    """
    global active_policy
    active_policy = 'INACCESSIBLE'
    # Read/write access to privcmd means we can at least talk to Xen.
    if os.access("/proc/xen/privcmd", os.R_OK|os.W_OK):
        active_policy = "INACTIVE"
def get_active_policy_name():
    # Refresh first so callers always see the current hypervisor state.
    refresh_security_policy()
    return active_policy
# now set active_policy
refresh_security_policy()
def on():
    """
    returns none if security policy is off (not compiled),
    any string otherwise, use it: if not security.on() ...
    """
    # NOTE(review): despite the docstring, the "off" value actually returned
    # is the integer 0, not None -- callers should rely on truthiness only.
    if get_active_policy_name() not in ['INACTIVE', 'NULL', '']:
        return xsconstants.XS_POLICY_ACM
    return 0
def calc_dom_ssidref_from_info(info):
    """
    Calculate a domain's ssidref from the security_label in its
    info.

    This function is called before the domain is started and
    makes sure that:
    - the type of the policy is the same as indicated in the label
    - the name of the policy is the same as indicated in the label
    - calculates an up-to-date ssidref for the domain

    The latter is necessary since the domain's ssidref could have
    changed due to changes to the policy.

    Raises VmError on malformed labels, policy-type or policy-name
    mismatch, or unsupported ``info`` types.
    """
    import xen.xend.XendConfig
    if isinstance(info, xen.xend.XendConfig.XendConfig):
        if info.has_key('security_label'):
            seclab = info['security_label']
            # Label format is "<policy type>:<policy name>:<vm label>".
            tmp = seclab.split(":")
            if len(tmp) != 3:
                raise VmError("VM label '%s' in wrong format." % seclab)
            typ, policyname, vmlabel = seclab.split(":")
            if typ != xsconstants.ACM_POLICY_ID:
                raise VmError("Policy type '%s' must be changed." % typ)
            if get_active_policy_name() != policyname:
                raise VmError("Active policy '%s' different than "
                              "what in VM's label ('%s')." %
                              (get_active_policy_name(), policyname))
            # Recompute the ssidref against the currently active policy.
            ssidref = label2ssidref(vmlabel, policyname, "dom")
            return ssidref
        else:
            # No label configured: the null ssidref.
            return 0x0
    raise VmError("security.calc_dom_ssidref_from_info: info of type '%s'"
                  "not supported." % type(info))
def getmapfile(policyname):
"""
in: if policyname is None then the currently
active hypervisor policy is used
out: 1. primary policy, 2. secondary policy,
3. open file descriptor for mapping file, and
4. True if policy file is available, False otherwise
"""
if not policyname:
policyname = get_active_policy_name()
map_file_ok = False
primary = None
secondary = None
#strip last part of policy as file name part
policy_dir_list = string.split(policyname, ".")
policy_file = policy_dir_list.pop()
if len(policy_dir_list) > 0:
policy_dir = string.join(policy_dir_list, "/") + "/"
else:
policy_dir = ""
map_filename = policy_dir_prefix + "/" + policy_dir + policy_file + ".map"
# check if it is there, if not check if policy file is there
if not os.path.isfile(map_filename):
policy_filename = policy_dir_prefix + "/" + policy_dir + policy_file + "-security_policy.xml"
if not os.path.isfile(policy_filename):
err("Policy file \'" + policy_filename + "\' not found.")
else:
err("Mapping file \'" + map_filename + "\' not found.")
f = open(map_filename)
for line in f:
if policy_reference_entry_re.match(line):
l = line.split()
if (len(l) == 2) and (l[1] == policyname):
map_file_ok = True
elif primary_entry_re.match(line):
l = line.split()
if len(l) == 2:
primary = l[1]
elif secondary_entry_re.match(line):
l = line.split()
if len(l) == 2:
secondary = l[1]
|
def extractInacloudspaceWordpressCom(item):
    """Parser for 'inacloudspace.wordpress.com' release-feed items.

    Returns a release message when a known series tag matches, None for
    items with no volume/chapter info (or previews), and False when no
    tag matches.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if "preview" in title.lower() or not (chp or vol):
        return None
    # (feed tag, canonical series name, translation type)
    tagmap = (
        ('Drunken Exquisiteness', 'Drunken Exquisiteness', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                           postfix=postfix, tl_type=tl_type)
    return False
DMPwerkzeug/DMPwerkzeug | rdmo/core/tests/test_mail.py | Python | apache-2.0 | 2,649 | 0.000755 | from django.conf import settings
from django.core import mail
from rdmo.core.mail import send_mail
def test_send_mail(db):
    # Plain send: subject gets the "[site] " prefix, the configured default
    # sender is used, and cc/bcc/attachments stay empty.
    send_mail('Subject', 'Message', to=['user@example.com'])
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == '[example.com] Subject'
    assert mail.outbox[0].body == 'Message'
    assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
    assert mail.outbox[0].to == ['user@example.com']
    assert mail.outbox[0].cc == []
    assert mail.outbox[0].bcc == []
    assert mail.outbox[0].attachments == []
def test_send_mail_cc(db):
    # The cc list is passed through; to/bcc/attachments are unaffected.
    send_mail('Subject', 'Message', to=['user@example.com'], cc=['user2@example.com'])
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == '[example.com] Subject'
    assert mail.outbox[0].body == 'Message'
    assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
    assert mail.outbox[0].to == ['user@example.com']
    assert mail.outbox[0].cc == ['user2@example.com']
    assert mail.outbox[0].bcc == []
    assert mail.outbox[0].attachments == []
def test_send_mail_bcc(db):
    # A bcc-only send: the visible recipient lists stay empty.
    send_mail('Subject', 'Message', bcc=['user2@example.com'])
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject == '[example.com] Subject'
    assert mail.outbox[0].body == 'Message'
    assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
    assert mail.outbox[0].to == []
    assert mail.outbox[0].cc == []
    assert mail.outbox[0].bcc == ['user2@example.com']
    assert mail.outbox[0].attachments == []
def test_send_mail_from_email(db):
    # An explicit from_email must override the configured default sender.
    send_mail('Subject', 'Message', from_email='user@example.com',
              to=['user2@example.com'])
    assert len(mail.outbox) == 1
    message = mail.outbox[0]
    assert message.subject == '[example.com] Subject'
    assert message.body == 'Message'
    assert message.from_email == 'user@example.com'
    assert message.to == ['user2@example.com']
    assert message.cc == []
    assert message.bcc == []
    assert message.attachments == []
def test_send_mail_from_attachments(db):
    # Attachments are passed through unchanged as (name, payload, mimetype).
    attachment = ('Attachment', b'attachment', 'plain/text')
    send_mail('Subject', 'Message', to=['user2@example.com'],
              attachments=[attachment])
    assert len(mail.outbox) == 1
    message = mail.outbox[0]
    assert message.subject == '[example.com] Subject'
    assert message.body == 'Message'
    assert message.from_email == settings.DEFAULT_FROM_EMAIL
    assert message.to == ['user2@example.com']
    assert message.cc == []
    assert message.bcc == []
    assert message.attachments == [attachment]
|
#coding:utf-8
LIST_NUM = [(1,4),(5,1),(2,3),(6,9),(7,1)]
'''
Use max() to obtain each tuple's largest element, then sort the list by it.
The original hand-written O(n^2) bubble sort is replaced by the built-in
stable sort, which yields exactly the same ordering in O(n log n), and the
Python-2-only "print x" statement is parenthesized so it works on both
Python 2 and Python 3 with identical output.
'''
LIST_NUM = sorted(LIST_NUM, key=max)
print(LIST_NUM)
karolmajta/officetune | server/src/officetune/officetune/admin.py | Python | mit | 175 | 0.005714 | '''
Created on 13-09-2013
@author: kamil
'''
from django.contrib import | admin
from officetune.models import Song, Vote
# Expose the Vote and Song models through the Django admin interface.
admin.site.register(Vote)
admin.site.register(Song)
|
windskyer/k_cinder | paxes_cinder/scohack/scosample/imgupload_ostest.py | Python | apache-2.0 | 11,651 | 0.000343 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# =================================================================
# =================================================================
import httplib
import json
import logging
import sys
import requests
import os
import hashlib
def enum(*sequential, **named):
    """Build a simple enumeration type.

    Positional names are numbered 0..N-1 in order; keyword arguments
    supply members with explicit values.
    """
    members = {name: index for index, name in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)

XFER_TYPE = enum('URL', 'FILE')  # transfer source: remote URL or local file
def runtest():
"""
Test file upload
"""
# setup logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
debug_stream = logging.StreamHandler()
logger.addHandler(debug_stream)
##############################################################
# common
paxes_ip = "9.5.126.255" # Sadek
# paxes_ip = "9.5.127.146" # Harish
# keystone --insecure token-get
tenant_id = "36d6476ee75945a0bb47e6b08c0ae050" # Sadek
# tenant_id = "affc458e94c843119d8f0f442408faad" # Harish
x_auth_token = "b649417985b74582a969bc853871a810" # Sadek
# x_auth_token = "74447c39d56c4598970a2dc58a652d7e" # Harish
hmc_id = '2fa9da84-12d1-3256-af87-1b1b1c0134a8' # Sadek
# hmc_id="07247992-444c-3e08-9820-3b5c426174ca" # Harish
# to create a volume:
# paxes-devcli cinder volume-create json scovolume42 42
# use "id", eg "id": "b769d931-0346-4265-a7a4-5bfe9ae22e4f",
x_imgupload_cinder_volume_id = \
"78ec5b63-893a-4274-ac33-6ef3257bc9d2" # billsco001
# "83a167d6-3d9e-4f14-bdcc-5f87e2361cee" # Bill 1
# x_imgupload_cinder_volume_id = \
# "5b4d33c0-b2fb-414c-bfbf-5023add3ad99" # Sadek 2
# x_imgupload_cinder_volume_id = \
# "81107edc-06c9-4c8b-9706-214809ea97d7" # Harish
##############################################################
# xfer specifics
# vios = "2B74BDF1-7CC8-439E-A7D7-15C2B5864DA8" # Sadek # N23
vios = "1BC0CB5E-15BF-492A-8D06-25B69833B54E" # Sadek # N24
# vios = None # Harish
####
# STANDARD TESTS
# set xfer type
# xfer_type = XFER_TYPE.FILE
xfer_type = XFER_TYPE.URL
# set xfer test
xfer_test = "2MB"
sha256 = {}
if xfer_type == XFER_TYPE.URL:
url = "http://9.47.161.56:8080/SHA256SUM"
r = requests.get(url)
for line in r.content.strip().split("\n"):
print line
ws = line.split()
sha256[ws[0]] = ws[1]
elif xfer_type == XFER_TYPE.FILE:
with open("/tmp/SHA256SUM", "r") as f:
content = f.readlines()
for line in content:
ws = line.split()
sha256[ws[0]] = ws[1]
else:
raise Exception("Programming error")
image_file = {}
image_size = {} # for image_file only
copy_from = {}
# 2MB
image_file["2MB"] = "/tmp/testfile2MB.txt"
image_size["2MB"] = os.path.getsize(image_file["2MB"])
copy_from["2MB"] = 'http://9.47.161.56:8080/testfile2MB.txt'
# checksum = \
# 'e025e4f9d3ccf1a9b25202304d4a3d4822cd6e76843a51b803623f740bc03e66'
# 1GB
image_file["1GB"] = "/tmp/testfile1GB.txt"
image_size["1GB"] = os.path.getsize(image_file["1GB"])
copy_from["1GB"] = 'http://9.47.161.56:8080/testfile1GB.txt'
# checksum = \
# '6e86684bdba30e8f1997a652dcb2ba5a199880c44c2c9110b325cd4ca5f48152'
# 2GB
image_file["2GB"] = "/tmp/testfile2GB.txt"
image_size["2GB"] = os.path.getsize(image_file["2GB"])
copy_from["2GB"] = 'http://9.47.161.56:8080/testfile2GB.txt'
# checksum = \
# '067002c822d7b7f0a826c6bbd53d30b70b13048f25f10be2e9aacc8056bbc4cc'
# PAVEL
image_file["PAVEL"] = "/tmp/testfilePAVEL.txt"
image_size["PAVEL"] = os.path.getsize(image_file["PAVEL"])
copy_from["PAVEL"] = 'http://9.47.161.56:8080/testfilePAVEL.txt'
# checksum = \
# 'b8bbde7ba106d0f6e6cd1c6e033bfa4e6e11d5b4b944aa3e6d08b5a7d3a4252e'
# 3GB
image_file["3GB"] = "/tmp/testfile3GB.txt"
image_size["3GB"] = os.path.getsize(image_file["3GB"])
copy_from["3GB"] = 'http://9.47.161.56:8080/testfile3GB.txt'
# checksum = \
# '18c0 | f13594702add11573ad72ed9baa42facad5ba5fe9a7194465a246a31e000'
####
| # Images
# copy_from = 'http://9.47.161.56:8080/cirros-0.3.0-x86_64-disk.img'
# checksum = None
# copy_from = 'http://9.47.161.56:8080/IBM_SCE_3.2_PPC64_App-disk3.raw'
# checksum = None
# copy_from = ('http://pokgsa.ibm.com/home/g/j/gjromano/web/public/'
# 'ZippedImagesAndOVFs242/RHEL62/rhel6_2_ds6_21.gz') # Harish
# checksum = None
# image_size = None
# if xfer_type == XFER_TYPE.FILE:
# image_size = os.path.getsize(image_file)
# if checksum:
# assert checksum == _sha256sum(image_file)
##############################################################
# test cinder w/ simple get
print "test cinder w/ simple GET"
method = "GET"
simple_apiurl = 'http://%s:9000/v1/%s' % (paxes_ip, tenant_id, )
# simple_apiurl = ('https://%s/paxes/openstack/volume/v1/%s/imgupload' %
# (paxes_ip, tenant_id, ))
headers = {'X-Auth-Project-Id': 'demo',
'User-Agent': 'python-cinderclient',
'Accept': 'application/json',
'X-Auth-Token': x_auth_token}
url = "%s/types" % (simple_apiurl,)
resp = requests.request(method,
url,
headers=headers)
if resp.text:
try:
body = json.loads(resp.text)
except ValueError:
pass
body = None
else:
body = None
if resp.status_code >= 400:
raise Exception("status_code: >%s<" % (resp.status_code,))
print body
##############################################################
# headers
headers = {'X-Auth-Token': x_auth_token,
'Content-Type': 'application/octet-stream',
'User-Agent': 'python-cinderclient',
'x-imgupload-cinder-volume-id': x_imgupload_cinder_volume_id,
'x-imgupload-hmc-id': hmc_id}
# add vios header
if vios is not None:
print "VIOS specified: >%s<" % (vios,)
headers['x-imgupload-vios-id'] = vios
# optional
if xfer_type == XFER_TYPE.FILE:
headers['x-imgupload-file-size'] = image_size[xfer_test]
headers['Transfer-Encoding'] = 'chunked'
if xfer_type == XFER_TYPE.FILE:
k = image_file[xfer_test].rsplit("/")[-1]
elif xfer_type == XFER_TYPE.URL:
k = copy_from[xfer_test].rsplit("/")[-1]
else:
raise Exception("Programming error")
headers['x-imgupload-file-checksum'] = sha256[k]
##############################################################
# request
url = 'http://%s:9000/v1/%s/imgupload' % (paxes_ip, tenant_id, )
if xfer_type == XFER_TYPE.URL:
# url specific headers
headers['x-imgupload-copy-from'] = copy_from[xfer_test]
print "Upload w/ URL"
resp = requests.request("POST",
url,
headers=headers)
if resp.text:
try:
body = json.loads(resp.text)
except ValueError:
pass
body = None
else:
body = None
print resp
if resp.status_code >= 400:
print resp.json()
raise Exception("status._code: >%s<, reason: >%s<" %
(resp.status_code, resp.reason))
print body
elif xfer_type == XFER_TYPE.FILE:
# local file specific headers
print "Upload file"
cinder_conn = httplib.HTTPConnection(paxes_ip, '9000',
{'timeout': 600.0})
cinder_conn.putrequest("POST", url)
for header, value in headers.items():
cinder_conn.putheader(header, value)
cinder_conn.endheaders()
CHUNKSIZE = 1024 * 64 # 64kB
f = open(image_file[xfer_test])
chunk = f.read(CHUNKSIZE)
# Chunk it, baby...
while chunk:
cinder_co |
jeffreywolf/pyday-2015 | factorial.py | Python | mit | 447 | 0.071588 | #! /usr/bin/env python
import argparse
def getArgs():
    """Parse command-line arguments; -n (the factorial operand) is required."""
    arg_parser = argparse.ArgumentParser(
        description = """Factorial calculator
        """
    )
    arg_parser.add_argument("-n", type=int, required=True,
                            help="Enter n to set N!")
    return arg_parser.parse_args()
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative implementation: avoids the recursion-depth limit of the
    original recursive version and rejects negative input explicitly
    (previously a negative n recursed without a base case).

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
def main():
    # Parse -n from the command line, compute n!, and print the result.
    args = getArgs()
    result = factorial(args.n)
    print result
if __name__ == "__main__":
    main()
alejandro-mc/trees | randNNIWalks.py | Python | mit | 4,063 | 0.022151 | #randNNIWalks.py
# | writes random SPR walks to files
#calls GTP on each NNI random walk file to get
#the ditances between each tree and the first tree of the sequence
#the results are written to csv files with lines delimited by \t
import tree_utils as tu
import w_tree_utils as wtu
import os
import sys
import numpy as np
import random
from math import sqrt
__pid__ = 0
__prefix__ = "NNI_"
#daf: distance algorithm file
def randNNIwalk(daf,size,steps,runs,seed,weighted = False):
    """Generate `runs` random NNI walks of `steps` steps on random binary
    trees with `size` leaves, score each walk with the external distance
    tool `daf` (a .jar, e.g. gtp.jar, invoked via os.system), and append
    the resulting distances to one output file per (size, steps, runs,
    seed) combination.  Per run, the sums ||T1|| + ||Tk|| of tree norms
    are written to a companion '.norms' file.

    daf      -- path of the distance-algorithm jar
    weighted -- use the weighted tree utilities (exponential branch lengths)
    """
    global __pid__
    global __prefix__
    #set the seed
    random.seed(seed)
    np.random.seed(seed)
    #select tree utils module
    if weighted:
        tum = wtu
        genRandBinTree = lambda leaves: wtu.genRandBinTree(leaves,np.random.exponential)
    else:
        tum = tu
        genRandBinTree = lambda leaves: tu.genRandBinTree(leaves)
    # NOTE(review): treeNorm is monkey-patched to a constant 0.25 here, so
    # every recorded norm sum is the same value -- confirm this is intended.
    tum.treeNorm = lambda x: 0.25
    out_file_name = __prefix__ + str(size) + "_" + str(steps) + "_" +\
                    str(runs) + "_" + str(seed)
    normsfile_name = out_file_name + '.norms'
    #create a file for each spr sequence
    for k in range(runs):
        rand_tree = genRandBinTree(list(range(size)))
        total_nodes = size-1
        #write current sequence to file
        infile_prefix = "tmpnniseq" + str(__pid__)
        infile = infile_prefix + str(k)
        # NOTE(review): opening the .norms file with 'w' inside the runs loop
        # truncates it on every iteration, so only the last run's norms
        # survive -- 'a' (or opening once outside the loop) looks intended.
        with open(infile,'w') as treefile, open(normsfile_name,'w') as nrmfile:
            treefile.write(tum.toNewickTree(rand_tree) + "\n")
            current_tree = rand_tree
            #write tree norms-----
            #save norm of first tree
            norm1 = sqrt(tum.treeNorm(rand_tree))
            walknorms = ''
            for i in range(steps):
                current_tree = tum.randNNI(current_tree,total_nodes)
                treefile.write(tum.toNewickTree(current_tree) + "\n")
                #write ||T1|| + ||T2||
                walknorms += str(norm1 + sqrt(tum.treeNorm(current_tree))) + ','
            #write norms sequence (strip the trailing comma)
            nrmfile.write(walknorms[0:-1] + '\n')
        #assumes GTP file is in current working directory
        outfile = "tempseq" + str(__pid__) + ".csv"
        infile_prefix = "tmpnniseq" + str(__pid__)
        infile = infile_prefix + str(k)
        os.system("java -jar " + daf + " -r 0 -o " + outfile + " " + infile)
        #append output to final sequence file
        os.system("cat " + outfile + " | ./toLines.py >> " + out_file_name)
        #cleanup
        os.system("rm " + outfile)
        os.system("rm " + infile_prefix + "*")
if __name__=='__main__':
    # Usage: [-w] <distance .jar> <size|start:end> <steps> <runs> <seed|start:end>
    if len(sys.argv)<6:
        print ("Too few arguments!!")
        print ("Usage: [-w] <distance algorithm file .jar> <size or size range> <no. NNI steps> <no. runs> <seed or seed range>")
        sys.exit(-1)
    WEIGHTED = False
    # NOTE(review): with 7 args, argv[1] is popped unconditionally, even when
    # it is not '-w' -- a stray extra argument silently shifts the others.
    if len(sys.argv) == 7:
        WEIGHTED = sys.argv.pop(1) == '-w'
    dist_algo_file = sys.argv[1]
    # Output prefix encodes tool (NNI for gtp.jar, RNI otherwise) and
    # weighted (W) vs unweighted (U) mode.
    if dist_algo_file != "gtp.jar":
        __prefix__ = "RNI_"
    if WEIGHTED:
        __prefix__ = 'W' + __prefix__
    else:
        __prefix__ = 'U' + __prefix__
    #take a single size or a range of sizes ("start:end", end exclusive)
    if ":" in sys.argv[2]:
        size_start, size_end = map(lambda x: int(x),sys.argv[2].split(':'))
    else:
        size_start = int(sys.argv[2])
        size_end = size_start + 1
    size_range = range(size_start,size_end)
    steps = int(sys.argv[3])
    runs = int(sys.argv[4])
    #take a single seed or a range of seeds ("start:end", end exclusive)
    if ":" in sys.argv[5]:
        seed_start,seed_end = map(lambda x: int(x),sys.argv[5].split(':'))
    else:
        seed_start = int(sys.argv[5])
        seed_end = seed_start + 1
    seed_range = range(seed_start,seed_end)
    #set pid property before calling randSPRWalk
    __pid__ = os.getpid()
    # One full walk experiment per (size, seed) combination.
    for size in size_range:
        for seed in seed_range:
            randNNIwalk(dist_algo_file,size,steps,runs,seed,WEIGHTED)
bchiroma/DreamProject_2 | dream/KnowledgeExtraction/KEtool_examples/TwoServers/TwoServers_example.py | Python | gpl-3.0 | 10,371 | 0.01832 | '''
Created on 19 Feb 2014
@author: Panos
'''
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
from ImportExceldata import Import_Excel
from DistributionFitting import DistFittest
from xml.etree import ElementTree as et
from ExcelOutput import Output
from ReplaceMissingValues import HandleMissingValues
import xlrd
import json
#================================================ This script is a simple example of the Knowledge extraction tool ===============================================================#
#The following is the Main script, that calls two Python objects in order to conduct the three main components of the Knowledge extraction tool
#In the following example the operation times of the topology's two machines are given in an Excel document.
#Import_Excel object imports data from the Excel document to the tool and DistFittest object fits the data to a statistical distribution using Kolmogorov-Smirnov test
# Import the operation-time samples from the Excel workbook.
workbook = xlrd.open_workbook('inputsTwoServers.xls')
worksheets = workbook.sheet_names()
worksheet_OperationTime = worksheets[0]  # first sheet holds the samples

X = Import_Excel()
OperationTimes = X.Input_data(worksheet_OperationTime, workbook)
# One list of operation times per machine; missing columns give [].
Machine1_OpearationTimes = OperationTimes.get('Machine1', [])
Machine2_OpearationTimes = OperationTimes.get('Machine2', [])

# Drop missing values before fitting.
A = HandleMissingValues()
Machine1_OpearationTimes = A.DeleteMissingValue(Machine1_OpearationTimes)
Machine2_OpearationTimes = A.DeleteMissingValue(Machine2_OpearationTimes)

# Fit a statistical distribution to each machine's samples using the
# Kolmogorov-Smirnov test; each result is a dict of parameter name -> value
# plus a 'distributionType' entry.
Dict = {}
B = DistFittest()
Dict['M1'] = B.ks_test(Machine1_OpearationTimes)
Dict['M2'] = B.ks_test(Machine2_OpearationTimes)
M1 = Dict.get('M1')  # aliases of the fitted-distribution dicts (same objects)
M2 = Dict.get('M2')

# ========== Output preparation: update the CMSD information model ==========
datafile = 'CMSD_TwoServers.xml'  # hand-written CMSD model to update
tree = et.parse(datafile)

M1Parameters = []
M1ParameterValue = []
for index in list(Dict['M1'].keys()):
    # BUG FIX: the original used "index is not 'distributionType'", which
    # compares string *identity* and only works by CPython interning luck.
    if index != 'distributionType':
        M1Parameters.append(index)
        M1ParameterValue.append(Dict['M1'][index])

# Normal fits carry 'min'/'max' bounds that must not be written out.
# BUG FIX: the original used 'elif' for M2, so when *both* machines were
# Normal, M2 silently kept its bounds; two independent 'if's are intended.
# NOTE(review): assumes ks_test always returns 'min'/'max' for Normal fits;
# a KeyError here would mean that assumption is wrong -- confirm.
if Dict['M1']['distributionType'] == 'Normal':
    del M1['min']
    del M1['max']
if Dict['M2']['distributionType'] == 'Normal':
    del M2['min']
    del M2['max']

M2Parameters = []
M2ParameterValue = []
for index in list(Dict['M2'].keys()):
    if index != 'distributionType':
        M2Parameters.append(index)
        M2ParameterValue.append(Dict['M2'][index])
root = tree.getroot()
# Every <Process> node defined in the CMSD document.
# Renamed from 'process' -- the original shadowed the loop variable.
processes = tree.findall('./DataSection/ProcessPlan/Process')
for process in processes:
    process_identifier = process.find('Identifier').text
    if process_identifier == 'A020':
        # Process run on the first machine: write M1's fitted distribution.
        # NOTE(review): 'OpeationTime' looks like a typo of 'OperationTime';
        # kept as-is because it must match the attribute name in the XML.
        OperationTime = process.get('OpeationTime')
        Distribution = process.get('./OperationTime/Distribution')
        Name = process.find('./OperationTime/Distribution/Name')
        Name.text = Dict['M1']['distributionType']
        DistributionParameterA = process.get('./OperationTime/Distribution/DistributionParameterA')
        Name = process.find('./OperationTime/Distribution/DistributionParameterA/Name')
        Name.text = str(M1Parameters[0])
        Value = process.find('./OperationTime/Distribution/DistributionParameterA/Value')
        Value.text = str(M1ParameterValue[0])
        # FIX: the path strings below were garbled by stray ' | ' separators
        # in the source ('DistributionP | arameterB', 'OperationTime | /...').
        DistributionParameterB = process.get('./OperationTime/Distribution/DistributionParameterB')
        Name = process.find('./OperationTime/Distribution/DistributionParameterB/Name')
        Name.text = str(M1Parameters[1])
        Value = process.find('./OperationTime/Distribution/DistributionParameterB/Value')
        Value.text = str(M1ParameterValue[1])
    elif process_identifier == 'A040':
        # Process run on the second machine: write M2's fitted distribution.
        OperationTime = process.get('OpeationTime')
        Distribution = process.get('./OperationTime/Distribution')
        Name = process.find('./OperationTime/Distribution/Name')
        Name.text = Dict['M2']['distributionType']
        DistributionParameterA = process.get('./OperationTime/Distribution/DistributionParameterA')
        Name = process.find('./OperationTime/Distribution/DistributionParameterA/Name')
        Name.text = str(M2Parameters[0])
        Value = process.find('./OperationTime/Distribution/DistributionParameterA/Value')
        Value.text = str(M2ParameterValue[0])
        DistributionParameterB = process.get('./OperationTime/Distribution/DistributionParameterB')
        Name = process.find('./OperationTime/Distribution/DistributionParameterB/Name')
        Name.text = str(M2Parameters[1])
        Value = process.find('./OperationTime/Distribution/DistributionParameterB/Value')
        Value.text = str(M2ParameterValue[1])
    else:
        continue
tree.write('CMSD_TwoServers_Output.xml',encoding="utf8" |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/khmer-1.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/count-overlap.py | Python | apache-2.0 | 2,765 | 0.000362 | #!/usr/bin/python2.7
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2012-2014. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
# pylint: disable=missing-docstring,invalid-name
"""
Count the overlap k-mers, which are the k-mers appearing in two sequence
datasets.
usage: count-overlap_cpp.py [-h] [-q] [--ksize KSIZE] [--n_tables N_HASHES]
[--tablesize HASHSIZE]
1st_dataset(htfile generated by load-graph.py) 2nd_dataset(fastafile)
result
Use '-h' for parameter help.
"""
import khmer
import textwrap
from khmer.file import check_file_status, check_space
from khmer.khmer_args import (build_hashbits_args, report_on_config, info)
DEFAULT_K = 32
DEFAULT_N_HT = 4
DEFAULT_HASHSIZE = 1e6
def get_parser():
    """Build the command-line parser for the overlap-counting script."""
    epilog = """
    An additional report will be written to ${output_report_filename}.curve
    containing the increase of overlap k-mers as the number of sequences in the
    second database increases.
    """
    overlap_parser = build_hashbits_args(
        descr='Count the overlap k-mers which are the k-mers appearing in two '
        'sequence datasets.', epilog=textwrap.dedent(epilog))
    # Positional arguments, registered in the order the script expects them.
    positionals = (
        ('ptfile', 'input_presence_table_filename',
         'input k-mer presence table filename'),
        ('fafile', 'input_sequence_filename',
         'input sequence filename'),
        ('report_filename', 'output_report_filename',
         'output report filename'),
    )
    for dest, metavar, help_text in positionals:
        overlap_parser.add_argument(dest, metavar=metavar, help=help_text)
    return overlap_parser
def main():
info('count-overlap.py', ['counting'])
args = get_parser().parse_args()
report_on_config(args, hashtype='hashbits')
for infile in [args.ptfile, args.fafile]:
check_file_status(infile)
check_space([args.ptfile, args.fafile])
print 'loading k-mer presence table from', args.ptfile
ht1 = khmer.load_hashb | its(args.ptfile)
kmer_size = ht1.ksize()
output = open(args.report_filename, 'w')
f_curve_obj = open(args.report_filename + '.curve', 'w')
ht2 = khmer.new_hashbits(kmer_size, args.min_tablesize, args.n_tables)
(n_unique, n_overlap, list_curve) = ht2.count_overlap(args.fafile, ht | 1)
printout1 = """\
dataset1(pt file): %s
dataset2: %s
# of unique k-mers in dataset2: %d
# of overlap unique k-mers: %d
""" % (args.ptfile, args.fafile, n_unique, n_overlap)
output.write(printout1)
for i in range(100):
to_print = str(list_curve[100 + i]) + ' ' + str(list_curve[i]) + '\n'
f_curve_obj.write(to_print)
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
SanketDG/dpaste | dpaste/south_migrations/0006_auto__add_unique_snippet_secret_id.py | Python | mit | 2,051 | 0.006826 | # -*- coding: utf-8 -*-
import datetime
from django.db import models
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South migration: add a unique constraint on ``Snippet.secret_id``."""

    def forwards(self, orm):
        # Adding unique constraint on 'Snippet', fields ['secret_id']
        db.create_unique('dpaste_snippet', ['secret_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'Snippet', fields ['secret_id']
        db.delete_unique('dpaste_snippet', ['secret_id'])

    # Frozen ORM state used by South; mirrors dpaste.models at migration time.
    # FIX: two entries were garbled by stray ' | ' separators
    # ('PositiveSmallI | ntegerField' and default "'python | '").
    models = {
        u'dpaste.snippet': {
            'Meta': {'ordering': "('-published',)", 'object_name': 'Snippet'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'expire_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lexer': ('django.db.models.fields.CharField', [], {'default': "'python'", 'max_length': '30'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['dpaste.Snippet']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'secret_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['dpaste']
|
Rondineli/pybr11_tutorial | pybr11_tutorial/pybr11_tutorial/settings.py | Python | apache-2.0 | 2,688 | 0 | """
Django settings for pybr11_tutorial project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# (FIX: a stray '| ' separator broke this assignment in the source.)
SECRET_KEY = '4b5prz%55i#ay!qf=7w=61p^am-4a_jknjf8&jzu1d6ib@-*d^'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'pybr11_tutorial',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'pybr11_tutorial.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'pybr11_tutorial.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
joakim-hove/ert | test-data/local/mini_ert/jobs/perlin_fail.py | Python | gpl-3.0 | 461 | 0 | #!/usr/bin/env python
import os
import sys  # FIX: garbled in the source as 'import | sys'

# Optional realization number, passed by ERT as the first CLI argument.
iens = None
if len(sys.argv) > 1:
    iens = int(sys.argv[1])

numbers = [1, 2, 3, 4, 5, 6]
if iens in numbers:
    # Deterministically pick one of report steps 1-3 and delete its result
    # file to simulate a failing forward-model step.
    random_report_step = (numbers.index(iens) % 3) + 1
    os.remove("perlin_%d.txt" % random_report_step)

    with open("perlin_fail.status", "w") as f:
        f.write("Deleted report step: %d" % random_report_step)
else:
    with open("perlin_fail.status", "w") as f:
        f.write("Did nothing!")
|
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/pickCells.py | Python | gpl-3.0 | 16,540 | 0.026421 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create a scene with one of each cell type
# Voxel
voxelPoints = vtk.vtkPoints()
voxelPoints.SetNumberOfPoints(8)
voxelPoints.InsertPoint(0,0,0,0)
voxelPoints.InsertPoint(1,1,0,0)
voxelPoints.InsertPoint(2,0,1,0)
voxelPoints.InsertPoint(3,1,1,0)
voxelPoints.InsertPoint(4,0,0,1)
voxelPoints.InsertPoint(5,1,0,1)
voxelPoints.InsertPoint(6,0,1,1)
voxelPoints.InsertPoint(7,1,1,1)
aVoxel = vtk.vtkVoxel()
aVoxel.GetPointIds().SetId(0,0)
aVoxel.GetPointIds().SetId(1,1)
aVoxel.GetPointIds().SetId(2,2)
aVoxel.GetPointIds().SetId(3,3)
aVoxel.GetPointIds().SetId(4,4)
aVoxel.GetPointIds().SetId(5,5)
aVoxel.GetPointIds().SetId(6,6)
aVoxel.GetPointIds().SetId(7,7)
aVoxelGrid = vtk.vtkUnstructuredGrid()
aVoxelGrid.Allocate(1,1)
aVoxelGrid.InsertNextCell(aVoxel.GetCellType(),aVoxel.GetPointIds())
aVoxelGrid.SetPoints(voxelPoints)
aVoxelMapper = vtk.vtkDataSetMapper()
aVoxelMapper.SetInputData(aVoxelGrid)
aVoxelActor = vtk.vtkActor()
aVoxelActor.SetMapper(aVoxelMapper)
aVoxelActor.GetProperty().BackfaceCullingOn()
# Hexahedron
hexahedronPoints = vtk.vtkPoints()
hexahedronPoints.SetNumberOfPoints(8)
hexahedronPoints.InsertPoint(0,0,0,0)
hexahedronPoints.InsertPoint(1,1,0,0)
hexahedronPoints.InsertPoint(2,1,1,0)
hexahedronPoints.InsertPoint(3,0,1,0)
hexahedronPoints.InsertPoint(4,0,0,1)
hexahedronPoints.InsertPoint(5,1,0,1)
hexahedronPoints.InsertPoint(6,1,1,1)
hexahedronPoints.InsertPoint(7,0,1,1)
aHexahedron = vtk.vtkHexahedron()
aHexahedron.GetPointIds().SetId(0,0)
aHexahedron.GetPointIds().SetId(1,1)
aHexahedron.GetPointIds().SetId(2,2)
aHexahedron.GetPointIds().SetId(3,3)
aHexahedron.GetPointIds().SetId(4,4)
aHexahedron.GetPointIds().SetId(5,5)
aHexahedron.GetPointIds().SetId(6,6)
aHexahedron.GetPointIds().SetId(7,7)
aHexahedronGrid = vtk.vtkUnstructuredGrid()
aHexahedronGrid.Allocate(1,1)
aHexahedronGrid.InsertNextCell(aHexahedron.GetCellType(),aHexahedron.GetPointIds())
aHexahedronGrid.SetPoints(hexahedronPoints)
aHexahedronMapper = vtk.vtkDataSetMapper()
aHexahedronMapper.SetInputData(aHexahedronGrid)
aHexahedronActor = vtk.vtkActor()
aHexahedronActor.SetMapper(aHexahedronMapper)
aHexahedronActor.AddPosition(2,0,0)
aHexahedronActor.GetProperty().BackfaceCullingOn()
# Tetra
tetraPoints = vtk.vtkPoints()
tetraPoints.SetNumberOfPoints(4)
tetraPoints.InsertPoint(0,0,0,0)
tetraPoints.InsertPoint(1,1,0,0)
tetraPoints.InsertPoint(2,.5,1,0)
tetraPoints.InsertPoint(3,.5,.5,1)
aTetra = vtk.vtkTetra()
aTetra.GetPointIds().SetId(0,0)
aTetra.GetPointIds().SetId(1,1)
aTetra.GetPointIds().SetId(2,2)
aTetra.GetPointIds().SetId(3,3)
aTetraGrid = vtk.vtkUnstructuredGrid()
aTetraGrid.Allocate(1,1)
aTetraGrid.InsertNextCell(aTetra.GetCellType(),aTetra.GetPointIds())
aTetraGrid.SetPoints(tetraPoints)
aTetraMapper = vtk.vtkDataSetMapper()
aTetraMapper.SetInputData(aTetraGrid)
aTetraActor = vtk.vtkActor()
aTetraActor.SetMapper(aTetraMapper)
aTetraActor.AddPosition(4,0,0)
aTetraActor.GetProperty().BackfaceCullingOn()
# Wedge
wedgePoints = vtk.vtkPoints()
wedgePoints.SetNumberOfPoints(6)
wedgePoints.InsertPoint(0,0,1,0)
wedgePoints.InsertPoint(1,0,0,0)
wedgePoints.InsertPoint(2,0,.5,.5)
wedgePoints.InsertPoint(3,1,1,0)
wedgePoints.InsertPoint(4,1,0,0)
wedgePoints.InsertPoint(5,1,.5,.5)
aWedge = vtk.vtkWedge()
aWedge.GetPointIds().SetId(0,0)
aWedge.GetPointIds().SetId(1,1)
aWedge.GetPointIds().SetId(2,2)
aWedge.GetPointIds().SetId(3,3)
aWedge.GetPointIds().SetId(4,4)
aWedge.GetPointIds().SetId(5,5)
aWedgeGrid = vtk.vtkUnstructuredGrid()
aWedgeGrid.Allocate(1,1)
aWedgeGrid.InsertNextCell(aWedge.GetCellType(),aWedge.GetPointIds())
aWedgeGrid.SetPoints(wedgePoints)
aWedgeMapper = vtk.vtkDataSetMapper()
aWedgeMapper.SetInputData(aWedgeGrid)
aWedgeActor = vtk.vtkActor()
aWedgeActor.SetMapper(aWedgeMapper)
aWedgeActor.AddPosition(6,0,0)
aWedgeActor.GetProperty().BackfaceCullingOn()
# Pyramid
pyramidPoints = vtk.vtkPoints()
pyramidPoints.SetNumberOfPoints(5)
pyramidPoints.InsertPoint(0,0,0,0)
pyramidPoints.InsertPoint(1,1,0,0)
pyramidPoints.InsertPoint(2,1,1,0)
pyramidPoints.InsertPoint(3,0,1,0)
pyramidPoints.InsertPoint(4,.5,.5,1)
aPyramid = vtk.vtkPyramid()
aPyramid.GetPointIds().SetId(0,0)
aPyramid.GetPointIds().SetId(1,1)
aPyramid.GetPointIds().SetId(2,2)
aPyramid.GetPointIds().SetId(3,3)
aPyramid.GetPointIds().SetId(4,4)
aPyramidGrid = vtk.vtkUnstructuredGrid()
aPyramidGrid.Allocate(1,1)
aPyramidGrid.InsertNextCell(aPyramid.GetCellType(),aPyramid.GetPointIds())
aPyramidGrid.SetPoints(pyramidPoints)
aPyramidMapper = vtk.vtkDataSetMapper()
aPyramidMapper.SetInputData(aPyramidGrid)
aPyramidActor = vtk.vtkActor()
aPyramidActor.SetMapper(aPyramidMapper)
aPyramidActor.AddPosition(8,0,0)
aPyramidActor.GetProperty().BackfaceCullingOn()
# Pixel
pixelPoints = vtk.vtkPoints()
pixelPoints.SetNumberOfPoints(4)
pixelPoints.InsertPoint(0,0,0,0)
pixelPoints.InsertPoint(1,1,0,0)
pixelPoints.InsertPoint(2,0,1,0)
pixelPoints.InsertPoint(3,1,1,0)
aPixel = vtk.vtkPixel()
aPixel.GetPointIds().SetId(0,0)
aPixel.GetPointIds().SetId(1,1)
aPixel.GetPointIds().SetId(2,2)
aPixel.GetPointIds().SetId(3,3)
aPixelGrid = vtk.vtkUnstructuredGrid()
aPixelGrid.Allocate(1,1)
aPixelGrid.InsertNextCell(aPixel.GetCellType(),aPixel.GetPointIds())
aPixelGrid.SetPoints(pixelPoints)
aPixelMapper = vtk.vtkDataSetMapper()
aPixelMapper.SetInputData(aPixelGrid)
aPixelActor = vtk.vtkActor()
aPixelActor.SetMapper(aPixelMapper)
aPixelActor.AddPosition(0,0,2)
aPixelActor.GetProperty().BackfaceCullingOn()
# Quad
quadPoints = vtk.vtkPoints()
quadPoints.SetNumberOfPoints(4)
quadPoints.InsertPoint(0,0,0,0)
quadPoints.InsertPoint(1,1,0,0)
quadPoints.InsertPoint(2,1,1,0)
quadPoints.InsertPoint(3,0,1,0)
aQuad = vtk.vtkQuad()
aQuad.GetPointIds().SetId(0,0)
aQuad.GetPointIds().SetId(1,1)
aQuad.GetPointIds().SetId(2,2)
aQuad.GetPointIds().SetId(3,3)
aQuadGrid = vtk.vtkUnstructuredGrid()
aQuadGrid.Allocate(1,1)
aQuadGrid.InsertNextCell(aQuad.GetCellType(),aQuad.GetPointIds())
aQuadGrid.SetPoints(quadPoints)
aQuadMapper = vtk.vtkDataSetMapper()
aQuadMapper.SetInputData(aQuadGrid)
aQuadActor = vtk.vtkActor()
aQuadActor.SetMapper(aQuadMapper)
aQuadActor.AddPosition(2,0,2)
aQuadActor.GetProperty().BackfaceCullingOn()
# Triangle
trianglePoints = vtk.vtkPoints()
trianglePoints.SetNumberOfPoints(3)
trianglePoints.InsertPoint(0,0,0,0)
trianglePoints.InsertPoint(1,1,0,0)
trianglePoints.InsertPoint(2,.5,.5,0)
aTriangle = vtk.vtkTriangle()
aTriangle.GetPointIds().SetId(0,0)
aTriangle.GetPointIds().SetId(1,1)
aTriangle.GetPointIds().SetId(2,2)
aTriangleGrid = vtk.vtkUnstructuredGrid()
aTriangleGrid.Allocate(1,1)
aTriangleGrid.InsertNextCell(aTriangle.GetCellType(),aTriangle.GetPointIds())
aTriangleGrid.SetPoints(trianglePoints)
aTriangleMapper = vtk.vtkDataSetMapper()
aTriangleMapper.SetInputData(aTriangleGrid)
aTriangleActor = vtk.vtkActor()
aTriangleActor.SetMapper(aTriangleMapper | )
aTriangleActor.AddPosition(4,0,2)
aTriangleActor.GetProperty().BackfaceCullingOn()
# Polygon
polygonPoints = vtk.vtkPoints()
polygonPoints.SetNumberOfPoints(4)
polygonPoints.InsertPoint(0,0,0,0)
polygonPoints.InsertPoint(1,1,0,0)
polygonPoints.InsertPoint(2,1,1,0)
polygonPoints.InsertPoint(3,0,1,0)
aPolygon = vtk.vtkPolygon()
aPolygon.GetPointIds().SetNum | berOfIds(4)
aPolygon.GetPointIds().SetId(0,0)
aPolygon.GetPointIds().SetId(1,1)
aPolygon.GetPointIds().SetId(2,2)
aPolygon.GetPointIds().SetId(3,3)
aPolygonGrid = vtk.vtkUnstructuredGrid()
aPolygonGrid.Allocate(1,1)
aPolygonGrid.InsertNextCell(aPolygon.GetCellType(),aPolygon.GetPointIds())
aPolygonGrid.SetPoints(polygonPoints)
aPolygonMapper = vtk.vtkDataSetMapper()
aPolygonMapper.SetInputData(aPolygonGrid)
aPolygonActor = vtk.vtkActor()
aPolygonActor.SetMapper(aPolygonMapper)
aPolygonActor.AddPosition(6,0,2)
aPolygonActor.GetProperty().BackfaceCullingOn()
# Triangle Strip
triangleStripPoints = vtk.vtkPoints()
triangleStripPoints.SetNumberOfPoints(5)
triangleStripPoints.InsertPoint(0,0,1,0)
triangleStripPoints.InsertPoint(1,0, |
CERNDocumentServer/cds | cds/modules/records/jsonresolver/schemas.py | Python | gpl-2.0 | 1,378 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record resolver."""
from __future__ import absolute_import, print_function
import json

import jsonresolver
import pkg_resources
@jsonresolver.route('/schemas/<path:path>',
                    host='cds.cern.ch')
def resolve_schemas(path):
    """Resolve JSON schemas.

    Load the schema bundled with the ``cds_dojson`` package at the given
    relative *path* and return it as parsed JSON.
    """
    schema_file = pkg_resources.resource_filename('cds_dojson.schemas', path)
    with open(schema_file, 'r') as f:
        return json.load(f)
|
bthirion/nipy | nipy/fixes/nibabel/orientations.py | Python | bsd-3-clause | 3,877 | 0.001548 | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Copy of nibabel io_orientation function from nibabel > 1.2.0
This copy fixes a bug when there are columns of all zero in the affine.
See https://github.com/nipy/nibabel/pull/128
Remove when we depend on nibabel > 1.2.0
"""
import numpy as np
import numpy.linalg as npl
def io_orientation(affine, tol=None):
    ''' Orientation of input axes in terms of output axes for `affine`

    Valid for an affine transformation from ``p`` dimensions to ``q``
    dimensions (``affine.shape == (q + 1, p + 1)``).

    The calculated orientations can be used to transform associated
    arrays to best match the output orientations. If ``p`` > ``q``, then
    some of the output axes should be considered dropped in this
    orientation.

    Parameters
    ----------
    affine : (q+1, p+1) ndarray-like
        Transformation affine from ``p`` inputs to ``q`` outputs. Usually this
        will be a shape (4,4) matrix, transforming 3 inputs to 3 outputs, but
        the code also handles the more general case
    tol : {None, float}, optional
        threshold below which SVD values of the affine are considered zero. If
        `tol` is None, and ``S`` is an array with singular values for `affine`,
        and ``eps`` is the epsilon value for datatype of ``S``, then `tol` set
        to ``S.max() * eps``.

    Returns
    -------
    orientations : (p, 2) ndarray
        one row per input axis, where the first value in each row is the
        closest corresponding output axis. The second value in each row is 1
        if the input axis is in the same direction as the corresponding output
        axis and -1 if it is in the opposite direction. If a row is
        [np.nan, np.nan], which can happen when p > q, then this row should be
        considered dropped.
    '''
    affine = np.asarray(affine)
    q, p = affine.shape[0] - 1, affine.shape[1] - 1
    # extract the underlying rotation, zoom, shear matrix
    RZS = affine[:q, :p]
    zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
    # Zooms can be zero, in which case all elements in the column are zero,
    # and we can leave them as they are
    zooms[zooms == 0] = 1
    RS = RZS / zooms
    # Transform below is polar decomposition, returning the closest
    # shearless matrix R to RS
    P, S, Qs = npl.svd(RS)
    # Threshold the singular values to determine the rank.
    if tol is None:
        tol = S.max() * np.finfo(S.dtype).eps
    keep = (S > tol)
    R = np.dot(P[:, keep], Qs[keep])
    # the matrix R is such that np.dot(R,R.T) is projection onto the
    # columns of P[:,keep] and np.dot(R.T,R) is projection onto the rows
    # of Qs[keep].  R (== np.dot(R, np.eye(p))) gives rotation of the
    # unit input vectors to output coordinates.  Therefore, the row
    # index of abs max R[:,N], is the output axis changing most as input
    # axis N changes.  In case there are ties, we choose the axes
    # iteratively, removing used axes from consideration as we go.
    # Start from all-nan; rows left untouched mark dropped axes.
    # (Clearer than the original ``np.ones(..., dtype=np.int8) * np.nan``,
    # which silently upcast to float64 anyway.)
    ornt = np.full((p, 2), np.nan)
    for in_ax in range(p):
        col = R[:, in_ax]
        # ``np.alltrue`` was a deprecated alias removed in NumPy 2.0;
        # ``np.all`` is the supported spelling with identical behavior.
        if not np.all(col == 0):
            out_ax = np.argmax(np.abs(col))
            ornt[in_ax, 0] = out_ax
            assert col[out_ax] != 0
            if col[out_ax] < 0:
                ornt[in_ax, 1] = -1
            else:
                ornt[in_ax, 1] = 1
            # remove the identified axis from further consideration, by
            # zeroing out the corresponding row in R
            R[out_ax, :] = 0
    return ornt
|
aplanas/py-tcdb | test/test-adb.py | Python | lgpl-3.0 | 15,634 | 0.000065 | # -*- coding: utf-8 -*-
import unittest
import warnings
from tcdb import adb
class TestADBSimple(unittest.TestCase):
    """Exercise the raw-string API of ``tcdb.adb.ADBSimple``.

    Uses the on-memory abstract database (path ``'*'``), so no files are
    created.  Deprecated ``assertEquals``/``assert_`` aliases (removed in
    Python 3.12) were replaced with ``assertEqual``/``assertTrue``; two
    statements garbled by stray ' | ' separators were repaired.
    """

    def setUp(self):
        # '*' selects Tokyo Cabinet's on-memory hash database.
        self.adb = adb.ADBSimple()
        self.adb.open('*')

    def tearDown(self):
        self.adb.close()
        self.adb = None

    def test_setgetitem(self):
        self.adb['key'] = 'some string'
        self.assertEqual(self.adb['key'], 'some string')
        self.assertRaises(KeyError, self.adb.__getitem__, 'nonexistent key')

    def test_put(self):
        self.adb.put('key', 'some string')
        self.assertEqual(self.adb.get('key'), 'some string')
        self.assertEqual(self.adb.get('nonexistent key'), None)
        self.assertEqual(self.adb.get('nonexistent key', 'def'), 'def')

    def test_putkeep(self):
        self.adb.putkeep('key', 'some string')
        self.assertEqual(self.adb.get('key'), 'some string')
        # putkeep must not overwrite an existing value.
        self.adb.putkeep('key', 'Never stored')
        self.assertEqual(self.adb.get('key'), 'some string')

    def test_putcat(self):
        self.adb.putcat('key', 'some')
        self.adb.putcat('key', ' text')
        self.assertEqual(self.adb.get('key'), 'some text')

    def test_out_and_contains(self):
        self.assertTrue('key' not in self.adb)
        self.adb.put('key', 'some text')
        self.assertTrue('key' in self.adb)
        self.adb.out('key')
        self.assertTrue('key' not in self.adb)
        self.adb.put('key', 'some text')
        self.assertTrue('key' in self.adb)
        del self.adb['key']
        self.assertTrue('key' not in self.adb)

    def test_vsiz(self):
        self.adb.put('key', 'some text')
        self.assertEqual(self.adb.vsiz('key'), len('some text'))

    def test_iters(self):
        keys = ['key1', 'key2', 'key3', 'key4', 'key5']
        for key in keys:
            self.adb.put(key, key)
        self.assertEqual(len(self.adb.keys()), len(keys))
        self.assertEqual(len(self.adb.values()), len(keys))
        # list(...) keeps this working on Python 3, where zip is lazy.
        self.assertEqual(len(list(zip(keys, keys))), len(self.adb.items()))
        for key in self.adb:
            self.assertTrue(key in keys)
        for value in self.adb.itervalues():
            self.assertTrue(value in keys)

    def test_fwmkeys(self):
        objs = ['aa', 'ab', 'ac', 'xx', 'ad']
        for obj in objs:
            self.adb.put(obj, 'same value')
        self.assertEqual(len(self.adb.fwmkeys('a')),
                         len(['aa', 'ab', 'ac', 'ad']))
        self.assertEqual(self.adb.fwmkeys('x'), ['xx'])
        self.assertEqual(self.adb.fwmkeys('nonexistent key'), [])

    def test_admin_functions(self):
        keys = ['key1', 'key2', 'key3', 'key4', 'key5']
        for key in keys:
            self.adb.put(key, key)
        self.assertEqual(self.adb.path(), '*')
        self.adb.sync()
        self.assertEqual(len(self.adb), 5)
        # NOTE(review): these byte counts are implementation/build specific --
        # confirm they are stable across Tokyo Cabinet versions.
        self.assertEqual(self.adb.size(), 525656)
        self.adb.vanish()
        self.assertEqual(self.adb.size(), 525376)

    # def test_transaction(self):
    #     keys = ['key1', 'key2', 'key3', 'key4', 'key5']

    #     with self.adb as db:
    #         for key in keys:
    #             db.put(key, key)
    #     self.assertEquals(len(self.adb), 5)
    #     self.adb.vanish()

    #     try:
    #         with self.adb:
    #             for key in keys:
    #                 self.adb.put(key, key)
    #             self.adb['bad key']
    #     except KeyError:
    #         pass
    #     self.assertEquals(len(self.adb), 0)

    def test_foreach(self):
        keys = ['key1', 'key2', 'key3', 'key4', 'key5']

        def proc(key, value, op):
            self.assertEqual(key, value)
            self.assertTrue(key in keys)
            self.assertEqual(op, 'test')
            return True

        for key in keys:
            self.adb.put(key, key)
        self.adb.foreach(proc, 'test')
class TestADB(unittest.TestCase):
def setUp(self):
self.adb = adb.ADB()
self.adb.open('*')
def tearDown(self):
self.adb.close()
self.adb = None
def test_setgetitem(self):
objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
for obj1 in objs:
self.adb['obj'] = obj1
obj2 = self.adb['obj']
self.assertEqual(obj1, obj2)
self.adb[obj1] = obj1
obj2 = self.adb[obj1]
self.assertEqual(obj1, obj2)
self.assertRaises(KeyError, self.adb.__getitem__, 'nonexistent key')
def test_put(self):
objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
for obj1 in objs:
self.adb.put(obj1, obj1)
obj2 = self.adb.get(obj1)
self.assertEqual(obj1, obj2)
self.adb.put(obj1, obj1, raw_key=True)
obj2 = self.adb.get(obj1, raw_key=True)
self.assertEqual(obj1, obj2)
self.assertEqual(self.adb.get('nonexistent key'), None)
self.assertEqual(self.adb.get('nonexistent key', 'def'), 'def')
def test_put_str(self):
str1 = 'some text [áéíóú]'
objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
for obj in objs:
self.adb.put_str(obj, str1)
str2 = self.adb.get_str(obj)
self.assertEqual(str1, str2)
self.adb.put_str(obj, str1, as_raw=True)
str2 = self.adb.get_str(obj, as_raw=True)
self.assertEqual(str1, str2)
unicode1 = u'unicode text [áéíóú]'
for obj in objs:
self.adb.put_str(obj, unicode1.encode('utf8'))
unicode2 = unicode(self.adb.get_str(obj), 'utf8')
self.assertEqual(unicode1, unicode2)
self.assertRaises(AssertionError, self.adb.put_str, 'key', 10)
self.assertEqual(self.adb.get_str('nonexistent key'), None)
self.assertEqual(self.adb.get_str('nonexistent key', 'def'), 'def')
    def test_put_unicode(self):
        """put_unicode()/get_unicode() round-trips a unicode value under keys of several types."""
        unicode1 = u'unicode text [áéíóú]'
        objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
        for obj in objs:
            self.adb.put_unicode(obj, unicode1)
            unicode2 = self.adb.get_unicode(obj)
            self.assertEqual(unicode1, unicode2)
            # Repeat the round-trip with as_raw=True.
            self.adb.put_unicode(obj, unicode1, as_raw=True)
            unicode2 = self.adb.get_unicode(obj, as_raw=True)
            self.assertEqual(unicode1, unicode2)
        # Non-unicode values are rejected with AssertionError.
        self.assertRaises(AssertionError, self.adb.put_unicode, 'key', 10)
        # Missing keys yield None, or the caller-supplied default.
        self.assertEqual(self.adb.get_unicode('nonexistent key'), None)
        self.assertEqual(self.adb.get_unicode('nonexistent key', 'def'), 'def')

    def test_put_int(self):
        """put_int()/get_int() round-trips an int under keys of several types."""
        int1 = 10
        objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
        for obj in objs:
            self.adb.put_int(obj, int1)
            int2 = self.adb.get_int(obj)
            self.assertEqual(int1, int2)
            # Repeat the round-trip with as_raw=True.
            self.adb.put_int(obj, int1, as_raw=True)
            int2 = self.adb.get_int(obj, as_raw=True)
            self.assertEqual(int1, int2)
        # Non-int values (here a string) are rejected with AssertionError.
        self.assertRaises(AssertionError, self.adb.put_int, 'key', '10')
        # Missing keys yield None, or the caller-supplied default.
        self.assertEqual(self.adb.get_int('nonexistent key'), None)
        self.assertEqual(self.adb.get_int('nonexistent key', 'def'), 'def')

    def test_put_float(self):
        """put_float()/get_float() round-trips a float under keys of several types."""
        float1 = 10.10
        objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
        for obj in objs:
            self.adb.put_float(obj, float1)
            float2 = self.adb.get_float(obj)
            self.assertEqual(float1, float2)
            # Repeat the round-trip with as_raw=True.
            self.adb.put_float(obj, float1, as_raw=True)
            float2 = self.adb.get_float(obj, as_raw=True)
            self.assertEqual(float1, float2)
        # Non-float values (here an int) are rejected with AssertionError.
        self.assertRaises(AssertionError, self.adb.put_float, 'key', 10)
        # Missing keys yield None, or the caller-supplied default.
        self.assertEqual(self.adb.get_float('nonexistent key'), None)
        self.assertEqual(self.adb.get_float('nonexistent key', 'def'), 'def')
def test_putkeep(self):
objs = [1+1j, 'some text [áéíóú]', u'unicode text [áéíóú]', 10, 10.0]
for obj1 in objs:
self.adb.putkeep(obj1, obj1)
obj2 = self.adb.get(obj1)
self.assertEqual(obj1, obj2)
self.adb.putkeep(ob |
jnewland/home-assistant | homeassistant/components/wirelesstag/binary_sensor.py | Python | apache-2.0 | 4,265 | 0 | """Binary sensor support for Wireless Sensor Tags."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA, BinarySensorDevice)
from homeassistant.const import CONF_MONITORED_CONDITIONS, STATE_OFF, STATE_ON
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
DOMAIN as WIRELESSTAG_DOMAIN, SIGNAL_BINARY_EVENT_UPDATE,
WirelessTagBaseSensor)
_LOGGER = logging.getLogger(__name__)
# On means in range, Off means out of range
SENSOR_PRESENCE = 'presence'
# On means motion detected, Off means clear
SENSOR_MOTION = 'motion'
# On means open, Off means closed
SENSOR_DOOR = 'door'
# On means temperature become too cold, Off means normal
SENSOR_COLD = 'cold'
# On means hot, Off means normal
SENSOR_HEAT = 'heat'
# On means too dry (humidity), Off means normal
SENSOR_DRY = 'dry'
# On means too wet (humidity), Off means normal
SENSOR_WET = 'wet'
# On means light detected, Off means no light
SENSOR_LIGHT = 'light'
# On means moisture detected (wet), Off means no moisture (dry)
SENSOR_MOISTURE = 'moisture'
# On means tag battery is low, Off means normal
SENSOR_BATTERY = 'battery'
# Sensor types: Name, device_class, push notification type representing 'on',
# attr to check
SENSOR_TYPES = {
    SENSOR_PRESENCE: 'Presence',
    SENSOR_MOTION: 'Motion',
    SENSOR_DOOR: 'Door',
    SENSOR_COLD: 'Cold',
    SENSOR_HEAT: 'Heat',
    SENSOR_DRY: 'Too dry',
    SENSOR_WET: 'Too wet',
    SENSOR_LIGHT: 'Light',
    SENSOR_MOISTURE: 'Leak',
    SENSOR_BATTERY: 'Low Battery'
}

# Platform config: an optional list of monitored condition types, each of
# which must be one of the SENSOR_TYPES keys above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the platform for a WirelessTags.

    Creates one binary sensor per (tag, monitored condition) pair, but only
    for conditions the tag actually supports, then registers push
    notifications for all created sensors.
    """
    platform = hass.data.get(WIRELESSTAG_DOMAIN)
    sensors = []
    tags = platform.tags
    for tag in tags.values():
        allowed_sensor_types = tag.supported_binary_events_types
        for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
            if sensor_type in allowed_sensor_types:
                # Fixed: this call was split by a stray '|' extraction
                # artifact in the source, making it syntactically invalid.
                sensors.append(
                    WirelessTagBinarySensor(platform, tag, sensor_type))
    add_entities(sensors, True)
    hass.add_job(platform.install_push_notifications, sensors)
class WirelessTagBinarySensor(WirelessTagBaseSensor, BinarySensorDevice):
    """A binary sensor implementation for WirelessTags."""

    def __init__(self, api, tag, sensor_type):
        """Initialize a binary sensor for a Wireless Sensor Tags."""
        # Fixed: 'sensor_type' was split by a stray '|' extraction artifact.
        super().__init__(api, tag)
        self._sensor_type = sensor_type
        self._name = '{0} {1}'.format(self._tag.name,
                                      self.event.human_readable_name)

    async def async_added_to_hass(self):
        """Register callbacks."""
        tag_id = self.tag_id
        event_type = self.device_class
        mac = self.tag_manager_mac
        async_dispatcher_connect(
            self.hass,
            SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac),
            self._on_binary_event_callback)

    @property
    def is_on(self):
        """Return True if the binary sensor is on."""
        return self._state == STATE_ON

    @property
    def device_class(self):
        """Return the class of the binary sensor."""
        return self._sensor_type

    @property
    def event(self):
        """Binary event of tag."""
        return self._tag.event[self._sensor_type]

    @property
    def principal_value(self):
        """Return value of tag.

        Subclasses need override based on type of sensor.
        """
        return STATE_ON if self.event.is_state_on else STATE_OFF

    def updated_state_value(self):
        """Use raw principal value."""
        return self.principal_value

    @callback
    def _on_binary_event_callback(self, event):
        """Update state from arrived push notification."""
        # state should be 'on' or 'off'
        self._state = event.data.get('state')
        self.async_schedule_update_ha_state()
|
alexsalo/genenetwork2 | wqflask/basicStatistics/BasicStatisticsFunctions.py | Python | agpl-3.0 | 8,270 | 0.011487 | from __future__ import print_function
#import string
from math import *
#import piddle as pid
#import os
import traceback
from pprint import pformat as pf
from corestats import Stats
import reaper
from htmlgen import HTMLgen2 as HT
#from utility import Plot
from utility import webqtlUtil
from base import webqtlConfig
from dbFunction import webqtlDatabaseFunction
def basicStatsTable(vals, trait_type=None, cellid=None, heritability=None):
    """Compute basic descriptive statistics for a trait.

    vals is a sequence of (sample_name, value, ...) records; only the value at
    index 1 of each record is used.

    Returns a dict for the template with keys traitmean, traitmedian,
    traitvar, traitstdev, traitsem, N, min and max; for ProbeSet traits also
    range_log2, range_fold and interquartile; and heritability when supplied
    (and not computing per-probe, i.e. no cellid).

    Changes from the previous revision: removed leftover debugging
    (traceback.print_stack) and the large commented-out HTMLgen table code.
    """
    print("basicStatsTable called - len of vals", len(vals))
    st = {}  # This is the dictionary where we'll put everything for the template
    valsOnly = [row[1] for row in vals]
    (st['traitmean'],
     st['traitmedian'],
     st['traitvar'],
     st['traitstdev'],
     st['traitsem'],
     st['N']) = reaper.anova(valsOnly)  # ZS: Should convert this from reaper to R in the future
    # Sort records by value (Python 2 cmp-style comparator).
    dataXZ = sorted(vals, webqtlUtil.cmpOrder)
    print("data for stats is:", pf(dataXZ))
    for num, item in enumerate(dataXZ):
        print("  %i - %s" % (num, item))
    print("  length:", len(dataXZ))
    st['min'] = dataXZ[0][1]
    st['max'] = dataXZ[-1][1]
    numbers = [x[1] for x in dataXZ]
    stats = Stats(numbers)
    at75 = stats.percentile(75)
    at25 = stats.percentile(25)
    print("Interquartile:", at75 - at25)
    if (trait_type != None and trait_type == 'ProbeSet'):
        # Expression values are on a log2 scale, so the fold range is
        # 2 ** (log2 range).
        st['range_log2'] = dataXZ[-1][1] - dataXZ[0][1]
        st['range_fold'] = pow(2.0, (dataXZ[-1][1] - dataXZ[0][1]))
        st['interquartile'] = pow(2.0, (dataXZ[int((st['N']-1)*3.0/4.0)][1] - dataXZ[int((st['N']-1)/4.0)][1]))
    # XZ, 04/01/2009: don't try to get H2 value for probe.
    if not cellid:
        if heritability:
            # This field needs to still be put into the Jinja2 template
            st['heritability'] = heritability
    # Lei Yan
    # 2008/12/19
    return st
def plotNormalProbability(vals=None, RISet='', title=None, showstrains=0, specialStrains=[None], size=(750,500)):
    """Render a normal-probability (Q-Q style) plot of trait values to a GIF.

    NOTE(review): the 'Plot' and 'piddle' (pid) imports are commented out at
    the top of this module, so calling this raises NameError -- confirm
    whether this function is still used. Also note the mutable default
    argument specialStrains=[None].
    """
    dataXZ = vals[:]
    dataXZ.sort(webqtlUtil.cmpOrder)  # Python 2 cmp-style sort by value
    dataLabel = []
    dataX = map(lambda X: X[1], dataXZ)
    # Suppress individual strain labels on crowded plots (> 50 points).
    showLabel = showstrains
    if len(dataXZ) > 50:
        showLabel = 0
    for item in dataXZ:
        strainName = webqtlUtil.genShortStrainName(RISet=RISet, input_strainName=item[0])
        dataLabel.append(strainName)
    # Expected Z scores for uniformly spaced quantiles.
    dataY=Plot.U(len(dataX))
    dataZ=map(Plot.inverseCumul,dataY)
    c = pid.PILCanvas(size=(750,500))
    Plot.plotXY(c, dataZ, dataX, dataLabel = dataLabel, XLabel='Expected Z score', connectdot=0, YLabel='Trait value', title=title, specialCases=specialStrains, showLabel = showLabel)
    filename= webqtlUtil.genRandStr("nP_")
    c.save(webqtlConfig.IMGDIR+filename, format='gif')
    img=HT.Image('/image/'+filename+'.gif',border=0)
    return img
def plotBoxPlot(vals):
    """Render a box plot of trait values to a GIF and return (image, help link).

    vals is a sequence of (sample_name, value, ...) records; only the value at
    index 1 is plotted.

    NOTE(review): 'pid' (piddle) and 'Plot' imports are commented out at the
    top of this module -- confirm this function is still callable.
    """
    valsOnly = []
    dataXZ = vals[:]
    for i in range(len(dataXZ)):
        valsOnly.append(dataXZ[i][1])
    plotHeight = 320
    plotWidth = 220
    xLeftOffset = 60
    xRightOffset = 40
    yTopOffset = 40
    # Fixed: this assignment and the Plot.plotBoxPlot() call below were split
    # by stray '|' extraction artifacts, making the code syntactically invalid.
    yBottomOffset = 60
    canvasHeight = plotHeight + yTopOffset + yBottomOffset
    canvasWidth = plotWidth + xLeftOffset + xRightOffset
    canvas = pid.PILCanvas(size=(canvasWidth,canvasHeight))
    XXX = [('', valsOnly[:])]
    Plot.plotBoxPlot(canvas, XXX, offset=(xLeftOffset, xRightOffset, yTopOffset, yBottomOffset), XLabel= "Trait")
    filename= webqtlUtil.genRandStr("Box_")
    canvas.save(webqtlConfig.IMGDIR+filename, format='gif')
    img=HT.Image('/image/'+filename+'.gif',border=0)
    plotLink = HT.Span("More about ", HT.Href(text="Box Plots", url="http://davidmlane.com/hyperstat/A37797.html", target="_blank", Class="fs13"))
    return img, plotLink
def plotBarGraph(identification='', RISet='', vals=None, type="name"):
this_identification = "unnamed trait"
if identification:
this_identification = identification
if type=="rank":
dataXZ = vals[:]
dataXZ.sort(webqtlUtil.cmpOrder)
title='%s' % this_identification
else:
dataXZ = vals[:]
title='%s' % this_identification
tvals = []
tnames = []
tvars = []
for i in range(len(dataXZ)):
tvals.append(dataXZ[i][1])
tnames.append(webqtlUtil.genShortStrainName(RISet=RISet, input_strainName=dataXZ[i][0]))
tvars.append(dataXZ[i][2])
nnStrain = len(tnames)
sLabel = 1
###determine bar width and space width
if nnStrain < 20:
sw = 4
elif nnStrain < 40:
sw = 3
else:
sw = 2
### 700 is the default plot width minus Xoffsets for 40 strains
defaultWidth = 650
if nnStrain > 40:
defaultWidth += (nnStrain-40)*10
defaultOffset = 100
bw = int(0.5+(defaultWidth - (nnStrain-1.0)*sw)/nnStrain)
if bw < 10:
bw = 10
plotWidth = (nnStrain-1)*sw + nnStrain*bw + defaultOffset
plotHeight = 500
#print [plotWidth, plotHeight, bw, sw, nnStrain]
c = pid.PILCanvas(size=(plotWidth,plotHeight))
Plot.plotBarText(c, tvals, tnames, variance=tvars, YLabel='Value', title=title, sLabel = sLabel, barSpace = sw)
filename= webqtlUtil.genRandStr("Bar_")
c.save(webqtlConfig.IMGDIR+fil |
teknolab/django.org.tr | project/local_settings-example.py | Python | bsd-3-clause | 1,149 | 0 | from django.conf.global_settings import MIDDLEWARE_CLASSES
from settings_default import INSTALLED_APPS

DEBUG = True
TEMPLATE_DEBUG = DEBUG

SECRET_KEY = 'please-generate-your-secret-key'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'sqlite.db',
    }
}

# Social-auth credentials: fill in and uncomment to enable each backend.
# (Fixed: the TUMBLR_CONSUMER_KEY line was split by a stray '|' artifact.)
# FACEBOOK_APP_ID = ''
# FACEBOOK_API_SECRET = ''
# TUMBLR_CONSUMER_KEY = ''
# TUMBLR_CONSUMER_SECRET = ''
# TWITTER_CONSUMER_KEY = ''
# TWITTER_CONSUMER_SECRET = ''

# Development-only additions: enable django-debug-toolbar and extensions.
# (Fixed: the INSTALLED_APPS line was split by a stray '|' artifact.)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('django_extensions', 'debug_toolbar')
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.template.TemplateDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel')
|
KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_effects/effect/profile_COMMON/technique/phong/ambient/effect_phong_ambient_texture/effect_phong_ambient_texture.py | Python | mit | 4,183 | 0.007172 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_effects', 'effect', 'profile_COMMON', 'technique', 'phong']
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
    """Judging object for this COLLADA conformance test case.

    The dynamic loader instantiates this and calls the Judge* methods in
    order; each caches its verdict so later badges can build on earlier ones.
    """
    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        """Baseline: no step crashed; import/export/validate passed; render exists."""
        # No step should crash
        self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass intermediate you need to pass basic; this object could also include
    # additional tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        """Superior: baseline plus image comparison and element preservation."""
        # If baseline fails, there is no point in further checking.
        if (self.status_baseline == False):
            self.status_superior = self.status_baseline
            return self.status_superior
        # Compare the rendered images between import and export,
        # then compare images against the reference test,
        # and last, check for preservation of the element.
        if ( self.__assistant.CompareRenderedImages(context) ):
            if ( self.__assistant.CompareImagesAgainst(context, "effect_phong_ambient_color") ):
                self.__assistant.ElementPreserved(context, self.tagList)
        # Fixed: a stray '|' extraction artifact preceded this statement.
        self.status_superior = self.__assistant.DeferJudgement(context)
        return self.status_superior

    # To pass advanced you need to pass intermediate; this object could also
    # include additional tests that were specific to the advanced badge.
    def JudgeExemplary(self, context):
        """Exemplary: same requirements as superior."""
        self.status_exemplary = self.status_superior
        return self.status_exemplary
|
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
|
calee88/ParlAI | parlai/agents/rnn_baselines/seq2seq.py | Python | bsd-3-clause | 10,385 | 0.001444 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.agents import Agent
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch
import copy
import random
class Seq2seqAgent(Agent):
"""Simple agent which uses an LSTM to process incoming text observations."""
    @staticmethod
    def add_cmdline_args(argparser):
        """Register this agent's command-line arguments on the given parser."""
        argparser.add_arg('-hs', '--hiddensize', type=int, default=64,
                          help='size of the hidden layers and embeddings')
        argparser.add_arg('-nl', '--numlayers', type=int, default=2,
                          help='number of hidden layers')
        argparser.add_arg('-lr', '--learningrate', type=float, default=0.5,
                          help='learning rate')
        argparser.add_arg('-dr', '--dropout', type=float, default=0.1,
                          help='dropout rate')
        argparser.add_arg('--no-cuda', action='store_true', default=False,
                          help='disable GPUs even if available')
        argparser.add_arg('--gpu', type=int, default=-1,
                          help='which GPU device to use')
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if shared and 'dictionary' in shared:
# only set up everything for the main instance
self.dict = shared['dictionary']
self.EOS = self.dict.eos_token
self.EOS_TENSOR = torch.LongTensor(self.dict.parse(self.EOS))
self.id = 'Seq2Seq'
hsz = opt['hiddensize']
self.hidden_size = hsz
self.num_layers = opt['numlayers']
self.learning_rate = opt['learningrate']
self.use_cuda = opt.get('cuda', False)
self.longest_label = 2 # TODO: 1
if 'babi' in opt['task']:
self.babi_mode = True
self.dirs = set(['n', 's', 'e', 'w'])
self.criterion = nn.NLLLoss()
self.lt = nn.Embedding(len(self.dict), hsz, padding_idx=0,
scale_grad_by_freq=True)
self.encoder = nn.GRU(hsz, hsz, opt['numlayers'])
self.decoder = nn.GRU(hsz, hsz, opt['numlayers'])
self.d2o = nn.Linear(hsz, len(self.dict))
self.dropout = nn.Dropout(opt['dropout'])
self.softmax = nn.LogSoftmax()
lr = opt['learningrate']
self.optims = {
'lt': optim.SGD(self.lt.parameters(), lr=lr),
'encoder': optim.SGD(self.encoder.parameters(), lr=lr),
'decoder': optim.SGD(self.decoder.parameters(), lr=lr),
'd2o': optim.SGD(self.d2o.parameters(), lr=lr),
}
if self.use_cuda:
self.cuda()
self.episode_done = True
    def parse(self, text):
        """Convert a text string into a LongTensor of dictionary token indices."""
        return torch.LongTensor(self.dict.txt2vec(text))

    def v2t(self, vec):
        """Convert a vector of dictionary token indices back into a string."""
        return self.dict.vec2txt(vec)

    def cuda(self):
        """Move the loss and every model module onto the GPU."""
        self.criterion.cuda()
        self.lt.cuda()
        self.encoder.cuda()
        self.decoder.cuda()
        self.d2o.cuda()
        self.dropout.cuda()
        self.softmax.cuda()
def hidden_to_idx(self, hidden, drop=False):
if hidden.size(0) > 1:
raise RuntimeError('bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
scores = self.d2o(hidden)
if drop:
scores = self.dropout(scores)
scores = self.softmax(scores)
_max_score, idx = scores.max(1)
return idx, scores
def zero_grad(self):
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
for optimizer in self.optims.values():
optimizer.step()
def init_zeros(self, bsz=1):
t = torch.zeros(self.num_layers, bsz, self.hidden_size)
if self.use_cuda:
t = t.cuda(async=True)
return Variable(t)
def init_rand(self, bsz=1):
t = torch.FloatTensor(self.num_layers, bsz, self.hidden_size)
t.uniform_(0.05)
if self.use_cuda:
t = t.cuda(async=True)
return Variable(t)
def observe(self, observation):
observation = copy.deepcopy(observation)
if not self.episode_done:
# if the last example wasn't the end of an episode, then we need to
# recall what was said in that example
prev_dialogue = self.observation['text']
observation['text'] = prev_dialogue + '\n' + observation['text']
self.observation = observation
self.episode_done = observation['episode_done']
return observation
def update(self, xs, ys):
batchsize = len(xs)
# first encode context
xes = self.lt(xs).t()
h0 = self.init_zeros(batchsize)
_output, hn = self.encoder(xes, h0)
# start with EOS tensor for all
x = self.EOS_TENSOR
if self.use_cuda:
x = x.cuda(async=True)
x = Variable(x)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
output_lines = [[] for _ in range(batchsize)]
self.zero_grad()
# update model
loss = 0
self.longest_label = max(self.longest_label, ys.size(1))
for i in range(ys.size(1)):
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, drop=True)
y = ys.select(1, i)
loss += self.criterion(scores, y)
# use the true token as the next input
xes = self.lt(y).unsqueeze(0)
# hn = self.dropout(hn)
for j in range(preds.size(0)):
token = self.v2t([preds.data[j][0]])
output_lines[j].append(token)
loss.backward()
self.update_params()
if random.random() < 0.1:
true = self.v2t(ys.data[0])
print('loss:', round(loss.data[0], 2), ' '.join( | output_lines | [0]), '(true: {})'.format(true))
return output_lines
def predict(self, xs):
batchsize = len(xs)
# first encode context
xes = self.lt(xs).t()
h0 = self.init_zeros(batchsize)
_output, hn = self.encoder(xes, h0)
# start with EOS tensor for all
x = self.EOS_TENSOR
if self.use_cuda:
x = x.cuda(async=True)
x = Variable(x)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
done = [False for _ in range(batchsize)]
total_done = 0
max_len = 0
output_lines = [[] for _ in range(batchsize)]
while(total_done < batchsize) and max_len < self.longest_label:
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, drop=False)
xes = self.lt(preds.t())
max_len += 1
for i in range(preds.size(0)):
if not done[i]:
token = self.v2t(preds.data[i])
if token == self.EOS:
done[i] = True
total_done += 1
else:
output_lines[i].append(token)
if self.babi_mode and token not in self.dirs:
# for babi, only output one token except when
# giving directions
done[i] = True
total_done += 1
if random.random() < 0.1:
print('prediction:', ' '.join(output_lines[0]))
return output_lines
def batchify(self, obs):
exs = [ex for ex in obs if 'text' in ex]
valid_inds = [i for i, ex in enumerate(obs) if 'text' in ex]
batchsize = len(exs)
parsed = [self.parse(ex['text']) for ex in exs]
max_x_len = max([len(x) for x in parsed])
xs = torch.LongTensor(batchsize, max_x_len).fill_(0)
for i, x in en |
ktnyt/chainer | examples/glance/glance.py | Python | mit | 2,943 | 0 | # Note for contributors:
# This example code is referred to from "Chainer at a Glance" tutorial.
# If this file is to be modified, please also update the line numbers in
# `docs/source/glance.rst` accordingly.
import chainer as ch
from chainer import datasets
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import numpy as np
import matplotlib
matplotlib.use('Agg')
mushroomsfile = 'mushrooms.csv'

# Load the CSV as strings and label-encode every categorical column to ints.
data_array = np.genfromtxt(
    mushroomsfile, delimiter=',', dtype=str, skip_header=1)
for col in range(data_array.shape[1]):
    data_array[:, col] = np.unique(data_array[:, col], return_inverse=True)[1]

# Column 0 is the edible/poisonous label; the rest are features.
X = data_array[:, 1:].astype(np.float32)
Y = data_array[:, 0].astype(np.int32)[:, None]
# Fixed: these two statements were split by stray '|' extraction artifacts.
train, test = datasets.split_dataset_random(
    datasets.TupleDataset(X, Y), int(data_array.shape[0] * .7))

train_iter = ch.iterators.SerialIterator(train, 100)
test_iter = ch.iterators.SerialIterator(
    test, 100, repeat=False, shuffle=False)
def MLP(n_units, n_out):
layer = ch.Sequential(L.Linear(n_units), F.relu)
model = layer.repeat(2)
model.append(L.Linear(n_out))
return model
# Wrap the MLP in a classifier emitting one logit; use binary loss/accuracy.
model = L.Classifier(
    MLP(44, 1), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy)

# Setup an optimizer
optimizer = ch.optimizers.SGD().setup(model)

# Create the updater, using the optimizer
updater = training.StandardUpdater(train_iter, optimizer, device=-1)

# Set up a trainer
trainer = training.Trainer(updater, (50, 'epoch'), out='result')

# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))

# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))

trainer.extend(extensions.snapshot(), trigger=(20, 'epoch'))

# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())

# Save two plot images to the result dir
if extensions.PlotReport.available():
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch', file_name='loss.png'))
    trainer.extend(
        extensions.PlotReport(
            ['main/accuracy', 'validation/main/accuracy'],
            'epoch', file_name='accuracy.png'))

# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
    ['epoch', 'main/loss', 'validation/main/loss',
     'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

# Run the training
trainer.run()

# Demo: classify one random test example. A logit >= 0 corresponds to
# sigmoid >= 0.5, i.e. a "poisonous" prediction.
x, t = test[np.random.randint(len(test))]
predict = model.predictor(x[None]).array
predict = predict[0][0]

if predict >= 0:
    print('Predicted Poisonous, Actual ' + ['Edible', 'Poisonous'][t[0]])
else:
    print('Predicted Edible, Actual ' + ['Edible', 'Poisonous'][t[0]])
|
Skufler/pybot | weather.py | Python | gpl-3.0 | 736 | 0 | import pyowm
import main
owm = pyowm.OWM('9a55dd22782984d74bc1deecec64fc9a', language='Ru')
def weather_handler(message):
    """Reply to a Telegram message with the current weather for the named place.

    NOTE(review): the OpenWeatherMap API key is hard-coded at module level
    above -- it should be moved to configuration/environment.
    """
    try:
        observation = owm.weather_at_place(message.text)
        # Fixed: this call and the send_message below were split by stray '|'
        # extraction artifacts, making the code syntactically invalid.
        weather = observation.get_weather()
        answer = weather.get_detailed_status()
        temperature = weather.get_temperature(unit='celsius')
        main.bot.send_message(message.chat.id,
                              'Погода: {answer}, '
                              'Температура: {temperature}'
                              .format(answer=answer,
                                      temperature=temperature.get('temp')))
    except Exception as e:
        # Best effort: report the failure back to the chat instead of crashing.
        main.bot.send_message(message.chat.id, e.args)
|
witten/borgmatic | borgmatic/config/generate.py | Python | gpl-3.0 | 10,638 | 0.004324 | import collections
import io
import os
import re
from ruamel import yaml
from borgmatic.config import load
INDENT = 4
SEQUENCE_INDENT = 2
def _insert_newline_before_comment(config, field_name):
    '''
    Using some ruamel.yaml black magic, insert a blank line in the config right before the given
    field and its comments.
    '''
    # NOTE(review): relies on ruamel.yaml's internal per-key comment list
    # (config.ca.items); confirm the pinned ruamel.yaml version supports it.
    config.ca.items[field_name][1].insert(
        0, yaml.tokens.CommentToken('\n', yaml.error.CommentMark(0), None)
    )
def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
    '''
    Given a loaded configuration schema, generate and return sample config for it. Include comments
    for each section based on the schema "description".
    '''
    schema_type = schema.get('type')
    example = schema.get('example')

    # An explicit example in the schema short-circuits generation entirely.
    if example is not None:
        return example

    if schema_type == 'array':
        # A sample sequence contains exactly one generated item.
        config = yaml.comments.CommentedSeq(
            [_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
        )
        add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
    elif schema_type == 'object':
        # Recurse into every property, one level deeper.
        config = yaml.comments.CommentedMap(
            [
                (field_name, _schema_to_sample_configuration(sub_schema, level + 1))
                for field_name, sub_schema in schema['properties'].items()
            ]
        )
        # Sequence items get extra indent, and their first key's comment is
        # handled by the sequence-level helper instead.
        indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
        add_comments_to_configuration_object(
            config, schema, indent=indent, skip_first=parent_is_sequence
        )
    else:
        raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema))

    return config
def _comment_out_line(line):
# If it's already is commented out (or empty), there's nothing further to do!
stripped_line = line.lstrip()
if not stripped_line or stripped_line.startswith('#'):
return line
# Comment out the names of optional sections, inserting the '#' after any indent for aesthetics.
matches = re.match(r'(\s*)', line)
indent_spaces = matches.group(0) if matches else ''
count_indent_spaces = len(indent_spaces)
return '# '.join((indent_spaces, line[count_indent_spaces:]))
def _comment_out_optional_configuration(rendered_config):
    '''
    Post-process a rendered configuration string to comment out optional key/values, as determined
    by a sentinel in the comment before each key.

    The idea is that the pre-commented configuration prevents the user from having to comment out a
    bunch of configuration they don't care about to get to a minimal viable configuration file.

    Ideally ruamel.yaml would support commenting out keys during configuration generation, but it's
    not terribly easy to accomplish that way.
    '''
    lines = []
    optional = False

    for line in rendered_config.split('\n'):
        # Upon encountering an optional configuration option, comment out lines until the next
        # blank line. (Fixed: this comparison and the assignment below were
        # split by stray '|' extraction artifacts.)
        if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
            optional = True
            continue

        # Hit a blank line, so reset commenting.
        if not line.strip():
            optional = False

        lines.append(_comment_out_line(line) if optional else line)

    return '\n'.join(lines)
def render_configuration(config):
    '''
    Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
    '''
    dumper = yaml.YAML()
    # Match the INDENT/SEQUENCE_INDENT constants used when generating comments.
    dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT)
    rendered = io.StringIO()
    dumper.dump(config, rendered)

    return rendered.getvalue()
def write_configuration(config_filename, rendered_config, mode=0o600):
    '''
    Given a target config filename and rendered config YAML, write it out to file. Create any
    containing directories as needed. Refuse to overwrite an existing file.
    '''
    if os.path.exists(config_filename):
        raise FileExistsError('{} already exists. Aborting.'.format(config_filename))

    parent_directory = os.path.dirname(config_filename)
    try:
        os.makedirs(parent_directory, mode=0o700)
    except (FileExistsError, FileNotFoundError):
        # The directory already exists, or the filename has no directory
        # component at all -- both are fine.
        pass

    with open(config_filename, 'w') as config_file:
        config_file.write(rendered_config)

    # Restrict permissions (default 0600) since the config may hold secrets.
    os.chmod(config_filename, mode)
def add_comments_to_configuration_sequence(config, schema, indent=0):
    '''
    If the given config sequence's items are object, then mine the schema for the description of the
    object's first item, and slap that atop the sequence. Indent the comment the given number of
    characters.

    Doing this for sequences of maps results in nice comments that look like:

    ```
    things:
          # First key description. Added by this function.
        - key: foo
          # Second key description. Added by add_comments_to_configuration_object().
          other: bar
    ```
    '''
    # Only sequences of objects get a leading description comment.
    if schema['items'].get('type') != 'object':
        return

    for field_name in config[0].keys():
        field_schema = schema['items']['properties'].get(field_name, {})
        description = field_schema.get('description')

        # No description to use? Skip it.
        if not field_schema or not description:
            return

        config[0].yaml_set_start_comment(description, indent=indent)

        # We only want the first key's description here, as the rest of the keys get commented by
        # add_comments_to_configuration_object().
        return
# Sections and keys that must stay active (never commented out) in the sample.
REQUIRED_SECTION_NAMES = {'location', 'retention'}
REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
# Marker appended to a field's comment to flag it for later commenting-out.
COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
    '''
    Using descriptions from a schema as a source, add those descriptions as comments to the
    given config mapping, before each field. Indent each comment the given number of
    characters. When skip_first is True, leave the first key uncommented.
    '''
    for index, field_name in enumerate(config.keys()):
        if index == 0 and skip_first:
            continue

        field_schema = schema['properties'].get(field_name, {})
        description = field_schema.get('description', '').strip()

        # Optional keys get a sentinel appended to their description, flagging them to be
        # commented out of the sample configuration by downstream processing that consumes
        # the sentinel.
        is_optional = field_name not in REQUIRED_SECTION_NAMES and field_name not in REQUIRED_KEYS
        if is_optional:
            description = (
                '\n'.join((description, COMMENTED_OUT_SENTINEL))
                if description
                else COMMENTED_OUT_SENTINEL
            )

        # No description to use? Skip it.
        if not field_schema or not description:  # pragma: no cover
            continue

        config.yaml_set_comment_before_after_key(key=field_name, before=description, indent=indent)

        if index > 0:
            _insert_newline_before_comment(config, field_name)
RUAMEL_YAML_COMMENTS_INDEX = 1
def remove_commented_out_sentinel(config, field_name):
    '''
    Given a configuration CommentedMap and a top-level field name in it, remove any
    "commented out" sentinel found at the end of that field's YAML comments. This prevents
    the field from getting commented out by downstream processing that consumes the sentinel.
    '''
    try:
        comments = config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX]
    except KeyError:
        # Field has no attached comments, so there's no sentinel to strip.
        return

    sentinel_comment = '# {}\n'.format(COMMENTED_OUT_SENTINEL)
    if comments[-1].value == sentinel_comment:
        comments.pop()
def merge_source_configuration_into_destination(destination_config, source_config):
'''
Deep merge the given source configuration dict into the destination configuration CommentedMap,
favoring values from the source when there are collisions.
The purpose of this is to upgrade configuration files from old versions of borgmatic by adding
new
configuration keys and comments.
'''
if not source_config:
return destination_config
if not destination_config o |
hoburg/gpkit | gpkit/tools/tools.py | Python | mit | 3,097 | 0 | """Non-application-specific convenience methods for GPkit"""
import numpy as np
def te_exp_minus1(posy, nterm):
    """Taylor expansion of e^{posy} - 1

    Arguments
    ---------
    posy : gpkit.Posynomial
        Variable or expression to exponentiate
    nterm : int
        Number of non-constant terms in resulting Taylor expansion

    Returns
    -------
    gpkit.Posynomial
        Taylor expansion of e^{posy} - 1, carried to nterm terms
    """
    expansion = 0
    factorial = 1
    # Accumulate posy^k / k! for k = 1..nterm; the constant term (1) is omitted
    # since this expands e^posy - 1.
    for order in range(1, nterm + 1):
        factorial *= order
        expansion = expansion + posy**order / factorial
    return expansion
def te_secant(var, nterm):
    """Taylor expansion of secant(var).

    Arguments
    ---------
    var : gpkit.monomial
        Variable or expression argument
    nterm : int
        Number of non-constant terms in resulting Taylor expansion

    Returns
    -------
    gpkit.Posynomial
        Taylor expansion of secant(x), carried to nterm terms
    """
    # The first 12 Euler numbers E_2n, which supply the series coefficients.
    euler_numbers = np.asarray([1.0,
                                5,
                                61,
                                1385,
                                50521,
                                2702765,
                                199360981,
                                19391512145,
                                2404879675441,
                                370371188237525,
                                69348874393137901,
                                15514534163557086905])

    if nterm > 12:
        # Beyond the tabulated values, extend with an asymptotic approximation
        # for E_2n.
        tail_indices = np.asarray(range(13, nterm + 1))
        asymptotic_tail = (8 * np.sqrt(tail_indices/np.pi)
                           * (4*tail_indices/(np.pi * np.exp(1)))**(2*tail_indices))
        euler_numbers = np.append(euler_numbers, asymptotic_tail)

    # sec(x) = sum_n E_2n x^(2n) / (2n)!, starting from the constant term 1.
    expansion = 1
    denominator = 1
    for order in range(1, nterm + 1):
        denominator *= ((2*order)*(2*order-1))
        expansion = expansion + var**(2*order) * euler_numbers[order-1] / denominator
    return expansion
def te_tangent(var, nterm):
    """Taylor expansion of tangent(var).

    Arguments
    ---------
    var : gpkit.monomial
        Variable or expression argument
    nterm : int
        Number of non-constant terms in resulting Taylor expansion

    Returns
    -------
    gpkit.Posynomial
        Taylor expansion of tangent(x), carried to nterm terms

    Raises
    ------
    NotImplementedError
        If nterm exceeds the 15 tabulated Bernoulli numbers.
    """
    if nterm > 15:
        raise NotImplementedError("Tangent expansion not implemented above"
                                  " 15 terms")

    # The first 15 Bernoulli numbers B_2n, which supply the series coefficients.
    B2n = np.asarray([1/6,
                      -1/30,
                      1/42,
                      -1/30,
                      5/66,
                      -691/2730,
                      7/6,
                      -3617/510,
                      43867/798,
                      -174611/330,
                      854513/138,
                      -236364091/2730,
                      8553103/6,
                      -23749461029/870,
                      8615841276005/14322])

    # tan(x) = sum_n (-1)^(n-1) 2^(2n) (2^(2n) - 1) B_2n x^(2n-1) / (2n)!
    res = 0
    factorial_denom = 1
    for i in range(1, nterm + 1):
        factorial_denom *= ((2*i)*(2*i-1))
        res += ((-1)**(i-1) * 2**(2*i) * (2**(2*i) - 1) *
                B2n[i-1] / factorial_denom * var**(2*i-1))
    return res
|
rackerlabs/rackspace-python-neutronclient | neutronclient/tests/unit/test_cli20_floatingips.py | Python | apache-2.0 | 8,263 | 0 | #!/usr/bin/env python
# Copyright 2012 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.neutron.v2_0 import floatingip as fip
from neutronclient.tests.unit import test_cli20
class CLITestV20FloatingIpsJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['floatingip']
| def test_create_floatingip(self):
# Create floatingip: fip1.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
args = [name, '--description', 'floats like a butterfly']
position_names = ['floating_network_id']
position_val | ues = [name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
description='floats like a butterfly')
    def test_create_floatingip_and_port(self):
        # Create floatingip: fip1.
        # Verifies --port_id (and its dashed alias --port-id) is forwarded as
        # the floating IP's port_id attribute in the create request.
        resource = 'floatingip'
        cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
        name = 'fip1'
        myid = 'myid'
        pid = 'mypid'
        args = [name, '--port_id', pid]
        position_names = ['floating_network_id', 'port_id']
        position_values = [name, pid]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
        # Test dashed options
        args = [name, '--port-id', pid]
        position_names = ['floating_network_id', 'port_id']
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
def test_create_floatingip_and_port_and_address(self):
# Create floatingip: fip1 with a given port and address.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
pid = 'mypid'
addr = '10.0.0.99'
args = [name, '--port_id', pid, '--fixed_ip_address', addr]
position_names = ['floating_network_id', 'port_id', 'fixed_ip_address']
position_values = [name, pid, addr]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = [name, '--port-id', pid, '--fixed-ip-address', addr]
position_names = ['floating_network_id', 'port_id', 'fixed_ip_address']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_with_ip_address_of_floating_ip(self):
# Create floatingip: fip1 with a given IP address of floating IP.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
addr = '10.0.0.99'
args = [name, '--floating-ip-address', addr]
position_values = [name, addr]
position_names = ['floating_network_id', 'floating_ip_address']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
    def test_create_floatingip_with_subnet_id(self):
        # Create floatingip: fip1 on a given subnet id.
        # Verifies --subnet is forwarded as the floating IP's subnet_id attribute.
        resource = 'floatingip'
        cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
        name = 'fip1'
        myid = 'myid'
        subnet_id = 'mysubnetid'
        args = [name, '--subnet', subnet_id]
        position_values = [name, subnet_id]
        position_names = ['floating_network_id', 'subnet_id']
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
def test_create_floatingip_with_subnet_id_and_port(self):
# Create floatingip: fip1 on a given subnet id and port.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
pid = 'mypid'
subnet_id = 'mysubnetid'
args = [name, '--subnet', subnet_id, '--port-id', pid]
position_values = [name, subnet_id, pid]
position_names = ['floating_network_id', 'subnet_id', 'port_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_with_dns_name_and_dns_domain(self):
# Create floatingip: fip1 with dns name and dns domain.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
dns_name_name = 'my-floatingip'
dns_domain_name = 'my-domain.org.'
args = [name, '--dns-name', dns_name_name, '--dns-domain',
dns_domain_name]
position_names = ['floating_network_id', 'dns_name', 'dns_domain']
position_values = [name, dns_name_name, dns_domain_name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_floatingips(self):
# list floatingips: -D.
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_floatingips_pagination(self):
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_floatingips_sort(self):
# list floatingips:
# --sort-key name --sort-key id --sort-key asc --sort-key desc
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_floatingips_limit(self):
# list floatingips: -P.
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
    def test_delete_floatingip(self):
        # Delete floatingip: fip1.
        # Exercises DeleteFloatingIP against a single resource id.
        resource = 'floatingip'
        cmd = fip.DeleteFloatingIP(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)
    def test_show_floatingip(self):
        # Show floatingip: --fields id.
        # Restricts the returned columns to just the id field.
        resource = 'floatingip'
        cmd = fip.ShowFloatingIP(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id,
                                 args, ['id'])
    def test_disassociate_ip(self):
        # Disassociate floating IP: myid.
        # Disassociation is implemented as an update that nulls out port_id.
        resource = 'floatingip'
        cmd = fip.DisassociateFloatingIP(test_cli20.MyApp(sys.stdout), None)
        args = ['myid']
        self._test_update_resource(resource, cmd, 'myid',
                                   args, {"port_id": None}
                                   )
def test_associate_ip(self):
# Associate floating IP: myid portid.
resource = 'floatingip'
cmd = fip.AssociateFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['myid', 'portid']
self._test_update_resource(resource, cmd, 'myid',
|
jgirardet/unolog | unolog/unousers/models.py | Python | gpl-3.0 | 686 | 0 | from django.contrib.auth.models import AbstractUser
from django.db import models
STATUT = ['docteur', 'secrétaire', 'interne', 'remplaçant']
class UnoUser(AbstractUser):
    """
    Base User class for unolog.

    Extends Django's AbstractUser with a ``statut`` field recording the
    user's professional role in the practice.
    """
    # Role identifiers as stored in the database.
    MEDECIN = "medecin"
    SECRETAIRE = "secretaire"
    INTERNE = "interne"
    REMPLACANT = "remplacant"
    # (stored value, human-readable label) pairs for the statut choices.
    STATUT = (
        (MEDECIN, 'Médecin'),
        (SECRETAIRE, 'Secrétaire'),
        (INTERNE, "Interne"),
        (REMPLACANT, "Remplaçant"),
    )
    statut = models.CharField(max_length=20, choices=STATUT)
"""
RPPS
ADELI
https://github.com/codingforentrepreneurs/srvup-rest-framework/blob/master/src/accounts/models.py
"""
|
openaps/oacids | oacids/exported/triggers.py | Python | mit | 3,924 | 0.019368 |
import dbus.service
from gi.repository import GObject as gobject
from oacids.helpers.dbus_props import GPropSync, Manager, WithProperties, ObjectManager
from ifaces import BUS, IFACE, PATH, INTROSPECTABLE_IFACE, TRIGGER_IFACE, OPENAPS_IFACE
EVENT_IFACE = 'org.openaps.Service.Instance.Triggers'
class Emitter (GPropSync):
OWN_IFACE = IFACE + '.EventSink.Emitter'
PATH_TEMPLATE = PATH + '/EventSink/{name:s}'
def __init__ (self, path, manager=None, props=None):
self.manager = manager
bus = manager.bus
self.bus = bus or dbus.SessionBus( )
| self.path = path
# self.when = armed.when
# self.armed = armed
GPropSync.__init__(self, self.bus.get_connection( ), path)
# WithProperties.__init__(self, self.bus.get_connection( ), path)
self.attrs = props
@dbus.service.method(dbus_interface=OWN_IFACE,
in_signature='', o | ut_signature='')
def Fire (self):
self.Emit('Fire')
self.Do( )
@dbus.service.method(dbus_interface=OWN_IFACE,
in_signature='a{sv}', out_signature='')
def Update (self, props):
print props
@dbus.service.signal(dbus_interface=OWN_IFACE,
signature='s')
def Emit (self, status):
print status
@dbus.service.signal(dbus_interface=OWN_IFACE,
signature='')
def Do (self):
print self.manager.master.background.Do
def response (*args):
print "RESPONSE", args
then = self.attrs['then']
command = dict(name=self.attrs['name'], phases="")
self.manager.master.background.Do(command, ack=response, error=response)
if then:
command = dict(name=then, phases="")
self.manager.master.background.Do(command, ack=response, error=response)
class EventSink (GPropSync, Manager):
    # Watches the bus for trigger objects appearing/disappearing and exports a
    # matching Emitter for each one it sees.
    OWN_IFACE = IFACE + '.EventSink'
    PROP_SIGS = {
    }
    def __init__ (self, bus, ctrl):
        self.bus = bus
        self.path = PATH + '/EventSink'
        # ctrl is the controlling service; its .background runs commands.
        self.master = ctrl
        # Emitter instances created so far.
        self.events = [ ]
        Manager.__init__(self, self.path, bus)
        self.init_managed( )
    def init_managed (self):
        # Subscribe to ObjectManager add/remove signals so new trigger objects
        # on the bus get mirrored as Emitters.
        # self.since = utils.datetime.datetime.fromtimestamp(self.master.heartbeat.started_at)
        # self.add_signal_handler("heartbeat", self.Scan, dbus_interface=OPENAPS_IFACE + ".Heartbeat")
        print "SUBSCRIBING to master's Interfaces events"
        self.bus.add_signal_receiver(self.AddEvent, "InterfacesAdded", dbus_interface=ObjectManager, bus_name=BUS)
        self.bus.add_signal_receiver(self.RemoveEvent, "InterfacesRemoved", dbus_interface=ObjectManager, bus_name=BUS)
    def get_all_managed (self):
        # Build the ObjectManager path -> {iface: props} mapping.
        # NOTE(review): self.events holds Emitter objects, which don't appear to
        # define a .trigger attribute — verify this method against Create().
        paths = dict( )
        for thing in self.events:
            # print thing, thing.trigger.OWN_IFACE
            # print thing.trigger.OWN_IFACE, thing.trigger
            spec = { thing.trigger.OWN_IFACE: dict(**thing.trigger.GetAll(thing.trigger.OWN_IFACE)) }
            paths[thing.trigger.path] = spec
        return paths
    def AddEvent (self, path, spec):
        # Only react to trigger objects from the openaps service instance.
        if path.startswith('/org/openaps/Services/Instance/Trigger'):
            print "FOUND NEW", path, spec
            props = spec[EVENT_IFACE]
            name = props.get('name')
            then = props.get('then')
            print name, then
            self.Create(props)
    @dbus.service.method(dbus_interface=OWN_IFACE,
                         in_signature='a{sv}', out_signature='')
    def Create (self, props):
        # Export a new Emitter for this trigger and announce it.
        emitter = Emitter(Emitter.PATH_TEMPLATE.format(**props), self, props)
        self.InterfacesAdded(emitter.path, emitter.GetAll(emitter.OWN_IFACE))
        self.events.append(emitter)
    def RemoveEvent (self, path, props):
        # NOTE(review): removal is logged but the matching Emitter is never
        # dropped from self.events — confirm whether that's intentional.
        if path.startswith('/org/openaps/Services/Instance/Trigger'):
            print "REMOVED", path, props
    @dbus.service.signal(dbus_interface=OWN_IFACE,
                         signature='oa{sa{sv}}')
    def InterfacesAdded (self, path, iface_spec):
        pass
    @dbus.service.signal(dbus_interface=OWN_IFACE,
                         signature='oas')
    def InterfacesRemoved (self, path, iface_spec):
        pass
|
Akagi201/learning-python | lpthw/ex32.py | Python | mit | 887 | 0.001127 | #!/usr/bin/env python
# Exercise 32: Loops and Lists
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters'] |
# this first kind of for-loop goes through a list
for numb | er in the_count:
print "This is count %d" % number
# same as above
for fruit in fruits:
print "A fruit of type: %s" % fruit
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print "I got %r" % i
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0, 6):
print "Adding %d to the list." % i
# append is a function that lists understand
elements.append(i)
#elements = range(0, 6)
# now we can print them out too
for i in elements:
print "Element was: %d" % i
|
pelicanmapping/readymap-python | setup.py | Python | mit | 723 | 0.002766 | import os
from setuptools import setup, find_packages
version = __import__('readymap').__version__
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='readymap-python',
version=version,
author='Jason Beverage',
url="https://github.com/pelicanmapping/readymap-python",
author_email="jasonbeverage@pelicanmapping.com",
description=('Python bindin | gs for ReadyMap'),
long_description=README,
| packages=find_packages(),
install_requires=['requests>=2.4.1','requests-toolbelt>=0.3.1']
) |
adamrvfisher/TechnicalAnalysisLibrary | Weight.py | Python | apache-2.0 | 916 | 0.020742 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 7 11:27:35 2017
@author: AmatVictoriaCuramIII
"""
#Get modules
#import scipy as sp
import numpy as np
from pandas_datareader import data
import pandas as pd
#portfolio set up
port = ['^GSPC', '^RUA']
numsec = len(port)
equalweight = 1/numsec
df2 | = pd.DataFrame(columns=[])
x=0
y=0
#HERE'S AN IDEA print(list(enumerate(port, start=1)))
#List the log returns in columns
for s in port:
x = x + 1
y = y - 1
s = data.DataReader(s, 'yahoo', start='1/1/1900', | end='01/01/2050')
s[x] = np.log(s['Adj Close']/s['Adj Close'].shift(1)) #log returns
s['equalweight'] = equalweight
s[y] = s[x] * s['equalweight'] #This is the weighted return
df2 = pd.concat([df2,s[x],s[y]], axis=1)
#Multiply the individual columns by the last and sum
#df2['portfolioreturn'] = df2[(range(-1, -numsec, -1))]
df2 = pd.concat([df2,s['equalweight']], axis=1)
print(df2) |
robhost/UPDIAN | updian/frontend/__init__.py | Python | gpl-2.0 | 224 | 0.004464 | import flask
from flaskext.csrf import csr | f
from .basicauth import BasicAuth
from .. import config
app = flask.Flask(__name__)
app.secret_key = config.secret_key
csrf(app)
basic_auth = Basi | cAuth(app)
from . import views
|
thought-machine/please | test/python_rules/custom_runner/runner.py | Python | apache-2.0 | 685 | 0.00438 | import os
import pytest
def run(test_names, args):
    """Custom test runner entry point.

    This is fairly minimal and serves mostly to demonstrate how to define this as an
    entry point.

    Args:
        test_names: The names of the original test modules to be run (i.e. the things
            that were srcs to the python_test rule).
        args: Any command-line arguments, not including sys.argv[0].

    Returns:
        The pytest exit code.
    """
    # Please expects JUnit-style XML results inside the directory named by RESULTS_FILE.
    results_file = os.getenv('RESULTS_FILE', 'test.results')
    os.mkdir(results_file)
    args += ['--junitxml', os.path.join(results_file, 'results.xml')] + test_names
    if os.environ.get('DEBUG'):
        # Drop into pdb on failures when DEBUG is set.
        args.append('--pdb')
    return pytest.main(args)
|
miraheze/puppet | modules/mediawiki/files/bin/mwupgradetool.py | Python | gpl-3.0 | 2,456 | 0.002443 | #! /usr/bin/python3
import os
import requests
canary = 'mwtask111'
serverlist = ['mw101', 'mw102', 'mw111', 'mw112', 'mw121', 'mw122']
def check_up(server: str) -> bool:
    """Return True if the given app server serves the Meta API correctly via debug routing."""
    # The debug header routes the request to one specific backend server.
    debug_headers = {'X-Miraheze-Debug': f'{server}.miraheze.org'}
    response = requests.get('https://meta.miraheze.org/w/api.php?action=query&meta=siteinfo&formatversion=2&format=json', headers=debug_headers)
    return (
        response.status_code == 200
        and 'miraheze' in response.text
        and server in response.headers['X-Served-By']
    )
def check_ro(server: str) -> bool:
    """Return True if the given app server reports the wiki as read-only."""
    # The debug header routes the request to one specific backend server,
    # mirroring check_up() above.
    headers = {'X-Miraheze-Debug': f'{server}.miraheze.org'}
    req = requests.get('https://meta.miraheze.org/w/api.php?action=query&meta=siteinfo&formatversion=2&format=json', headers=headers)
    response = req.json()
    if response['query']['general']['readonly']:
        return True
    return False
# Interactive upgrade walkthrough: each step prints what is about to happen and
# pauses for operator confirmation via input() before running shell commands.
print('Welcome to the MediaWiki Upgrade tool!')
input('Please confirm you are running this script on the canary server: (press enter)')
input('MediaWiki -> RO - Running puppet to sync config')
# Run puppet to deploy the read-only configuration.
os.system('sudo puppet agent -tv')
print('Config deployed')
print('Checking RO on Canary Server')
if not check_ro(canary):
    input('Stopping deploy - RO check failed - Press enter to resume')
# Verify every app server picked up the read-only config before upgrading.
for server in serverlist:
    print(f'Confirming RO on {server}')
    if not check_ro(server):
        input(f'RO check failed on {server} - Press enter to resume')
print('Starting staging update')
input('Press enter when branch updated in puppet: ')
# Wipe the staging checkout so puppet re-provisions it from the new branch.
os.system('sudo -u www-data rm -rf /srv/mediawiki-staging/w')
os.system('sudo puppet agent -tv')
print('Will now check mediawiki branch')
os.system('git -C /srv/mediawiki-staging/w rev-parse --abbrev-ref HEAD')
input('Confirm: ')
print('Will now deploy to canary server')
# NOTE(review): '--servers=skip' appears to limit the deploy to the canary —
# confirm against deploy-mediawiki's semantics.
os.system('deploy-mediawiki --world --l10n --force --ignore-time --servers=skip')
if check_up(canary) and check_ro(canary):
    print('Canary deploy done')
else:
    print('Canary is not online')
input('Press enter to rollout: ')
# Roll out to the remaining app servers one at a time, verifying each.
for server in serverlist:
    print(f'Will now deploy to {server}')
    os.system(f'deploy-mediawiki --world --l10n --force --ignore-time --servers={server}')
    if check_up(server) and check_ro(server):
        print(f'{server} deploy done')
    else:
        input(f'{server} is not online - Proceed? ')
print('Deployment done')
input('Please merge RW change and press enter: ')
print('Running puppet')
os.system('sudo puppet agent -tv')
print('Deployment done')
|
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/add_newdocs.py | Python | artistic-2.0 | 222,133 | 0.001337 | """
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values | are allowed.
| * 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. Th |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.