| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| string (lengths 6-947k) | string (lengths 5-100) | string (lengths 4-231) | 1 class | 15 values | int64 (6-947k) | float64 (0-0.34) |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
monitoring API.
"""
import os, signal, errno
from twisted.python.log import msg
from twisted.trial.unittest import TestCase
from twisted.internet.fdesc import setNonBlocking
from twisted.internet._signals import installHandler, isDefaultHandler
from twisted.internet._signals import _extInstallHandler, _extIsDefaultHandler
from twisted.internet._signals import _installHandlerUsingSetWakeup, \
_installHandlerUsingSignal, _isDefaultHandler
class SIGCHLDTestsMixin:
"""
Mixin for L{TestCase} subclasses which defines several tests for
I{installHandler} and I{isDefaultHandler}. Subclasses are expected to
define C{self.installHandler} and C{self.isDefaultHandler} to invoke the
implementation to be tested.
"""
if getattr(signal, 'SIGCHLD', None) is None:
skip = "Platform does not have SIGCHLD"
def installHandler(self, fd):
"""
Override in a subclass to install a SIGCHLD handler which writes a byte
to the given file descriptor. Return the previously registered file
descriptor.
"""
raise NotImplementedError()
def isDefaultHandler(self):
"""
Override in a subclass to determine if the current SIGCHLD handler is
SIG_DFL or not. Return True if it is SIG_DFL, False otherwise.
"""
raise NotImplementedError()
def pipe(self):
"""
Create a non-blocking pipe which will be closed after the currently
running test.
"""
read, write = os.pipe()
self.addCleanup(os.close, read)
self.addCleanup(os.close, write)
setNonBlocking(read)
setNonBlocking(write)
return read, write
def setUp(self):
"""
Save the current SIGCHLD handler as reported by L{signal.signal} and
the current file descriptor registered with L{installHandler}.
"""
handler = signal.getsignal(signal.SIGCHLD)
if handler != signal.SIG_DFL:
self.signalModuleHandler = handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
self.signalModuleHandler = None
self.oldFD = self.installHandler(-1)
if self.signalModuleHandler is not None and self.oldFD != -1:
msg("SIGCHLD setup issue: %r %r" % (self.signalModuleHandler, self.oldFD))
raise RuntimeError("You used some signal APIs wrong! Try again.")
def tearDown(self):
"""
Restore whatever signal handler was present when setUp ran.
"""
# If tests set up any kind of handlers, clear them out.
self.installHandler(-1)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Now restore whatever the setup was before the test ran.
if self.signalModuleHandler is not None:
signal.signal(signal.SIGCHLD, self.signalModuleHandler)
elif self.oldFD != -1:
self.installHandler(self.oldFD)
def test_isDefaultHandler(self):
"""
L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
false otherwise.
"""
self.assertTrue(self.isDefaultHandler())
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
self.assertFalse(self.isDefaultHandler())
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
self.assertTrue(self.isDefaultHandler())
signal.signal(signal.SIGCHLD, lambda *args: None)
self.assertFalse(self.isDefaultHandler())
def test_returnOldFD(self):
"""
L{installHandler} returns the previously registered file descriptor.
"""
read, write = self.pipe()
oldFD = self.installHandler(write)
self.assertEqual(self.installHandler(oldFD), write)
def test_uninstallHandler(self):
"""
C{installHandler(-1)} removes the SIGCHLD handler completely.
"""
read, write = self.pipe()
self.assertTrue(self.isDefaultHandler())
self.installHandler(write)
self.assertFalse(self.isDefaultHandler())
self.installHandler(-1)
self.assertTrue(self.isDefaultHandler())
def test_installHandler(self):
"""
The file descriptor passed to L{installHandler} has a byte written to
it when SIGCHLD is delivered to the process.
"""
read, write = self.pipe()
self.installHandler(write)
exc = self.assertRaises(OSError, os.read, read, 1)
self.assertEqual(exc.errno, errno.EAGAIN)
os.kill(os.getpid(), signal.SIGCHLD)
self.assertEqual(len(os.read(read, 5)), 1)
class DefaultSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for whatever implementation is selected for the L{installHandler}
and L{isDefaultHandler} APIs.
"""
installHandler = staticmethod(installHandler)
isDefaultHandler = staticmethod(isDefaultHandler)
class ExtensionSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for the L{twisted.internet._sigchld} implementation of the
L{installHandler} and L{isDefaultHandler} APIs.
"""
try:
import twisted.internet._sigchld
except ImportError:
skip = "twisted.internet._sigchld is not available"
installHandler = _extInstallHandler
isDefaultHandler = _extIsDefaultHandler
class SetWakeupSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for the L{signal.set_wakeup_fd} implementation of the
L{installHandler} and L{isDefaultHandler} APIs.
"""
# Check both of these. On Ubuntu 9.10 (to take an example completely at
# random), Python 2.5 has set_wakeup_fd but not siginterrupt.
if (getattr(signal, 'set_wakeup_fd', None) is None
or getattr(signal, 'siginterrupt', None) is None):
skip = "signal.set_wakeup_fd is not available"
installHandler = staticmethod(_installHandlerUsingSetWakeup)
isDefaultHandler = staticmethod(_isDefaultHandler)
class PlainSignalModuleSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for the L{signal.signal} implementation of the L{installHandler}
and L{isDefaultHandler} APIs.
"""
installHandler = staticmethod(_installHandlerUsingSignal)
isDefaultHandler = staticmethod(_isDefaultHandler)
| waseem18/oh-mainline | vendor/packages/twisted/twisted/internet/test/test_sigchld.py | Python | agpl-3.0 | 6,400 | 0.002344 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class FossilFuel(IdentifiedObject):
"""The fossil fuel consumed by the non-nuclear thermal generating units, e.g., coal, oil, gasThe fossil fuel consumed by the non-nuclear thermal generating units, e.g., coal, oil, gas
"""
def __init__(self, fuelSulfur=0.0, fuelCost=0.0, fossilFuelType="oil", lowBreakpointP=0.0, fuelDispatchCost=0.0, fuelHandlingCost=0.0, fuelHeatContent=0.0, fuelEffFactor=0.0, fuelMixture=0.0, highBreakpointP=0.0, ThermalGeneratingUnit=None, FuelAllocationSchedules=None, *args, **kw_args):
"""Initialises a new 'FossilFuel' instance.
@param fuelSulfur: The fuel's fraction of pollution credit per unit of heat content
@param fuelCost: The cost in terms of heat value for the given type of fuel
@param fossilFuelType: The type of fossil fuel, such as coal, oil, or gas. Values are: "oil", "coal", "lignite", "gas"
@param lowBreakpointP: The active power output level of the unit at which the given type of fuel is switched off. This fuel (e.g., oil) is sometimes used to stabilize the base fuel (e.g., coal) at low active power output levels.
@param fuelDispatchCost: The cost of fuel used for economic dispatching which includes: fuel cost, transportation cost, and incremental maintenance cost
@param fuelHandlingCost: Handling and processing cost associated with this fuel
@param fuelHeatContent: The amount of heat per weight (or volume) of the given type of fuel
@param fuelEffFactor: The efficiency factor for the fuel (per unit) in terms of the effective energy absorbed
@param fuelMixture: Relative amount of the given type of fuel, when multiple fuels are being consumed.
@param highBreakpointP: The active power output level of the unit at which the given type of fuel is switched on. This fuel (e.g., oil) is sometimes used to supplement the base fuel (e.g., coal) at high active power output levels.
@param ThermalGeneratingUnit: A thermal generating unit may have one or more fossil fuels
@param FuelAllocationSchedules: A fuel allocation schedule must have a fossil fuel
"""
#: The fuel's fraction of pollution credit per unit of heat content
self.fuelSulfur = fuelSulfur
#: The cost in terms of heat value for the given type of fuel
self.fuelCost = fuelCost
#: The type of fossil fuel, such as coal, oil, or gas. Values are: "oil", "coal", "lignite", "gas"
self.fossilFuelType = fossilFuelType
#: The active power output level of the unit at which the given type of fuel is switched off. This fuel (e.g., oil) is sometimes used to stabilize the base fuel (e.g., coal) at low active power output levels.
self.lowBreakpointP = lowBreakpointP
#: The cost of fuel used for economic dispatching which includes: fuel cost, transportation cost, and incremental maintenance cost
self.fuelDispatchCost = fuelDispatchCost
#: Handling and processing cost associated with this fuel
self.fuelHandlingCost = fuelHandlingCost
#: The amount of heat per weight (or volume) of the given type of fuel
self.fuelHeatContent = fuelHeatContent
#: The efficiency factor for the fuel (per unit) in terms of the effective energy absorbed
self.fuelEffFactor = fuelEffFactor
#: Relative amount of the given type of fuel, when multiple fuels are being consumed.
self.fuelMixture = fuelMixture
#: The active power output level of the unit at which the given type of fuel is switched on. This fuel (e.g., oil) is sometimes used to supplement the base fuel (e.g., coal) at high active power output levels.
self.highBreakpointP = highBreakpointP
self._ThermalGeneratingUnit = None
self.ThermalGeneratingUnit = ThermalGeneratingUnit
self._FuelAllocationSchedules = []
self.FuelAllocationSchedules = [] if FuelAllocationSchedules is None else FuelAllocationSchedules
super(FossilFuel, self).__init__(*args, **kw_args)
_attrs = ["fuelSulfur", "fuelCost", "fossilFuelType", "lowBreakpointP", "fuelDispatchCost", "fuelHandlingCost", "fuelHeatContent", "fuelEffFactor", "fuelMixture", "highBreakpointP"]
_attr_types = {"fuelSulfur": float, "fuelCost": float, "fossilFuelType": str, "lowBreakpointP": float, "fuelDispatchCost": float, "fuelHandlingCost": float, "fuelHeatContent": float, "fuelEffFactor": float, "fuelMixture": float, "highBreakpointP": float}
_defaults = {"fuelSulfur": 0.0, "fuelCost": 0.0, "fossilFuelType": "oil", "lowBreakpointP": 0.0, "fuelDispatchCost": 0.0, "fuelHandlingCost": 0.0, "fuelHeatContent": 0.0, "fuelEffFactor": 0.0, "fuelMixture": 0.0, "highBreakpointP": 0.0}
_enums = {"fossilFuelType": "FuelType"}
_refs = ["ThermalGeneratingUnit", "FuelAllocationSchedules"]
_many_refs = ["FuelAllocationSchedules"]
def getThermalGeneratingUnit(self):
"""A thermal generating unit may have one or more fossil fuels
"""
return self._ThermalGeneratingUnit
def setThermalGeneratingUnit(self, value):
if self._ThermalGeneratingUnit is not None:
filtered = [x for x in self.ThermalGeneratingUnit.FossilFuels if x != self]
self._ThermalGeneratingUnit._FossilFuels = filtered
self._ThermalGeneratingUnit = value
if self._ThermalGeneratingUnit is not None:
if self not in self._ThermalGeneratingUnit._FossilFuels:
self._ThermalGeneratingUnit._FossilFuels.append(self)
ThermalGeneratingUnit = property(getThermalGeneratingUnit, setThermalGeneratingUnit)
def getFuelAllocationSchedules(self):
"""A fuel allocation schedule must have a fossil fuel
"""
return self._FuelAllocationSchedules
def setFuelAllocationSchedules(self, value):
for x in self._FuelAllocationSchedules:
x.FossilFuel = None
for y in value:
y._FossilFuel = self
self._FuelAllocationSchedules = value
FuelAllocationSchedules = property(getFuelAllocationSchedules, setFuelAllocationSchedules)
def addFuelAllocationSchedules(self, *FuelAllocationSchedules):
for obj in FuelAllocationSchedules:
obj.FossilFuel = self
def removeFuelAllocationSchedules(self, *FuelAllocationSchedules):
for obj in FuelAllocationSchedules:
obj.FossilFuel = None
| rwl/PyCIM | CIM15/IEC61970/Generation/Production/FossilFuel.py | Python | mit | 7,612 | 0.004992 |
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.
from win32pipe import *
from win32file import *
from win32event import *
import pywintypes
import win32api
import winerror
import sys, os, traceback
verbose = 0
#def ReadFromPipe(pipeName):
# Could (should?) use CallNamedPipe, but this technique allows variable-size
# messages (whereas you must supply a buffer size for CallNamedPipe).
# hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
# more = 1
# while more:
# hr = ReadFile(hPipe, 256)
# if hr==0:
# more = 0
# except win32api.error (hr, fn, desc):
# if hr==winerror.ERROR_MORE_DATA:
# data = dat
#
def CallPipe(fn, args):
ret = None
retryCount = 0
    while retryCount < 8: # Retry a limited number of times while the pipe is busy.
retryCount = retryCount + 1
try:
return fn(*args)
except win32api.error as exc:
if exc.winerror==winerror.ERROR_PIPE_BUSY:
win32api.Sleep(5000)
continue
else:
raise
raise RuntimeError("Could not make a connection to the server")
def testClient(server,msg):
if verbose:
print("Sending", msg)
data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 256, NMPWAIT_WAIT_FOREVER))
if verbose:
print("Server sent back '%s'" % data)
print("Sent and received a message!")
def testLargeMessage(server, size = 4096):
if verbose:
print("Sending message of size %d" % (size))
msg = "*" * size
data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER))
    if len(data) != size:
        print("Sizes are all wrong - sent %d, got back %d" % (size, len(data)))
def stressThread(server, numMessages, wait):
try:
try:
for i in range(numMessages):
r = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, "#" * 512, 1024, NMPWAIT_WAIT_FOREVER))
except:
traceback.print_exc()
print("Failed after %d messages" % i)
finally:
SetEvent(wait)
def stressTestClient(server, numThreads, numMessages):
import _thread
thread_waits = []
for t_num in range(numThreads):
# Note I could just wait on thread handles (after calling DuplicateHandle)
# See the service itself for an example of waiting for the clients...
wait = CreateEvent(None, 0, 0, None)
thread_waits.append(wait)
_thread.start_new_thread(stressThread, (server,numMessages, wait))
# Wait for all threads to finish.
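    # Note: WaitForMultipleObjects can wait on at most MAXIMUM_WAIT_OBJECTS (64)
    # handles in one call, so this simple approach assumes a modest thread count.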
WaitForMultipleObjects(thread_waits, 1, INFINITE)
def main():
import sys, getopt
server = "."
thread_count = 0
msg_count = 500
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:m:vl')
for o,a in opts:
if o=='-s':
server = a
if o=='-m':
msg_count = int(a)
if o=='-t':
thread_count = int(a)
if o=='-v':
global verbose
verbose = 1
if o=='-l':
testLargeMessage(server)
msg = " ".join(args).encode("mbcs")
except getopt.error as msg:
print(msg)
my_name = os.path.split(sys.argv[0])[1]
print("Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..." % my_name)
print(" -v = verbose")
print(" Specifying a value for -t will stress test using that many threads.")
return
testClient(server, msg)
if thread_count > 0:
print("Spawning %d threads each sending %d messages..." % (thread_count, msg_count))
stressTestClient(server, thread_count, msg_count)
if __name__=='__main__':
main()
| sserrot/champion_relationships | venv/Lib/site-packages/win32/Demos/service/pipeTestServiceClient.py | Python | mit | 4,134 | 0.008224 |
class Config:
# specific (for this development instance)
# SERVER_NAME = 'localhost:5000'
SQLALCHEMY_DATABASE_URI = "sqlite:///data.db"
ANTIVIRUS_CHECK_REQUIRED = False
SECRET_KEY = "toto"
# develop settings
DEBUG = True
ASSETS_DEBUG = True
DEBUG_TB_ENABLED = True
# TEMPLATE_DEBUG = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
DEBUG_TB_PROFILER_ENABLED = False
# Generic for this project
SITE_NAME = "Abilian Core Demo"
MAIL_SENDER = "sender@example.com"
SESSION_COOKIE_NAME = "abilian-core-session"
PRIVATE_SITE = True
MAIL_ASCII_ATTACHMENTS = True
BABEL_ACCEPT_LANGUAGES = ("fr", "en", "es", "tr", "zh")
# celery settings
REDIS_URI = "redis://localhost/0"
BROKER_URL = REDIS_URI
CELERY_RESULT_BACKEND = REDIS_URI
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ALWAYS_EAGER = False # True: run tasks locally, no async
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# uncomment if you don't want to use system timezone
# CELERY_TIMEZONE = 'Europe/Paris'
| abilian/abilian-core | demo/config.py | Python | lgpl-2.1 | 1,105 | 0 |
#!/usr/bin/env python
import functools
import itertools
import contextlib
import weakref
import logging
l = logging.getLogger("angr.sim_state")
import claripy
import ana
from archinfo import arch_from_id
from .misc.ux import deprecated
def arch_overrideable(f):
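    """
    Decorator: if the state's architecture object defines a method with the
    same name as ``f``, delegate to that arch-specific implementation;
    otherwise fall back to ``f`` itself.
    """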
@functools.wraps(f)
def wrapped_f(self, *args, **kwargs):
if hasattr(self.arch, f.__name__):
arch_f = getattr(self.arch, f.__name__)
return arch_f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return wrapped_f
from .state_plugins import default_plugins
# This is a counter for the state-merging symbolic variables
merge_counter = itertools.count()
class SimState(ana.Storable): # pylint: disable=R0904
"""
The SimState represents the state of a program, including its memory, registers, and so forth.
:ivar regs: A convenient view of the state's registers, where each register is a property
:ivar mem: A convenient view of the state's memory, a :class:`angr.state_plugins.view.SimMemView`
:ivar registers: The state's register file as a flat memory region
:ivar memory: The state's memory as a flat memory region
:ivar solver: The symbolic solver and variable manager for this state
:ivar inspect: The breakpoint manager, a :class:`angr.state_plugins.inspect.SimInspector`
:ivar log: Information about the state's history
:ivar scratch: Information about the current execution step
:ivar posix: MISNOMER: information about the operating system or environment model
:ivar libc: Information about the standard library we are emulating
:ivar cgc: Information about the cgc environment
:ivar uc_manager: Control of under-constrained symbolic execution
:ivar unicorn: Control of the Unicorn Engine
"""
def __init__(self, project=None, arch=None, plugins=None, memory_backer=None, permissions_backer=None, mode=None, options=None,
add_options=None, remove_options=None, special_memory_filler=None, os_name=None):
self.project = project
self.arch = arch if arch is not None else project.arch.copy() if project is not None else None
if type(self.arch) is str:
self.arch = arch_from_id(self.arch)
# the options
if options is None:
if mode is None:
l.warning("SimState defaulting to symbolic mode.")
mode = "symbolic"
options = o.modes[mode]
options = set(options)
if add_options is not None:
options |= add_options
if remove_options is not None:
options -= remove_options
self.options = options
self.mode = mode
# plugins
self.plugins = { }
if plugins is not None:
for n,p in plugins.iteritems():
self.register_plugin(n, p)
if not self.has_plugin('memory'):
# we don't set the memory endness because, unlike registers, it's hard to understand
# which endness the data should be read
if o.ABSTRACT_MEMORY in self.options:
# We use SimAbstractMemory in static mode
# Convert memory_backer into 'global' region
if memory_backer is not None:
memory_backer = {'global': memory_backer}
# TODO: support permissions backer in SimAbstractMemory
self.register_plugin('memory', SimAbstractMemory(memory_backer=memory_backer, memory_id="mem"))
elif o.FAST_MEMORY in self.options:
self.register_plugin('memory', SimFastMemory(memory_backer=memory_backer, memory_id="mem"))
else:
self.register_plugin('memory', SimSymbolicMemory(memory_backer=memory_backer, permissions_backer=permissions_backer, memory_id="mem"))
if not self.has_plugin('registers'):
if o.FAST_REGISTERS in self.options:
self.register_plugin('registers', SimFastMemory(memory_id="reg", endness=self.arch.register_endness))
else:
self.register_plugin('registers', SimSymbolicMemory(memory_id="reg", endness=self.arch.register_endness))
# OS name
self.os_name = os_name
# This is used in static mode as we don't have any constraints there
self._satisfiable = True
# states are big, so let's give them UUIDs for ANA right away to avoid
# extra pickling
self.make_uuid()
self.uninitialized_access_handler = None
self._special_memory_filler = special_memory_filler
# this is a global condition, applied to all added constraints, memory reads, etc
self._global_condition = None
self.ip_constraints = []
def _ana_getstate(self):
s = dict(ana.Storable._ana_getstate(self))
s['plugins'] = { k:v for k,v in s['plugins'].iteritems() if k not in ('inspector', 'regs', 'mem') }
return s
def _ana_setstate(self, s):
ana.Storable._ana_setstate(self, s)
for p in self.plugins.values():
p.set_state(self._get_weakref() if not isinstance(p, SimAbstractMemory) else self)
if p.STRONGREF_STATE:
p.set_strongref_state(self)
def _get_weakref(self):
return weakref.proxy(self)
def _get_strongref(self):
return self
def __repr__(self):
try:
ip_str = "%#x" % self.addr
except (SimValueError, SimSolverModeError):
ip_str = repr(self.regs.ip)
return "<SimState @ %s>" % ip_str
#
# Easier access to some properties
#
@property
def ip(self):
"""
Get the instruction pointer expression, trigger SimInspect breakpoints, and generate SimActions.
Use ``_ip`` to not trigger breakpoints or generate actions.
:return: an expression
"""
return self.regs.ip
@ip.setter
def ip(self, val):
self.regs.ip = val
@property
def _ip(self):
"""
Get the instruction pointer expression without triggering SimInspect breakpoints or generating SimActions.
:return: an expression
"""
return self.regs._ip
@_ip.setter
def _ip(self, val):
"""
Set the instruction pointer without triggering SimInspect breakpoints or generating SimActions.
:param val: The new instruction pointer.
:return: None
"""
self.regs._ip = val
@property
def addr(self):
"""
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
"""
return self.se.eval_one(self.regs._ip)
#
# Plugin accessors
#
def __getattr__(self, v):
try:
return self.get_plugin(v)
except KeyError:
raise AttributeError(v)
@property
def memory(self):
return self.get_plugin('memory')
@property
def registers(self):
return self.get_plugin('registers')
@property
def se(self):
return self.get_plugin('solver_engine')
@property
def solver(self):
return self.get_plugin('solver_engine')
@property
def inspect(self):
return self.get_plugin('inspector')
@property
def log(self):
return self.get_plugin('log')
@property
def scratch(self):
return self.get_plugin('scratch')
@property
def history(self):
return self.get_plugin('history')
@property
def posix(self):
return self.get_plugin('posix')
@property
def libc(self):
return self.get_plugin('libc')
@property
def cgc(self):
return self.get_plugin('cgc')
@property
def regs(self):
return self.get_plugin('regs')
@property
def mem(self):
return self.get_plugin('mem')
@property
def gdb(self):
return self.get_plugin('gdb')
@property
def globals(self):
return self.get_plugin('globals')
@property
def uc_manager(self):
return self.get_plugin('uc_manager')
@property
def unicorn(self):
return self.get_plugin('unicorn')
@property
def preconstrainer(self):
return self.get_plugin('preconstrainer')
@property
def callstack(self):
return self.get_plugin('callstack')
def _inspect(self, *args, **kwargs):
if self.has_plugin('inspector'):
self.inspect.action(*args, **kwargs)
def _inspect_getattr(self, attr, default_value):
if self.has_plugin('inspector'):
if hasattr(self.inspect, attr):
return getattr(self.inspect, attr)
return default_value
#
# Plugins
#
def has_plugin(self, name):
return name in self.plugins
def get_plugin(self, name):
if name not in self.plugins:
p = default_plugins[name]()
self.register_plugin(name, p)
return p
return self.plugins[name]
def register_plugin(self, name, plugin):
#l.debug("Adding plugin %s of type %s", name, plugin.__class__.__name__)
plugin.set_state(self._get_weakref() if not isinstance(plugin, SimAbstractMemory) else self)
if plugin.STRONGREF_STATE:
plugin.set_strongref_state(self)
self.plugins[name] = plugin
plugin.init_state()
return plugin
def release_plugin(self, name):
if name in self.plugins:
del self.plugins[name]
#
# Constraint pass-throughs
#
def simplify(self, *args):
"""
Simplify this state's constraints.
"""
return self.se.simplify(*args)
def add_constraints(self, *args, **kwargs):
"""
Add some constraints to the state.
You may pass in any number of symbolic booleans as variadic positional arguments.
"""
if len(args) > 0 and isinstance(args[0], (list, tuple)):
raise Exception("Tuple or list passed to add_constraints!")
if o.TRACK_CONSTRAINTS in self.options and len(args) > 0:
if o.SIMPLIFY_CONSTRAINTS in self.options:
constraints = [ self.simplify(a) for a in args ]
else:
constraints = args
self._inspect('constraints', BP_BEFORE, added_constraints=constraints)
constraints = self._inspect_getattr("added_constraints", constraints)
added = self.se.add(*constraints)
self._inspect('constraints', BP_AFTER)
# add actions for the added constraints
if o.TRACK_CONSTRAINT_ACTIONS in self.options:
for c in added:
sac = SimActionConstraint(self, c)
self.history.add_action(sac)
else:
# preserve the old action logic for when we don't track constraints (why?)
if (
'action' in kwargs and kwargs['action'] and
o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0
):
for arg in args:
if self.se.symbolic(arg):
sac = SimActionConstraint(self, arg)
self.history.add_action(sac)
if o.ABSTRACT_SOLVER in self.options and len(args) > 0:
for arg in args:
if self.se.is_false(arg):
self._satisfiable = False
return
if self.se.is_true(arg):
continue
                    # `is_true` and `is_false` do not use the VSA backend currently (see commits
                    # 97a75366 and 2dfba73e in claripy). There is a chance that the VSA backend can
                    # in fact handle it, so we try to resolve it with the VSA backend again.
if claripy.backends.vsa.is_false(arg):
self._satisfiable = False
return
if claripy.backends.vsa.is_true(arg):
continue
# It's neither True or False. Let's try to apply the condition
# We take the argument, extract a list of constrained SIs out of it (if we could, of course), and
# then replace each original SI the intersection of original SI and the constrained one.
_, converted = self.se.constraint_to_si(arg)
for original_expr, constrained_si in converted:
if not original_expr.variables:
l.error('Incorrect original_expression to replace in add_constraints(). ' +
'This is due to defects in VSA logics inside claripy. Please report ' +
'to Fish and he will fix it if he\'s free.')
continue
new_expr = constrained_si
self.registers.replace_all(original_expr, new_expr)
for _, region in self.memory.regions.items():
region.memory.replace_all(original_expr, new_expr)
l.debug("SimState.add_constraints: Applied to final state.")
elif o.SYMBOLIC not in self.options and len(args) > 0:
for arg in args:
if self.se.is_false(arg):
self._satisfiable = False
return
def satisfiable(self, **kwargs):
"""
Whether the state's constraints are satisfiable
"""
if o.ABSTRACT_SOLVER in self.options or o.SYMBOLIC not in self.options:
extra_constraints = kwargs.pop('extra_constraints', ())
for e in extra_constraints:
if self.se.is_false(e):
return False
return self._satisfiable
else:
return self.se.satisfiable(**kwargs)
def downsize(self):
"""
Clean up after the solver engine. Calling this when a state no longer needs to be solved on will reduce memory
usage.
"""
if 'solver_engine' in self.plugins:
self.se.downsize()
#
# State branching operations
#
def step(self, **kwargs):
"""
Perform a step of symbolic execution using this state.
Any arguments to `AngrObjectFactory.successors` can be passed to this.
:return: A SimSuccessors object categorizing the results of the step.
"""
return self.project.factory.successors(self, **kwargs)
def block(self, *args, **kwargs):
"""
Represent the basic block at this state's instruction pointer.
        Any arguments to `AngrObjectFactory.block` can be passed to this.
:return: A Block object describing the basic block of code at this point.
"""
if not args and 'addr' not in kwargs:
kwargs['addr'] = self.addr
return self.project.factory.block(*args, backup_state=self, **kwargs)
# Returns a dict that is a copy of all the state's plugins
def _copy_plugins(self):
memo = {}
out = {}
for n, p in self.plugins.iteritems():
if id(p) in memo:
out[n] = memo[id(p)]
else:
out[n] = p.copy()
memo[id(p)] = out[n]
return out
def copy(self):
"""
Returns a copy of the state.
"""
if self._global_condition is not None:
raise SimStateError("global condition was not cleared before state.copy().")
c_plugins = self._copy_plugins()
state = SimState(project=self.project, arch=self.arch, plugins=c_plugins, options=self.options, mode=self.mode, os_name=self.os_name)
state.uninitialized_access_handler = self.uninitialized_access_handler
state._special_memory_filler = self._special_memory_filler
state.ip_constraints = self.ip_constraints
return state
def merge(self, *others, **kwargs):
"""
Merges this state with the other states. Returns the merging result, merged state, and the merge flag.
        :param others: the states to merge
:param merge_conditions: a tuple of the conditions under which each state holds
:param common_ancestor: a state that represents the common history between the states being merged. Usually it
is only available when EFFICIENT_STATE_MERGING is enabled, otherwise weak-refed states
might be dropped from state history instances.
:param plugin_whitelist: a list of plugin names that will be merged. If this option is given and is not None,
any plugin that is not inside this list will not be merged, and will be created as a
fresh instance in the new state.
:param common_ancestor_history:
a SimStateHistory instance that represents the common history between the states being
merged. This is to allow optimal state merging when EFFICIENT_STATE_MERGING is
disabled.
        :return: (merged state, merge conditions, a bool indicating whether any merging occurred)
"""
merge_conditions = kwargs.pop('merge_conditions', None)
common_ancestor = kwargs.pop('common_ancestor', None)
plugin_whitelist = kwargs.pop('plugin_whitelist', None)
common_ancestor_history = kwargs.pop('common_ancestor_history', None)
if len(kwargs) != 0:
raise ValueError("invalid arguments: %s" % kwargs.keys())
if merge_conditions is None:
# TODO: maybe make the length of this smaller? Maybe: math.ceil(math.log(len(others)+1, 2))
merge_flag = self.se.BVS("state_merge_%d" % merge_counter.next(), 16)
merge_values = range(len(others)+1)
merge_conditions = [ merge_flag == b for b in merge_values ]
else:
merge_conditions = [
(self.se.true if len(mc) == 0 else self.se.And(*mc)) for mc in merge_conditions
]
if len(set(o.arch.name for o in others)) != 1:
raise SimMergeError("Unable to merge due to different architectures.")
all_plugins = set(self.plugins.keys()) | set.union(*(set(o.plugins.keys()) for o in others))
if plugin_whitelist is not None:
all_plugins = all_plugins.intersection(set(plugin_whitelist))
merged = self.copy()
merging_occurred = False
# fix parent
merged.history.parent = self.history
# plugins
for p in all_plugins:
our_plugin = merged.plugins[p] if p in merged.plugins else None
their_plugins = [ (pl.plugins[p] if p in pl.plugins else None) for pl in others ]
plugin_classes = (
set([our_plugin.__class__]) | set(pl.__class__ for pl in their_plugins)
) - set([None.__class__])
if len(plugin_classes) != 1:
raise SimMergeError(
"There are differing plugin classes (%s) for plugin %s" % (plugin_classes, p)
)
plugin_class = plugin_classes.pop()
our_filled_plugin = our_plugin if our_plugin is not None else merged.register_plugin(
p, plugin_class()
)
their_filled_plugins = [
(tp if tp is not None else t.register_plugin(p, plugin_class()))
for t,tp in zip(others, their_plugins)
]
plugin_common_ancestor = (
common_ancestor.plugins[p] if
(common_ancestor is not None and p in common_ancestor.plugins) else
None
)
if plugin_common_ancestor is None and \
plugin_class is SimStateHistory and \
common_ancestor_history is not None:
plugin_common_ancestor = common_ancestor_history
plugin_state_merged = our_filled_plugin.merge(
their_filled_plugins, merge_conditions, common_ancestor=plugin_common_ancestor,
)
if plugin_state_merged:
l.debug('Merging occurred in %s', p)
merging_occurred = True
merged.add_constraints(merged.se.Or(*merge_conditions))
return merged, merge_conditions, merging_occurred
def widen(self, *others):
"""
Perform a widening between self and other states
:param others:
:return:
"""
if len(set(frozenset(o.plugins.keys()) for o in others)) != 1:
raise SimMergeError("Unable to widen due to different sets of plugins.")
if len(set(o.arch.name for o in others)) != 1:
raise SimMergeError("Unable to widen due to different architectures.")
widened = self.copy()
widening_occurred = False
# plugins
for p in self.plugins:
if p in ('solver_engine', 'unicorn'):
continue
plugin_state_widened = widened.plugins[p].widen([_.plugins[p] for _ in others])
if plugin_state_widened:
                l.debug('Widening occurred in %s', p)
widening_occurred = True
return widened, widening_occurred
#############################################
### Accessors for tmps, registers, memory ###
#############################################
def reg_concrete(self, *args, **kwargs):
"""
Returns the contents of a register but, if that register is symbolic,
raises a SimValueError.
"""
e = self.registers.load(*args, **kwargs)
if self.se.symbolic(e):
raise SimValueError("target of reg_concrete is symbolic!")
return self.se.eval(e)
def mem_concrete(self, *args, **kwargs):
"""
Returns the contents of a memory but, if the contents are symbolic,
raises a SimValueError.
"""
e = self.memory.load(*args, **kwargs)
if self.se.symbolic(e):
raise SimValueError("target of mem_concrete is symbolic!")
return self.se.eval(e)
###############################
### Stack operation helpers ###
###############################
@arch_overrideable
def stack_push(self, thing):
"""
Push 'thing' to the stack, writing the thing to memory and adjusting the stack pointer.
"""
        # adjust sp by the architecture's stack_change (negative on most
        # architectures, where the stack grows toward lower addresses)
sp = self.regs.sp + self.arch.stack_change
self.regs.sp = sp
return self.memory.store(sp, thing, endness=self.arch.memory_endness)
@arch_overrideable
def stack_pop(self):
"""
Pops from the stack and returns the popped thing. The length will be the architecture word size.
"""
sp = self.regs.sp
self.regs.sp = sp - self.arch.stack_change
return self.memory.load(sp, self.arch.bits / 8, endness=self.arch.memory_endness)
@arch_overrideable
def stack_read(self, offset, length, bp=False):
"""
Reads length bytes, at an offset into the stack.
:param offset: The offset from the stack pointer.
:param length: The number of bytes to read.
:param bp: If True, offset from the BP instead of the SP. Default: False.
"""
sp = self.regs.bp if bp else self.regs.sp
return self.memory.load(sp+offset, length, endness=self.arch.memory_endness)
###############################
### Other helpful functions ###
###############################
def make_concrete_int(self, expr):
if isinstance(expr, (int, long)):
return expr
if not self.se.symbolic(expr):
return self.se.eval(expr)
v = self.se.eval(expr)
self.add_constraints(expr == v)
return v
# This handles the preparation of concrete function launches from abstract functions.
@arch_overrideable
def prepare_callsite(self, retval, args, cc='wtf'):
#TODO
pass
def _stack_values_to_string(self, stack_values):
"""
Convert each stack value to a string
:param stack_values: A list of values
:return: The converted string
"""
strings = [ ]
for stack_value in stack_values:
if self.se.symbolic(stack_value):
concretized_value = "SYMBOLIC - %s" % repr(stack_value)
else:
                if len(self.se.eval_upto(stack_value, 2)) == 2:
                    # more than one possible value: show the expression itself
                    concretized_value = repr(stack_value)
                else:
                    # a single concrete solution: render it as hex
                    concretized_value = "0x%08x" % self.se.eval(stack_value)
strings.append(concretized_value)
return " .. ".join(strings)
def dbg_print_stack(self, depth=None, sp=None):
"""
Only used for debugging purposes.
Return the current stack info in formatted string. If depth is None, the
current stack frame (from sp to bp) will be printed out.
"""
var_size = self.arch.bits / 8
sp_sim = self.regs._sp
bp_sim = self.regs._bp
if self.se.symbolic(sp_sim) and sp is None:
result = "SP is SYMBOLIC"
elif self.se.symbolic(bp_sim) and depth is None:
result = "BP is SYMBOLIC"
else:
sp_value = sp if sp is not None else self.se.eval(sp_sim)
if self.se.symbolic(bp_sim):
result = "SP = 0x%08x, BP is symbolic\n" % (sp_value)
bp_value = None
else:
bp_value = self.se.eval(bp_sim)
result = "SP = 0x%08x, BP = 0x%08x\n" % (sp_value, bp_value)
if depth is None:
# bp_value cannot be None here
depth = (bp_value - sp_value) / var_size + 1 # Print one more value
pointer_value = sp_value
for i in xrange(depth):
# For AbstractMemory, we wanna utilize more information from VSA
stack_values = [ ]
if o.ABSTRACT_MEMORY in self.options:
sp = self.regs._sp
segment_sizes = self.memory.get_segments(sp + i * var_size, var_size)
pos = i * var_size
for segment_size in segment_sizes:
stack_values.append(self.stack_read(pos, segment_size, bp=False))
pos += segment_size
else:
stack_values.append(self.stack_read(i * var_size, var_size, bp=False))
# Convert it into a big string!
val = self._stack_values_to_string(stack_values)
if pointer_value == sp_value:
line = "(sp)% 16x | %s" % (pointer_value, val)
elif pointer_value == bp_value:
line = "(bp)% 16x | %s" % (pointer_value, val)
else:
line = "% 20x | %s" % (pointer_value, val)
pointer_value += var_size
result += line + "\n"
return result
#
# Other helper methods
#
def set_mode(self, mode):
self.mode = mode
self.options = set(o.modes[mode])
@property
def thumb(self):
if not self.arch.name.startswith('ARM'):
return False
if self.regs.ip.symbolic:
# return True when IP can *only* be odd
new_state = self.copy()
new_state.add_constraints(new_state.regs.ip % 2 == 1, new_state.regs.ip % 2 != 0)
return new_state.satisfiable()
else:
concrete_ip = self.se.eval(self.regs.ip)
return concrete_ip % 2 == 1
#
# Some pretty fancy global condition stuff!
#
@property
def with_condition(self):
@contextlib.contextmanager
def ctx(c):
old_condition = self._global_condition
try:
new_condition = c if old_condition is None else self.se.And(old_condition, c)
self._global_condition = new_condition
yield
finally:
self._global_condition = old_condition
return ctx
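    # Usage sketch for the property above (hedged; `flag` is a hypothetical
    # symbolic value): everything done inside the block is guarded by the
    # supplied condition, which is AND-ed with any condition already in force.
    #
    #     with state.with_condition(flag == 1):
    #         state.add_constraints(...)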
def _adjust_condition(self, c):
if self._global_condition is None:
return c
elif c is None:
return self._global_condition
else:
return self.se.And(self._global_condition, c)
def _adjust_condition_list(self, conditions):
if self._global_condition is None:
return conditions
elif len(conditions) == 0:
return conditions.__class__((self._global_condition,))
else:
return conditions.__class__((self._adjust_condition(self.se.And(*conditions)),))
#
# Compatibility layer
#
@property
def state(self):
return self
@property
def length(self):
return self.history.block_count
@property
def jumpkind(self):
return self.scratch.jumpkind
@property
def last_actions(self):
return self.history.recent_actions
@property
def history_iterator(self):
return self.history.lineage
@property
def addr_trace(self):
return self.history.addr_trace
@property
def trace(self):
return self.history.trace
@property
def targets(self):
return self.history.jump_targets
@property
def guards(self):
return self.history.jump_guards
@property
def jumpkinds(self):
return self.history.jumpkinds
@property
def events(self):
return self.history.events
@property
def actions(self):
return self.history.actions
@property
def reachable(self):
return self.history.reachable()
@deprecated
def trim_history(self):
self.history.trim()
from .state_plugins.symbolic_memory import SimSymbolicMemory
from .state_plugins.fast_memory import SimFastMemory
from .state_plugins.abstract_memory import SimAbstractMemory
from .state_plugins.history import SimStateHistory
from .errors import SimMergeError, SimValueError, SimStateError, SimSolverModeError
from .state_plugins.inspect import BP_AFTER, BP_BEFORE
from .state_plugins.sim_action import SimActionConstraint
from . import sim_options as o
| f-prettyland/angr | angr/sim_state.py | Python | bsd-2-clause | 30,608 | 0.003921 |
########## recombination.py parameters
class Recombination_Parameters(object):
# Change these two values to the folders you prefer - use an absolute path e.g. /Users/Harry/fastq-data and
# /Users/Harry/csv-data or a path relative to the tools directory.
# You may use the same folder for input and output.
input_folder = "data"
output_folder = "data"
# The number of bases to retrieve before the seed sequence
HEAD = 10
# The number of bases to retrieve after the seed sequences
TAIL = 10
seed_sequences = {
"loxP": "ATAACTTCGTATAGCATACATTATACGAAGTTAT",
"lox2272": "ATAACTTCGTATAGGATACTTTATACGAAGTTAT",
}
########## serotypes.py parameters
class Serotypes_Parameters(object):
# Change these two values to the folders you prefer - use an absolute path e.g. /Users/Harry/fastq-data and
# /Users/Harry/csv-data or a path relative to the tools directory.
# You may use the same folder for input and output.
input_folder = "data"
output_folder = "data"
# These are the signatures that will be matched. The first part is the name, the part in brackets contains the
# actual signatures, separated by a comma (each serotype can have multiple signatures)
signatures = {
"AAV1": [
"AGTGCTTCAACGGGGGCCAG",
"GGGCGTGAATCCATCATCAACCCTGG",
"CCGGAGCTTCAAACACTGCATTGGACAAT"
],
"AAV2": [
"AGGCAACAGACAAGCAGCTACC",
"AACAGACAAGCAGCTACCGCA"
],
"AAV5": [
"TCCAAGCCTTCCACCTCGTCAGACGCCGAA",
"CACCAACAACCAGAGCTCCACCACTG",
"GCCCGTCAGCAGCTTCATC"
],
"AAV7": [
"AGTGAAACTGCAGGTAGTACC"
],
"AAV8": [
"GCAAAACACGGCTCCTCAAAT",
"CAGCAAGCGCTGGAACCCCGAGATCCAGTA",
"AAATACCATCTGAATGGAAGAAATTCATTG",
"CGTGGCAGATAACTTGCAGC",
"ATCCTCCGACCACCTTCAACC"
],
"AAV9": [
"AGTGCCCAAGCACAGGCGCA",
"ATCTCTCAAAGACTATTAAC",
"GGCGAGCAGTCTTCCAGGCA"
],
"AAVrh10": [
"CTACAAATCTACAAATGTGGACTTTG"
],
"PHPeB": [
"CTTTGGCGGTGCCTTTTAAGGCACAGGCGCAGA"
],
"PHPs": [
"AGGCGGTTAGGACGTCTTTGGCACAGGCGCAGA"
],
"AAVrg": [
"TAGCAGACCAAGACTACACAAAAACTGCT"
],
}
| addgene/research | toolkit/parameters.py | Python | gpl-3.0 | 2,405 | 0.003742 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for the ResNet50 model, executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.client import device_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager.benchmarks.resnet50 import resnet50
from tensorflow.python.eager.benchmarks.resnet50 import resnet50_test_util
from tensorflow.python.framework import test_util
def compute_gradients(model, images, labels, num_replicas=1):
with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.compat.v1.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.compat.v2.summary.write('loss', loss)
if num_replicas != 1:
loss /= num_replicas
# TODO(b/110991947): We can mistakenly trace the gradient call in
# multi-threaded environment. Explicitly disable recording until
# this is fixed.
with tape.stop_recording():
grads = grad_tape.gradient(loss, model.variables)
return grads
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
def _events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.compat.v1.Event protos in the event file.
"""
records = list(tf.compat.v1.python_io.tf_record_iterator(filepath))
result = []
for r in records:
event = tf.compat.v1.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.compat.v1.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.io.gfile.exists(logdir)
files = tf.io.gfile.listdir(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return _events_from_file(os.path.join(logdir, files[0]))
class ResNet50Test(tf.test.TestCase):
def _apply(self, defun=False, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
with tf.device(device), context.execution_mode(execution_mode):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
context.async_wait()
self.assertEqual((2, 1000), output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply(self):
self._apply(defun=False)
def test_apply_async(self):
self._apply(defun=False, execution_mode=context.ASYNC)
def test_apply_with_defun(self):
self._apply(defun=True)
def test_apply_with_defun_async(self):
self._apply(defun=True, execution_mode=context.ASYNC)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_no_top(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1)
if data_format == 'channels_first' else (2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_with_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False, pooling='avg')
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
self.assertEqual((2, 2048), output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_no_average_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, average_pooling=False, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 7, 7) if data_format == 'channels_first' else
(2, 7, 7, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_block3_strides(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_retrieve_intermediates(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
intermediates_dict = {}
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False,
intermediates_dict=intermediates_dict)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
if data_format == 'channels_first':
block_shapes = {
'block0': (2, 64, 112, 112),
'block0mp': (2, 64, 55, 55),
'block1': (2, 256, 55, 55),
'block2': (2, 512, 28, 28),
'block3': (2, 1024, 7, 7),
'block4': (2, 2048, 1, 1),
}
else:
block_shapes = {
'block0': (2, 112, 112, 64),
'block0mp': (2, 55, 55, 64),
'block1': (2, 55, 55, 256),
'block2': (2, 28, 28, 512),
'block3': (2, 7, 7, 1024),
'block4': (2, 1, 1, 2048),
}
for (block_name, block) in intermediates_dict.items():
self.assertEqual(block_shapes[block_name], block.shape)
def _test_train(self, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
tf.compat.v2.summary.experimental.set_step(
tf.compat.v1.train.get_or_create_global_step())
logdir = tempfile.mkdtemp()
with tf.compat.v2.summary.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), tf.compat.v2.summary.record_if(True):
with tf.device(device), context.execution_mode(execution_mode):
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
images, labels = resnet50_test_util.random_batch(2, data_format)
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
self.assertEqual(320, len(model.variables))
context.async_wait()
events = events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'loss')
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_train(self):
self._test_train()
@test_util.disable_tfrt('TFE_ContextGetExecutorForThread missing b/156188669')
def test_train_async(self):
self._test_train(execution_mode=context.ASYNC)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_no_garbage(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
with tf.device(device):
images, labels = resnet50_test_util.random_batch(2, data_format)
gc.disable()
# Warm up. Note that this first run does create significant amounts of
# garbage to be collected. The hope is that this is a build-only effect,
# and a subsequent training loop will create nothing which needs to be
# collected.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
for _ in range(2):
# Run twice to ensure that garbage that is created on the first
# iteration is no longer accessible.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
# There should be no garbage requiring collection.
self.assertEqual(0, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class ResNet50Benchmarks(tf.test.Benchmark):
def _report(self, label, start, num_iters, device, batch_size, data_format,
num_replicas=1):
resnet50_test_util.report(self, label, start, num_iters, device, batch_size,
data_format, num_replicas)
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
# TODO(b/141475121): We need some way to check which batch sizes would
# work using a public API.
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
# Avoid OOM errors with larger batch sizes, which seem to cause errors
# later on even if caught.
#
# TODO(allenl): Base this on device memory; memory limit information
# during the test seems to exclude the amount TensorFlow has allocated,
# which isn't useful.
if 'K20' in device.physical_device_desc:
return (16,)
        # Quadro P1000.
if 'P1000' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with context.execution_mode(execution_mode):
device, data_format = device_and_format
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = resnet50_test_util.random_batch(batch_size, data_format)
for _ in xrange(num_burn):
model(images, training=False).cpu()
if execution_mode:
context.async_wait()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images, training=False).cpu()
if execution_mode:
context.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply(
'eager_apply', resnet50_test_util.device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async',
resnet50_test_util.device_and_data_format(),
defun=False,
execution_mode=context.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply(
'eager_apply_with_defun',
resnet50_test_util.device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with context.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = resnet50_test_util.random_batch(
batch_size, data_format)
model = resnet50.ResNet50(data_format)
# TODO(b/161911585): tf_to_corert MLIR lowering pipeline should handle
# case when momentum is not set.
optimizer = tf.keras.optimizers.SGD(0.1, 0.1)
apply_grads = apply_gradients
if defun:
model.call = tf.function(model.call)
apply_grads = tf.function(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
context.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
context.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train(
'eager_train', MockIterator,
resnet50_test_util.device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
resnet50_test_util.device_and_data_format(),
defun=False,
execution_mode=context.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
resnet50_test_util.device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return iter(ds)
self._benchmark_eager_train(
'eager_train_dataset',
make_iterator,
resnet50_test_util.device_and_data_format(),
defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return iter(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
resnet50_test_util.device_and_data_format(), defun=True)
if __name__ == '__main__':
tf.compat.v1.enable_eager_execution()
tf.test.main()
|
frreiss/tensorflow-fred
|
tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py
|
Python
|
apache-2.0
| 16,160
| 0.008168
|
#!/usr/bin/env python3
# 574A_bear.py - Codeforces.com/problemset/problem/574/A Bear program by Sergey 2015
import unittest
import sys
###############################################################################
# Bear Class
###############################################################################
class Bear:
""" Bear representation """
def __init__(self, test_inputs=None):
""" Default constructor """
it = iter(test_inputs.split("\n")) if test_inputs else None
def uinput():
return next(it) if it else sys.stdin.readline().rstrip()
# Reading single elements
self.n = int(uinput())
# Reading a single line of multiple elements
self.nums = list(map(int, uinput().split()))
def calculate(self):
""" Main calcualtion function of the class """
lamak = self.nums[0]
srt = sorted(self.nums[1:])
result = 0
while lamak <= srt[-1]:
srt[-1] -= 1
lamak += 1
result += 1
srt = sorted(srt)
return str(result)
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_single_test(self):
""" Bear class testing """
# Constructor test
test = "5\n5 1 11 2 8"
d = Bear(test)
self.assertEqual(d.n, 5)
self.assertEqual(d.nums, [5, 1, 11, 2, 8])
# Sample test
self.assertEqual(Bear(test).calculate(), "4")
# Sample test
test = "4\n1 8 8 8"
self.assertEqual(Bear(test).calculate(), "6")
# Sample test
test = "2\n7 6"
self.assertEqual(Bear(test).calculate(), "0")
# My tests
test = "4\n0 1 1 1"
self.assertEqual(Bear(test).calculate(), "2")
# Time limit test
self.time_limit_test(100)
def time_limit_test(self, nmax):
""" Timelimit testing """
import random
import timeit
# Random inputs
test = str(nmax) + "\n"
test += "0 "
nums = [1000 for i in range(nmax-1)]
test += " ".join(map(str, nums)) + "\n"
# Run the test
start = timeit.default_timer()
d = Bear(test)
calc = timeit.default_timer()
d.calculate()
stop = timeit.default_timer()
print("\nTimelimit Test: " +
"{0:.3f}s (init {1:.3f}s calc {2:.3f}s)".
format(stop-start, calc-start, stop-calc))
if __name__ == "__main__":
    # Avoiding recursion limitations
sys.setrecursionlimit(100000)
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
# Print the result string
sys.stdout.write(Bear().calculate())
|
snsokolov/contests
|
codeforces/574A_bear.py
|
Python
|
unlicense
| 2,847
| 0.000702
|
#!/usr/bin/python -O
#
# /usr/sbin/webapp-config
# Python script for managing the deployment of web-based
# applications
#
# Originally written for the Gentoo Linux distribution
#
# Copyright (c) 1999-2007 Authors
# Released under v2 of the GNU GPL
#
# Author(s) Stuart Herbert
# Renat Lumpau <rl03@gentoo.org>
# Gunnar Wrobel <wrobel@gentoo.org>
#
# ========================================================================
''' Provides a class that handles ebuild related tasks. '''
# ========================================================================
# Dependencies
# ------------------------------------------------------------------------
import os.path, re, pwd, grp
from WebappConfig.debug import OUT
import WebappConfig.wrapper as wrapper
from WebappConfig.sandbox import Sandbox
# ========================================================================
# Handler for ebuild related tasks
# ------------------------------------------------------------------------
class Ebuild:
'''
    This class handles all ebuild-related tasks. Currently this includes
    displaying the post-install instructions as well as running hooks
    provided by the ebuild.
'''
def __init__(self, config):
self.config = config
self.__root = wrapper.get_root(self.config)
self.__re = re.compile('/+')
self.__sourced = self.__re.sub('/', self.__root
+ self.get_config('my_appdir'))
self.__hooksd = self.__re.sub('/', self.__root
+ self.get_config('my_hookscriptsdir'))
def get_config(self, option):
''' Return a config option.'''
return self.config.config.get('USER', option)
def run_hooks(self, type, server):
'''
Run the hook scripts - if there are any
'''
if self.config.pretend():
return
sandbox = Sandbox(self.config)
# save list of environment variables to set
env_map = self.run_vars(server)
if os.path.isdir(self.__hooksd):
for x in os.listdir(self.__hooksd):
if (os.path.isfile(self.__hooksd + '/' + x) and
os.access(self.__hooksd + '/' + x, os.X_OK)):
OUT.debug('Running hook script', 7)
sandbox.spawn(self.__hooksd + '/' + x + ' ' + type, env_map)
def show_post(self, filename, ptype, server = None):
'''
Display one of the post files.
'''
post_file = self.__sourced + '/' + filename
OUT.debug('Check for instruction file', 7)
if not os.path.isfile(post_file):
return
self.run_vars(server)
post_instructions = open(post_file).readlines()
OUT.debug('Read post instructions', 7)
post = [
'',
'=================================================================',
'POST-' + ptype.upper() + ' INSTRUCTIONS',
'=================================================================',
'']
for i in post_instructions:
i = i.replace('"', '\\"')
post.append(os.popen('printf "' + i + '"\n').read()[:-1])
post = post + [
'',
'=================================================================',
'']
for i in post:
OUT.notice(i)
def show_postinst(self, server = None):
'''
Display any post-installation instructions, if there are any.
'''
OUT.debug('Running show_postinst', 6)
self.show_post(filename = 'postinst-en.txt', ptype = 'install', server = server)
def show_postupgrade(self, server = None):
'''
Display any post-upgrade instructions, if there are any.
'''
OUT.debug('Running show_postupgrade', 6)
self.show_post(filename = 'postupgrade-en.txt', ptype = 'upgrade', server = server)
def run_vars(self, server = None):
'''
This function exports the necessary variables to the shell
environment so that they are accessible within the shell scripts
and/or files provided by the ebuild.
'''
v_root = self.get_config('vhost_root')
v_cgi = self.get_config('g_cgibindir')
v_conf = self.get_config('vhost_config_dir')
v_err = v_root + '/' + self.get_config('my_errorsbase')
v_icon = v_root + '/' + self.get_config('my_iconsbase')
g_inst = self.get_config('g_installdir')
g_htd = self.get_config('g_htdocsdir')
g_orig = self.get_config('g_orig_installdir')
vsu = None
vsg = None
if server:
vsu = pwd.getpwuid(server.vhost_server_uid)[0]
vsg = grp.getgrgid(server.vhost_server_gid)[0]
OUT.debug('Exporting variables', 7)
export_map = {'MY_HOSTROOTDIR' : None,
'MY_HTDOCSDIR' : None,
'MY_CGIBINDIR' : None,
'MY_INSTALLDIR' : g_inst,
'MY_ICONSDIR' : None,
'MY_SERVERCONFIGDIR' : None,
'MY_ERRORSDIR' : None,
'MY_SQLSCRIPTSDIR' : None,
'VHOST_ROOT' : None,
'VHOST_HTDOCSDIR' : g_htd,
'VHOST_CGIBINDIR' : v_cgi,
'VHOST_CONFDIR' : v_conf,
'VHOST_ERRORSDIR' : v_err,
'VHOST_ICONSDIR' : v_icon,
'VHOST_HOSTNAME' : None,
'VHOST_SERVER' : None,
'VHOST_APPDIR' : g_orig,
'VHOST_CONFIG_UID' : None,
'VHOST_CONFIG_GID' : None,
'VHOST_SERVER_UID' : vsu,
'VHOST_SERVER_GID' : vsg,
'VHOST_DEFAULT_UID' : None,
'VHOST_DEFAULT_GID' : None,
'VHOST_PERMS_SERVEROWNED_DIR' : None,
'VHOST_PERMS_SERVEROWNED_FILE' : None,
'VHOST_PERMS_CONFIGOWNED_DIR' : None,
'VHOST_PERMS_CONFIGOWNED_FILE' : None,
'VHOST_PERMS_DEFAULTOWNED_DIR' : None,
'VHOST_PERMS_VIRTUALOWNED_FILE': None,
'VHOST_PERMS_INSTALLDIR' : None,
'ROOT' : self.__root,
'PN' : None,
'PVR': None}
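        # Keys mapped to None above are placeholders: the loop below resolves
        # them by looking up the lower-cased key in the configuration.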
result = {}
for i in list(export_map.keys()):
value = export_map[i]
if not value:
value = self.get_config(i.lower())
os.putenv(i, str(value))
result[i] = str(value)
return result
|
gentoo/webapp-config
|
WebappConfig/ebuild.py
|
Python
|
gpl-2.0
| 6,937
| 0.010379
|
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
# basic python
import numpy as np
import numpy.random as npr
import cPickle
# theano business
import theano
import theano.tensor as T
# phil's sweetness
import utils
from NetLayers import relu_actfun, softplus_actfun, tanh_actfun
from InfNet import InfNet
from HydraNet import HydraNet
from GPSImputer import GPSImputer, load_gpsimputer_from_file
from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX
RESULT_PATH = "IMP_MNIST_GPSI/"
###############################
###############################
## TEST GPS IMPUTER ON MNIST ##
###############################
###############################
def test_mnist(step_type='add',
imp_steps=6,
occ_dim=15,
drop_prob=0.0):
#########################################
# Format the result tag more thoroughly #
#########################################
dp_int = int(100.0 * drop_prob)
result_tag = "{}RELU_GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
Xtr = np.vstack((Xtr, Xva))
Xva = Xte
#del Xte
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
##########################
# Get some training data #
##########################
# rng = np.random.RandomState(1234)
# dataset = 'data/mnist.pkl.gz'
# datasets = load_udm(dataset, as_shared=False, zero_mean=False)
# Xtr = datasets[0][0]
# Xva = datasets[1][0]
# Xte = datasets[2][0]
# # Merge validation set and training set, and test on test set.
# #Xtr = np.concatenate((Xtr, Xva), axis=0)
# #Xva = Xte
# Xtr = to_fX(shift_and_scale_into_01(Xtr))
# Xva = to_fX(shift_and_scale_into_01(Xva))
# tr_samples = Xtr.shape[0]
# va_samples = Xva.shape[0]
batch_size = 200
batch_reps = 1
all_pix_mean = np.mean(np.mean(Xtr, axis=1))
data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
x_dim = Xtr.shape[1]
s_dim = x_dim
#s_dim = 300
z_dim = 100
init_scale = 0.6
x_in_sym = T.matrix('x_in_sym')
x_out_sym = T.matrix('x_out_sym')
x_mask_sym = T.matrix('x_mask_sym')
#################
# p_zi_given_xi #
#################
params = {}
shared_config = [(x_dim + x_dim), 500, 500]
top_config = [shared_config[-1], z_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_zi_given_xi.init_biases(0.0)
###################
# p_sip1_given_zi #
###################
params = {}
shared_config = [z_dim, 500, 500]
output_config = [s_dim, s_dim, s_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_sip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_sip1_given_zi.init_biases(0.0)
################
# p_x_given_si #
################
params = {}
shared_config = [s_dim]
output_config = [x_dim, x_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_x_given_si = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_x_given_si.init_biases(0.0)
#################
# q_zi_given_xi #
#################
params = {}
shared_config = [(x_dim + x_dim), 500, 500]
top_config = [shared_config[-1], z_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
q_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
q_zi_given_xi.init_biases(0.0)
###########################################################
# Define parameters for the GPSImputer, and initialize it #
###########################################################
print("Building the GPSImputer...")
gpsi_params = {}
gpsi_params['x_dim'] = x_dim
gpsi_params['z_dim'] = z_dim
gpsi_params['s_dim'] = s_dim
# switch between direct construction and construction via p_x_given_si
gpsi_params['use_p_x_given_si'] = False
gpsi_params['imp_steps'] = imp_steps
gpsi_params['step_type'] = step_type
gpsi_params['x_type'] = 'bernoulli'
gpsi_params['obs_transform'] = 'sigmoid'
GPSI = GPSImputer(rng=rng,
x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, \
p_zi_given_xi=p_zi_given_xi, \
p_sip1_given_zi=p_sip1_given_zi, \
p_x_given_si=p_x_given_si, \
q_zi_given_xi=q_zi_given_xi, \
params=gpsi_params, \
shared_param_dicts=None)
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
log_name = "{}_RESULTS.txt".format(result_tag)
out_file = open(log_name, 'wb')
costs = [0. for i in range(10)]
learn_rate = 0.0002
momentum = 0.5
batch_idx = np.arange(batch_size) + tr_samples
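    # Schedule used below: 'scale' ramps the learning rate and momentum up over
    # the first 5000 batches, 'lam_scale' decays the lam_kld_g weight to zero over
    # 100000 batches, the base learning rate is multiplied by 0.93 every 15000
    # batches, and momentum steps from 0.75 to 0.90 after 10000 batches.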
for i in range(250000):
scale = min(1.0, ((i+1) / 5000.0))
lam_scale = 1.0 - min(1.0, ((i+1) / 100000.0)) # decays from 1.0->0.0
if (((i + 1) % 15000) == 0):
learn_rate = learn_rate * 0.93
if (i > 10000):
momentum = 0.90
else:
momentum = 0.75
# get the indices of training samples for this batch update
batch_idx += batch_size
if (np.max(batch_idx) >= tr_samples):
# we finished an "epoch", so we rejumble the training set
Xtr = row_shuffle(Xtr)
batch_idx = np.arange(batch_size)
# set sgd and objective function hyperparams for this update
GPSI.set_sgd_params(lr=scale*learn_rate, \
mom_1=scale*momentum, mom_2=0.98)
GPSI.set_train_switch(1.0)
GPSI.set_lam_nll(lam_nll=1.0)
GPSI.set_lam_kld(lam_kld_p=0.05, lam_kld_q=0.95, lam_kld_g=(0.1 * lam_scale))
GPSI.set_lam_l2w(1e-5)
# perform a minibatch update and record the cost for this batch
xb = to_fX( Xtr.take(batch_idx, axis=0) )
xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
result = GPSI.train_joint(xi, xo, xm, batch_reps)
# do diagnostics and general training tracking
costs = [(costs[j] + result[j]) for j in range(len(result)-1)]
if ((i % 250) == 0):
costs = [(v / 250.0) for v in costs]
str1 = "-- batch {0:d} --".format(i)
str2 = " joint_cost: {0:.4f}".format(costs[0])
str3 = " nll_bound : {0:.4f}".format(costs[1])
str4 = " nll_cost : {0:.4f}".format(costs[2])
str5 = " kld_cost : {0:.4f}".format(costs[3])
str6 = " reg_cost : {0:.4f}".format(costs[4])
joint_str = "\n".join([str1, str2, str3, str4, str5, str6])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
costs = [0.0 for v in costs]
if ((i % 1000) == 0):
Xva = row_shuffle(Xva)
# record an estimate of performance on the test set
xi, xo, xm = construct_masked_data(Xva[0:5000], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll, kld = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10)
vfe = np.mean(nll) + np.mean(kld)
str1 = " va_nll_bound : {}".format(vfe)
str2 = " va_nll_term : {}".format(np.mean(nll))
str3 = " va_kld_q2p : {}".format(np.mean(kld))
joint_str = "\n".join([str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
if ((i % 2000) == 0):
GPSI.save_to_file("{}_PARAMS.pkl".format(result_tag))
# Get some validation samples for evaluating model performance
xb = to_fX( Xva[0:100] )
xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
xi = np.repeat(xi, 2, axis=0)
xo = np.repeat(xo, 2, axis=0)
xm = np.repeat(xm, 2, axis=0)
# draw some sample imputations from the model
samp_count = xi.shape[0]
_, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False)
seq_len = len(model_samps)
seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1]))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
seq_samps[idx] = model_samps[s2][s1]
idx += 1
file_name = "{0:s}_samples_ng_b{1:d}.png".format(result_tag, i)
utils.visualize_samples(seq_samps, file_name, num_rows=20)
# get visualizations of policy parameters
# file_name = "{0:s}_gen_step_weights_b{1:d}.png".format(result_tag, i)
# W = GPSI.gen_step_weights.get_value(borrow=False)
# utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20)
# file_name = "{0:s}_gen_write_gate_weights_b{1:d}.png".format(result_tag, i)
# W = GPSI.gen_write_gate_weights.get_value(borrow=False)
# utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20)
# file_name = "{0:s}_gen_erase_gate_weights_b{1:d}.png".format(result_tag, i)
# W = GPSI.gen_erase_gate_weights.get_value(borrow=False)
# utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20)
# file_name = "{0:s}_gen_inf_weights_b{1:d}.png".format(result_tag, i)
# W = GPSI.gen_inf_weights.get_value(borrow=False).T
# utils.visualize_samples(W[:,:x_dim], file_name, num_rows=20)
#################################
#################################
## CHECK MNIST IMPUTER RESULTS ##
#################################
#################################
def test_mnist_results(step_type='add',
imp_steps=6,
occ_dim=15,
drop_prob=0.0):
#########################################
# Format the result tag more thoroughly #
#########################################
dp_int = int(100.0 * drop_prob)
result_tag = "{}GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
Xtr = np.vstack((Xtr, Xva))
Xva = Xte
#del Xte
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
##########################
# Get some training data #
##########################
# rng = np.random.RandomState(1234)
# dataset = 'data/mnist.pkl.gz'
# datasets = load_udm(dataset, as_shared=False, zero_mean=False)
# Xtr = datasets[0][0]
# Xva = datasets[1][0]
# Xte = datasets[2][0]
# # Merge validation set and training set, and test on test set.
# #Xtr = np.concatenate((Xtr, Xva), axis=0)
# #Xva = Xte
# Xtr = to_fX(shift_and_scale_into_01(Xtr))
# Xva = to_fX(shift_and_scale_into_01(Xva))
# tr_samples = Xtr.shape[0]
# va_samples = Xva.shape[0]
batch_size = 250
batch_reps = 1
all_pix_mean = np.mean(np.mean(Xtr, axis=1))
data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
# Load parameters from a previously trained model
print("Testing model load from file...")
GPSI = load_gpsimputer_from_file(f_name="{}_PARAMS.pkl".format(result_tag), \
rng=rng)
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
log_name = "{}_FINAL_RESULTS_NEW.txt".format(result_tag)
out_file = open(log_name, 'wb')
Xva = row_shuffle(Xva)
# record an estimate of performance on the test set
str0 = "GUIDED SAMPLE BOUND:"
print(str0)
xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=True)
xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=True)
nll = np.concatenate((nll_0, nll_1))
kld = np.concatenate((kld_0, kld_1))
vfe = np.mean(nll) + np.mean(kld)
str1 = " va_nll_bound : {}".format(vfe)
str2 = " va_nll_term : {}".format(np.mean(nll))
str3 = " va_kld_q2p : {}".format(np.mean(kld))
joint_str = "\n".join([str0, str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
# record an estimate of performance on the test set
str0 = "UNGUIDED SAMPLE BOUND:"
print(str0)
xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=False)
xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=False)
nll = np.concatenate((nll_0, nll_1))
kld = np.concatenate((kld_0, kld_1))
str1 = " va_nll_bound : {}".format(np.mean(nll))
str2 = " va_nll_term : {}".format(np.mean(nll))
str3 = " va_kld_q2p : {}".format(np.mean(kld))
joint_str = "\n".join([str0, str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
if __name__=="__main__":
#########
# MNIST #
#########
# TRAINING
#test_mnist(step_type='add', occ_dim=14, drop_prob=0.0)
#test_mnist(step_type='add', occ_dim=16, drop_prob=0.0)
#test_mnist(step_type='add', occ_dim=0, drop_prob=0.6)
#test_mnist(step_type='add', occ_dim=0, drop_prob=0.8)
#test_mnist(step_type='jump', occ_dim=14, drop_prob=0.0)
#test_mnist(step_type='jump', occ_dim=16, drop_prob=0.0)
#test_mnist(step_type='jump', occ_dim=0, drop_prob=0.6)
#test_mnist(step_type='jump', occ_dim=0, drop_prob=0.8)
#test_mnist(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9)
test_mnist(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9)
# RESULTS
# test_mnist_results(step_type='add', occ_dim=14, drop_prob=0.0)
# test_mnist_results(step_type='add', occ_dim=16, drop_prob=0.0)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.6)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.7)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.8)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.9)
# test_mnist_results(step_type='jump', occ_dim=14, drop_prob=0.0)
# test_mnist_results(step_type='jump', occ_dim=16, drop_prob=0.0)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.6)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.7)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.8)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9)
test_mnist_results(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9)
|
capybaralet/Sequential-Generation
|
TestImpGPSI_MNIST.py
|
Python
|
mit
| 18,331
| 0.004582
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-09 01:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hordak", "0007_auto_20161209_0111")]
operations = [
migrations.RenameField("Account", "has_statements", "is_bank_account")
]
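    # RenameField's positional arguments are (model_name, old_name, new_name):
    # Account.has_statements becomes Account.is_bank_account.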
|
adamcharnock/django-hordak
|
hordak/migrations/0008_auto_20161209_0129.py
|
Python
|
mit
| 353
| 0
|
#!/usr/bin/python
''' Python API for YouTube
Copyright (C) 2013 nagev
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. '''
__version__ = "0.2"
__author__ = "nagev"
__license__ = "GPLv3"
import re
import sys
import time
import json
import urllib
import urllib2
from urlparse import parse_qs
def _decrypt_signature(s):
if len(s) == 92:
return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + \
s[80:83]
elif len(s) == 90:
return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + \
s[78:81]
elif len(s) == 88:
return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1]\
+ s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
elif len(s) == 87:
return s[4:23] + s[86] + s[24:85]
elif len(s) == 86:
return s[83:85] + s[26] + s[79:46:-1] + s[85] + s[45:36:-1] + s[30] + \
s[35:30:-1] + s[46] + s[29:26:-1] + s[82] + s[25:1:-1]
elif len(s) == 85:
return s[2:8] + s[0] + s[9:21] + s[65] + s[22:65] + s[84] + s[66:82] +\
s[21]
elif len(s) == 84:
return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26]
elif len(s) == 83:
return s[:15] + s[80] + s[16:80] + s[15]
elif len(s) == 82:
return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1]\
+ s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
elif len(s) == 81:
return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1]\
+ s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
elif len(s) == 79:
return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1]\
+ s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
else:
raise NameError("Unable to decode video url - sig len %s" % len(s))
class Stream():
resolutions = {
'5': ('240x400', 'flv'),
'17': ('144x176', '3gp'),
'18': ('360x640', 'mp4'),
'22': ('720x1280', 'mp4'),
'34': ('360x640', 'flv'),
'35': ('480x854', 'flv'),
'36': ('320x240', '3gp'),
'37': ('1080x1920', 'mp4'),
'38': ('3072x4096', 'superHD'),
'43': ('360x640', 'webm'),
'44': ('480x854', 'webm'),
'45': ('720x1280', 'webm'),
'46': ('1080x1920', 'webm'),
'82': ('640x360-3D', 'mp4'),
'84': ('1280x720-3D', 'mp4'),
'100': ('640x360-3D', 'webm'),
'102': ('1280x720-3D', 'webm')}
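    # Maps YouTube itag codes to a (resolution, container/extension) pair.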
def __init__(self, streammap, opener, title="ytvid"):
if not streammap.get("sig", ""):
streammap['sig'] = [_decrypt_signature(streammap['s'][0])]
self.url = streammap['url'][0] + '&signature=' + streammap['sig'][0]
self.vidformat = streammap['type'][0].split(';')[0]
self.resolution = self.resolutions[streammap['itag'][0]][0]
self.extension = self.resolutions[streammap['itag'][0]][1]
self.itag = streammap['itag'][0]
self.title = title
self.filename = self.title + "." + self.extension
self._opener = opener
def get_filesize(self):
opener = self._opener
return int(opener.open(self.url).headers['content-length'])
def download(self, progress=True, filepath=""):
response = self._opener.open(self.url)
total = int(response.info().getheader('Content-Length').strip())
print u"-Downloading '{}' [{:,} Bytes]".format(self.filename, total)
status_string = (' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} '
'kbps]. ETA: [{:.0f} secs]')
chunksize, bytesdone, t0 = 16834, 0, time.time()
outfh = open(filepath or self.filename, 'wb')
while 1:
chunk = response.read(chunksize)
elapsed = time.time() - t0
outfh.write(chunk)
bytesdone += len(chunk)
if not chunk:
outfh.close()
break
if progress:
rate = (bytesdone / 1024) / elapsed
eta = (total - bytesdone) / (rate * 1024)
display = (bytesdone, bytesdone * 1.0 / total, rate, eta)
status = status_string.format(*display)
sys.stdout.write("\r" + status + ' ' * 4 + "\r")
                sys.stdout.flush()
print "\nDone"
class Pafy():
def __len__(self):
return self.length
def __repr__(self):
out = ""
keys = "Title Author ID Duration Rating Views Thumbnail Keywords"
keys = keys.split(" ")
keywords = ", ".join(self.keywords).decode("utf8")
length = time.strftime('%H:%M:%S', time.gmtime(self.length))
info = dict(Title=self.title,
Author=self.author,
Views=self.viewcount,
Rating=self.rating,
Duration=length,
ID=self.videoid,
Thumbnail=self.thumb,
Keywords=keywords)
for k in keys:
try:
out += "%s: %s\n" % (k, info[k])
except KeyError:
pass
return out.encode("utf8", "ignore")
def __init__(self, video_url):
infoUrl = 'https://www.youtube.com/get_video_info?video_id='
vidid = re.search(r'v=([a-zA-Z0-9-_]*)', video_url).group(1)
infoUrl += vidid + "&asv=3&el=detailpage&hl=en_US"
self.urls = []
opener = urllib2.build_opener()
ua = ("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64;"
"Trident/5.0)")
opener.addheaders = [('User-Agent', ua)]
self.keywords = ""
self.rawinfo = opener.open(infoUrl).read()
self.allinfo = parse_qs(self.rawinfo)
self.title = self.allinfo['title'][0].decode('utf-8')
self.author = self.allinfo['author'][0]
self.videoid = self.allinfo['video_id'][0]
if 'keywords' in self.allinfo:
self.keywords = self.allinfo['keywords'][0].split(',')
self.rating = float(self.allinfo['avg_rating'][0])
self.length = int(self.allinfo['length_seconds'][0])
self.duration = time.strftime('%H:%M:%S', time.gmtime(self.length))
self.viewcount = int(self.allinfo['view_count'][0])
self.thumb = urllib.unquote_plus(self.allinfo['thumbnail_url'][0])
self.formats = self.allinfo['fmt_list'][0].split(",")
self.formats = [x.split("/") for x in self.formats]
if self.allinfo.get('iurlsd'):
self.bigthumb = self.allinfo['iurlsd'][0]
if self.allinfo.get('iurlmaxres'):
self.bigthumbhd = self.allinfo['iurlmaxres'][0]
streamMap = self.allinfo['url_encoded_fmt_stream_map'][0].split(',')
smap = [parse_qs(sm) for sm in streamMap]
if not smap[0].get("sig", ""): # vevo!
watchurl = "https://www.youtube.com/watch?v=" + vidid
watchinfo = opener.open(watchurl).read()
match = re.search(r';ytplayer.config = ({.*?});', watchinfo)
try:
myjson = json.loads(match.group(1))
except:
raise NameError('Problem handling this video')
args = myjson['args']
streamMap = args['url_encoded_fmt_stream_map'].split(",")
smap = [parse_qs(sm) for sm in streamMap]
self.streams = [Stream(sm, opener, self.title) for sm in smap]
def getbest(self, preftype="any", ftypestrict=True):
# set ftypestrict to False to use a non preferred format if that
# has a higher resolution
def _sortkey(x, key3d=0, keyres=0, keyftype=0):
key3d = "3D" not in x.resolution
keyres = int(x.resolution.split("x")[0])
keyftype = preftype == x.extension
if ftypestrict:
return (key3d, keyftype, keyres)
else:
return (key3d, keyres, keyftype)
return max(self.streams, key=_sortkey)
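# Illustrative usage sketch (editor's addition; the URL below is hypothetical and
# the snippet is not executed as part of this module):
#
#     video = Pafy("https://www.youtube.com/watch?v=SOME_VIDEO_ID")
#     print video                            # metadata summary via __repr__
#     best = video.getbest(preftype="mp4")
#     best.download(filepath=best.filename)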
|
sadolit/pafy
|
pafy.py
|
Python
|
gpl-3.0
| 8,660
| 0.000577
|
"""
Markup class allows the use of easy-to-write characters to style the text
instead of using escape codes.
==text== --> reverse video
'''text''' --> bold
~~text~~ --> strikethrough
Copyright (c) 2015
makos <https://github.com/makos>, chibi <http://neetco.de/chibi>
under GNU GPL v3, see LICENSE for details
"""
import re
class Marker():
    def esc(self, input_text):
        """Escape literal ESC characters so input text cannot inject its own codes."""
        input_text = input_text.replace('\033', '\\033')
        return input_text
def demarkify(self, input_text):
"""Prints out a marked-up piece of text."""
output_text = self.esc(input_text)
# strikethrough
output_text = re.sub(
'~~(?P<substring>.*?)~~', '\033[0;9m\g<substring>\033[0m',
output_text)
# bold
output_text = re.sub(
'\'\'\'(?P<substring>.*?)\'\'\'', '\033[0;1m\g<substring>\033[0m',
output_text)
# rv
output_text = re.sub(
'==(?P<substring>.*?)==', '\033[0;7m\g<substring>\033[0m',
output_text)
return output_text
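# Illustrative usage sketch (editor's addition, not part of the original module):
#
#     marker = Marker()
#     styled = marker.demarkify("==header== with '''bold''' and ~~struck~~ text")
#     # 'styled' now contains ANSI SGR escape sequences (reverse video, bold,
#     # strikethrough) in place of the markup characters described above.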
|
makos/sshchan-oop
|
chan_mark.py
|
Python
|
gpl-3.0
| 1,058
| 0.004726
|
from django.utils.six.moves.builtins import str
#
# django-atompub by James Tauber <http://jtauber.com/>
# http://code.google.com/p/django-atompub/
# An implementation of the Atom format and protocol for Django
#
# For instructions on how to use this module to generate Atom feeds,
# see http://code.google.com/p/django-atompub/wiki/UserGuide
#
#
# Copyright (c) 2007, James Tauber
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
from xml.sax.saxutils import XMLGenerator
from django.utils import timezone
GENERATOR_TEXT = 'django-atompub'
GENERATOR_ATTR = {
'uri': 'http://code.google.com/p/django-atompub/',
'version': 'r33'
}
# based on django.utils.xmlutils.SimplerXMLGenerator
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None:
attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
# based on django.utils.feedgenerator.rfc3339_date
def rfc3339_date(date):
    # Zero-pad the fields as RFC 3339 requires (e.g. 2016-01-05T09:03:07Z).
    return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (date.year, date.month, date.day, date.hour, date.minute, date.second)
# based on django.utils.feedgenerator.get_tag_uri
def get_tag_uri(url, date):
"Creates a TagURI. See http://diveintomark.org/archives/2004/05/28/howto-atom-id"
tag = re.sub('^http://', '', url)
if date is not None:
tag = re.sub('/', ',%s-%s-%s:/' % (date.year, date.month, date.day), tag, 1)
tag = re.sub('#', '/', tag)
return 'tag:' + tag
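# Example of the scheme above (editor's illustration with a hypothetical URL):
# get_tag_uri('http://example.com/2004/05/28/howto#frag', date(2004, 5, 28))
# returns 'tag:example.com,2004-5-28:/2004/05/28/howto/frag'.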
# based on django.contrib.syndication.feeds.Feed
class Feed(object):
VALIDATE = True
def __init__(self, slug, feed_url):
# @@@ slug and feed_url are not used yet
pass
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def get_feed(self, extra_params=None):
if extra_params:
try:
obj = self.get_object(extra_params.split('/'))
except (AttributeError, LookupError):
raise LookupError('Feed does not exist')
else:
obj = None
feed = AtomFeed(
atom_id=self.__get_dynamic_attr('feed_id', obj),
title=self.__get_dynamic_attr('feed_title', obj),
updated=self.__get_dynamic_attr('feed_updated', obj),
icon=self.__get_dynamic_attr('feed_icon', obj),
logo=self.__get_dynamic_attr('feed_logo', obj),
rights=self.__get_dynamic_attr('feed_rights', obj),
subtitle=self.__get_dynamic_attr('feed_subtitle', obj),
authors=self.__get_dynamic_attr('feed_authors', obj, default=[]),
categories=self.__get_dynamic_attr('feed_categories', obj, default=[]),
contributors=self.__get_dynamic_attr('feed_contributors', obj, default=[]),
links=self.__get_dynamic_attr('feed_links', obj, default=[]),
extra_attrs=self.__get_dynamic_attr('feed_extra_attrs', obj),
hide_generator=self.__get_dynamic_attr('hide_generator', obj, default=False)
)
items = self.__get_dynamic_attr('items', obj)
if items is None:
raise LookupError('Feed has no items field')
for item in items:
feed.add_item(
atom_id=self.__get_dynamic_attr('item_id', item),
title=self.__get_dynamic_attr('item_title', item),
updated=self.__get_dynamic_attr('item_updated', item),
content=self.__get_dynamic_attr('item_content', item),
published=self.__get_dynamic_attr('item_published', item),
rights=self.__get_dynamic_attr('item_rights', item),
source=self.__get_dynamic_attr('item_source', item),
summary=self.__get_dynamic_attr('item_summary', item),
authors=self.__get_dynamic_attr('item_authors', item, default=[]),
categories=self.__get_dynamic_attr('item_categories', item, default=[]),
contributors=self.__get_dynamic_attr('item_contributors', item, default=[]),
links=self.__get_dynamic_attr('item_links', item, default=[]),
extra_attrs=self.__get_dynamic_attr('item_extra_attrs', None, default={}),
)
if self.VALIDATE:
feed.validate()
return feed
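# Illustrative sketch (editor's addition; the model and field names are hypothetical):
# a Feed subclass supplies the feed_*/item_* values used by get_feed() either as
# plain attributes or as callables, optionally taking the object/item, e.g.
#
#     class EventFeed(Feed):
#         feed_id = 'tag:example.com,2016:/events'
#         feed_title = 'Upcoming events'
#
#         def items(self, obj=None):
#             return Event.objects.order_by('-updated')[:20]
#
#         def item_id(self, item):
#             return get_tag_uri(item.get_absolute_url(), item.created)
#
#         def item_title(self, item):
#             return item.title
#
#         def item_updated(self, item):
#             return item.updated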
class ValidationError(Exception):
pass
# based on django.utils.feedgenerator.SyndicationFeed and django.utils.feedgenerator.Atom1Feed
class AtomFeed(object):
mime_type = 'application/atom+xml'
ns = u'http://www.w3.org/2005/Atom'
def __init__(self, atom_id, title, updated=None, icon=None, logo=None, rights=None, subtitle=None, authors=None, categories=None, contributors=None, links=None, extra_attrs={}, hide_generator=False):
if atom_id is None:
raise LookupError('Feed has no feed_id field')
if title is None:
raise LookupError('Feed has no feed_title field')
# if updated == None, we'll calculate it
self.feed = {
'id': atom_id,
'title': title,
'updated': updated,
'icon': icon,
'logo': logo,
'rights': rights,
'subtitle': subtitle,
'authors': authors or [],
'categories': categories or [],
'contributors': contributors or [],
'links': links or [],
'extra_attrs': extra_attrs,
'hide_generator': hide_generator,
}
self.items = []
def add_item(self, atom_id, title, updated, content=None, published=None, rights=None, source=None, summary=None, authors=None, categories=None, contributors=None, links=None, extra_attrs={}):
if atom_id is None:
raise LookupError('Feed has no item_id method')
if title is None:
raise LookupError('Feed has no item_title method')
if updated is None:
raise LookupError('Feed has no item_updated method')
self.items.append({
'id': atom_id,
'title': title,
'updated': updated,
'content': content,
'published': published,
'rights': rights,
'source': source,
'summary': summary,
'authors': authors or [],
'categories': categories or [],
'contributors': contributors or [],
'links': links or [],
'extra_attrs': extra_attrs,
})
def latest_updated(self):
"""
Returns the latest item's updated or the current time if there are no items.
"""
updates = [item['updated'] for item in self.items]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return timezone.now() # @@@ really we should allow a feed to define its "start" for this case
def write_text_construct(self, handler, element_name, data):
if isinstance(data, tuple):
text_type, text = data
if text_type == 'xhtml':
handler.startElement(element_name, {'type': text_type})
handler._write(text) # write unescaped -- it had better be well-formed XML
handler.endElement(element_name)
else:
handler.addQuickElement(element_name, text, {'type': text_type})
else:
handler.addQuickElement(element_name, data)
def write_person_construct(self, handler, element_name, person):
handler.startElement(element_name, {})
handler.addQuickElement(u'name', person['name'])
if 'uri' in person:
handler.addQuickElement(u'uri', person['uri'])
if 'email' in person:
handler.addQuickElement(u'email', person['email'])
handler.endElement(element_name)
def write_link_construct(self, handler, link):
if 'length' in link:
link['length'] = str(link['length'])
handler.addQuickElement(u'link', None, link)
def write_category_construct(self, handler, category):
handler.addQuickElement(u'category', None, category)
def write_source(self, handler, data):
handler.startElement(u'source', {})
if data.get('id'):
handler.addQuickElement(u'id', data['id'])
if data.get('title'):
self.write_text_construct(handler, u'title', data['title'])
if data.get('subtitle'):
self.write_text_construct(handler, u'subtitle', data['subtitle'])
if data.get('icon'):
handler.addQuickElement(u'icon', data['icon'])
if data.get('logo'):
handler.addQuickElement(u'logo', data['logo'])
if data.get('updated'):
handler.addQuickElement(u'updated', rfc3339_date(data['updated']))
for category in data.get('categories', []):
self.write_category_construct(handler, category)
for link in data.get('links', []):
self.write_link_construct(handler, link)
for author in data.get('authors', []):
self.write_person_construct(handler, u'author', author)
for contributor in data.get('contributors', []):
self.write_person_construct(handler, u'contributor', contributor)
if data.get('rights'):
self.write_text_construct(handler, u'rights', data['rights'])
handler.endElement(u'source')
def write_content(self, handler, data):
if isinstance(data, tuple):
content_dict, text = data
if content_dict.get('type') == 'xhtml':
handler.startElement(u'content', content_dict)
handler._write(text) # write unescaped -- it had better be well-formed XML
handler.endElement(u'content')
else:
handler.addQuickElement(u'content', text, content_dict)
else:
handler.addQuickElement(u'content', data)
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
feed_attrs = {u'xmlns': self.ns}
if self.feed.get('extra_attrs'):
feed_attrs.update(self.feed['extra_attrs'])
handler.startElement(u'feed', feed_attrs)
handler.addQuickElement(u'id', self.feed['id'])
self.write_text_construct(handler, u'title', self.feed['title'])
if self.feed.get('subtitle'):
self.write_text_construct(handler, u'subtitle', self.feed['subtitle'])
if self.feed.get('icon'):
handler.addQuickElement(u'icon', self.feed['icon'])
if self.feed.get('logo'):
handler.addQuickElement(u'logo', self.feed['logo'])
if self.feed['updated']:
handler.addQuickElement(u'updated', rfc3339_date(self.feed['updated']))
else:
handler.addQuickElement(u'updated', rfc3339_date(self.latest_updated()))
for category in self.feed['categories']:
self.write_category_construct(handler, category)
for link in self.feed['links']:
self.write_link_construct(handler, link)
for author in self.feed['authors']:
self.write_person_construct(handler, u'author', author)
for contributor in self.feed['contributors']:
self.write_person_construct(handler, u'contributor', contributor)
if self.feed.get('rights'):
self.write_text_construct(handler, u'rights', self.feed['rights'])
if not self.feed.get('hide_generator'):
handler.addQuickElement(u'generator', GENERATOR_TEXT, GENERATOR_ATTR)
self.write_items(handler)
handler.endElement(u'feed')
def write_items(self, handler):
for item in self.items:
entry_attrs = item.get('extra_attrs', {})
handler.startElement(u'entry', entry_attrs)
handler.addQuickElement(u'id', item['id'])
self.write_text_construct(handler, u'title', item['title'])
handler.addQuickElement(u'updated', rfc3339_date(item['updated']))
if item.get('published'):
handler.addQuickElement(u'published', rfc3339_date(item['published']))
if item.get('rights'):
self.write_text_construct(handler, u'rights', item['rights'])
if item.get('source'):
self.write_source(handler, item['source'])
for author in item['authors']:
self.write_person_construct(handler, u'author', author)
for contributor in item['contributors']:
self.write_person_construct(handler, u'contributor', contributor)
for category in item['categories']:
self.write_category_construct(handler, category)
for link in item['links']:
self.write_link_construct(handler, link)
if item.get('summary'):
self.write_text_construct(handler, u'summary', item['summary'])
if item.get('content'):
self.write_content(handler, item['content'])
handler.endElement(u'entry')
def validate(self):
def validate_text_construct(obj):
if isinstance(obj, tuple):
if obj[0] not in ['text', 'html', 'xhtml']:
return False
# @@@ no validation is done that 'html' text constructs are valid HTML
# @@@ no validation is done that 'xhtml' text constructs are well-formed XML or valid XHTML
return True
if not validate_text_construct(self.feed['title']):
raise ValidationError('feed title has invalid type')
if self.feed.get('subtitle'):
if not validate_text_construct(self.feed['subtitle']):
raise ValidationError('feed subtitle has invalid type')
if self.feed.get('rights'):
if not validate_text_construct(self.feed['rights']):
raise ValidationError('feed rights has invalid type')
alternate_links = {}
for link in self.feed.get('links'):
if link.get('rel') == 'alternate' or link.get('rel') is None:
key = (link.get('type'), link.get('hreflang'))
if key in alternate_links:
raise ValidationError('alternate links must have unique type/hreflang')
alternate_links[key] = link
if self.feed.get('authors'):
feed_author = True
else:
feed_author = False
for item in self.items:
if not feed_author and not item.get('authors'):
if item.get('source') and item['source'].get('authors'):
pass
else:
raise ValidationError('if no feed author, all entries must have author (possibly in source)')
if not validate_text_construct(item['title']):
raise ValidationError('entry title has invalid type')
if item.get('rights'):
if not validate_text_construct(item['rights']):
raise ValidationError('entry rights has invalid type')
if item.get('summary'):
if not validate_text_construct(item['summary']):
raise ValidationError('entry summary has invalid type')
source = item.get('source')
if source:
if source.get('title'):
if not validate_text_construct(source['title']):
raise ValidationError('source title has invalid type')
if source.get('subtitle'):
if not validate_text_construct(source['subtitle']):
raise ValidationError('source subtitle has invalid type')
if source.get('rights'):
if not validate_text_construct(source['rights']):
raise ValidationError('source rights has invalid type')
alternate_links = {}
for link in item.get('links'):
if link.get('rel') == 'alternate' or link.get('rel') is None:
key = (link.get('type'), link.get('hreflang'))
if key in alternate_links:
raise ValidationError('alternate links must have unique type/hreflang')
alternate_links[key] = link
if not item.get('content'):
if not alternate_links:
raise ValidationError('if no content, entry must have alternate link')
if item.get('content') and isinstance(item.get('content'), tuple):
content_type = item.get('content')[0].get('type')
if item.get('content')[0].get('src'):
if item.get('content')[1]:
raise ValidationError('content with src should be empty')
if not item.get('summary'):
raise ValidationError('content with src requires a summary too')
if content_type in ['text', 'html', 'xhtml']:
raise ValidationError('content with src cannot have type of text, html or xhtml')
if content_type:
if '/' in content_type and not content_type.startswith('text/') and not content_type.endswith('/xml') and not content_type.endswith('+xml') and content_type not in ['application/xml-external-parsed-entity', 'application/xml-dtd']:
# @@@ check content is Base64
if not item.get('summary'):
raise ValidationError('content in Base64 requires a summary too')
if content_type not in ['text', 'html', 'xhtml'] and '/' not in content_type:
raise ValidationError('content type does not appear to be valid')
# @@@ no validation is done that 'html' text constructs are valid HTML
# @@@ no validation is done that 'xhtml' text constructs are well-formed XML or valid XHTML
return
return
class LegacySyndicationFeed(AtomFeed):
"""
    Provides a SyndicationFeed-compatible interface in its __init__ and
add_item but is really a new AtomFeed object.
"""
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None):
atom_id = link
title = title
updated = None # will be calculated
rights = feed_copyright
subtitle = subtitle
author_dict = {'name': author_name}
if author_link:
author_dict['uri'] = author_link
if author_email:
author_dict['email'] = author_email
authors = [author_dict]
if categories:
categories = [{'term': term} for term in categories]
links = [{'rel': 'alternate', 'href': link}]
if feed_url:
links.append({'rel': 'self', 'href': feed_url})
if language:
extra_attrs = {'xml:lang': language}
else:
extra_attrs = {}
# description ignored (as with Atom1Feed)
AtomFeed.__init__(
self, atom_id, title, updated, rights=rights, subtitle=subtitle,
authors=authors, categories=categories, links=links, extra_attrs=extra_attrs)
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=None, item_copyright=None):
if unique_id:
atom_id = unique_id
else:
atom_id = get_tag_uri(link, pubdate)
title = title
updated = pubdate
if item_copyright:
rights = item_copyright
else:
rights = None
if description:
summary = 'html', description
else:
summary = None
author_dict = {'name': author_name}
if author_link:
author_dict['uri'] = author_link
if author_email:
author_dict['email'] = author_email
authors = [author_dict]
        # Guard against the default of None, mirroring the check in __init__.
        categories = [{'term': term} for term in (categories or [])]
links = [{'rel': 'alternate', 'href': link}]
if enclosure:
links.append({'rel': 'enclosure', 'href': enclosure.url, 'length': enclosure.length, 'type': enclosure.mime_type})
AtomFeed.add_item(
self, atom_id, title, updated, rights=rights, summary=summary,
authors=authors, categories=categories, links=links)
|
nharsch/django-scheduler
|
schedule/feeds/atom.py
|
Python
|
bsd-3-clause
| 22,525
| 0.002042
|
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ChemLab
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import chemlab.gromacs_topology
class TestTopologyReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.topol_file = 'topol.top'
cls.gt = chemlab.gromacs_topology.GromacsTopology(cls.topol_file, generate_exclusions=True)
cls.gt.read()
def test_replicated_molecules(self):
"""Test the molecule replication"""
total_nr_atoms = len(self.gt.atoms)
expected_nr_atoms = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_atoms = len(self.gt.gt.molecules_data[mol_name]['atoms'])
expected_nr_atoms += nmols * mol_atoms
self.assertEqual(total_nr_atoms, expected_nr_atoms)
total_nr_bonds = len(self.gt.bonds)
expected_nr_bonds = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_bonds = len(self.gt.gt.molecules_data[mol_name].get('bonds', []))
expected_nr_bonds += nmols * mol_bonds
self.assertEqual(total_nr_bonds, expected_nr_bonds)
total_nr_angles = len(self.gt.angles)
expected_nr_angles = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_angles = len(self.gt.gt.molecules_data[mol_name].get('angles', []))
expected_nr_angles += nmols * mol_angles
self.assertEqual(total_nr_angles, expected_nr_angles)
total_nr_dihedrals = len(self.gt.dihedrals)
expected_nr_dihedrals = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_dihedrals = len(self.gt.gt.molecules_data[mol_name].get('dihedrals',[]))
expected_nr_dihedrals += nmols * mol_dihedrals
self.assertEqual(total_nr_dihedrals, expected_nr_dihedrals)
total_nr_pairs = len(self.gt.pairs)
expected_nr_pairs = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_pairs = len(self.gt.gt.molecules_data[mol_name].get('pairs', []))
expected_nr_pairs += nmols * mol_pairs
self.assertEqual(total_nr_pairs, expected_nr_pairs)
if __name__ == '__main__':
unittest.main()
|
cgchemlab/chemlab
|
src/tests/test_topology_reader.py
|
Python
|
gpl-3.0
| 2,938
| 0.003744
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
#
# The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.5 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 9 Mar 2007
Slightly modified to eliminate the deprecationwarning for the md5 module.
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib
__version__ = '2.2'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
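    # For example: path('/usr') / 'local' / 'bin' == path('/usr/local/bin').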
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
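    # Illustrative result (not part of the original module): on a Unix system,
    # path('/usr/local/bin').splitall() would give
    # [path('/'), 'usr', 'local', 'bin'].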
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
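    # Illustrative behaviour (not part of the original module):
    # path('/home/guido/bin').relpathto('/home/guido/lib/x.py') would return
    # path('../lib/x.py'); two paths on different Windows drives simply yield
    # dest.abspath().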
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
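    # Usage sketch (illustrative, mirroring the docstring above): iterating
    # path('/tmp/project').walk('*.py', errors='warn') yields every matching
    # descendant and reports unreadable directories via warnings.warn()
    # instead of raising.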
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
            except Exception:
                if errors == 'ignore':
                    continue
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
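    # Worked example (illustrative): path('notes.txt').write_text(u'a\nb\n',
    # encoding='utf-8') encodes the text as UTF-8 and replaces each '\n' with
    # os.linesep; passing linesep=None would keep the newlines exactly as
    # given.  'notes.txt' is a hypothetical file name.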
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.new("md5")
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
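    # Note (illustrative): read_md5() returns the raw 16-byte digest; a hex
    # string could be obtained with hashlib.md5(p.bytes()).hexdigest() for a
    # path object p.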
# --- Methods for querying the filesystem.
exists = os.path.exists
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
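# Minimal usage sketch, added for illustration (not part of the original
# module); it runs only when this file is executed directly.
if __name__ == '__main__':
    cwd = path.getcwd()
    print 'Python files under %s:' % cwd
    for f in cwd.walkfiles('*.py'):
        print '  %s' % f.relpath()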
|
samn/spectral-workbench
|
webserver/public/lib/bespin-0.9a2/lib/dryice/path.py
|
Python
|
gpl-3.0
| 33,721
| 0.000919
|
# Copyright 2014-2015 Rumma & Ko Ltd
#
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Defines "parency links" between two "persons", and a user interface
to manage them.
This module is probably useful in combination with
:mod:`lino_xl.lib.households`.
.. autosummary::
:toctree:
choicelists
models
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
"Extends :class:`lino.core.plugin.Plugin`."
verbose_name = _("Parency links")
## settings
person_model = 'contacts.Person'
"""
A string referring to the model which represents a human in your
application. Default value is ``'contacts.Person'`` (referring to
:class:`lino_xl.lib.contacts.Person`).
"""
def on_site_startup(self, site):
self.person_model = site.models.resolve(self.person_model)
super(Plugin, self).on_site_startup(site)
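    # Configuration note (illustrative): a site that stores humans in another
    # model can set `person_model` to a different string such as
    # 'myapp.Person' (a hypothetical model); on_site_startup() above then
    # resolves that string to the actual model class.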
def setup_explorer_menu(self, site, user_type, m):
# mg = site.plugins.contacts
mg = site.plugins[self.person_model._meta.app_label]
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('humanlinks.Links')
m.add_action('humanlinks.LinkTypes')
|
lino-framework/xl
|
lino_xl/lib/humanlinks/__init__.py
|
Python
|
bsd-2-clause
| 1,211
| 0.004129
|
from investor_lifespan_model.investor import Investor
from investor_lifespan_model.market import Market
from investor_lifespan_model.insurer import Insurer
from investor_lifespan_model.lifespan_model import LifespanModel
from investor_lifespan_model.mortality_data import π, G, tf
|
moehle/investor_lifespan_model
|
investor_lifespan_model/__init__.py
|
Python
|
mit
| 282
| 0
|
RegObj.dll is an ActiveX server (and hence has an automation interface). It is
available, with documentation, in the distribution file known as RegObji.exe,
from the following page:
http://msdn.microsoft.com/vbasic/downloads/addins.asp
To provide early binding for RegObj use
>>> from win32com.client import gencache
>>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0)
or the MakePy utility within PythonWin, referring to "Regstration Manipulation Classes (1.0)" (Please notice
the spelling error.)
Sample use, to determine what command is associated with a Python file:
>>> from win32com.client import Dispatch, gencache
>>> from win32con import HKEY_CLASSES_ROOT
>>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0)
>>> regobj = Dispatch ( 'RegObj.Registry' )
>>> HKCR = regobj.RegKeyFromHKey ( HKEY_CLASSES_ROOT )
>>> PythonFileKey = HKCR.ParseKeyName('Python.File\Shell\Open\command')
>>> PythonFileKey.Value
u'J:\\Python22\\pythonw.exe "%1" %*'
|
ActiveState/code
|
recipes/Python/137551_Using_RegObj_Automatiaccess_MSW/recipe-137551.py
|
Python
|
mit
| 1,024
| 0.03125
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'sequence': 500,
'name' : 'Displays product in hr analytic timesheet'
, 'version' : '0.7'
, 'category' : 'HR'
, 'description' : """
This module displays the hidden field product_id
"""
, 'author' : 'ChriCar Beteiligungs- und Beratungs- GmbH'
, 'depends' : ['hr_timesheet' ]
, 'data' : ['hr_timesheet_product.xml']
, 'demo_xml' : []
, 'installable': False
, 'active' : False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
VitalPet/c2c-rd-addons
|
hr_timesheet_product/__openerp__.py
|
Python
|
agpl-3.0
| 1,555
| 0.020579
|
from django.core.management.base import BaseCommand, CommandError
from geography.models import Place
class Command(BaseCommand):
help = u"""Add new places"""
usage_str = 'USAGE: ./manage.py add_places map_name STATE|CITY|RIVER|LAKE|... [difficulty]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(self.usage_str)
if not args[1] in Place.PLACE_TYPE_SLUGS:
raise CommandError(self.usage_str)
        place_type = Place.PLACE_TYPE_SLUGS[args[1]]
map_name = args[0]
state_file = open(map_name.lower() + ".txt")
states = state_file.read()
ss = states.split("\n")
for s in ss:
place = s.split("\t")
if(len(place) == 2):
name = place[1]
code = place[0]
p = Place(code=code, name=name, difficulty=500, type=place_type)
p.save()
self.stdout.write(name + " added")
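        # Illustrative input format (inferred from the parsing above): each
        # line of <map_name>.txt holds "<code>\t<name>", for example:
        #
        #   cz\tCzech Republic
        #   de\tGermany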
|
slaweet/autoskola
|
main/geography/management/commands/add_places.py
|
Python
|
mit
| 985
| 0.00203
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0006_auto_20141229_1630'),
('wiki', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ArticleSubscription',
fields=[
('articleplugin_ptr', models.OneToOneField(auto_created=True, to='wiki.ArticlePlugin', primary_key=True, parent_link=True, serialize=False, on_delete=models.CASCADE)),
('subscription', models.OneToOneField(to='django_nyt.Subscription', on_delete=models.CASCADE)),
],
options={
},
bases=('wiki.articleplugin',),
),
migrations.AlterUniqueTogether(
name='articlesubscription',
unique_together=set([('subscription', 'articleplugin_ptr')]),
),
]
|
floemker/django-wiki
|
src/wiki/plugins/notifications/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 881
| 0.00227
|
"""
Support for Vera cover - curtains, rollershutters etc.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.vera/
"""
import logging
from homeassistant.components.cover import CoverDevice
from homeassistant.components.vera import (
VeraDevice, VERA_DEVICES, VERA_CONTROLLER)
DEPENDENCIES = ['vera']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Find and return Vera covers."""
add_devices_callback(
VeraCover(device, VERA_CONTROLLER) for
device in VERA_DEVICES['cover'])
# pylint: disable=abstract-method
class VeraCover(VeraDevice, CoverDevice):
"""Represents a Vera Cover in Home Assistant."""
def __init__(self, vera_device, controller):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller)
@property
def current_cover_position(self):
"""
Return current position of cover.
0 is closed, 100 is fully open.
"""
position = self.vera_device.get_level()
if position <= 5:
return 0
if position >= 95:
return 100
return position
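    # Illustrative mapping (derived from the clamping above): a raw Vera level
    # of 3 reports as 0 (closed), 97 as 100 (fully open), and 42 as 42.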
def set_cover_position(self, position, **kwargs):
"""Move the cover to a specific position."""
self.vera_device.set_level(position)
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is not None:
if self.current_cover_position > 0:
return False
else:
return True
def open_cover(self, **kwargs):
"""Open the cover."""
self.vera_device.open()
def close_cover(self, **kwargs):
"""Close the cover."""
self.vera_device.close()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.vera_device.stop()
|
Smart-Torvy/torvy-home-assistant
|
homeassistant/components/cover/vera.py
|
Python
|
mit
| 1,970
| 0
|
# -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
self.cost = cost
self.seen = True
def update(self, cost, memory_weight, new_weight, pessimism):
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
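    # Worked example (illustrative): with pessimism=0 (so p == 1),
    # memory_weight=2.0 and new_weight=1.0 give memory_factor = 2/3, so the
    # stored cost becomes old_cost * 2/3 + new_cost * 1/3.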
class CostMemory(handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
        # the interval length (only known for certain if multiple intervals
        # have been seen)
        self.interval_length = 214748 # SUMOTIME_MAXSTRING
        # the interval currently being parsed
        self.current_interval = None
        # the combined weight of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()])
self.pessimism = pessimism
def startElement(self, name, attrs):
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
        if self.iteration is None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length
if len(self.intervals.keys()) > 1:
sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
l = len(list(values))
if l > 0:
return (sum(list(values)) / l)
else:
return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed
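# Usage sketch (illustrative; the file names below are hypothetical and not
# part of the original tool):
#
#     cm = CostMemory('traveltime', network_file='net.net.xml')
#     cm.load_costs('dump_001.xml', iteration=0, weight=1.0)
#     cm.write_costs('weights_001.xml')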
|
702nADOS/sumo
|
tools/assign/costMemory.py
|
Python
|
gpl-3.0
| 7,283
| 0.00151
|
#<ImportSpecificModules>
import ShareYourSystem as SYS
#</ImportSpecificModules>
#print(SYS.SumClass().insert('Parameter').hdfview().HdformatedConsoleStr)
#print(SYS.SumClass().insert('Result').hdfview().HdformatedConsoleStr)
#print(SYS.Sum.attest_insert())
#print(SYS.Sum.attest_retrieve())
#print(SYS.Sum.attest_find())
#print(SYS.Sum.attest_recover())
#print(SYS.Sum.attest_scan())
|
Ledoux/ShareYourSystem
|
Pythonlogy/ShareYourSystem/Standards/Tutorials/Sumer/Test.py
|
Python
|
mit
| 388
| 0.025773
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2012 The Chromium OS Authors.
#
"""Tests for the dtb_platdata module
This includes unit tests for some functions and functional tests for the dtoc
tool.
"""
import collections
import os
import struct
import sys
import tempfile
import unittest
from dtoc import dtb_platdata
from dtb_platdata import conv_name_to_c
from dtb_platdata import get_compat_name
from dtb_platdata import get_value
from dtb_platdata import tab_to
from dtoc import fdt
from dtoc import fdt_util
from patman import test_util
from patman import tools
our_path = os.path.dirname(os.path.realpath(__file__))
HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
#include <stdbool.h>
#include <linux/libfdt.h>'''
C_HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
/* Allow use of U_BOOT_DEVICE() in this file */
#define DT_PLATDATA_C
#include <common.h>
#include <dm.h>
#include <dt-structs.h>
'''
C_EMPTY_POPULATE_PHANDLE_DATA = '''void dm_populate_phandle_data(void) {
}
'''
def get_dtb_file(dts_fname, capture_stderr=False):
"""Compile a .dts file to a .dtb
Args:
dts_fname: Filename of .dts file in the current directory
capture_stderr: True to capture and discard stderr output
Returns:
Filename of compiled file in output directory
"""
return fdt_util.EnsureCompiled(os.path.join(our_path, dts_fname),
capture_stderr=capture_stderr)
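# Illustrative call (mirroring its use in the tests below):
# get_dtb_file('dtoc_test_simple.dts') compiles the bundled test source and
# returns the name of the resulting .dtb in the output directory.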
class TestDtoc(unittest.TestCase):
"""Tests for dtoc"""
@classmethod
def setUpClass(cls):
tools.PrepareOutputDir(None)
cls.maxDiff = None
@classmethod
def tearDownClass(cls):
tools._RemoveOutputDir()
def _WritePythonString(self, fname, data):
"""Write a string with tabs expanded as done in this Python file
Args:
fname: Filename to write to
data: Raw string to convert
"""
data = data.replace('\t', '\\t')
with open(fname, 'w') as fd:
fd.write(data)
def _CheckStrings(self, expected, actual):
"""Check that a string matches its expected value
If the strings do not match, they are written to the /tmp directory in
the same Python format as is used here in the test. This allows for
easy comparison and update of the tests.
Args:
expected: Expected string
actual: Actual string
"""
if expected != actual:
self._WritePythonString('/tmp/binman.expected', expected)
self._WritePythonString('/tmp/binman.actual', actual)
print('Failures written to /tmp/binman.{expected,actual}')
self.assertEquals(expected, actual)
def run_test(self, args, dtb_file, output):
dtb_platdata.run_steps(args, dtb_file, False, output, True)
def test_name(self):
"""Test conversion of device tree names to C identifiers"""
self.assertEqual('serial_at_0x12', conv_name_to_c('serial@0x12'))
self.assertEqual('vendor_clock_frequency',
conv_name_to_c('vendor,clock-frequency'))
self.assertEqual('rockchip_rk3399_sdhci_5_1',
conv_name_to_c('rockchip,rk3399-sdhci-5.1'))
def test_tab_to(self):
"""Test operation of tab_to() function"""
self.assertEqual('fred ', tab_to(0, 'fred'))
self.assertEqual('fred\t', tab_to(1, 'fred'))
self.assertEqual('fred was here ', tab_to(1, 'fred was here'))
self.assertEqual('fred was here\t\t', tab_to(3, 'fred was here'))
self.assertEqual('exactly8 ', tab_to(1, 'exactly8'))
self.assertEqual('exactly8\t', tab_to(2, 'exactly8'))
def test_get_value(self):
"""Test operation of get_value() function"""
self.assertEqual('0x45',
get_value(fdt.TYPE_INT, struct.pack('>I', 0x45)))
self.assertEqual('0x45',
get_value(fdt.TYPE_BYTE, struct.pack('<I', 0x45)))
self.assertEqual('0x0',
get_value(fdt.TYPE_BYTE, struct.pack('>I', 0x45)))
self.assertEqual('"test"', get_value(fdt.TYPE_STRING, 'test'))
self.assertEqual('true', get_value(fdt.TYPE_BOOL, None))
def test_get_compat_name(self):
"""Test operation of get_compat_name() function"""
Prop = collections.namedtuple('Prop', ['value'])
Node = collections.namedtuple('Node', ['props'])
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1', 'arasan_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1', 'third'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1',
'arasan_sdhci_5_1', 'third']),
get_compat_name(node))
def test_empty_file(self):
"""Test output from a device tree file with no nodes"""
dtb_file = get_dtb_file('dtoc_test_empty.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(HEADER.splitlines(), lines)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(C_HEADER.splitlines() + [''] +
C_EMPTY_POPULATE_PHANDLE_DATA.splitlines(), lines)
def test_simple(self):
"""Test output from some simple nodes with various types of data"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_i2c_test {
};
struct dtd_sandbox_pmic_test {
\tbool\t\tlow_power;
\tfdt64_t\t\treg[2];
};
struct dtd_sandbox_spl_test {
\tconst char * acpi_name;
\tbool\t\tboolval;
\tunsigned char\tbytearray[3];
\tunsigned char\tbyteval;
\tfdt32_t\t\tintarray[4];
\tfdt32_t\t\tintval;
\tunsigned char\tlongbytearray[9];
\tunsigned char\tnotstring[5];
\tconst char *\tstringarray[3];
\tconst char *\tstringval;
};
struct dtd_sandbox_spl_test_2 {
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /i2c@0 index 0 */
static struct dtd_sandbox_i2c_test dtv_i2c_at_0 = {
};
U_BOOT_DEVICE(i2c_at_0) = {
\t.name\t\t= "sandbox_i2c_test",
\t.platdata\t= &dtv_i2c_at_0,
\t.platdata_size\t= sizeof(dtv_i2c_at_0),
\t.parent_idx\t= -1,
};
/* Node /i2c@0/pmic@9 index 1 */
static struct dtd_sandbox_pmic_test dtv_pmic_at_9 = {
\t.low_power\t\t= true,
\t.reg\t\t\t= {0x9, 0x0},
};
U_BOOT_DEVICE(pmic_at_9) = {
\t.name\t\t= "sandbox_pmic_test",
\t.platdata\t= &dtv_pmic_at_9,
\t.platdata_size\t= sizeof(dtv_pmic_at_9),
\t.parent_idx\t= 0,
};
/* Node /spl-test index 2 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.boolval\t\t= true,
\t.bytearray\t\t= {0x6, 0x0, 0x0},
\t.byteval\t\t= 0x5,
\t.intarray\t\t= {0x2, 0x3, 0x4, 0x0},
\t.intval\t\t\t= 0x1,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x11},
\t.notstring\t\t= {0x20, 0x21, 0x22, 0x10, 0x0},
\t.stringarray\t\t= {"multi-word", "message", ""},
\t.stringval\t\t= "message",
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 3 */
static struct dtd_sandbox_spl_test dtv_spl_test2 = {
\t.acpi_name\t\t= "\\\\_SB.GPO0",
\t.bytearray\t\t= {0x1, 0x23, 0x34},
\t.byteval\t\t= 0x8,
\t.intarray\t\t= {0x5, 0x0, 0x0, 0x0},
\t.intval\t\t\t= 0x3,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0x0, 0x0, 0x0, 0x0,
\t\t0x0},
\t.stringarray\t\t= {"another", "multi-word", "message"},
\t.stringval\t\t= "message2",
};
U_BOOT_DEVICE(spl_test2) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test2,
\t.platdata_size\t= sizeof(dtv_spl_test2),
\t.parent_idx\t= -1,
};
/* Node /spl-test3 index 4 */
static struct dtd_sandbox_spl_test dtv_spl_test3 = {
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x0},
\t.stringarray\t\t= {"one", "", ""},
};
U_BOOT_DEVICE(spl_test3) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test3,
\t.platdata_size\t= sizeof(dtv_spl_test3),
\t.parent_idx\t= -1,
};
/* Node /spl-test4 index 5 */
static struct dtd_sandbox_spl_test_2 dtv_spl_test4 = {
};
U_BOOT_DEVICE(spl_test4) = {
\t.name\t\t= "sandbox_spl_test_2",
\t.platdata\t= &dtv_spl_test4,
\t.platdata_size\t= sizeof(dtv_spl_test4),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_driver_alias(self):
"""Test output from a device tree file with a driver alias"""
dtb_file = get_dtb_file('dtoc_test_driver_alias.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_gpio {
\tconst char *\tgpio_bank_name;
\tbool\t\tgpio_controller;
\tfdt32_t\t\tsandbox_gpio_count;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /gpios@0 index 0 */
static struct dtd_sandbox_gpio dtv_gpios_at_0 = {
\t.gpio_bank_name\t\t= "a",
\t.gpio_controller\t= true,
\t.sandbox_gpio_count\t= 0x14,
};
U_BOOT_DEVICE(gpios_at_0) = {
\t.name\t\t= "sandbox_gpio",
\t.platdata\t= &dtv_gpios_at_0,
\t.platdata_size\t= sizeof(dtv_gpios_at_0),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_invalid_driver(self):
"""Test output from a device tree file with an invalid driver"""
dtb_file = get_dtb_file('dtoc_test_invalid_driver.dts')
output = tools.GetOutputFilename('output')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_invalid {
};
''', data)
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /spl-test index 0 */
static struct dtd_invalid dtv_spl_test = {
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "invalid",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle(self):
"""Test output from a node containing a phandle reference"""
dtb_file = get_dtb_file('dtoc_test_phandle.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_source {
\tstruct phandle_2_arg clocks[4];
};
struct dtd_target {
\tfdt32_t\t\tintval;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle2-target index 0 */
static struct dtd_target dtv_phandle2_target = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(phandle2_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle2_target,
\t.platdata_size\t= sizeof(dtv_phandle2_target),
\t.parent_idx\t= -1,
};
/* Node /phandle3-target index 1 */
static struct dtd_target dtv_phandle3_target = {
\t.intval\t\t\t= 0x2,
};
U_BOOT_DEVICE(phandle3_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle3_target,
\t.platdata_size\t= sizeof(dtv_phandle3_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-target index 4 */
static struct dtd_target dtv_phandle_target = {
\t.intval\t\t\t= 0x0,
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source index 2 */
static struct dtd_source dtv_phandle_source = {
\t.clocks\t\t\t= {
\t\t\t{4, {}},
\t\t\t{0, {11}},
\t\t\t{1, {12, 13}},
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source,
\t.platdata_size\t= sizeof(dtv_phandle_source),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 3 */
static struct dtd_source dtv_phandle_source2 = {
\t.clocks\t\t\t= {
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_single(self):
"""Test output from a node containing a phandle reference"""
dtb_file = get_dtb_file('dtoc_test_phandle_single.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_source {
\tstruct phandle_0_arg clocks[1];
};
struct dtd_target {
\tfdt32_t\t\tintval;
};
''', data)
def test_phandle_reorder(self):
"""Test that phandle targets are generated before their references"""
dtb_file = get_dtb_file('dtoc_test_phandle_reorder.dts')
output = tools.GetOutputFilename('output')
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle-target index 1 */
static struct dtd_target dtv_phandle_target = {
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 0 */
static struct dtd_source dtv_phandle_source2 = {
\t.clocks\t\t\t= {
\t\t\t{1, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_cd_gpio(self):
"""Test that phandle targets are generated when unsing cd-gpios"""
dtb_file = get_dtb_file('dtoc_test_phandle_cd_gpios.dts')
output = tools.GetOutputFilename('output')
dtb_platdata.run_steps(['platdata'], dtb_file, False, output, True)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle2-target index 0 */
static struct dtd_target dtv_phandle2_target = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(phandle2_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle2_target,
\t.platdata_size\t= sizeof(dtv_phandle2_target),
\t.parent_idx\t= -1,
};
/* Node /phandle3-target index 1 */
static struct dtd_target dtv_phandle3_target = {
\t.intval\t\t\t= 0x2,
};
U_BOOT_DEVICE(phandle3_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle3_target,
\t.platdata_size\t= sizeof(dtv_phandle3_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-target index 4 */
static struct dtd_target dtv_phandle_target = {
\t.intval\t\t\t= 0x0,
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source index 2 */
static struct dtd_source dtv_phandle_source = {
\t.cd_gpios\t\t= {
\t\t\t{4, {}},
\t\t\t{0, {11}},
\t\t\t{1, {12, 13}},
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source,
\t.platdata_size\t= sizeof(dtv_phandle_source),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 3 */
static struct dtd_source dtv_phandle_source2 = {
\t.cd_gpios\t\t= {
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_bad(self):
"""Test a node containing an invalid phandle fails"""
dtb_file = get_dtb_file('dtoc_test_phandle_bad.dts',
capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Cannot parse 'clocks' in node 'phandle-source'",
str(e.exception))
def test_phandle_bad2(self):
"""Test a phandle target missing its #*-cells property"""
dtb_file = get_dtb_file('dtoc_test_phandle_bad2.dts',
capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'phandle-target' has no cells property",
str(e.exception))
def test_addresses64(self):
"""Test output from a node with a 'reg' property with na=2, ns=2"""
dtb_file = get_dtb_file('dtoc_test_addr64.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses32(self):
"""Test output from a node with a 'reg' property with na=1, ns=1"""
dtb_file = get_dtb_file('dtoc_test_addr32.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt32_t\t\treg[2];
};
struct dtd_test2 {
\tfdt32_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x12345678, 0x98765432, 0x2, 0x3},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses64_32(self):
"""Test output from a node with a 'reg' property with na=2, ns=1"""
dtb_file = get_dtb_file('dtoc_test_addr64_32.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x123400000000, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x1234567890123456, 0x98765432},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x1234567890123456, 0x98765432, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses32_64(self):
"""Test output from a node with a 'reg' property with na=1, ns=2"""
dtb_file = get_dtb_file('dtoc_test_addr32_64.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x567800000000},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x12345678, 0x9876543210987654},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x12345678, 0x9876543210987654, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_bad_reg(self):
"""Test that a reg property with an invalid type generates an error"""
# Capture stderr since dtc will emit warnings for this file
dtb_file = get_dtb_file('dtoc_test_bad_reg.dts', capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'spl-test' reg property is not an int",
str(e.exception))
def test_bad_reg2(self):
"""Test that a reg property with an invalid cell count is detected"""
# Capture stderr since dtc will emit warnings for this file
dtb_file = get_dtb_file('dtoc_test_bad_reg2.dts', capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'spl-test' reg property has 3 cells which is not a multiple of na + ns = 1 + 1)",
str(e.exception))
def test_add_prop(self):
"""Test that a subequent node can add a new property to a struct"""
dtb_file = get_dtb_file('dtoc_test_add_prop.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_spl_test {
\tfdt32_t\t\tintarray;
\tfdt32_t\t\tintval;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /spl-test index 0 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 1 */
static struct dtd_sandbox_spl_test dtv_spl_test2 = {
\t.intarray\t\t= 0x5,
};
U_BOOT_DEVICE(spl_test2) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test2,
\t.platdata_size\t= sizeof(dtv_spl_test2),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def testStdout(self):
"""Test output to stdout"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
with test_util.capture_sys_output() as (stdout, stderr):
self.run_test(['struct'], dtb_file, '-')
def testNoCommand(self):
"""Test running dtoc without a command"""
with self.assertRaises(ValueError) as e:
self.run_test([], '', '')
self.assertIn("Please specify a command: struct, platdata",
str(e.exception))
def testBadCommand(self):
"""Test running dtoc with an invalid command"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['invalid-cmd'], dtb_file, output)
self.assertIn("Unknown command 'invalid-cmd': (use: struct, platdata)",
str(e.exception))
def testScanDrivers(self):
"""Test running dtoc with additional drivers to scan"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output, True,
[None, '', 'tools/dtoc/dtoc_test_scan_drivers.cxx'])
def testUnicodeError(self):
"""Test running dtoc with an invalid unicode file
        To perform this test without adding a non-UTF-8 text file to the
        tree (which would cause problems for checkpatch.pl or patman),
        generate the file at runtime and then process it.
"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
driver_fn = '/tmp/' + next(tempfile._get_candidate_names())
with open(driver_fn, 'wb+') as df:
df.write(b'\x81')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output, True,
[driver_fn])
|
Digilent/u-boot-digilent
|
tools/dtoc/test_dtoc.py
|
Python
|
gpl-2.0
| 28,025
| 0.000178
|
import codecs
import mock
import os
import tempfile
import unittest
from time import strftime
import six
from kinto import config
from kinto import __version__
class ConfigTest(unittest.TestCase):
def test_transpose_parameters_into_template(self):
self.maxDiff = None
template = "kinto.tpl"
dest = tempfile.mktemp()
config.render_template(template, dest,
secret='secret',
storage_backend='storage_backend',
cache_backend='cache_backend',
permission_backend='permission_backend',
storage_url='storage_url',
cache_url='cache_url',
permission_url='permission_url',
kinto_version='kinto_version',
config_file_timestamp='config_file_timestamp')
with codecs.open(dest, 'r', encoding='utf-8') as d:
destination_temp = d.read()
sample_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
"test_configuration/test.ini")
with codecs.open(sample_path, 'r', encoding='utf-8') as c:
sample = c.read()
self.assertEqual(destination_temp, sample)
def test_create_destination_directory(self):
dest = os.path.join(tempfile.mkdtemp(), 'config', 'kinto.ini')
config.render_template("kinto.tpl", dest,
secret='secret',
storage_backend='storage_backend',
cache_backend='cache_backend',
permission_backend='permission_backend',
storage_url='storage_url',
cache_url='cache_url',
permission_url='permission_url',
kinto_version='kinto_version',
config_file_timestamp='config_file_timestamp')
self.assertTrue(os.path.exists(dest))
@mock.patch('kinto.config.render_template')
def test_hmac_secret_is_text(self, mocked_render_template):
config.init('kinto.ini', 'postgresql')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(type(kwargs['secret']), six.text_type)
@mock.patch('kinto.config.render_template')
def test_init_postgresql_values(self, mocked_render_template):
config.init('kinto.ini', 'postgresql')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
postgresql_url = "postgres://postgres:postgres@localhost/postgres"
self.assertDictEqual(kwargs, {
'secret': kwargs['secret'],
'storage_backend': 'kinto.core.storage.postgresql',
'cache_backend': 'kinto.core.cache.postgresql',
'permission_backend': 'kinto.core.permission.postgresql',
'storage_url': postgresql_url,
'cache_url': postgresql_url,
'permission_url': postgresql_url,
'kinto_version': __version__,
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
@mock.patch('kinto.config.render_template')
def test_init_redis_values(self, mocked_render_template):
config.init('kinto.ini', 'redis')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
redis_url = "redis://localhost:6379"
self.maxDiff = None # See the full diff in case of error
self.assertDictEqual(kwargs, {
'secret': kwargs['secret'],
'storage_backend': 'kinto_redis.storage',
'cache_backend': 'kinto_redis.cache',
'permission_backend': 'kinto_redis.permission',
'storage_url': redis_url + '/1',
'cache_url': redis_url + '/2',
'permission_url': redis_url + '/3',
'kinto_version': __version__,
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
@mock.patch('kinto.config.render_template')
def test_init_memory_values(self, mocked_render_template):
config.init('kinto.ini', 'memory')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
self.assertDictEqual(kwargs, {
'secret': kwargs['secret'],
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
'storage_url': '',
'cache_url': '',
'permission_url': '',
'kinto_version': __version__,
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
def test_render_template_creates_directory_if_necessary(self):
temp_path = tempfile.mkdtemp()
destination = os.path.join(temp_path, 'config/kinto.ini')
config.render_template('kinto.tpl', destination, **{
'secret': "abcd-ceci-est-un-secret",
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
'storage_url': '',
'cache_url': '',
'permission_url': '',
'kinto_version': '',
'config_file_timestamp': ''
})
self.assertTrue(os.path.exists(destination))
def test_render_template_works_with_file_in_cwd(self):
temp_path = tempfile.mkdtemp()
os.chdir(temp_path)
config.render_template('kinto.tpl', 'kinto.ini', **{
'secret': "abcd-ceci-est-un-secret",
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
'storage_url': '',
'cache_url': '',
'permission_url': '',
'kinto_version': '',
'config_file_timestamp': ''
})
self.assertTrue(os.path.exists(
os.path.join(temp_path, 'kinto.ini')
))
|
monikagrabowska/osf.io
|
kinto/tests/test_config.py
|
Python
|
apache-2.0
| 6,371
| 0
|
def max_rectangle(heights):
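    """Return the largest rectangle area in a histogram given by heights.

    Uses a monotonic stack of bar indices: a sentinel 0 is appended so every
    bar is eventually popped; when a lower bar arrives, each popped bar's
    height times the widest span it covers gives a candidate area.
    """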
res = 0
heights.append(0)
stack = [0]
for i in range(1, len(heights)):
while stack and heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i if not stack else i - stack[-1] - 1
res = max(res, h * w)
stack.append(i)
return res
class Solution:
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix or not matrix[0]:
return 0
m = len(matrix)
n = len(matrix[0])
heights = [1 if x == '1' else 0 for x in matrix[0]]
ans = max_rectangle(heights)
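        # Treat each row as the base of a histogram: heights[j] counts the
        # consecutive '1's ending at the current row in column j.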
for i in range(1, m):
for j in range(n):
heights[j] = 0 if matrix[i][j] == '0' else heights[j] + 1
ans = max(ans, max_rectangle(heights))
return ans
if __name__ == "__main__":
sol = Solution()
M = [['1', '0', '1', '0', '0'],
['1', '0', '1', '1', '1'],
['1', '1', '1', '1', '1'],
['1', '0', '0', '1', '0']]
print(sol.maximalRectangle(M))
|
shenfei/oj_codes
|
leetcode/python/n85_Maximal_Rectangle.py
|
Python
|
mit
| 1,123
| 0.001781
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
#
"""
Alignak - Checks pack for NRPE monitored Linux hosts/services
"""
|
Alignak-monitoring-contrib/alignak-checks-nrpe
|
alignak_checks_nrpe/__init__.py
|
Python
|
agpl-3.0
| 200
| 0
|
from sqlagg.columns import SimpleColumn
from sqlagg.filters import BETWEEN, IN, EQ
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import SqlData, DataFormatter, TableDataFormat, DatabaseColumn
from custom.tdh.reports import UNNECESSARY_FIELDS, CHILD_HEADERS_MAP, INFANT_HEADERS_MAP, NEWBORN_HEADERS_MAP
def merge_rows(classification_sql_data, enroll_sql_data, treatment_sql_data):
result = []
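    # Find the case_id column in each data set, then index the enroll and
    # treatment rows by case_id so every classification row can be joined
    # with its matching enroll and treatment data (blank-filled if missing).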
classification_case_id_index = [id for id, column in enumerate(classification_sql_data.columns)
if column.slug == 'case_id'][0]
enroll_case_id_index = [id for id, column in enumerate(enroll_sql_data.columns)
if column.slug == 'case_id'][0]
treatment_case_id_index = [id for id, column in enumerate(treatment_sql_data.columns)
if column.slug == 'case_id'][0]
enroll_map = {row[enroll_case_id_index]: row for row in enroll_sql_data.rows}
treatment_map = {row[treatment_case_id_index]: row[:treatment_case_id_index]
+ row[treatment_case_id_index + 1:] for row in treatment_sql_data.rows}
for classification_row in classification_sql_data.rows:
row = classification_row[:classification_case_id_index] + classification_row[
classification_case_id_index + 1:]
classification_case_id = classification_row[classification_case_id_index]
if classification_case_id in enroll_map:
row = enroll_map[classification_case_id] + row
else:
row = [classification_case_id] + ['' for i in range(len(enroll_sql_data.headers) - 1)] + row
if classification_case_id in treatment_map:
row.extend(treatment_map[classification_case_id])
else:
row.extend(['' for i in range(len(treatment_sql_data.headers))])
result.append(row)
return result
class BaseSqlData(SqlData):
datatables = True
no_value = {'sort_key': 0, 'html': 0}
def header(self, header):
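        """Map a column slug to its display header.

        The first letter of the subclass name selects the header map:
        'N' uses the newborn map, 'I' the infant map and anything else the
        child map.
        """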
if self.__class__.__name__[0] == 'N':
return NEWBORN_HEADERS_MAP[header] if header in NEWBORN_HEADERS_MAP else header
elif self.__class__.__name__[0] == 'I':
return INFANT_HEADERS_MAP[header] if header in INFANT_HEADERS_MAP else header
else:
return CHILD_HEADERS_MAP[header] if header in CHILD_HEADERS_MAP else header
@property
def filters(self):
filters = [BETWEEN("date", "startdate", "enddate"), EQ('domain', 'domain')]
if self.config['emw']:
filters.append(IN('user_id', 'emw'))
return filters
@property
def group_by(self):
return []
@property
def columns(self):
columns = []
for k in self.group_by:
if k in ['zscore_hfa', 'zscore_wfa', 'zscore_wfh', 'mean_hfa', 'mean_wfa', 'mean_wfh']:
columns.append(DatabaseColumn(k, SimpleColumn(k),
format_fn=lambda x: "%.2f" % float(x if x else 0)))
else:
columns.append(DatabaseColumn(k, SimpleColumn(k)))
return columns
@property
def headers(self):
return [DataTablesColumn(self.header(k)) for k in self.group_by[1:]]
@property
def rows(self):
formatter = DataFormatter(TableDataFormat(self.columns, no_value=self.no_value))
return list(formatter.format(self.data, keys=self.keys, group_by=self.group_by))
class InfantConsultationHistory(BaseSqlData):
table_name = "fluff_TDHInfantClassificationFluff"
slug = 'infant_consultation_history'
title = 'Infant Consultation History'
@property
def columns(self):
return EnrollChild().columns + InfantClassification(config=self.config).columns + InfantTreatment().columns
@property
def headers(self):
return DataTablesHeader(
*EnrollChild().headers + InfantClassification(config=self.config).headers + InfantTreatment().headers)
@property
def group_by(self):
return EnrollChild().group_by + InfantClassification(
config=self.config).group_by + InfantTreatment().group_by
@property
def rows(self):
return merge_rows(InfantClassification(config=self.config), EnrollChild(), InfantTreatment())
class InfantConsultationHistoryComplete(BaseSqlData):
table_name = "fluff_TDHInfantClassificationFluff"
slug = 'infant_consultation_history'
title = 'Infant Consultation History'
@property
def columns(self):
return EnrollChild().columns + InfantClassificationExtended(
config=self.config).columns + InfantTreatmentExtended().columns
@property
def headers(self):
return DataTablesHeader(*EnrollChild().headers + InfantClassificationExtended(
config=self.config).headers + InfantTreatmentExtended().headers)
@property
def group_by(self):
return EnrollChild().group_by + InfantClassificationExtended(
config=self.config).group_by + InfantTreatmentExtended().group_by
@property
def rows(self):
return merge_rows(InfantClassificationExtended(config=self.config), EnrollChild(),
InfantTreatmentExtended())
class NewbornConsultationHistory(BaseSqlData):
table_name = "fluff_TDHNewbornClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + NewbornClassification(
config=self.config).columns + NewbornTreatment().columns
@property
def headers(self):
return DataTablesHeader(*EnrollChild().headers + NewbornClassification(
config=self.config).headers + NewbornTreatment().headers)
@property
def group_by(self):
return EnrollChild().group_by + NewbornClassification(
config=self.config).group_by + NewbornTreatment().group_by
@property
def rows(self):
return merge_rows(NewbornClassification(config=self.config), EnrollChild(), NewbornTreatment())
class NewbornConsultationHistoryComplete(BaseSqlData):
table_name = "fluff_TDHNewbornClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + NewbornClassificationExtended(
config=self.config).columns + NewbornTreatmentExtended().columns
@property
def headers(self):
return DataTablesHeader(*EnrollChild().headers + NewbornClassificationExtended(
config=self.config).headers + NewbornTreatmentExtended().headers)
@property
def group_by(self):
return EnrollChild().group_by + NewbornClassificationExtended(
config=self.config).group_by + NewbornTreatmentExtended().group_by
@property
def rows(self):
return merge_rows(NewbornClassificationExtended(config=self.config), EnrollChild(),
NewbornTreatmentExtended())
class ChildConsultationHistory(BaseSqlData):
table_name = "fluff_TDHChildClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + ChildClassification(config=self.config).columns + ChildTreatment().columns
@property
def headers(self):
return DataTablesHeader(
*EnrollChild().headers + ChildClassification(config=self.config).headers + ChildTreatment().headers)
@property
def group_by(self):
return EnrollChild().group_by + ChildClassification(
config=self.config).group_by + ChildTreatment().group_by
@property
def rows(self):
return merge_rows(ChildClassification(config=self.config), EnrollChild(), ChildTreatment())
class ChildConsultationHistoryComplete(BaseSqlData):
table_name = "fluff_TDHChildClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + ChildClassificationExtended(
config=self.config).columns + ChildTreatmentExtended().columns
@property
def headers(self):
return DataTablesHeader(
*EnrollChild().headers + ChildClassificationExtended(
config=self.config).headers + ChildTreatmentExtended().headers)
@property
def group_by(self):
return EnrollChild().group_by + ChildClassificationExtended(
config=self.config).group_by + ChildTreatmentExtended().group_by
@property
def rows(self):
return merge_rows(ChildClassificationExtended(config=self.config), EnrollChild(), ChildTreatmentExtended())
class InfantClassification(BaseSqlData):
table_name = "fluff_TDHInfantClassificationFluff"
slug = 'infant_classification'
title = 'Infant Classification'
@property
def group_by(self):
return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type',
'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa',
'zscore_wfh', 'mean_wfh', 'classification_deshydratation', 'classification_diahree',
'classification_infection', 'classification_malnutrition', 'classification_vih', 'inf_bac_qa',
'inf_bac_freq_resp', 'inf_bac_qc', 'inf_bac_qd', 'inf_bac_qe', 'inf_bac_qf', 'inf_bac_qg',
'inf_bac_qh', 'inf_bac_qj', 'inf_bac_qk', 'inf_bac_ql', 'inf_bac_qm', 'diarrhee_qa',
'alimentation_qa', 'alimentation_qb', 'alimentation_qc', 'alimentation_qd', 'alimentation_qf',
'alimentation_qg', 'alimentation_qh', 'vih_qa', 'vih_qb', 'vih_qc', 'vih_qd', 'vih_qe', 'vih_qf',
'vih_qg', 'vih_qh', 'vih_qi', 'vih_qj', 'vih_qk', 'vih_ql', 'other_comments']
class InfantClassificationExtended(BaseSqlData):
table_name = "fluff_TDHInfantClassificationFluff"
slug = 'infant_classification'
title = 'Infant Classification'
@property
def columns(self):
from custom.tdh.models import TDHInfantClassificationFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHInfantClassificationFluff().__dict__['_obj'].keys()
if k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHInfantClassificationFluff
return [DataTablesColumn(self.header(k)) for k in TDHInfantClassificationFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHInfantClassificationFluff
return [k for k in TDHInfantClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
class NewbornClassification(BaseSqlData):
table_name = "fluff_TDHNewbornClassificationFluff"
slug = 'newborn_classification'
title = 'Newborn Classification'
@property
def group_by(self):
return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type',
'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa',
'zscore_wfh', 'mean_wfh', 'classification_infection', 'classification_malnutrition',
'classification_occular', 'classification_poids', 'classification_vih', 'inf_bac_qa', 'inf_bac_qb',
'inf_bac_freq_resp', 'inf_bac_qd', 'inf_bac_qe', 'inf_bac_qf', 'inf_bac_qg', 'inf_bac_qh',
'inf_bac_qi', 'inf_bac_qj', 'poids_qa', 'inf_occ_qa', 'vih_qa', 'vih_qb', 'vih_qc', 'vih_qd',
'vih_qe', 'vih_qf', 'vih_qg', 'alimentation_qa', 'alimentation_qb', 'alimentation_qd',
'alimentation_qf', 'alimentation_qg', 'other_comments']
class NewbornClassificationExtended(BaseSqlData):
table_name = "fluff_TDHNewbornClassificationFluff"
slug = 'newborn_classification'
title = 'Newborn Classification'
@property
def columns(self):
from custom.tdh.models import TDHNewbornClassificationFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHNewbornClassificationFluff().__dict__['_obj'].keys()
if k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHNewbornClassificationFluff
return [DataTablesColumn(self.header(k)) for k in TDHNewbornClassificationFluff().__dict__['_obj'].keys()
if k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHNewbornClassificationFluff
return [k for k in TDHNewbornClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
class ChildClassification(BaseSqlData):
table_name = "fluff_TDHChildClassificationFluff"
slug = 'child_consultation_history'
title = 'Child Consultation History'
@property
def group_by(self):
return ['case_id', 'bcg', 'tablet_login_id', 'author_id', 'author_name', 'visit_date', 'consultation_type',
'number', 'weight', 'height', 'muac', 'temp', 'zscore_hfa', 'mean_hfa', 'zscore_wfa', 'mean_wfa',
'zscore_wfh', 'mean_wfh', 'measles_1', 'measles_2', 'opv_0', 'opv_1', 'opv_2', 'opv_3', 'penta_1',
'penta_2', 'penta_3', 'pneumo_1', 'pneumo_2', 'pneumo_3', 'rotavirus_1', 'rotavirus_2',
'rotavirus_3', 'yf', 'classification_anemie', 'classification_deshydratation',
'classification_diahree', 'classification_dysenterie', 'classification_malnutrition',
'classification_oreille', 'classification_paludisme', 'classification_pneumonie',
'classification_rougeole', 'classification_vih', 'classifications_graves', 'boire', 'vomit',
'convulsions_passe', 'lethargie', 'convulsions_present', 'toux_presence', 'toux_presence_duree',
'freq_resp', 'tirage', 'stridor', 'diarrhee', 'diarrhee_presence', 'diarrhee_presence_duree',
'sang_selles', 'conscience_agitation', 'yeux_enfonces', 'soif', 'pli_cutane', 'fievre_presence',
'fievre_presence_duree', 'fievre_presence_longue', 'tdr', 'urines_foncees', 'saignements_anormaux',
'raideur_nuque', 'ictere', 'choc', 'eruption_cutanee', 'ecoulement_nasal', 'yeux_rouge',
'ecoulement_oculaire', 'ulcerations', 'cornee', 'oreille', 'oreille_probleme', 'oreille_douleur',
'oreille_ecoulement', 'oreille_ecoulement_duree', 'oreille_gonflement', 'paleur_palmaire',
'oedemes', 'test_appetit', 'serologie_enfant', 'test_enfant', 'pneumonie_recidivante',
'diarrhee_dernierement', 'candidose_buccale', 'hypertrophie_ganglions_lymphatiques',
'augmentation_glande_parotide', 'test_mere', 'serologie_mere', 'other_comments']
class ChildClassificationExtended(BaseSqlData):
table_name = "fluff_TDHChildClassificationFluff"
slug = 'child_classification'
title = 'Child Classification'
@property
def columns(self):
from custom.tdh.models import TDHChildClassificationFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHChildClassificationFluff().__dict__['_obj'].keys()
if k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHChildClassificationFluff
return [DataTablesColumn(self.header(k)) for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHChildClassificationFluff
return [k for k in TDHChildClassificationFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
class EnrollChild(BaseSqlData):
table_name = "fluff_TDHEnrollChildFluff"
slug = 'enroll_child'
title = 'Enroll Child'
@property
def filters(self):
return []
@property
def group_by(self):
return ['case_id', 'dob', 'sex', 'village']
@property
def headers(self):
return [DataTablesColumn(self.header(k)) for k in self.group_by]
class EnrollChildExtended(BaseSqlData):
table_name = "fluff_TDHEnrollChildFluff"
slug = 'enroll_child'
title = 'Enroll Child'
@property
def filters(self):
return []
@property
def columns(self):
from custom.tdh.models import TDHEnrollChildFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHEnrollChildFluff
return [DataTablesColumn(self.header(k)) for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHEnrollChildFluff
return [k for k in TDHEnrollChildFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
class InfantTreatment(BaseSqlData):
table_name = "fluff_TDHInfantTreatmentFluff"
slug = 'infant_treatment'
title = 'Infant Treatment'
@property
def filters(self):
return []
@property
def group_by(self):
return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_treat_2',
'infection_grave_no_ref_treat_0', 'infection_grave_no_ref_treat_1',
'infection_grave_no_ref_treat_2', 'infection_grave_no_ref_treat_5', 'infection_locale_treat_0',
'infection_locale_treat_1', 'maladie_grave_treat_0', 'maladie_grave_treat_1']
class InfantTreatmentExtended(BaseSqlData):
table_name = "fluff_TDHInfantTreatmentFluff"
slug = 'infant_treatment'
title = 'Infant Treatment'
@property
def filters(self):
return []
@property
def columns(self):
from custom.tdh.models import TDHInfantTreatmentFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHInfantTreatmentFluff
return [DataTablesColumn(self.header(k)) for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHInfantTreatmentFluff
return [k for k in TDHInfantTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
class NewbornTreatment(BaseSqlData):
table_name = "fluff_TDHNewbornTreatmentFluff"
slug = 'newborn_treatment'
title = 'Newborn Treatment'
@property
def filters(self):
return []
@property
def group_by(self):
return ['case_id', 'infection_grave_treat_0', 'infection_grave_treat_1', 'infection_grave_no_ref_treat_0',
'infection_grave_no_ref_treat_1', 'infection_locale_treat_0', 'infection_locale_treat_1',
'incapable_nourrir_treat_0', 'incapable_nourrir_treat_1']
class NewbornTreatmentExtended(BaseSqlData):
table_name = "fluff_TDHNewbornTreatmentFluff"
slug = 'newborn_treatment'
title = 'Newborn Treatment'
@property
def filters(self):
return []
@property
def columns(self):
from custom.tdh.models import TDHNewbornTreatmentFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHNewbornTreatmentFluff
return [DataTablesColumn(self.header(k)) for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHNewbornTreatmentFluff
return [k for k in TDHNewbornTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
class ChildTreatment(BaseSqlData):
table_name = "fluff_TDHChildTreatmentFluff"
slug = 'child_treatment'
title = 'Child Treatment'
@property
def filters(self):
return []
@property
def group_by(self):
return ['case_id', 'pneumonie_grave_treat_0',
'pneumonie_grave_treat_1', 'pneumonie_grave_treat_4', 'pneumonie_grave_no_ref_treat_0',
'pneumonie_grave_no_ref_treat_1', 'pneumonie_grave_no_ref_treat_3',
'pneumonie_grave_no_ref_treat_5', 'pneumonie_grave_no_ref_treat_6', 'pneumonie_treat_0',
'pneumonie_treat_1', 'deshydratation_severe_pas_grave_perfusion_treat_3',
'deshydratation_severe_pas_grave_perfusion_treat_4',
'deshydratation_severe_pas_grave_perfusion_treat_5',
'deshydratation_severe_pas_grave_perfusion_treat_6',
'deshydratation_severe_pas_grave_perfusion_treat_8',
'deshydratation_severe_pas_grave_perfusion_treat_9',
'deshydratation_severe_pas_grave_perfusion_treat_10',
'deshydratation_severe_pas_grave_perfusion_treat_11',
'deshydratation_severe_pas_grave_perfusion_treat_15',
'deshydratation_severe_pas_grave_perfusion_treat_16',
'deshydratation_severe_pas_grave_sng_treat_2', 'deshydratation_severe_pas_grave_sng_treat_3',
'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_3',
'deshydratation_severe_pas_grave_sans_sng_sans_perfusion_treat_4', 'signes_deshydratation_treat_0',
'signes_deshydratation_treat_3', 'pas_deshydratation_treat_1', 'dysenterie_treat_1',
'dysenterie_treat_2', 'dysenterie_treat_3', 'diahree_persistante_treat_0',
'diahree_persistante_treat_1', 'paludisme_grave_treat_0', 'paludisme_grave_treat_1',
'paludisme_grave_treat_2', 'paludisme_grave_treat_4', 'paludisme_grave_treat_5',
'paludisme_grave_treat_7', 'paludisme_grave_no_ref_treat_0', 'paludisme_grave_no_ref_treat_1',
'paludisme_grave_no_ref_treat_2', 'paludisme_grave_no_ref_treat_3',
'paludisme_grave_no_ref_treat_5', 'paludisme_grave_no_ref_treat_6', 'paludisme_simple_treat_1',
'paludisme_simple_treat_2', 'paludisme_simple_treat_3', 'paludisme_simple_treat_4',
'paludisme_simple_treat_6', 'rougeole_compliquee_treat_0', 'rougeole_compliquee_treat_1',
'rougeole_compliquee_treat_2', 'rougeole_compliquee_treat_3', 'rougeole_complications_treat_0',
'rougeole_complications_treat_1', 'rougeole_treat_0', 'rougeole_treat_1', 'rougeole_treat_2',
'rougeole_treat_3', 'antecedent_rougeole_treat_0', 'antecedent_rougeole_treat_1',
'mastoidite_treat_0', 'mastoidite_treat_1', 'mastoidite_treat_2',
'infection_aigue_oreille_treat_0', 'infection_aigue_oreille_treat_1', 'anemie_grave_treat_0',
'anemie_treat_0', 'anemie_treat_1', 'anemie_treat_2', 'anemie_treat_3', 'anemie_treat_4',
'anemie_treat_5', 'anemie_treat_6', 'mass_treat_2', 'mass_treat_3', 'mass_treat_4', 'mass_treat_5',
'mass_treat_7', 'mass_treat_8', 'mam_treat_2', 'mam_treat_3', 'mam_treat_5', 'mam_treat_6',
'mam_treat_7', 'pas_malnutrition_treat_2', 'pas_malnutrition_treat_3',
'vih_symp_confirmee_treat_1', 'vih_symp_confirmee_treat_2', 'vih_symp_confirmee_treat_4',
'vih_confirmee_treat_1', 'vih_confirmee_treat_2', 'vih_confirmee_treat_4',
'vih_symp_probable_treat_1', 'vih_symp_probable_treat_2', 'vih_symp_probable_treat_3',
'vih_possible_treat_1', 'vih_possible_treat_2', 'vih_possible_treat_3',
'paludisme_grave_tdr_negatif_treat_0', 'paludisme_grave_tdr_negatif_treat_1',
'paludisme_grave_tdr_negatif_treat_3', 'paludisme_grave_tdr_negatif_treat_4',
'paludisme_grave_tdr_negatif_treat_6', 'vitamine_a']
class ChildTreatmentExtended(BaseSqlData):
table_name = "fluff_TDHChildTreatmentFluff"
slug = 'child_treatment'
title = 'Child Treatment'
@property
def filters(self):
return []
@property
def columns(self):
from custom.tdh.models import TDHChildTreatmentFluff
return [DatabaseColumn(k, SimpleColumn(k)) for k in TDHChildTreatmentFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS]
@property
def headers(self):
from custom.tdh.models import TDHChildTreatmentFluff
return [DataTablesColumn(self.header(k)) for k in TDHChildTreatmentFluff().__dict__['_obj'].keys() if
k not in UNNECESSARY_FIELDS + ['case_id']]
@property
def group_by(self):
from custom.tdh.models import TDHChildTreatmentFluff
return [k for k in TDHChildTreatmentFluff().__dict__['_obj'].keys() if k not in UNNECESSARY_FIELDS]
|
puttarajubr/commcare-hq
|
custom/tdh/sqldata.py
|
Python
|
bsd-3-clause
| 25,506
| 0.004822
|
import numpy as np
import copy
import datetime as dt
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkstudy.EventProfiler as ep
from bollinger import Bollinger
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit (positively confirms the event occurrence)
"""
def find_events(ls_symbols, d_data):
df_close = d_data['close']
ts_market = df_close['SPY']
print "Finding Events"
# Creating an empty dataframe
df_events = copy.deepcopy(df_close)
df_events = df_events * np.NAN
# Time stamps for the event range
ldt_timestamps = df_close.index
for s_sym in ls_symbols:
for i in range(1, len(ldt_timestamps)):
# Calculating the returns for this timestamp
f_symprice_today = df_close[s_sym].ix[ldt_timestamps[i]]
f_symprice_yest = df_close[s_sym].ix[ldt_timestamps[i - 1]]
#f_marketprice_today = ts_market.ix[ldt_timestamps[i]]
#f_marketprice_yest = ts_market.ix[ldt_timestamps[i - 1]]
#f_symreturn_today = (f_symprice_today / f_symprice_yest) - 1
#f_marketreturn_today = (f_marketprice_today / f_marketprice_yest) - 1
'''
Bollinger value of equity today < -2.0
Bollinger value of equity yesterday >= -2.0
Bollinger value of SPY today >= 1.5
'''
bollinger_obj = Bollinger(df_close)
equity_today = bollinger_obj.get_value(ldt_timestamps[i], s_sym)
equity_yesterday = bollinger_obj.get_value(ldt_timestamps[i - 1], s_sym)
mkt_today = bollinger_obj.get_value(ldt_timestamps[i], 'SPY')
if equity_today < -2.0 and equity_yesterday >= -2.0 and mkt_today >= 1.5:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
return df_events
if __name__ == '__main__':
dt_start = dt.datetime(2008, 1, 1)
dt_end = dt.datetime(2009, 12, 31)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
dataobj = da.DataAccess('Yahoo')
ls_symbols = dataobj.get_symbols_from_list('sp5002012')
ls_symbols.append('SPY')
ls_keys = ['close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_events = find_events(ls_symbols, d_data)
print "Creating Study"
ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
s_filename='BollingerStudy.pdf', b_market_neutral=True, b_errorbars=True,
s_market_sym='SPY')
|
RoyNexus/python
|
homework6.py
|
Python
|
unlicense
| 3,193
| 0.003758
|
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
''' Generic XML bibliographic style handler '''
import string
from Pyblio.Style import Parser
from Pyblio import Autoload, recode
def author_desc (group, coding, initials = 0, reverse = 0):
""" Create a nice string describing a group of authors.
coding : name of the output coding (as requested for recode)
initials : if = 1, uses initials instead of complete first names
reverse :
-1 use First Last format
        0 use Last, First, except for the first entry
1 use Last, First for all the authors, not only the first
"""
l = len (group)
fulltext = ""
for i in range (0, l):
(honorific, first, last, lineage) = group [i].format (coding)
if initials:
first = group [i].initials (coding)
text = ""
if reverse == 1 or (i == 0 and reverse == 0):
if last: text = text + last
if lineage: text = text + ", " + lineage
if first: text = text + ", " + first
else:
if first: text = first + " "
if last: text = text + last
if lineage: text = text + ", " + lineage
if text:
if i < l - 2:
text = text + ", "
elif i == l - 2:
text = text + " and "
fulltext = fulltext + text
    # avoid a dot at the end of the author list
    if fulltext and fulltext [-1] == '.':
        fulltext = fulltext [0:-1]
return fulltext
def string_key (entry, fmt, table):
""" Generates an alphabetical key for an entry. fmt is the
output coding """
rc = recode.recode ("latin1.." + fmt)
if entry.has_key ('author'): aut = entry ['author']
elif entry.has_key ('editor'): aut = entry ['editor']
else: aut = ()
if len (aut) > 0:
if len (aut) > 1:
key = ''
for a in aut:
honorific, first, last, lineage = a.format (fmt)
key = key + string.join (map (lambda x:
x [0], string.split (last, ' ')), '')
if len (key) >= 3:
if len (aut) > 3:
key = key + '+'
break
else:
honorific, first, last, lineage = aut [0].format (fmt)
parts = string.split (last, ' ')
if len (parts) == 1:
key = parts [0][0:3]
else:
key = string.join (map (lambda x: x [0], parts), '')
else:
key = rc (entry.key.key [0:3])
if entry.has_key ('date'):
year = entry ['date'].format (fmt) [0]
if year:
key = key + year [2:]
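    # Disambiguate duplicate keys: an existing plain key is renamed with an
    # 'a' suffix and the new entry tries 'b', 'c', ... until a free key is
    # found.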
if table.has_key (key) or table.has_key (key + 'a'):
if table.has_key (key):
# rename the old entry
new = key + 'a'
table [new] = table [key]
del table [key]
base = key
suff = ord ('b')
key = base + chr (suff)
while table.has_key (key):
suff = suff + 1
key = base + chr (suff)
return key
def numeric_key (entry, fmt, table):
count = 1
while table.has_key (str (count)):
count = count + 1
return str (count)
def create_string_key (database, keys, fmt):
table = {}
for key in keys:
s = string_key (database [key], fmt, table)
table [s] = key
skeys = table.keys ()
skeys.sort ()
return table, skeys
def create_numeric_key (database, keys, fmt):
table = {}
skeys = []
for key in keys:
s = numeric_key (database [key], fmt, table)
table [s] = key
skeys.append (s)
return table, skeys
def standard_date (entry, coding):
(text, month, day) = entry.format (coding)
if month: text = "%s/%s" % (month, text)
if day : text = "%s/%s" % (day, text)
return text
def last_first_full_authors (entry, coding):
return author_desc (entry, coding, 0, 1)
def first_last_full_authors (entry, coding):
return author_desc (entry, coding, 0, -1)
def full_authors (entry, coding):
return author_desc (entry, coding, 0, 0)
def initials_authors (entry, coding):
return author_desc (entry, coding, 1, 0)
def first_last_initials_authors (entry, coding):
return author_desc (entry, coding, 1, -1)
def last_first_initials_authors (entry, coding):
return author_desc (entry, coding, 1, 1)
Autoload.register ('style', 'Generic', {
'first_last_full_authors' : first_last_full_authors,
'last_first_full_authors' : last_first_full_authors,
'full_authors' : full_authors,
'first_last_initials_authors' : first_last_initials_authors,
'last_first_initials_authors' : last_first_initials_authors,
'initials_authors' : initials_authors,
'string_keys' : create_string_key,
'numeric_keys' : create_numeric_key,
'european_date' : standard_date,
})
|
matthew-brett/pyblio
|
Pyblio/Style/Generic.py
|
Python
|
gpl-2.0
| 5,843
| 0.023105
|
from PyQt5 import QtCore, QtWidgets
from ngspiceSimulation.pythonPlotting import plotWindow
from ngspiceSimulation.NgspiceWidget import NgspiceWidget
from configuration.Appconfig import Appconfig
from modelEditor.ModelEditor import ModelEditorclass
from subcircuit.Subcircuit import Subcircuit
from maker.makerchip import makerchip
from kicadtoNgspice.KicadtoNgspice import MainWindow
from browser.Welcome import Welcome
from browser.UserManual import UserManual
from ngspicetoModelica.ModelicaUI import OpenModelicaEditor
import os
dockList = ['Welcome']
count = 1
dock = {}
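# 'dock' maps dock-widget titles to their QDockWidget instances, while
# 'count' provides a running number so each newly created tab gets a
# unique title.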
class DockArea(QtWidgets.QMainWindow):
"""
    This class contains functions for designing the UI of all the editors
    in the dock area:
- Test Editor.
- Model Editor.
- Python Plotting.
- Ngspice Editor.
- Kicad to Ngspice Editor.
- Subcircuit Editor.
- Modelica editor.
"""
def __init__(self):
"""This act as constructor for class DockArea."""
QtWidgets.QMainWindow.__init__(self)
self.obj_appconfig = Appconfig()
for dockName in dockList:
dock[dockName] = QtWidgets.QDockWidget(dockName)
self.welcomeWidget = QtWidgets.QWidget()
self.welcomeLayout = QtWidgets.QVBoxLayout()
self.welcomeLayout.addWidget(Welcome()) # Call browser
# Adding to main Layout
self.welcomeWidget.setLayout(self.welcomeLayout)
dock[dockName].setWidget(self.welcomeWidget)
# CSS
dock[dockName].setStyleSheet(" \
QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock[dockName])
# self.tabifyDockWidget(dock['Notes'],dock['Blank'])
self.show()
def createTestEditor(self):
"""This function create widget for Library Editor"""
global count
self.testWidget = QtWidgets.QWidget()
self.testArea = QtWidgets.QTextEdit()
self.testLayout = QtWidgets.QVBoxLayout()
self.testLayout.addWidget(self.testArea)
# Adding to main Layout
self.testWidget.setLayout(self.testLayout)
dock['Tips-' + str(count)] = \
QtWidgets.QDockWidget('Tips-' + str(count))
dock['Tips-' + str(count)].setWidget(self.testWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Tips-' + str(count)])
self.tabifyDockWidget(
dock['Welcome'], dock['Tips-' + str(count)])
dock['Tips-' + str(count)].setVisible(True)
dock['Tips-' + str(count)].setFocus()
dock['Tips-' + str(count)].raise_()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['Tips-' + str(count)]
)
count = count + 1
def plottingEditor(self):
"""This function create widget for interactive PythonPlotting."""
self.projDir = self.obj_appconfig.current_project["ProjectName"]
self.projName = os.path.basename(self.projDir)
# self.project = os.path.join(self.projDir, self.projName)
global count
self.plottingWidget = QtWidgets.QWidget()
self.plottingLayout = QtWidgets.QVBoxLayout()
self.plottingLayout.addWidget(plotWindow(self.projDir, self.projName))
# Adding to main Layout
self.plottingWidget.setLayout(self.plottingLayout)
dock['Plotting-' + str(count)
] = QtWidgets.QDockWidget('Plotting-' + str(count))
dock['Plotting-' + str(count)].setWidget(self.plottingWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Plotting-' + str(count)])
self.tabifyDockWidget(dock['Welcome'], dock['Plotting-' + str(count)])
dock['Plotting-' + str(count)].setVisible(True)
dock['Plotting-' + str(count)].setFocus()
dock['Plotting-' + str(count)].raise_()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['Plotting-' + str(count)]
)
count = count + 1
def ngspiceEditor(self, projDir):
""" This function creates widget for Ngspice window."""
self.projDir = projDir
self.projName = os.path.basename(self.projDir)
self.ngspiceNetlist = os.path.join(
self.projDir, self.projName + ".cir.out")
# Edited by Sumanto Kar 25/08/2021
if os.path.isfile(self.ngspiceNetlist) is False:
return False
global count
self.ngspiceWidget = QtWidgets.QWidget()
self.ngspiceLayout = QtWidgets.QVBoxLayout()
self.ngspiceLayout.addWidget(
NgspiceWidget(self.ngspiceNetlist, self.projDir)
)
# Adding to main Layout
self.ngspiceWidget.setLayout(self.ngspiceLayout)
dock['NgSpice-' + str(count)
] = QtWidgets.QDockWidget('NgSpice-' + str(count))
dock['NgSpice-' + str(count)].setWidget(self.ngspiceWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['NgSpice-' + str(count)])
self.tabifyDockWidget(dock['Welcome'], dock['NgSpice-' + str(count)])
# CSS
dock['NgSpice-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray; padding: 0px;\
width: 200px; height: 150px; } \
")
dock['NgSpice-' + str(count)].setVisible(True)
dock['NgSpice-' + str(count)].setFocus()
dock['NgSpice-' + str(count)].raise_()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['NgSpice-' + str(count)]
)
count = count + 1
def modelEditor(self):
"""This function defines UI for model editor."""
print("in model editor")
global count
self.modelwidget = QtWidgets.QWidget()
self.modellayout = QtWidgets.QVBoxLayout()
self.modellayout.addWidget(ModelEditorclass())
# Adding to main Layout
self.modelwidget.setLayout(self.modellayout)
dock['Model Editor-' +
str(count)] = QtWidgets.QDockWidget('Model Editor-' + str(count))
dock['Model Editor-' + str(count)].setWidget(self.modelwidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Model Editor-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['Model Editor-' + str(count)])
# CSS
dock['Model Editor-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray; \
padding: 5px; width: 200px; height: 150px; } \
")
dock['Model Editor-' + str(count)].setVisible(True)
dock['Model Editor-' + str(count)].setFocus()
dock['Model Editor-' + str(count)].raise_()
count = count + 1
def kicadToNgspiceEditor(self, clarg1, clarg2=None):
"""
        This function creates the editor UI for Kicad to Ngspice conversion.
"""
global count
self.kicadToNgspiceWidget = QtWidgets.QWidget()
self.kicadToNgspiceLayout = QtWidgets.QVBoxLayout()
self.kicadToNgspiceLayout.addWidget(MainWindow(clarg1, clarg2))
self.kicadToNgspiceWidget.setLayout(self.kicadToNgspiceLayout)
dock['kicadToNgspice-' + str(count)] = \
QtWidgets.QDockWidget('kicadToNgspice-' + str(count))
dock['kicadToNgspice-' +
str(count)].setWidget(self.kicadToNgspiceWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['kicadToNgspice-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['kicadToNgspice-' + str(count)])
# CSS
dock['kicadToNgspice-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
dock['kicadToNgspice-' + str(count)].setVisible(True)
dock['kicadToNgspice-' + str(count)].setFocus()
dock['kicadToNgspice-' + str(count)].raise_()
dock['kicadToNgspice-' + str(count)].activateWindow()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['kicadToNgspice-' + str(count)]
)
count = count + 1
def subcircuiteditor(self):
"""This function creates a widget for different subcircuit options."""
global count
self.subcktWidget = QtWidgets.QWidget()
self.subcktLayout = QtWidgets.QVBoxLayout()
self.subcktLayout.addWidget(Subcircuit(self))
self.subcktWidget.setLayout(self.subcktLayout)
dock['Subcircuit-' +
str(count)] = QtWidgets.QDockWidget('Subcircuit-' + str(count))
dock['Subcircuit-' + str(count)].setWidget(self.subcktWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Subcircuit-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['Subcircuit-' + str(count)])
# CSS
dock['Subcircuit-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
dock['Subcircuit-' + str(count)].setVisible(True)
dock['Subcircuit-' + str(count)].setFocus()
dock['Subcircuit-' + str(count)].raise_()
count = count + 1
def makerchip(self):
"""This function creates a widget for different subcircuit options."""
global count
self.makerWidget = QtWidgets.QWidget()
self.makerLayout = QtWidgets.QVBoxLayout()
self.makerLayout.addWidget(makerchip(self))
self.makerWidget.setLayout(self.makerLayout)
dock['Makerchip-' +
str(count)] = QtWidgets.QDockWidget('Makerchip-' + str(count))
dock['Makerchip-' + str(count)].setWidget(self.makerWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Makerchip-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['Makerchip-' + str(count)])
# CSS
dock['Makerchip-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
dock['Makerchip-' + str(count)].setVisible(True)
dock['Makerchip-' + str(count)].setFocus()
dock['Makerchip-' + str(count)].raise_()
count = count + 1
def usermanual(self):
"""This function creates a widget for user manual."""
global count
self.usermanualWidget = QtWidgets.QWidget()
self.usermanualLayout = QtWidgets.QVBoxLayout()
self.usermanualLayout.addWidget(UserManual())
self.usermanualWidget.setLayout(self.usermanualLayout)
dock['User Manual-' +
str(count)] = QtWidgets.QDockWidget('User Manual-' + str(count))
dock['User Manual-' + str(count)].setWidget(self.usermanualWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['User Manual-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['User Manual-' + str(count)])
# CSS
dock['User Manual-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
dock['User Manual-' + str(count)].setVisible(True)
dock['User Manual-' + str(count)].setFocus()
dock['User Manual-' + str(count)].raise_()
count = count + 1
def modelicaEditor(self, projDir):
"""This function sets up the UI for ngspice to modelica conversion."""
global count
self.modelicaWidget = QtWidgets.QWidget()
self.modelicaLayout = QtWidgets.QVBoxLayout()
self.modelicaLayout.addWidget(OpenModelicaEditor(projDir))
self.modelicaWidget.setLayout(self.modelicaLayout)
dock['Modelica-' + str(count)
] = QtWidgets.QDockWidget('Modelica-' + str(count))
dock['Modelica-' + str(count)].setWidget(self.modelicaWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Modelica-' + str(count)])
self.tabifyDockWidget(dock['Welcome'], dock['Modelica-' + str(count)])
dock['Modelica-' + str(count)].setVisible(True)
dock['Modelica-' + str(count)].setFocus()
dock['Modelica-' + str(count)].raise_()
# CSS
dock['Modelica-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['Modelica-' + str(count)]
)
count = count + 1
def closeDock(self):
"""
        This function looks up the current project in **dock_dict**
        and closes all of its dock widgets.
"""
self.temp = self.obj_appconfig.current_project['ProjectName']
for dockwidget in self.obj_appconfig.dock_dict[self.temp]:
dockwidget.close()
|
FOSSEE/eSim
|
src/frontEnd/DockArea.py
|
Python
|
gpl-3.0
| 13,847
| 0
|
# coding=utf-8
"""Clip and mask a hazard layer."""
import logging
from qgis.core import (
QgsGeometry,
QgsFeatureRequest,
QgsWkbTypes,
QgsFeature,
)
from safe.definitions.fields import hazard_class_field, aggregation_id_field
from safe.definitions.hazard_classifications import not_exposed_class
from safe.definitions.processing_steps import union_steps
from safe.gis.sanity_check import check_layer
from safe.gis.vector.clean_geometry import geometry_checker
from safe.gis.vector.tools import (
create_memory_layer, wkb_type_groups, create_spatial_index)
from safe.utilities.i18n import tr
from safe.utilities.profiling import profile
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
@profile
def union(union_a, union_b):
"""Union of two vector layers.
Issue https://github.com/inasafe/inasafe/issues/3186
Note : This algorithm is copied from :
https://github.com/qgis/QGIS/blob/master/python/plugins/processing/algs/
qgis/Union.py
:param union_a: The vector layer for the union.
:type union_a: QgsVectorLayer
:param union_b: The vector layer for the union.
:type union_b: QgsVectorLayer
    :return: The union vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = union_steps['output_layer_name']
output_layer_name = output_layer_name % (
union_a.keywords['layer_purpose'],
union_b.keywords['layer_purpose']
)
fields = union_a.fields()
fields.extend(union_b.fields())
writer = create_memory_layer(
output_layer_name,
union_a.geometryType(),
union_a.crs(),
fields
)
keywords_union_1 = union_a.keywords
keywords_union_2 = union_b.keywords
inasafe_fields_union_1 = keywords_union_1['inasafe_fields']
inasafe_fields_union_2 = keywords_union_2['inasafe_fields']
inasafe_fields = inasafe_fields_union_1
inasafe_fields.update(inasafe_fields_union_2)
# use to avoid modifying original source
writer.keywords = dict(union_a.keywords)
writer.keywords['inasafe_fields'] = inasafe_fields
writer.keywords['title'] = output_layer_name
writer.keywords['layer_purpose'] = 'aggregate_hazard'
writer.keywords['hazard_keywords'] = keywords_union_1.copy()
writer.keywords['aggregation_keywords'] = keywords_union_2.copy()
skip_field = inasafe_fields_union_2[aggregation_id_field['key']]
not_null_field_index = writer.fields().lookupField(skip_field)
writer.startEditing()
# Begin copy/paste from Processing plugin.
# Please follow their code as their code is optimized.
# The code below is not following our coding standards because we want to
# be able to track any diffs from QGIS easily.
index_a = create_spatial_index(union_b)
index_b = create_spatial_index(union_a)
count = 0
n_element = 0
# Todo fix callback
# nFeat = len(union_a.getFeatures())
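    # First pass: for every feature of union_a, write its intersections with
    # overlapping union_b features, plus whatever part of its geometry is
    # left uncovered by union_b.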
for in_feat_a in union_a.getFeatures():
# progress.setPercentage(nElement / float(nFeat) * 50)
n_element += 1
list_intersecting_b = []
geom = geometry_checker(in_feat_a.geometry())
at_map_a = in_feat_a.attributes()
intersects = index_a.intersects(geom.boundingBox())
if len(intersects) < 1:
try:
_write_feature(at_map_a, geom, writer, not_null_field_index)
except BaseException:
# This really shouldn't happen, as we haven't
# edited the input geom at all
LOGGER.debug(
tr('Feature geometry error: One or more output features '
'ignored due to invalid geometry.'))
else:
request = QgsFeatureRequest().setFilterFids(intersects)
engine = QgsGeometry.createGeometryEngine(geom.constGet())
engine.prepareGeometry()
for in_feat_b in union_b.getFeatures(request):
count += 1
at_map_b = in_feat_b.attributes()
tmp_geom = geometry_checker(in_feat_b.geometry())
if engine.intersects(tmp_geom.constGet()):
int_geom = geometry_checker(geom.intersection(tmp_geom))
list_intersecting_b.append(QgsGeometry(tmp_geom))
if not int_geom:
# There was a problem creating the intersection
# LOGGER.debug(
# tr('GEOS geoprocessing error: One or more input '
# 'features have invalid geometry.'))
pass
int_geom = QgsGeometry()
else:
int_geom = QgsGeometry(int_geom)
if int_geom.wkbType() == QgsWkbTypes.UnknownGeometry \
or QgsWkbTypes.flatType(
int_geom.constGet().wkbType()) == \
QgsWkbTypes.GeometryCollection:
# Intersection produced different geometry types
temp_list = int_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
int_geom = QgsGeometry(geometry_checker(i))
try:
_write_feature(
at_map_a + at_map_b,
int_geom,
writer,
not_null_field_index,
)
except BaseException:
LOGGER.debug(
tr('Feature geometry error: One or '
'more output features ignored due '
'to invalid geometry.'))
else:
# Geometry list: prevents writing error
# in geometries of different types
# produced by the intersection
# fix #3549
if int_geom.wkbType() in wkb_type_groups[
wkb_type_groups[int_geom.wkbType()]]:
try:
_write_feature(
at_map_a + at_map_b,
int_geom,
writer,
not_null_field_index)
except BaseException:
LOGGER.debug(
tr('Feature geometry error: One or more '
'output features ignored due to '
'invalid geometry.'))
# the remaining bit of inFeatA's geometry
# if there is nothing left, this will just silently fail and we
# are good
diff_geom = QgsGeometry(geom)
if len(list_intersecting_b) != 0:
int_b = QgsGeometry.unaryUnion(list_intersecting_b)
diff_geom = geometry_checker(diff_geom.difference(int_b))
if diff_geom is None or \
diff_geom.isEmpty() or not diff_geom.isGeosValid():
# LOGGER.debug(
# tr('GEOS geoprocessing error: One or more input '
# 'features have invalid geometry.'))
pass
if diff_geom is not None and (
diff_geom.wkbType() == 0 or QgsWkbTypes.flatType(
diff_geom.constGet().wkbType()) ==
QgsWkbTypes.GeometryCollection):
temp_list = diff_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
diff_geom = QgsGeometry(geometry_checker(i))
try:
_write_feature(
at_map_a,
diff_geom,
writer,
not_null_field_index)
except BaseException:
LOGGER.debug(
tr('Feature geometry error: One or more output features '
'ignored due to invalid geometry.'))
length = len(union_a.fields())
# nFeat = len(union_b.getFeatures())
for in_feat_a in union_b.getFeatures():
# progress.setPercentage(nElement / float(nFeat) * 100)
geom = geometry_checker(in_feat_a.geometry())
atMap = [None] * length
atMap.extend(in_feat_a.attributes())
intersects = index_b.intersects(geom.boundingBox())
lstIntersectingA = []
for id in intersects:
request = QgsFeatureRequest().setFilterFid(id)
inFeatB = next(union_a.getFeatures(request))
tmpGeom = QgsGeometry(geometry_checker(inFeatB.geometry()))
if geom.intersects(tmpGeom):
lstIntersectingA.append(tmpGeom)
if len(lstIntersectingA) == 0:
res_geom = geom
else:
intA = QgsGeometry.unaryUnion(lstIntersectingA)
res_geom = geom.difference(intA)
if res_geom is None:
# LOGGER.debug(
# tr('GEOS geoprocessing error: One or more input features '
# 'have null geometry.'))
pass
continue # maybe it is better to fail like @gustry
# does below ....
if res_geom.isEmpty() or not res_geom.isGeosValid():
# LOGGER.debug(
# tr('GEOS geoprocessing error: One or more input features '
# 'have invalid geometry.'))
pass
try:
_write_feature(atMap, res_geom, writer, not_null_field_index)
except BaseException:
# LOGGER.debug(
# tr('Feature geometry error: One or more output features '
# 'ignored due to invalid geometry.'))
pass
n_element += 1
# End of copy/paste from processing
writer.commitChanges()
fill_hazard_class(writer)
check_layer(writer)
return writer
def _write_feature(attributes, geometry, writer, not_null_field_index):
"""
Internal function to write the feature to the output.
:param attributes: Attributes of the feature.
:type attributes: list
:param geometry: The geometry to write to the output.
:type geometry: QgsGeometry
:param writer: A vector layer in editing mode.
:type writer: QgsVectorLayer
:param not_null_field_index: The index in the attribute table which should
not be null.
:type not_null_field_index: int
"""
if writer.geometryType() != geometry.type():
# We don't write the feature if it's not the same geometry type.
return
compulsory_field = attributes[not_null_field_index]
if not compulsory_field:
# We don't want a feature without a compulsory field.
# This is probably a bug in the union algorithm.
return
out_feature = QgsFeature()
out_feature.setGeometry(geometry)
out_feature.setAttributes(attributes)
writer.addFeature(out_feature)
@profile
def fill_hazard_class(layer):
"""We need to fill hazard class when it's empty.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:return: The updated vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
hazard_field = layer.keywords['inasafe_fields'][hazard_class_field['key']]
expression = '"%s" is NULL OR "%s" = \'\'' % (hazard_field, hazard_field)
index = layer.fields().lookupField(hazard_field)
request = QgsFeatureRequest().setFilterExpression(expression)
layer.startEditing()
for feature in layer.getFeatures(request):
layer.changeAttributeValue(
feature.id(),
index,
not_exposed_class['key'])
layer.commitChanges()
return layer
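# Illustrative note (field name assumed for the example): for a hazard field
# called "hazard_class", the filter expression built above evaluates to
#   "hazard_class" is NULL OR "hazard_class" = ''
# so only features with an empty hazard class are visited and rewritten.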
|
ismailsunni/inasafe
|
safe/gis/vector/union.py
|
Python
|
gpl-3.0
| 12,362
| 0
|
import os
import glob
import subprocess
import argparse
def expand_path(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def is_file(path):
if not path:
return False
if not os.path.isfile(path):
return False
return True
def arg_is_file(path):
if not is_file(path):
msg = '{0!r} is not a file'.format(path)
raise argparse.ArgumentTypeError(msg)
return expand_path(path)
def run_jmodeltest(name):
jmodel_proc=subprocess.Popen('java -jar ~/phylo_tools/jmodeltest-2.1.5/jModelTest.jar -d '+str(name)+' -s 3 -f -i -g 4 -BIC -c 0.95 > '+str(name)+'.results.txt', shell=True, executable='/bin/bash')
jmodel_proc.wait()
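# For example (illustrative), run_jmodeltest('gene1.nex') spawns the shell command:
#   java -jar ~/phylo_tools/jmodeltest-2.1.5/jModelTest.jar -d gene1.nex -s 3 -f -i -g 4 -BIC -c 0.95 > gene1.nex.results.txt
# and waits for it to finish before returning.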
def get_models(f, gene_name, out):
fl = open(f)
for line in fl:
line=line.strip()
if "the 95% confidence interval" in line:
model=line.split(': ')[1]
out.write(str(gene_name)+'\t'+str(model)+'\n')
def main():
for f in glob.glob('*.nex'):
run_jmodeltest(f)
out=open('models.txt','w')
for f in glob.glob('*.results.txt'):
gene_name=f.split('.')[0]
get_models(f, gene_name,out)
''' description = ('This program will run jModelTest on a single file or set '
'of files in nexus format. User can choose the set of models '
'and type of summary using flags. The standard 24 models used '
'in MrBayes and BIC summary with 95% credible set are defaults.')
FILE_FORMATS = ['nex']
parser = argparse.ArgumentParser(description = description)
parser.add_argument('input_files', metavar='INPUT-SEQ-FILE',
nargs = '+',
type = arg_is_file,
help = ('Input sequence file(s) name '))
parser.add_argument('-o', '--out-format',
type = str,
choices = ['nex', 'fasta', 'phy'],
help = ('The format of the output sequence file(s). Valid options '))
parser.add_argument('-j', '--path-to-jModelTest',
type = str,
help=('The full path to the jModelTest executable'))
parser.add_argument('-s', '--substitution-models',
type = str,
choices = ['3','5','7','11'],
default = ['3'],
help = ('Number of substitution schemes to test. Default is all GTR models "-s 3".'))
parser.add_argument('-g', '--gamma',
type = str,
default = ['4'],
help = ('Include models with rate variation among sites and number of categories (e.g., -g 8)'))
parser.add_argument('-i', '--invar',
type = str,
default = ['false'],
help = ('Include models with a proportion of invariable sites (e.g., -i)'))
args = parser.parse_args()
for f in args.input_files:
in_type=os.path.splitext(f)[1]
filename=os.path.splitext(f)[0]
if in_type == '.nex' or in_type == '.nexus':
dict=in_nex(f)
elif in_type == '.fa' or in_type == '.fas' or in_type == '.fasta':
dict=in_fasta(f)
elif in_type == '.phy' or in_type == '.phylip':
dict=in_phy(f)
if args.out_format == 'nex':
out_nex(dict, filename)
elif args.out_format == 'fasta':
out_fasta(dict, filename)
elif args.out_format == 'phy':
out_phy(dict, filename)'''
if __name__ == '__main__':
main()
|
cwlinkem/linkuce
|
modeltest_runner.py
|
Python
|
gpl-2.0
| 2,993
| 0.033077
|
# coding=utf-8
import random
lista = []
for x in range(10):
numero = random.randint(1, 100)
if x == 0:
maior, menor = numero, numero
elif numero > maior:
maior = numero
elif numero < menor:
menor = numero
lista.append(numero)
lista.sort()
print(lista)
print("Maior: %d" % maior)
print("Menor: %d" % menor)
|
renebentes/Python4Zumbis
|
Exercícios/Lista IV/questao01.py
|
Python
|
mit
| 350
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-26 12:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('codenerix_products', '0011_auto_20180202_0826'),
]
operations = [
migrations.AddField(
model_name='productunique',
name='caducity',
field=models.DateField(blank=True, default=None, null=True, verbose_name='Caducity'),
),
]
|
centrologic/django-codenerix-products
|
codenerix_products/migrations/0012_productunique_caducity.py
|
Python
|
apache-2.0
| 516
| 0.001938
|
#!/usr/bin/env python
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# This script provides help with parsing and reporting of perf results. It currently
# provides three main capabilities:
# 1) Printing perf results to console in 'pretty' format
# 2) Comparing two perf result sets together and displaying comparison results to console
# 3) Outputting the perf results in JUnit format which is useful for plugging in to
# Jenkins perf reporting.
# By default in Python if you divide an int by another int (5 / 2), the result will also
# be an int (2). The following line changes this behavior so that float will be returned
# if necessary (2.5).
from __future__ import division
import difflib
import json
import math
import os
import prettytable
from collections import defaultdict
from datetime import date, datetime
from optparse import OptionParser
from tests.util.calculation_util import calculate_tval, calculate_avg, calculate_stddev
from time import gmtime, strftime
# String constants
AVG = 'avg'
AVG_TIME = 'avg_time'
AVG_TIME_CHANGE = 'avg_time_change'
AVG_TIME_CHANGE_TOTAL = 'avg_time_change_total'
CLIENT_NAME = 'client_name'
COMPRESSION_CODEC = 'compression_codec'
COMPRESSION_TYPE = 'compression_type'
DETAIL = 'detail'
EST_NUM_ROWS = 'est_num_rows'
EST_PEAK_MEM = 'est_peak_mem'
EXECUTOR_NAME = 'executor_name'
EXEC_SUMMARY = 'exec_summary'
FILE_FORMAT = 'file_format'
ITERATIONS = 'iterations'
MAX_TIME = 'max_time'
MAX_TIME_CHANGE = 'max_time_change'
NAME = 'name'
NUM_CLIENTS = 'num_clients'
NUM_HOSTS = 'num_hosts'
NUM_ROWS = 'num_rows'
OPERATOR = 'operator'
PEAK_MEM = 'peak_mem'
PEAK_MEM_CHANGE = 'peak_mem_change'
PREFIX = 'prefix'
QUERY = 'query'
QUERY_STR = 'query_str'
RESULT_LIST = 'result_list'
RUNTIME_PROFILE = 'runtime_profile'
SCALE_FACTOR = 'scale_factor'
STDDEV = 'stddev'
STDDEV_TIME = 'stddev_time'
TEST_VECTOR = 'test_vector'
TIME_TAKEN = 'time_taken'
TOTAL = 'total'
WORKLOAD_NAME = 'workload_name'
parser = OptionParser()
parser.add_option("--input_result_file", dest="result_file",
default=os.environ['IMPALA_HOME'] + '/benchmark_results.json',
help="The input JSON file with benchmark results")
parser.add_option("--reference_result_file", dest="reference_result_file",
default=os.environ['IMPALA_HOME'] + '/reference_benchmark_results.json',
help="The input JSON file with reference benchmark results")
parser.add_option("--junit_output_file", dest="junit_output_file", default='',
help='If set, outputs results in Junit format to the specified file')
parser.add_option("--no_output_table", dest="no_output_table", action="store_true",
default= False, help='Outputs results in table format to the console')
parser.add_option("--report_description", dest="report_description", default=None,
help='Optional description for the report.')
parser.add_option("--cluster_name", dest="cluster_name", default='UNKNOWN',
help="Name of the cluster the results are from (ex. Bolt)")
parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
default= False, help='Outputs to console with increased verbosity')
parser.add_option("--output_all_summary_nodes", dest="output_all_summary_nodes",
action="store_true", default= False,
help='Print all execution summary nodes')
parser.add_option("--build_version", dest="build_version", default='UNKNOWN',
help="Build/version info about the Impalad instance results are from.")
parser.add_option("--lab_run_info", dest="lab_run_info", default='UNKNOWN',
help="Information about the lab run (name/id) that published "\
"the results.")
parser.add_option("--tval_threshold", dest="tval_threshold", default=None,
type="float", help="The ttest t-value at which a performance change "\
"will be flagged as sigificant.")
parser.add_option("--min_percent_change_threshold",
dest="min_percent_change_threshold", default=5.0,
type="float", help="Any performance changes below this threshold" \
" will not be classified as significant. If the user specifies an" \
" empty value, the threshold will be set to 0")
parser.add_option("--max_percent_change_threshold",
dest="max_percent_change_threshold", default=20.0,
type="float", help="Any performance changes above this threshold"\
" will be classified as significant. If the user specifies an" \
" empty value, the threshold will be set to the system's maxint")
parser.add_option("--allowed_latency_diff_secs",
dest="allowed_latency_diff_secs", default=0.0, type="float",
help="If specified, only a timing change that differs by more than\
this value will be considered significant.")
# These parameters are specific to recording results in a database. This is optional
parser.add_option("--save_to_db", dest="save_to_db", action="store_true",
default= False, help='Saves results to the specified database.')
parser.add_option("--is_official", dest="is_official", action="store_true",
default= False, help='Indicates this is an official perf run result')
parser.add_option("--db_host", dest="db_host", default='localhost',
help="Machine hosting the database")
parser.add_option("--db_name", dest="db_name", default='perf_results',
help="Name of the perf database.")
parser.add_option("--db_username", dest="db_username", default='hiveuser',
help="Username used to connect to the database.")
parser.add_option("--db_password", dest="db_password", default='password',
help="Password used to connect to the the database.")
options, args = parser.parse_args()
def get_dict_from_json(filename):
"""Given a JSON file, return a nested dictionary.
Everything in this file is based on the nested dictionary data structure. The dictionary
is structured as follows: the top level maps to workloads. Each workload maps to file
format / compression vectors, and each vector maps to queries. Each query contains a key
"result_list" that maps to a list of QueryResult (look at query.py) dictionaries. The
compute stats method adds additional keys such as "avg" or "stddev" here.
Here's how the keys are structured:
To get a workload, the key looks like this:
(('workload_name', 'tpch'), ('scale_factor', '300gb'))
Each file format / compression vector has a key that looks like this:
(('file_format', 'text'), ('compression_codec', 'zip'),
('compression_type', 'block'))
Each query has a key like this:
(('name', 'TPCH_Q10'),)
This is useful for finding queries in a certain category and computing stats.
Args:
filename (str): path to the JSON file
returns:
dict: a nested dictionary with grouped queries
"""
def add_result(query_result):
"""Add query to the dictionary.
Automatically finds the path in the nested dictionary and adds the result to the
appropriate list.
TODO: This method is hard to reason about, so it needs to be made more streamlined.
"""
def get_key(level_num):
"""Build a key for a particular nesting level.
The key is built by extracting the appropriate values from query_result.
"""
level = list()
# In the outer layer, we group by workload name and scale factor
level.append([('query', 'workload_name'), ('query', 'scale_factor')])
# In the middle layer, we group by file format and compression type
level.append([('query', 'test_vector', 'file_format'),
('query', 'test_vector', 'compression_codec'),
('query', 'test_vector', 'compression_type')])
# In the bottom layer, we group by query name
level.append([('query', 'name')])
key = []
def get_nested_val(path):
"""given a path to a variable in query result, extract the value.
For example, to extract compression_type from the query_result, we need to follow
the this path in the nested dictionary:
"query_result" -> "query" -> "test_vector" -> "compression_type"
"""
cur = query_result
for step in path:
cur = cur[step]
return cur
for path in level[level_num]:
key.append((path[-1], get_nested_val(path)))
return tuple(key)
# grouped is the nested dictionary defined in the outer function get_dict_from_json.
# It stores all the results grouped by query name and other parameters.
cur = grouped
# range(3) because there are 3 levels of nesting, as defined in get_key
for level_num in range(3):
cur = cur[get_key(level_num)]
cur[RESULT_LIST].append(query_result)
with open(filename, "r") as f:
data = json.load(f)
grouped = defaultdict( lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict(list))))
for workload_name, workload in data.items():
for query_result in workload:
add_result(query_result)
# Calculate average runtime and stddev for each query type
calculate_time_stats(grouped)
return grouped
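# Illustrative access pattern (key values assumed): given the key structure
# documented above, the results for one query can be reached with
#   workload_key = (('workload_name', 'tpch'), ('scale_factor', '300gb'))
#   vector_key = (('file_format', 'text'), ('compression_codec', 'none'),
#                 ('compression_type', 'none'))
#   query_key = (('name', 'TPCH_Q10'),)
#   results = grouped[workload_key][vector_key][query_key][RESULT_LIST]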
def calculate_time_stats(grouped):
"""Adds statistics to the nested dictionary. We are calculating the average runtime
and Standard Deviation for each query type.
"""
for workload_scale, workload in grouped.items():
for file_format, queries in workload.items():
for query_name, results in queries.items():
result_list = results[RESULT_LIST]
avg = calculate_avg(
[query_results[TIME_TAKEN] for query_results in result_list])
dev = calculate_stddev(
[query_results[TIME_TAKEN] for query_results in result_list])
num_clients = max(
int(query_results[CLIENT_NAME]) for query_results in result_list)
iterations = len(result_list)
results[AVG] = avg
results[STDDEV] = dev
results[NUM_CLIENTS] = num_clients
results[ITERATIONS] = iterations
def build_perf_change_str(result, ref_result, is_regression):
"""Build a performance change string"""
perf_change_type = "regression" if is_regression else "improvement"
query = result[RESULT_LIST][0][QUERY]
query_name = query[NAME]
file_format = query[TEST_VECTOR][FILE_FORMAT]
compression_codec = query[TEST_VECTOR][COMPRESSION_CODEC]
compression_type = query[TEST_VECTOR][COMPRESSION_TYPE]
template = ("Significant perf {perf_change_type}: "
"{query_name} [{file_format}/{compression_codec}/{compression_type}] "
"({ref_avg:.3f}s -> {avg:.3f}s)")
return template.format(
perf_change_type = perf_change_type,
query_name = query_name,
file_format = file_format,
compression_codec = compression_codec,
compression_type = compression_type,
ref_avg = ref_result[AVG],
avg = result[AVG])
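# Example output of build_perf_change_str (values assumed):
#   Significant perf regression: TPCH_Q10 [parquet/none/none] (2.500s -> 3.100s)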
def prettyprint(val, units, divisor):
""" Print a value in human readable format along with it's unit.
We start at the leftmost unit in the list and keep dividing the value by divisor until
the value is less than divisor. The value is then printed along with the unit type.
Args:
val (int or float): Value to be printed.
units (list of str): Unit names for different sizes.
divisor (float): ratio between two consecutive units.
"""
for unit in units:
if abs(val) < divisor:
if unit == units[0]:
return "%d%s" % (val, unit)
else:
return "%3.2f%s" % (val, unit)
val /= divisor
def prettyprint_bytes(byte_val):
return prettyprint(byte_val, ['B', 'KB', 'MB', 'GB', 'TB'], 1024.0)
def prettyprint_values(unit_val):
return prettyprint(unit_val, ["", "K", "M", "B"], 1000.0)
def prettyprint_time(time_val):
return prettyprint(time_val, ["ns", "us", "ms", "s"], 1000.0)
def prettyprint_percent(percent_val):
return '{0:+.2%}'.format(percent_val)
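# Illustrative outputs of the helpers above (computed from their definitions):
#   prettyprint_time(999)        -> "999ns"
#   prettyprint_time(1500000)    -> "1.50ms"
#   prettyprint_bytes(2048)      -> "2.00KB"
#   prettyprint_values(1234)     -> "1.23K"
#   prettyprint_percent(0.153)   -> "+15.30%"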
class CombinedExecSummaries(object):
"""All execution summaries for each query are combined into this object.
The overall average time is calculated for each node by averaging the average time
from each execution summary. The max time is calculated as the maximum of the
max times.
This object can be compared to another one and ExecSummaryComparison can be generated.
Args:
exec_summaries (list of list of dict): A list of exec summaries (list of dict is how
it is received from the beeswax client).
Attributes:
rows (list of dict): each dict represents a row in the summary table. Each row in rows
is a dictionary. Each dictionary has the following keys:
prefix (str)
operator (str)
num_hosts (int)
num_rows (int)
est_num_rows (int)
detail (str)
avg_time (float): average of the average times in all the execution summaries
stddev_time: standard deviation of times in all the execution summaries
max_time: maximum of max times in all the execution summaries
peak_mem (int)
est_peak_mem (int)
"""
def __init__(self, exec_summaries):
# We want to make sure that all execution summaries have the same structure before
# we can combine them. If not, err_str will contain the reason why we can't combine
# the exec summaries.
ok, err_str = self.__check_exec_summary_schema(exec_summaries)
self.error_str = err_str
self.rows = []
if ok:
self.__build_rows(exec_summaries)
def __build_rows(self, exec_summaries):
first_exec_summary = exec_summaries[0]
for row_num, row in enumerate(first_exec_summary):
combined_row = {}
# Copy fixed values from the first exec summary
for key in [PREFIX, OPERATOR, NUM_HOSTS, NUM_ROWS, EST_NUM_ROWS, DETAIL]:
combined_row[key] = row[key]
avg_times = [exec_summary[row_num][AVG_TIME] for exec_summary in exec_summaries]
max_times = [exec_summary[row_num][MAX_TIME] for exec_summary in exec_summaries]
peak_mems = [exec_summary[row_num][PEAK_MEM] for exec_summary in exec_summaries]
est_peak_mems = [exec_summary[row_num][EST_PEAK_MEM]
for exec_summary in exec_summaries]
# Set the calculated values
combined_row[AVG_TIME] = calculate_avg(avg_times)
combined_row[STDDEV_TIME] = calculate_stddev(avg_times)
combined_row[MAX_TIME] = max(max_times)
combined_row[PEAK_MEM] = max(peak_mems)
combined_row[EST_PEAK_MEM] = max(est_peak_mems)
self.rows.append(combined_row)
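# Example (illustrative): if two exec summaries report avg times of 1.0 and 3.0
# and max times of 1.5 and 3.5 for the same node, the combined row gets
# AVG_TIME = 2.0 (average of the averages) and MAX_TIME = 3.5 (max of the maxes).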
def is_same_schema(self, reference):
"""Check if the reference CombinedExecSummaries summary has the same schema as this
one. (For example, the operator names are the same for each node).
The purpose of this is to check if it makes sense to combine this object with a
reference one to produce ExecSummaryComparison.
Args:
reference (CombinedExecSummaries): the reference summary to compare against.
Returns:
bool: True if the schemas are similar enough to be compared, False otherwise.
"""
if len(self.rows) != len(reference.rows): return False
for row_num, row in enumerate(self.rows):
ref_row = reference.rows[row_num]
if row[OPERATOR] != ref_row[OPERATOR]:
return False
return True
def __str__(self):
if self.error_str: return self.error_str
table = prettytable.PrettyTable(
["Operator",
"#Hosts",
"Avg Time",
"Std Dev",
"Max Time",
"#Rows",
"Est #Rows"])
table.align = 'l'
for row in self.rows:
table_row = [ row[PREFIX] + row[OPERATOR],
prettyprint_values(row[NUM_HOSTS]),
prettyprint_time(row[AVG_TIME]),
prettyprint_time(row[STDDEV_TIME]),
prettyprint_time(row[MAX_TIME]),
prettyprint_values(row[NUM_ROWS]),
prettyprint_values(row[EST_NUM_ROWS])]
table.add_row(table_row)
return str(table)
@property
def total_runtime(self):
return sum([row[AVG_TIME] for row in self.rows])
def __check_exec_summary_schema(self, exec_summaries):
"""Check if all given exec summaries have the same structure.
This method is called to check if it is possible to build a single CombinedExecSummaries
from the list of exec_summaries. (For example, all exec summaries must have the same
number of nodes.)
This method is somewhat similar to is_same_schema. The difference is that
is_same_schema() checks if two CombinedExecSummaries have the same structure and this
method checks if all exec summaries in the list have the same structure.
Args:
exec_summaries (list of dict): each dict represents an exec_summary
Returns:
(bool, str): True if all exec summaries have the same structure, otherwise False
followed by a string containing the explanation.
"""
err = 'Summaries cannot be combined: '
if len(exec_summaries) < 1:
return False, err + 'no exec summaries found'
first_exec_summary = exec_summaries[0]
if len(first_exec_summary) < 1:
return False, err + 'exec summary contains no nodes'
for exec_summary in exec_summaries:
if len(exec_summary) != len(first_exec_summary):
return False, err + 'different number of nodes in exec summaries'
for row_num, row in enumerate(exec_summary):
comp_row = first_exec_summary[row_num]
if row[OPERATOR] != comp_row[OPERATOR]:
return False, err + 'different operator'
return True, str()
class ExecSummaryComparison(object):
"""Represents a comparison between two CombinedExecSummaries.
Args:
combined_summary (CombinedExecSummaries): current summary.
ref_combined_summary (CombinedExecSummaries): reference summaries.
Attributes:
rows (list of dict): Each dict represents a single row. Each dict has the following
keys:
prefix (str)
operator (str)
num_hosts (int)
avg_time (float)
stddev_time (float)
avg_time_change (float): % change in avg time compared to reference
avg_time_change_total (float): % change in avg time compared to total of the query
max_time (float)
max_time_change (float): % change in max time compared to reference
peak_mem (int)
peak_mem_change (float): % change compared to reference
num_rows (int)
est_num_rows (int)
est_peak_mem (int)
detail (str)
combined_summary (CombinedExecSummaries): original combined summary
ref_combined_summary (CombinedExecSummaries): original reference combined summary.
If the comparison cannot be constructed, these summaries can be printed.
Another possible way to implement this is to generate this object when we call
CombinedExecSummaries.compare(reference).
"""
def __init__(self, combined_summary, ref_combined_summary):
# Store the original summaries, in case we can't build a comparison
self.combined_summary = combined_summary
self.ref_combined_summary = ref_combined_summary
# If some error happened during calculations, store it here
self.error_str = str()
self.rows = []
self.__build_rows()
def __build_rows(self):
if self.combined_summary.is_same_schema(self.ref_combined_summary):
for i, row in enumerate(self.combined_summary.rows):
ref_row = self.ref_combined_summary.rows[i]
comparison_row = {}
for key in [PREFIX, OPERATOR, NUM_HOSTS, AVG_TIME, STDDEV_TIME,
MAX_TIME, PEAK_MEM, NUM_ROWS, EST_NUM_ROWS, EST_PEAK_MEM, DETAIL]:
comparison_row[key] = row[key]
comparison_row[AVG_TIME_CHANGE] = self.__calculate_change(
row[AVG_TIME], ref_row[AVG_TIME], ref_row[AVG_TIME])
comparison_row[AVG_TIME_CHANGE_TOTAL] = self.__calculate_change(
row[AVG_TIME], ref_row[AVG_TIME], self.ref_combined_summary.total_runtime)
comparison_row[MAX_TIME_CHANGE] = self.__calculate_change(
row[MAX_TIME], ref_row[MAX_TIME], ref_row[MAX_TIME])
comparison_row[PEAK_MEM_CHANGE] = self.__calculate_change(
row[PEAK_MEM], ref_row[PEAK_MEM], ref_row[PEAK_MEM])
self.rows.append(comparison_row)
else:
self.error_str = 'Execution summary structures are different'
def __str__(self):
"""Construct a PrettyTable containing the comparison"""
if self.error_str:
# If the summary comparison could not be constructed, output both summaries
output = self.error_str + '\n'
output += 'Execution Summary: \n'
output += str(self.combined_summary) + '\n'
output += 'Reference Execution Summary: \n'
output += str(self.ref_combined_summary)
return output
table = prettytable.PrettyTable(
["Operator",
"#Hosts",
"Avg Time",
"Std Dev",
"Avg Change",
"Tot Change",
"Max Time",
"Max Change",
"#Rows",
"Est #Rows"])
table.align = 'l'
def is_significant(row):
"""Check if the performance change in this row was significant"""
return options.output_all_summary_nodes or abs(row[AVG_TIME_CHANGE_TOTAL]) > 0.01
for row in self.rows:
if is_significant(row):
table_row = [row[OPERATOR],
prettyprint_values(row[NUM_HOSTS]),
prettyprint_time(row[AVG_TIME]),
prettyprint_time(row[STDDEV_TIME]),
prettyprint_percent(row[AVG_TIME_CHANGE]),
prettyprint_percent(row[AVG_TIME_CHANGE_TOTAL]),
prettyprint_time(row[MAX_TIME]),
prettyprint_percent(row[MAX_TIME_CHANGE]),
prettyprint_values(row[NUM_ROWS]),
prettyprint_values(row[EST_NUM_ROWS]) ]
table.add_row(table_row)
return str(table)
def __calculate_change(self, val, ref_val, compare_val):
"""Calculate how big the change in val compared to ref_val is compared to total"""
if ref_val == 0:
return 0
change = abs(val - ref_val) / compare_val
return change if val > ref_val else -change
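# Example (illustrative): if a node's avg time goes from 2.0s to 2.5s and the
# reference query's total runtime is 10.0s, then AVG_TIME_CHANGE = +25%
# (0.5 / 2.0) and AVG_TIME_CHANGE_TOTAL = +5% (0.5 / 10.0).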
def save_runtime_diffs(results, ref_results, change_significant, is_regression):
"""Given results and reference results, generate and output an HTML file
containing the Runtime Profile diff.
"""
diff = difflib.HtmlDiff(wrapcolumn=90, linejunk=difflib.IS_LINE_JUNK)
# We compare the last queries in each run because they should have the most
# stable performance (unlike the first queries).
runtime_profile = results[RESULT_LIST][-1][RUNTIME_PROFILE]
ref_runtime_profile = ref_results[RESULT_LIST][-1][RUNTIME_PROFILE]
template = ("{prefix}-{query_name}-{scale_factor}-{file_format}-{compression_codec}"
"-{compression_type}")
query = results[RESULT_LIST][-1][QUERY]
# Neutral - no improvement or regression
prefix = 'neu'
if change_significant:
prefix = 'reg' if is_regression else 'imp'
file_name = template.format(
prefix = prefix,
query_name = query[NAME],
scale_factor = query[SCALE_FACTOR],
file_format = query[TEST_VECTOR][FILE_FORMAT],
compression_codec = query[TEST_VECTOR][COMPRESSION_CODEC],
compression_type = query[TEST_VECTOR][COMPRESSION_TYPE])
# Go into results dir
dir_path = os.path.join(os.environ["IMPALA_HOME"], 'results')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
elif not os.path.isdir(dir_path):
raise RuntimeError("Unable to create $IMPALA_HOME/results, results file exists")
runtime_profile_file_name = file_name + "-runtime_profile.html"
runtime_profile_file_path = os.path.join(dir_path, runtime_profile_file_name)
runtime_profile_diff = diff.make_file(
ref_runtime_profile.splitlines(),
runtime_profile.splitlines(),
fromdesc = "Baseline Runtime Profile",
todesc = "Current Runtime Profile")
with open(runtime_profile_file_path, 'w+') as f:
f.write(runtime_profile_diff)
def build_exec_summary_str(results, ref_results):
exec_summaries = [result[EXEC_SUMMARY] for result in results[RESULT_LIST]]
ref_exec_summaries = [result[EXEC_SUMMARY] for result in ref_results[RESULT_LIST]]
if None in exec_summaries or None in ref_exec_summaries:
return 'Unable to construct exec summary comparison\n'
combined_summary = CombinedExecSummaries(exec_summaries)
ref_combined_summary = CombinedExecSummaries(ref_exec_summaries)
comparison = ExecSummaryComparison(combined_summary, ref_combined_summary)
return str(comparison) + '\n'
def build_perf_change_row(result, ref_result, is_regression):
"""Build a performance change table row"""
query = result[RESULT_LIST][0][QUERY]
query_name = query[NAME]
file_format = query[TEST_VECTOR][FILE_FORMAT]
compression_codec = query[TEST_VECTOR][COMPRESSION_CODEC]
compression_type = query[TEST_VECTOR][COMPRESSION_TYPE]
format_str = '{0}/{1}/{2}'.format(file_format, compression_codec, compression_type)
ref_avg = ref_result[AVG]
avg = result[AVG]
return [query_name, format_str, ref_avg, avg]
def compare_time_stats(grouped, ref_grouped):
"""Given two nested dictionaries generated by get_dict_from_json, after running
calculate_time_stats on both, compare the performance of the given run to a reference
run.
A string will be returned with instances where there is a significant performance
difference
"""
regression_table_data = list()
improvement_table_data = list()
full_comparison_str = str()
for workload_scale_key, workload in grouped.items():
for query_name, file_formats in workload.items():
for file_format, results in file_formats.items():
ref_results = ref_grouped[workload_scale_key][query_name][file_format]
change_significant, is_regression = check_perf_change_significance(
results, ref_results)
if change_significant:
full_comparison_str += build_perf_change_str(
results, ref_results, is_regression) + '\n'
full_comparison_str += build_exec_summary_str(results, ref_results) + '\n'
change_row = build_perf_change_row(results, ref_results, is_regression)
if is_regression:
regression_table_data.append(change_row)
else:
improvement_table_data.append(change_row)
try:
save_runtime_diffs(results, ref_results, change_significant, is_regression)
except Exception as e:
print 'Could not generate an html diff: %s' % e
return full_comparison_str, regression_table_data, improvement_table_data
def is_result_group_comparable(grouped, ref_grouped):
"""Given two nested dictionaries generated by get_dict_from_json, return true if they
can be compared. grouped can be compared to ref_grouped if ref_grouped contains all the
queries that are in grouped.
"""
if ref_grouped is None:
return False
for workload_scale_key, workload in grouped.items():
for query_name, file_formats in workload.items():
for file_format, results in file_formats.items():
if file_format not in ref_grouped[workload_scale_key][query_name]:
return False
return True
def check_perf_change_significance(stat, ref_stat):
absolute_difference = abs(ref_stat[AVG] - stat[AVG])
try:
percent_difference = abs(ref_stat[AVG] - stat[AVG]) * 100 / ref_stat[AVG]
except ZeroDivisionError:
percent_difference = 0.0
stddevs_are_zero = (ref_stat[STDDEV] == 0) and (stat[STDDEV] == 0)
if absolute_difference < options.allowed_latency_diff_secs:
return False, False
if percent_difference < options.min_percent_change_threshold:
return False, False
if percent_difference > options.max_percent_change_threshold:
return True, ref_stat[AVG] < stat[AVG]
if options.tval_threshold and not stddevs_are_zero:
tval = calculate_tval(stat[AVG], stat[STDDEV], stat[ITERATIONS],
ref_stat[AVG], ref_stat[STDDEV], ref_stat[ITERATIONS])
return abs(tval) > options.tval_threshold, tval > options.tval_threshold
return False, False
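# Illustrative decision flow with the default thresholds (5% min, 20% max,
# allowed_latency_diff_secs = 0): a query going from 10.0s to 13.0s (+30%) is
# reported as a significant regression; 10.0s to 10.3s (+3%) is not significant;
# an intermediate change such as +10% is only flagged when --tval_threshold is
# set and the t-test exceeds it.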
def build_summary_header():
summary = "Execution Summary ({0})\n".format(date.today())
if options.report_description:
summary += 'Run Description: {0}\n'.format(options.report_description)
if options.cluster_name:
summary += '\nCluster Name: {0}\n'.format(options.cluster_name)
if options.build_version:
summary += 'Impala Build Version: {0}\n'.format(options.build_version)
if options.lab_run_info:
summary += 'Lab Run Info: {0}\n'.format(options.lab_run_info)
return summary
def get_summary_str(grouped):
summary_str = str()
for workload_scale, workload in grouped.items():
summary_str += "{0} / {1} \n".format(workload_scale[0][1], workload_scale[1][1])
table = prettytable.PrettyTable(["File Format", "Compression", "Avg (s)"])
table.align = 'l'
table.float_format = '.2'
for file_format, queries in workload.items():
# Calculate the average time for each file format and compression
ff = file_format[0][1]
compression = file_format[1][1] + " / " + file_format[2][1]
avg = calculate_avg([query_results[TIME_TAKEN] for results in queries.values() for
query_results in results[RESULT_LIST]])
table.add_row([ff, compression, avg])
summary_str += str(table) + '\n'
return summary_str
def get_stats_str(grouped):
stats_str = str()
for workload_scale, workload in grouped.items():
stats_str += "Workload / Scale Factor: {0} / {1}\n".format(
workload_scale[0][1], workload_scale[1][1])
table = prettytable.PrettyTable(["Query", "File Format", "Compression", "Avg(s)",
"StdDev(s)", "Rel StdDev", "Num Clients", "Iters"])
table.align = 'l'
table.float_format = '.2'
for file_format, queries in workload.items():
for query_name, results in queries.items():
relative_stddev = results[STDDEV] / results[AVG] if results[AVG] > 0 else 0.0
relative_stddev_str = '{0:.2%}'.format(relative_stddev)
if relative_stddev > 0.1:
relative_stddev_str = '* ' + relative_stddev_str + ' *'
else:
relative_stddev_str = ' ' + relative_stddev_str
table.add_row([query_name[0][1],
file_format[0][1],
file_format[1][1] + ' / ' + file_format[2][1],
results[AVG],
results[STDDEV],
relative_stddev_str,
results[NUM_CLIENTS],
results[ITERATIONS]])
stats_str += str(table) + '\n'
return stats_str
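# Illustrative row (values assumed): a query with AVG = 2.0s and STDDEV = 0.3s
# has a relative stddev of 15.00%, above the 10% cut-off, so it is printed
# flagged as "* 15.00% *" in the table above.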
def all_query_results(grouped):
for workload_scale_key, workload in grouped.items():
for query_name, file_formats in workload.items():
for file_format, results in file_formats.items():
yield(results)
def write_results_to_datastore(grouped):
""" Saves results to a database """
from perf_result_datastore import PerfResultDataStore
print 'Saving perf results to database'
current_date = datetime.now()
data_store = PerfResultDataStore(host=options.db_host, username=options.db_username,
password=options.db_password, database_name=options.db_name)
run_info_id = data_store.insert_run_info(options.lab_run_info)
for results in all_query_results(grouped):
first_query_result = results[RESULT_LIST][0]
executor_name = first_query_result[EXECUTOR_NAME]
workload = first_query_result[QUERY][WORKLOAD_NAME]
scale_factor = first_query_result[QUERY][SCALE_FACTOR]
query_name = first_query_result[QUERY][NAME]
query = first_query_result[QUERY][QUERY_STR]
file_format = first_query_result[QUERY][TEST_VECTOR][FILE_FORMAT]
compression_codec = first_query_result[QUERY][TEST_VECTOR][COMPRESSION_CODEC]
compression_type = first_query_result[QUERY][TEST_VECTOR][COMPRESSION_TYPE]
avg_time = results[AVG]
stddev = results[STDDEV]
num_clients = results[NUM_CLIENTS]
num_iterations = results[ITERATIONS]
runtime_profile = first_query_result[RUNTIME_PROFILE]
file_type_id = data_store.get_file_format_id(
file_format, compression_codec, compression_type)
if file_type_id is None:
print 'Skipping unknown file type: %s / %s' % (file_format, compression_codec)
continue
workload_id = data_store.get_workload_id(workload, scale_factor)
if workload_id is None:
workload_id = data_store.insert_workload_info(workload, scale_factor)
query_id = data_store.get_query_id(query_name, query)
if query_id is None:
query_id = data_store.insert_query_info(query_name, query)
data_store.insert_execution_result(
query_id = query_id,
workload_id = workload_id,
file_type_id = file_type_id,
num_clients = num_clients,
cluster_name = options.cluster_name,
executor_name = executor_name,
avg_time = avg_time,
stddev = stddev,
run_date = current_date,
version = options.build_version,
notes = options.report_description,
run_info_id = run_info_id,
num_iterations = num_iterations,
runtime_profile = runtime_profile,
is_official = options.is_official)
def build_perf_summary_table(table_data):
table = prettytable.PrettyTable(
['Query',
'Format',
'Original Time (s)',
'Current Time (s)'])
table.align = 'l'
table.float_format = '.2'
for row in table_data:
table.add_row(row)
return str(table)
if __name__ == "__main__":
"""Workflow:
1. Build a nested dictionary for the current result JSON and reference result JSON.
2. Calculate runtime statistics for each query for both results and reference results.
5. Save performance statistics to the performance database.
3. Construct a string with a an overview of workload runtime and detailed performance
comparison for queries with significant performance change.
"""
# Generate a dictionary based on the JSON file
grouped = get_dict_from_json(options.result_file)
try:
# Generate a dictionary based on the reference JSON file
ref_grouped = get_dict_from_json(options.reference_result_file)
except Exception as e:
# If reference result file could not be read we can still continue. The result can
# be saved to the performance database.
print 'Could not read reference result file: %s' % e
ref_grouped = None
if options.save_to_db: write_results_to_datastore(grouped)
summary_str = get_summary_str(grouped)
stats_str = get_stats_str(grouped)
comparison_str = ("Comparison could not be generated because reference results do "
"not contain all queries\nin results (or reference results are "
"missing)")
regression_table_data = []
improvement_table_data = []
if is_result_group_comparable(grouped, ref_grouped):
comparison_str, regression_table_data, improvement_table_data = compare_time_stats(
grouped, ref_grouped)
regression_table_str = str()
improvement_table_str = str()
if len(regression_table_data) > 0:
regression_table_str += 'Performance Regressions:\n'
regression_table_str += build_perf_summary_table(regression_table_data) + '\n'
if len(improvement_table_data) > 0:
improvement_table_str += 'Performance Improvements:\n'
improvement_table_str += build_perf_summary_table(improvement_table_data) + '\n'
print build_summary_header()
print summary_str
print stats_str
print regression_table_str
print improvement_table_str
print comparison_str
|
gistic/PublicSpatialImpala
|
tests/benchmark/report-benchmark-results.py
|
Python
|
apache-2.0
| 35,471
| 0.013307
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.eager.python import network
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import core
# pylint: disable=not-callable
class MyNetwork(network.Network):
def __init__(self):
super(MyNetwork, self).__init__(name="abcd")
self.l1 = self.add_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.l1(x)
class NetworkTest(test.TestCase):
def testTrainableAttribute(self):
net = network.Network()
self.assertTrue(net.trainable)
with self.assertRaises(AttributeError):
net.trainable = False
self.assertTrue(net.trainable)
def testNetworkCall(self):
net = MyNetwork()
net(constant_op.constant([[2.0]])) # Force variables to be created.
self.assertEqual(1, len(net.trainable_variables))
net.trainable_variables[0].assign([[17.0]])
# TODO(josh11b): Support passing Python values to networks.
result = net(constant_op.constant([[2.0]]))
self.assertEqual(34.0, result.numpy())
def testNetworkAsAGraph(self):
self.skipTest("TODO(ashankar,josh11b): FIX THIS")
# Verify that we're using ResourceVariables
def testNetworkVariablesDoNotInterfere(self):
self.skipTest("TODO: FIX THIS")
net1 = MyNetwork()
net2 = MyNetwork()
one = constant_op.constant([[1.]])
print(type(net1(one)))
net2(one)
net1.trainable_weights[0].assign(constant_op.constant([[1.]]))
net2.trainable_weights[0].assign(constant_op.constant([[2.]]))
print("NET1")
print(net1.name)
print(net1.variables)
print(net1(one))
print("NET2")
print(net2.name)
print(net2.variables)
print(net2(one))
class SequentialTest(test.TestCase):
def testTwoLayers(self):
# Create a sequential network with one layer.
net = network.Sequential([core.Dense(1, use_bias=False)])
# Set that layer's weights so it multiplies by 3
l1 = net.get_layer(index=0)
net(constant_op.constant([[2.0]])) # Create l1's variables
self.assertEqual(1, len(l1.trainable_variables))
l1.trainable_variables[0].assign([[3.0]])
self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())
# Add a second layer to the network.
l2 = core.Dense(1, use_bias=False)
net.add_layer(l2)
# Set the second layer's weights so it multiplies by 11
net(constant_op.constant([[2.0]])) # Create l2's variables
self.assertEqual(1, len(l2.trainable_variables))
l2.trainable_variables[0].assign([[11.0]])
self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())
if __name__ == "__main__":
test.main()
|
mdrumond/tensorflow
|
tensorflow/contrib/eager/python/network_test.py
|
Python
|
apache-2.0
| 3,454
| 0.002606
|
from datetime import timedelta
from requests.auth import HTTPBasicAuth
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.core.cache import cache
from ..settings import (
WFRS_GATEWAY_COMPANY_ID,
WFRS_GATEWAY_ENTITY_ID,
WFRS_GATEWAY_API_HOST,
WFRS_GATEWAY_CONSUMER_KEY,
WFRS_GATEWAY_CONSUMER_SECRET,
WFRS_GATEWAY_CLIENT_CERT_PATH,
WFRS_GATEWAY_PRIV_KEY_PATH,
)
from ..security import encrypt_pickle, decrypt_pickle
import requests
import logging
import uuid
logger = logging.getLogger(__name__)
class BearerTokenAuth(requests.auth.AuthBase):
def __init__(self, api_key):
self.api_key = api_key
def __call__(self, request):
request.headers["Authorization"] = "Bearer %s" % self.api_key
return request
class WFRSAPIKey:
def __init__(self, api_key, expires_on):
self.api_key = api_key
self.expires_on = expires_on
@property
def is_expired(self):
# Force key rotation 10 minutes before it actually expires
expires_on = self.expires_on - timedelta(minutes=10)
now = timezone.now()
return now >= expires_on
@property
def ttl(self):
return int((self.expires_on - timezone.now()).total_seconds())
def __str__(self):
return "<WFRSAPIKey expires_on=[%s]>" % self.expires_on
class WFRSGatewayAPIClient:
company_id = WFRS_GATEWAY_COMPANY_ID
entity_id = WFRS_GATEWAY_ENTITY_ID
api_host = WFRS_GATEWAY_API_HOST
consumer_key = WFRS_GATEWAY_CONSUMER_KEY
consumer_secret = WFRS_GATEWAY_CONSUMER_SECRET
client_cert_path = WFRS_GATEWAY_CLIENT_CERT_PATH
priv_key_path = WFRS_GATEWAY_PRIV_KEY_PATH
scopes = [
"PLCCA-Prequalifications",
"PLCCA-Applications",
"PLCCA-Payment-Calculations",
"PLCCA-Transactions-Authorization",
"PLCCA-Transactions-Charge",
"PLCCA-Transactions-Authorization-Charge",
"PLCCA-Transactions-Return",
"PLCCA-Transactions-Cancel-Authorization",
"PLCCA-Transactions-Void-Return",
"PLCCA-Transactions-Void-Sale",
"PLCCA-Transactions-Timeout-Authorization-Charge",
"PLCCA-Transactions-Timeout-Return",
"PLCCA-Account-Details",
]
cache_version = 1
@property
def cache_key(self):
return "wfrs-gateway-api-key-{api_host}-{consumer_key}".format(
api_host=self.api_host, consumer_key=self.consumer_key
)
def api_get(self, path, **kwargs):
return self.make_api_request("get", path, **kwargs)
def api_post(self, path, **kwargs):
return self.make_api_request("post", path, **kwargs)
def make_api_request(self, method, path, client_request_id=None, **kwargs):
url = "https://{host}{path}".format(host=self.api_host, path=path)
# Setup authentication
auth = BearerTokenAuth(self.get_api_key().api_key)
cert = None
if self.client_cert_path and self.priv_key_path:
cert = (self.client_cert_path, self.priv_key_path)
# Build headers
request_id = (
str(uuid.uuid4()) if client_request_id is None else str(client_request_id)
)
headers = {
"request-id": request_id,
"gateway-company-id": self.company_id,
"gateway-entity-id": self.entity_id,
}
if client_request_id is not None:
headers["client-request-id"] = str(client_request_id)
# Send request
logger.info(
"Sending WFRS Gateway API request. URL=[%s], RequestID=[%s]",
url,
request_id,
)
request_fn = getattr(requests, method)
resp = request_fn(url, auth=auth, cert=cert, headers=headers, **kwargs)
logger.info(
"WFRS Gateway API request returned. URL=[%s], RequestID=[%s], Status=[%s]",
url,
request_id,
resp.status_code,
)
# Check response for errors
if resp.status_code == 400:
resp_data = resp.json()
errors = []
for err in resp_data.get("errors", []):
exc = ValidationError(err["description"], code=err["error_code"])
errors.append(exc)
raise ValidationError(errors)
# Return response
return resp
def get_api_key(self):
# Check for a cached key
key_obj = self.get_cached_api_key()
if key_obj is None:
key_obj = self.generate_api_key()
self.store_cached_api_key(key_obj)
return key_obj
def get_cached_api_key(self):
# Try to get an API key from cache
encrypted_obj = cache.get(self.cache_key, version=self.cache_version)
if encrypted_obj is None:
return None
# Try to decrypt the object we got from cache
try:
key_obj = decrypt_pickle(encrypted_obj)
except Exception as e:
logger.exception(e)
return None
# Check if the key is expired
if key_obj.is_expired:
return None
# Return the key
return key_obj
def store_cached_api_key(self, key_obj):
# Pickle and encrypt the key object
encrypted_obj = encrypt_pickle(key_obj)
# Store it in Django's cache for later
cache.set(
self.cache_key, encrypted_obj, key_obj.ttl, version=self.cache_version
)
def generate_api_key(self):
url = "https://{host}/token".format(host=self.api_host)
auth = HTTPBasicAuth(self.consumer_key, self.consumer_secret)
cert = (self.client_cert_path, self.priv_key_path)
req_data = {
"grant_type": "client_credentials",
"scope": " ".join(self.scopes),
}
resp = requests.post(url, auth=auth, cert=cert, data=req_data)
resp.raise_for_status()
resp_data = resp.json()
expires_on = timezone.now() + timedelta(seconds=resp_data["expires_in"])
logger.info("Generated new WFRS API Key. ExpiresIn=[%s]", expires_on)
key_obj = WFRSAPIKey(api_key=resp_data["access_token"], expires_on=expires_on)
return key_obj
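# Minimal usage sketch (illustrative; the path below is a placeholder, not a real
# Wells Fargo endpoint, and valid credentials/certificates must be configured in
# the settings imported above):
#   client = WFRSGatewayAPIClient()
#   resp = client.api_get("/example/path")
#   data = resp.json()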
|
thelabnyc/django-oscar-wfrs
|
src/wellsfargo/connector/client.py
|
Python
|
isc
| 6,228
| 0.000963
|
import pandas as pd
from pandas import DataFrame
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
#print df.head()
df['STD'] = pd.rolling_std(df['Close'], 25, min_periods=1)
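# Note: pd.rolling_std was removed in later pandas releases; on a modern pandas
# the equivalent of the line above would be (sketch, not part of the original):
#   df['STD'] = df['Close'].rolling(25, min_periods=1).std()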
ax1 = plt.subplot(2, 1, 1)
df['Close'].plot()
plt.ylabel('Close')
# do not do sharex first
ax2 = plt.subplot(2, 1, 2, sharex = ax1)
df['STD'].plot()
plt.ylabel('Standard Deviation')
plt.show()
|
PythonProgramming/Pandas-Basics-with-2.7
|
pandas 8 - Standard Deviation.py
|
Python
|
mit
| 486
| 0.010288
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingPlugin.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing import interface
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import shutil
import inspect
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from processing.commander.CommanderWindow import CommanderWindow
from processing.core.Processing import Processing
from processing.tools import dataobjects
from processing.tools.system import *
from processing.gui.ProcessingToolbox import ProcessingToolbox
from processing.gui.HistoryDialog import HistoryDialog
from processing.gui.ConfigDialog import ConfigDialog
from processing.gui.ResultsDialog import ResultsDialog
from processing.modeler.ModelerDialog import ModelerDialog
import processing.resources_rc
cmd_folder = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
class ProcessingPlugin:
def __init__(self, iface):
interface.iface = iface
Processing.initialize()
def initGui(self):
self.commander = None
self.toolbox = ProcessingToolbox()
interface.iface.addDockWidget(Qt.RightDockWidgetArea, self.toolbox)
self.toolbox.hide()
Processing.addAlgListListener(self.toolbox)
self.menu = QMenu(interface.iface.mainWindow())
self.menu.setTitle(QCoreApplication.translate("Processing", "Processing"))
self.toolboxAction = self.toolbox.toggleViewAction()
self.toolboxAction.setIcon(QIcon(":/processing/images/alg.png"))
self.toolboxAction.setText(QCoreApplication.translate("Processing", "Toolbox"))
self.menu.addAction(self.toolboxAction)
self.modelerAction = QAction(QIcon(":/processing/images/model.png"),
QCoreApplication.translate("Processing", "Graphical modeler"),
interface.iface.mainWindow())
self.modelerAction.triggered.connect(self.openModeler)
self.menu.addAction(self.modelerAction)
self.historyAction = QAction(QIcon(":/processing/images/history.gif"),
QCoreApplication.translate("Processing", "History and log"),
interface.iface.mainWindow())
self.historyAction.triggered.connect(self.openHistory)
self.menu.addAction(self.historyAction)
self.configAction = QAction(QIcon(":/processing/images/config.png"),
QCoreApplication.translate("Processing", "Options and configuration"),
interface.iface.mainWindow())
self.configAction.triggered.connect(self.openConfig)
self.menu.addAction(self.configAction)
self.resultsAction = QAction(QIcon(":/processing/images/results.png"),
QCoreApplication.translate("Processing", "&Results viewer"),
interface.iface.mainWindow())
self.resultsAction.triggered.connect(self.openResults)
self.menu.addAction(self.resultsAction)
menuBar = interface.iface.mainWindow().menuBar()
menuBar.insertMenu(interface.iface.firstRightStandardMenu().menuAction(), self.menu)
self.commanderAction = QAction(QIcon(":/processing/images/commander.png"),
QCoreApplication.translate("Processing", "&Commander"),
interface.iface.mainWindow())
self.commanderAction.triggered.connect(self.openCommander)
self.menu.addAction(self.commanderAction)
interface.iface.registerMainWindowAction(self.commanderAction, "Ctrl+Alt+M")
def unload(self):
self.toolbox.setVisible(False)
self.menu.deleteLater()
#delete temporary output files
folder = tempFolder()
if QDir(folder).exists():
shutil.rmtree(folder, True)
interface.iface.unregisterMainWindowAction(self.commanderAction)
def openCommander(self):
if self.commander is None:
self.commander = CommanderWindow(interface.iface.mainWindow(), interface.iface.mapCanvas())
Processing.addAlgListListener(self.commander)
self.commander.prepareGui()
self.commander.show()
#dlg.exec_()
def openToolbox(self):
if self.toolbox.isVisible():
self.toolbox.hide()
else:
self.toolbox.show()
def openModeler(self):
dlg = ModelerDialog()
dlg.exec_()
if dlg.update:
self.toolbox.updateTree()
def openResults(self):
dlg = ResultsDialog()
dlg.exec_()
def openHistory(self):
dlg = HistoryDialog()
dlg.exec_()
def openConfig(self):
dlg = ConfigDialog(self.toolbox)
dlg.exec_()
|
camptocamp/QGIS
|
python/plugins/processing/ProcessingPlugin.py
|
Python
|
gpl-2.0
| 5,606
| 0.004281
|
from django.contrib import admin
import models
# Register your models here.
admin.site.register(models.UserProfile)
admin.site.register(models.Event)
|
zhaogaolong/oneFinger
|
alarm/admin.py
|
Python
|
apache-2.0
| 150
| 0
|
#!/usr/bin/python
import sys
import pyxbackup as pxb
import pytest
def test__parse_port_param():
assert(pxb._parse_port_param('27017,27019')) == True
assert(pxb.xb_opt_remote_nc_port_min) == 27017
assert(pxb.xb_opt_remote_nc_port_max) == 27019
assert(pxb._parse_port_param('27017, 27019')) == True
assert(pxb._parse_port_param('abcde, 27019')) == False
assert(pxb._parse_port_param('abcde, ')) == False
assert(pxb._parse_port_param('9999, ')) == False
assert(pxb._parse_port_param('9999 ')) == False
assert(pxb._parse_port_param('9999')) == True
assert(pxb.xb_opt_remote_nc_port_min) == 9999
assert(pxb.xb_opt_remote_nc_port_max) == 9999
def test__xb_version():
assert(pxb._xb_version(verstr = '2.2.13')) == [2, 2, 13]
assert(pxb._xb_version(verstr = '2.2.13', tof = True)) == 2.2
|
dotmanila/pyxbackup
|
tests/all_test.py
|
Python
|
gpl-2.0
| 835
| 0.019162
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import pkg_resources
from pifpaf import drivers
class CephDriver(drivers.Driver):
DEFAULT_PORT = 6790
def __init__(self, port=DEFAULT_PORT,
**kwargs):
"""Create a new Ceph cluster."""
super(CephDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for Ceph Monitor"},
]
def _setUp(self):
super(CephDriver, self)._setUp()
self._ensure_xattr_support()
fsid = str(uuid.uuid4())
conffile = os.path.join(self.tempdir, "ceph.conf")
mondir = os.path.join(self.tempdir, "mon", "ceph-a")
osddir = os.path.join(self.tempdir, "osd", "ceph-0")
os.makedirs(mondir)
os.makedirs(osddir)
_, version = self._exec(["ceph", "--version"], stdout=True)
version = version.decode("ascii").split()[2]
version = pkg_resources.parse_version(version)
if version < pkg_resources.parse_version("12.0.0"):
extra = """
mon_osd_nearfull_ratio = 1
mon_osd_full_ratio = 1
osd_failsafe_nearfull_ratio = 1
osd_failsafe_full_ratio = 1
"""
else:
extra = """
mon_allow_pool_delete = true
"""
# FIXME(sileht): check available space on /dev/shm
# if os.path.exists("/dev/shm") and os.access('/dev/shm', os.W_OK):
# journal_path = "/dev/shm/$cluster-$id-journal"
# else:
journal_path = "%s/osd/$cluster-$id/journal" % self.tempdir
with open(conffile, "w") as f:
f.write("""[global]
fsid = %(fsid)s
# no auth for now
auth cluster required = none
auth service required = none
auth client required = none
## no replica
osd pool default size = 1
osd pool default min size = 1
osd crush chooseleaf type = 0
## some default path change
run dir = %(tempdir)s
pid file = %(tempdir)s/$type.$id.pid
admin socket = %(tempdir)s/$cluster-$name.asok
mon data = %(tempdir)s/mon/$cluster-$id
osd data = %(tempdir)s/osd/$cluster-$id
osd journal = %(journal_path)s
log file = %(tempdir)s/$cluster-$name.log
mon cluster log file = %(tempdir)s/$cluster.log
# Only omap to have same behavior for all filesystems
filestore xattr use omap = True
# workaround for ext4 and last Jewel version
osd max object name len = 256
osd max object namespace len = 64
osd op threads = 10
filestore max sync interval = 10001
filestore min sync interval = 10000
%(extra)s
journal_aio = false
journal_dio = false
journal zero on create = false
journal block align = false
# run as file owner
setuser match path = %(tempdir)s/$type/$cluster-$id
[mon.a]
host = localhost
mon addr = 127.0.0.1:%(port)d
""" % dict(fsid=fsid, tempdir=self.tempdir, port=self.port,
journal_path=journal_path, extra=extra)) # noqa
ceph_opts = ["ceph", "-c", conffile]
mon_opts = ["ceph-mon", "-c", conffile, "--id", "a", "-d"]
osd_opts = ["ceph-osd", "-c", conffile, "--id", "0", "-d",
"-m", "127.0.0.1:%d" % self.port]
# Create and start monitor
self._exec(mon_opts + ["--mkfs"])
self._touch(os.path.join(mondir, "done"))
mon, _ = self._exec(
mon_opts,
wait_for_line=r"mon.a@0\(leader\).mds e1 print_map")
# Create and start OSD
self._exec(ceph_opts + ["osd", "create"])
self._exec(ceph_opts + ["osd", "crush", "add", "osd.0", "1",
"root=default"])
self._exec(osd_opts + ["--mkfs", "--mkjournal"])
if version < pkg_resources.parse_version("0.94.0"):
wait_for_line = "journal close"
else:
wait_for_line = "done with init"
osd, _ = self._exec(osd_opts, wait_for_line=wait_for_line)
if version >= pkg_resources.parse_version("12.0.0"):
self._exec(ceph_opts + ["osd", "set-full-ratio", "0.95"])
self._exec(ceph_opts + ["osd", "set-backfillfull-ratio", "0.95"])
self._exec(ceph_opts + ["osd", "set-nearfull-ratio", "0.95"])
# Wait it's ready
out = b""
while b"HEALTH_OK" not in out:
ceph, out = self._exec(ceph_opts + ["health"], stdout=True)
if b"HEALTH_ERR" in out:
raise RuntimeError("Fail to deploy ceph")
self.putenv("CEPH_CONF", conffile, True)
self.putenv("CEPH_CONF", conffile)
self.putenv("URL", "ceph://localhost:%d" % self.port)
|
sileht/pifpaf
|
pifpaf/drivers/ceph.py
|
Python
|
apache-2.0
| 5,134
| 0
|
#!/usr/bin/env python3
# ReText
# Copyright 2011-2012 Dmitry Shachnev
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import sys
import signal
from ReText import *
from ReText.window import ReTextWindow
def main():
app = QApplication(sys.argv)
app.setOrganizationName("ReText project")
app.setApplicationName("ReText")
RtTranslator = QTranslator()
for path in datadirs:
if RtTranslator.load('retext_'+QLocale.system().name(), path+'/locale'):
break
QtTranslator = QTranslator()
QtTranslator.load("qt_"+QLocale.system().name(), QLibraryInfo.location(QLibraryInfo.TranslationsPath))
app.installTranslator(RtTranslator)
app.installTranslator(QtTranslator)
if settings.contains('appStyleSheet'):
stylename = readFromSettings('appStyleSheet', str)
sheetfile = QFile(stylename)
sheetfile.open(QIODevice.ReadOnly)
app.setStyleSheet(QTextStream(sheetfile).readAll())
sheetfile.close()
window = ReTextWindow()
window.show()
fileNames = [QFileInfo(arg).canonicalFilePath() for arg in sys.argv[1:]]
for fileName in fileNames:
try:
fileName = QString.fromUtf8(fileName)
except:
# Not needed for Python 3
pass
if QFile.exists(fileName):
window.openFileWrapper(fileName)
signal.signal(signal.SIGINT, lambda sig, frame: window.close())
sys.exit(app.exec_())
if __name__ == '__main__':
main()
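# Standalone sketch of the Ctrl+C handling pattern used in main() above,
# assuming PyQt5 purely for the example (the script itself relies on the Qt
# bindings re-exported by the ReText package). A Python-level signal handler
# only runs when the interpreter regains control, so a periodic QTimer is a
# common companion to this pattern; this function is illustrative and unused.
def _sigint_demo():
    import signal
    import sys
    from PyQt5.QtCore import QTimer
    from PyQt5.QtWidgets import QApplication, QMainWindow
    app = QApplication(sys.argv)
    window = QMainWindow()
    window.show()
    # Close the window cleanly on Ctrl+C instead of aborting the event loop.
    signal.signal(signal.SIGINT, lambda sig, frame: window.close())
    # Wake the interpreter regularly so the Python handler gets a chance to run.
    timer = QTimer()
    timer.timeout.connect(lambda: None)
    timer.start(200)
    sys.exit(app.exec_())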
|
codemedic/retext
|
retext.py
|
Python
|
gpl-3.0
| 1,988
| 0.017606
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.build.construct Contains functions to construct ski files from model definitions.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.log import log
from ...core.filter.filter import parse_filter
from ...core.tools.introspection import skirt_main_version, has_skirt
from ...core.tools.stringify import tostr
from ...core.filter.filter import Filter
from ...core.tools import types
# -----------------------------------------------------------------
# Check SKIRT version
if not has_skirt(): version_number = 8
else: version_number = skirt_main_version()
# Set flags
if version_number == 7:
skirt7 = True
skirt8 = False
elif version_number == 8:
skirt7 = False
skirt8 = True
else: raise RuntimeError("Invalid SKIRT version number")
# -----------------------------------------------------------------
def add_stellar_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adding stellar component '" + name + "' to the ski file ...")
# THIS HAS TO COME FIRST!!
# If an input map is required
if "map_path" in component: filename = set_stellar_input_map(name, component)
else: filename = None
# NEW COMPONENT OR ADJUST EXISTING
if title is not None and not ski.has_stellar_component(title): add_new_stellar_component(ski, name, component, title=title)
else: adjust_stellar_component(ski, name, component, title=title)
# Return the input filename
return filename
# -----------------------------------------------------------------
def add_new_stellar_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adding new stellar component '" + name + "' to the ski file ...")
# From properties
if component.properties is not None:
# Check title
if title is None: log.warning("Title for the component '" + name + "' is not given")
# Add component
ski.add_stellar_component(component.properties, title=title)
return
# Initialize properties
geometry = None
geometry_type = None
geometry_properties = None
sed_type = None
sed_properties = None
normalization_type = None
normalization_properties = None
luminosities = [1]
sed_template = None
age = None
metallicity = None
compactness = None
pressure = None
covering_factor = None
luminosity = None
filter_or_wavelength = None
# Set properties of the component
if "model" in component: geometry = component.model
elif "deprojection" in component: geometry = component.deprojection
# Parameters are defined
if component.parameters is not None:
# Check if this is a new component (geometry not defined above): add geometry, SED and normalization all at once
if "geometry" in component.parameters:
# Get class names
geometry_type = component.parameters.geometry
sed_type = component.parameters.sed
normalization_type = component.parameters.normalization
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
sed_properties = component.properties["sed"]
normalization_properties = component.properties["normalization"]
# Component with MAPPINGS template (geometry defined above)
elif "sfr" in component.parameters: #set_stellar_component_mappings(ski, component)
# Set template for MAPPINGS
sed_template = "Mappings"
# Get SED properties
metallicity = component.parameters.metallicity
compactness = component.parameters.compactness
pressure = component.parameters.pressure
covering_factor = component.parameters.covering_factor
# Get normalization
fltr = parse_filter(component.parameters.filter)
luminosity = component.parameters.luminosity
# Determine the normalization wavelength
filter_or_wavelength = fltr.center
# Existing component, no MAPPINGS
else: # set_stellar_component(ski, component)
# Get SED properties
sed_template = component.parameters.template
age = component.parameters.age
metallicity = component.parameters.metallicity
# Get normalization
luminosity = component.parameters.luminosity
# Determine the normalization wavelength
if "wavelength" in component.parameters: wavelength = component.parameters.wavelength
elif "filter" in component.parameters:
fltr = parse_filter(component.parameters.filter)
wavelength = fltr.wavelength
else: raise ValueError("Neither wavelength nor filter is defined in the component parameters")
# Set the normalization to the wavelength
filter_or_wavelength = wavelength
# Check whether title is defined
if title is None: log.warning("Title for the component '" + name + "' is not defined")
# Set normalization type
if normalization_type is None:
if filter_or_wavelength is None: raise ValueError("Cannot determine normalization type")
if isinstance(filter_or_wavelength, Filter): normalization_type = "LuminosityStellarCompNormalization"
elif types.is_length_quantity(filter_or_wavelength): normalization_type = "SpectralLuminosityStellarCompNormalization"
else: normalization_type = "BolLuminosityStellarCompNormalization" #raise ValueError("Unrecognized filter of wavelength of type '" + str(type(filter_or_wavelength)))
# Set stellar component properties
properties = dict()
properties["geometry"] = geometry
properties["geometry_type"] = geometry_type
properties["geometry_properties"] = geometry_properties
properties["sed_type"] = sed_type
properties["sed_properties"] = sed_properties
properties["normalization_type"] = normalization_type
properties["normalization_properties"] = normalization_properties
properties["luminosities"] = luminosities
properties["sed_template"] = sed_template
properties["age"] = age
properties["metallicity"] = metallicity
properties["compactness"] = compactness
properties["pressure"] = pressure
properties["covering_factor"] = covering_factor
properties["luminosity"] = luminosity
properties["filter_or_wavelength"] = filter_or_wavelength
# Show properties
log.debug("")
log.debug("Stellar component properties:")
log.debug("")
for label in properties:
if label == "geometry":
log.debug(" - geometry:")
for parameter in properties[label]:
value = properties[label][parameter]
if value is None: continue
log.debug(" * " + parameter + ": " + tostr(value))
else:
value = properties[label]
if value is None: continue
log.debug(" - " + label + ": " + tostr(value))
log.debug("")
# Create new component
ski.create_new_stellar_component(title, **properties)
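# Compact, standalone restatement of the normalization-type decision above
# (the real checks use isinstance(..., Filter) and types.is_length_quantity;
# the boolean flags here are purely illustrative and this helper is unused):
def _normalization_type_sketch(is_filter, is_wavelength):
    if is_filter: return "LuminosityStellarCompNormalization"                # luminosity in a band
    elif is_wavelength: return "SpectralLuminosityStellarCompNormalization"  # luminosity at a wavelength
    else: return "BolLuminosityStellarCompNormalization"                     # bolometric luminosity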
# -----------------------------------------------------------------
def adjust_stellar_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adjusting existing stellar component '" + name + "' in the ski file ...")
# Set geometry
if "model" in component: set_stellar_component_model(ski, component)
# Set deprojection
elif "deprojection" in component: set_stellar_component_deprojection(ski, component)
# From parameters
if component.parameters is not None:
# Check if this is a new component, add geometry, SED and normalization all at once
if "geometry" in component.parameters: set_stellar_component_geometry_sed_and_normalization(ski, component)
# Existing component, with MAPPINGS template
elif "sfr" in component.parameters: set_stellar_component_mappings(ski, component)
# Existing component, no MAPPINGS
else: set_stellar_component(ski, component)
# From properties
if component.properties is not None:
# Check if title is given
if title is None: log.warning("Title for the component '" + name + "' is not specified")
# Add component
ski.add_stellar_component(component.properties, title=title)
# -----------------------------------------------------------------
def set_stellar_input_map(name, component):
"""
This function ...
:param name:
:param component:
:return:
"""
# Generate a filename for the map
map_filename = "stars_" + name + ".fits"
# Set the filename
if "deprojection" in component: component.deprojection.filename = map_filename
#elif "geometry" in component.parameters: component.properties["geometry"].filename = filename # HOW IT WAS
elif component.parameters is not None and "geometry" in component.parameters: component.parameters["geometry"].filename = map_filename
elif component.properties is not None: component.properties["children"]["geometry"]["children"]["ReadFitsGeometry"]["filename"] = map_filename
else: raise RuntimeError("Stellar component based on an input map should either have a deprojection or geometry properties")
# Add entry to the input maps dictionary
#self.input_map_paths[filename] = component.map_path
return map_filename
# -----------------------------------------------------------------
def set_stellar_component_model(ski, component):
"""
This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Set the geometry
ski.set_stellar_component_geometry(title, component.model)
# -----------------------------------------------------------------
def set_stellar_component_deprojection(ski, component):
"""
    This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Set the deprojection geometry
ski.set_stellar_component_geometry(title, component.deprojection)
# -----------------------------------------------------------------
def set_stellar_component_geometry_sed_and_normalization(ski, component):
"""
This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Get class names
geometry_type = component.parameters.geometry
sed_type = component.parameters.sed
normalization_type = component.parameters.normalization
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
sed_properties = component.properties["sed"]
normalization_properties = component.properties["normalization"]
# Create stellar component
ski.create_new_stellar_component(title, geometry_type, geometry_properties, sed_type, sed_properties,
normalization_type, normalization_properties)
# -----------------------------------------------------------------
def set_stellar_component_mappings(ski, component):
"""
    This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Get SED properties
metallicity = component.parameters.metallicity
compactness = component.parameters.compactness
pressure = component.parameters.pressure
covering_factor = component.parameters.covering_factor
# Get normalization
fltr = parse_filter(component.parameters.filter)
luminosity = component.parameters.luminosity
# Determine the normalization wavelength
wavelength = fltr.center
# Set SED
ski.set_stellar_component_mappingssed(title, metallicity, compactness, pressure, covering_factor) # SED
# Set center wavelength of the filter as normalization wavelength (keeps label)
ski.set_stellar_component_normalization_wavelength(title, wavelength)
# Set spectral luminosity at that wavelength (keeps label)
ski.set_stellar_component_luminosity(title, luminosity, filter_or_wavelength=wavelength)
# Scale height doesn't need to be set as parameter, this is already in the deprojection model
# -----------------------------------------------------------------
def set_stellar_component(ski, component):
"""
This function ...
    :param ski:
    :param component:
    :return:
"""
# Get title
title = component.parameters.title
# Get SED properties
template = component.parameters.template
age = component.parameters.age
metallicity = component.parameters.metallicity
# Get normalization
fltr = parse_filter(component.parameters.filter)
luminosity = component.parameters.luminosity
# Determine the normalization wavelength
wavelength = fltr.center
# Set SED
ski.set_stellar_component_sed(title, template, age, metallicity)
# Set center wavelength of the filter as normalization wavelength (keeps label)
ski.set_stellar_component_normalization_wavelength(title, wavelength)
# Set spectral luminosity at that wavelength (keeps label)
ski.set_stellar_component_luminosity(title, luminosity, filter_or_wavelength=wavelength)
# Scale height doesn't need to be set as parameter, this is already in the deprojection model
# -----------------------------------------------------------------
def add_dust_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adding dust component '" + name + "' to the ski file ...")
# THIS HAS TO COME FIRST!!
# If an input map is required
if "map_path" in component: filename = set_dust_input_map(name, component)
else: filename = None
# NEW COMPONENT OR ADJUST EXISTING
if title is not None and not ski.has_dust_component(title): add_new_dust_component(ski, name, component, title=title)
else: adjust_dust_component(ski, name, component, title=title)
# Return the map filename
return filename
# -----------------------------------------------------------------
def add_new_dust_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adding new dust component '" + name + "' to the ski file ...")
# From properties
if component.properties is not None:
# Check if title is given
if title is None: log.warning("Title of the component '" + name + "' is not given")
# Add component
ski.add_dust_component(component.properties, title=title)
return
# Initialize properties
geometry = None
geometry_type = None
geometry_properties = None
mix_type = None
mix_properties = None
normalization_type = None
normalization_properties = None
mix = None
mass = None
# For THEMIS mix
hydrocarbon_pops = None
silicate_pops = None
# For Zubko mix
graphite_populations = None
silicate_populations = None
pah_populations = None
# Set properties of the component
if "model" in component: geometry = component.model
elif "deprojection" in component: geometry = component.deprojection
# Parameters are defined
if component.parameters is not None:
# Check title
if title is not None and component.parameters.title != title: raise ValueError("The title of the component '" + title + "' doesn't match that defined in the component parameters")
# Check if this is a new component (geometry not defined above): add geometry, mix and normalization
if "geometry" in component.parameters:
# Get class names
geometry_type = component.parameters.geometry
mix_type = component.parameters.sed
normalization_type = component.parameters.normalization
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
mix_properties = component.properties["mix"]
normalization_properties = component.properties["normalization"]
# Existing component (geometry defined above), THEMIS dust mix
elif "hydrocarbon_pops" in component.parameters: #set_dust_component_themis_mix(ski, component)
# Set mix name
mix = "themis"
# Get parameters
mass = component.parameters.mass
hydrocarbon_pops = component.parameters.hydrocarbon_pops
silicate_pops = component.parameters.silicate_pops
# Existing component (geometry defined above), Zubko dust mix
elif "graphite_populations" in component.parameters:
# Set mix name
mix = "zubko"
# Get parameters
mass = component.parameters.mass
graphite_populations = component.parameters.graphite_populations
silicate_populations = component.parameters.silicate_populations
pah_populations = component.parameters.pah_populations
# Existing component, not THEMIS dust mix
else: raise NotImplementedError("Only THEMIS dust mixes are implemented at this moment")
# Check whether the title is defined
if title is None: log.warning("The title for the '" + name + "' dust component is not specified")
# Set dust component properties
properties = dict()
properties["geometry"] = geometry
properties["geometry_type"] = geometry_type
properties["geometry_properties"] = geometry_properties
properties["mix_type"] = mix_type
properties["mix_properties"] = mix_properties
properties["normalization_type"] = normalization_type
properties["normalization_properties"] = normalization_properties
properties["mix"] = mix
properties["mass"] = mass
properties["hydrocarbon_pops"] = hydrocarbon_pops
properties["silicate_pops"] = silicate_pops
properties["graphite_populations"] = graphite_populations
properties["silicate_populations"] = silicate_populations
properties["pah_populations"] = pah_populations
# Show properties
log.debug("")
log.debug("Dust component properties:")
log.debug("")
for label in properties:
if label == "geometry":
log.debug(" - geometry:")
for parameter in properties[label]:
value = properties[label][parameter]
if value is None: continue
log.debug(" * " + parameter + ": " + tostr(value))
else:
value = properties[label]
if value is None: continue
log.debug(" - " + label + ": " + tostr(properties[label]))
log.debug("")
# Add the new component
ski.create_new_dust_component(title, **properties)
# -----------------------------------------------------------------
def adjust_dust_component(ski, name, component, title=None):
"""
    This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adjusting existing dust component '" + name + "' in the ski file ...")
# Set geometry
if "model" in component: set_dust_component_model(ski, component)
# Set deprojection
elif "deprojection" in component: set_dust_component_deprojection(ski, component)
# From parameters
if component.parameters is not None:
# Check title
if title is not None and component.parameters.title != title: raise ValueError("The title of the component '" + title + "' doesn't match that defined in the component parameters")
# Check if this is a new dust component, add geometry, mix and normalization all at once
if "geometry" in component.parameters: set_dust_component_geometry_mix_and_normalization(ski, component)
# Existing component, THEMIS dust mix
elif "hydrocarbon_pops" in component.parameters: set_dust_component_themis_mix(ski, component)
# Existing component, not THEMIS dust mix
else: raise NotImplementedError("Only THEMIS dust mixes are implemented at this moment")
# TODO: implement 'Existing component, no THEMIX'
#else: set_dust_component(ski, component)
# From properties
if component.properties is not None:
# From unicode
#if isinstance(name, unicode): name = name.encode('ascii','ignore')
#print(name)
# Create element
#element = ski.create_element(element_name, component.properties)
#print(element)
# Check if title is given
if title is None: log.warning("Title for the component '" + name + "' is not given")
ski.add_dust_component(component.properties, title=title)
# -----------------------------------------------------------------
def set_dust_input_map(name, component):
"""
This function ...
:param name:
:param component:
:return:
"""
# Generate a filename for the map
map_filename = "dust_" + name + ".fits"
# Set the filename
if "deprojection" in component: component.deprojection.filename = map_filename
#elif "geometry" in component.parameters: component.properties["geometry"].filename = map_filename # HOW IT WAS
elif component.parameters is not None and "geometry" in component.parameters: component.parameters["geometry"].filename = map_filename
elif component.properties is not None: component.properties["children"]["geometry"]["children"]["ReadFitsGeometry"]["filename"] = map_filename
else: raise RuntimeError("Dust component based on an input map should either have a deprojection or geometry properties")
# Add entry to the input maps dictionary
#self.input_map_paths[filename] = component.map_path
return map_filename
# -----------------------------------------------------------------
def set_dust_component_model(ski, component):
"""
This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Set the geometry
ski.set_dust_component_geometry(title, component.model)
# -----------------------------------------------------------------
def set_dust_component_deprojection(ski, component):
"""
This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Set the deprojection geometry
ski.set_dust_component_geometry(title, component.deprojection)
# -----------------------------------------------------------------
def set_dust_component_geometry_mix_and_normalization(ski, component):
"""
This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Get class names
geometry_type = component.parameters.geometry
mix_type = component.parameters.sed
normalization_type = component.parameters.normalization
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
mix_properties = component.properties["mix"]
normalization_properties = component.properties["normalization"]
# Create stellar component
ski.create_new_dust_component(title, geometry_type, geometry_properties, mix_type, mix_properties, normalization_type, normalization_properties)
# -----------------------------------------------------------------
def set_dust_component_themis_mix(ski, component):
"""
This function ...
:param ski:
:param component:
:return:
"""
# Get title
title = component.parameters.title
# Get parameters
mass = component.parameters.mass
hydrocarbon_pops = component.parameters.hydrocarbon_pops
silicate_pops = component.parameters.silicate_pops
# Set the dust mix
ski.set_dust_component_themis_mix(title, hydrocarbon_pops, silicate_pops) # dust mix
# Set the dust mass (keeps label)
ski.set_dust_component_mass(title, mass)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/build/construct.py
|
Python
|
agpl-3.0
| 25,266
| 0.00661
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from modules.oauthlib import common
from modules.oauthlib.uri_validate import is_absolute_uri
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class AuthorizationCodeGrant(GrantTypeBase):
"""`Authorization Code Grant`_
The authorization code grant type is used to obtain both access
tokens and refresh tokens and is optimized for confidential clients.
Since this is a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI ---->| |
| User- | | Authorization |
| Agent -+----(B)-- User authenticates --->| Server |
| | | |
| -+----(C)-- Authorization Code ---<| |
+-|----|---+ +---------------+
| | ^ v
(A) (C) | |
| | | |
^ v | |
+---------+ | |
| |>---(D)-- Authorization Code ---------' |
| Client | & Redirection URI |
| | |
| |<---(E)----- Access Token -------------------'
+---------+ (w/ Optional Refresh Token)
Note: The lines illustrating steps (A), (B), and (C) are broken into
two parts as they pass through the user-agent.
Figure 3: Authorization Code Flow
The flow illustrated in Figure 3 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier (in the request or during
client registration). The redirection URI includes an
authorization code and any local state provided by the client
earlier.
(D) The client requests an access token from the authorization
server's token endpoint by including the authorization code
received in the previous step. When making the request, the
client authenticates with the authorization server. The client
includes the redirection URI used to obtain the authorization
code for verification.
(E) The authorization server authenticates the client, validates the
authorization code, and ensures that the redirection URI
received matches the URI used to redirect the client in
step (C). If valid, the authorization server responds back with
an access token and, optionally, a refresh token.
.. _`Authorization Code Grant`: http://tools.ietf.org/html/rfc6749#section-4.1
"""
def __init__(self, request_validator=None):
self.request_validator = request_validator or RequestValidator()
def create_authorization_code(self, request):
"""Generates an authorization grant represented as a dictionary."""
grant = {'code': common.generate_token()}
if hasattr(request, 'state') and request.state:
grant['state'] = request.state
log.debug('Created authorization code grant %r for request %r.',
grant, request)
return grant
def create_authorization_response(self, request, token_handler):
"""
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "code".
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The client directs the resource owner to the constructed URI using an
HTTP redirection response, or by other means available to it via the
user-agent.
        :param request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
:returns: headers, body, status
:raises: FatalClientError on invalid redirect URI or client id.
ValueError if scopes are not set on the request object.
A few examples::
>>> from your_validator import your_validator
>>> request = Request('https://example.com/authorize?client_id=valid'
... '&redirect_uri=http%3A%2F%2Fclient.com%2F')
>>> from oauthlib.common import Request
>>> from oauthlib.oauth2 import AuthorizationCodeGrant, BearerToken
>>> token = BearerToken(your_validator)
>>> grant = AuthorizationCodeGrant(your_validator)
>>> grant.create_authorization_response(request, token)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/grant_types.py", line 513, in create_authorization_response
raise ValueError('Scopes must be set on post auth.')
ValueError: Scopes must be set on post auth.
>>> request.scopes = ['authorized', 'in', 'some', 'form']
>>> grant.create_authorization_response(request, token)
(u'http://client.com/?error=invalid_request&error_description=Missing+response_type+parameter.', None, None, 400)
>>> request = Request('https://example.com/authorize?client_id=valid'
... '&redirect_uri=http%3A%2F%2Fclient.com%2F'
... '&response_type=code')
>>> request.scopes = ['authorized', 'in', 'some', 'form']
>>> grant.create_authorization_response(request, token)
(u'http://client.com/?code=u3F05aEObJuP2k7DordviIgW5wl52N', None, None, 200)
>>> # If the client id or redirect uri fails validation
>>> grant.create_authorization_response(request, token)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/grant_types.py", line 515, in create_authorization_response
File "oauthlib/oauth2/rfc6749/grant_types.py", line 591, in validate_authorization_request
oauthlib.oauth2.rfc6749.errors.InvalidClientIdError
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
"""
try:
# request.scopes is only mandated in post auth and both pre and
# post auth use validate_authorization_request
if not request.scopes:
raise ValueError('Scopes must be set on post auth.')
self.validate_authorization_request(request)
log.debug('Pre resource owner authorization validation ok for %r.',
request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the query component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# http://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
request.redirect_uri = request.redirect_uri or self.error_uri
return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples)}, None, 302
grant = self.create_authorization_code(request)
log.debug('Saving grant %r for %r.', grant, request)
self.request_validator.save_authorization_code(
request.client_id, grant, request)
return {'Location': common.add_params_to_uri(request.redirect_uri, grant.items())}, None, 302
def create_token_response(self, request, token_handler):
"""Validate the authorization code.
The client MUST NOT use the authorization code more than once. If an
authorization code is used more than once, the authorization server
MUST deny the request and SHOULD revoke (when possible) all tokens
previously issued based on that authorization code. The authorization
code is bound to the client identifier and redirection URI.
"""
headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
try:
self.validate_token_request(request)
log.debug('Token request validation ok for %r.', request)
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
return headers, e.json, e.status_code
token = token_handler.create_token(request, refresh_token=True)
self.request_validator.invalidate_authorization_code(
request.client_id, request.code, request)
return headers, json.dumps(token), 200
def validate_authorization_request(self, request):
"""Check the authorization request for normal and fatal errors.
A normal error could be a missing response_type parameter or the client
attempting to access scope it is not allowed to ask authorization for.
Normal errors can safely be included in the redirection URI and
sent back to the client.
Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled; how this
        is done is outside the scope of OAuthLib, but showing an error
        page describing the issue is a good idea.
"""
# First check for fatal errors
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
# REQUIRED. The client identifier as described in Section 2.2.
# http://tools.ietf.org/html/rfc6749#section-2.2
if not request.client_id:
raise errors.MissingClientIdError(request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(request=request)
# OPTIONAL. As described in Section 3.1.2.
# http://tools.ietf.org/html/rfc6749#section-3.1.2
log.debug('Validating redirection uri %s for client %s.',
request.redirect_uri, request.client_id)
if request.redirect_uri is not None:
request.using_default_redirect_uri = False
log.debug('Using provided redirect_uri %s', request.redirect_uri)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(request=request)
if not self.request_validator.validate_redirect_uri(
request.client_id, request.redirect_uri, request):
raise errors.MismatchingRedirectURIError(request=request)
else:
request.redirect_uri = self.request_validator.get_default_redirect_uri(
request.client_id, request)
request.using_default_redirect_uri = True
log.debug('Using default redirect_uri %s.', request.redirect_uri)
if not request.redirect_uri:
raise errors.MissingRedirectURIError(request=request)
# Then check for normal errors.
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the query component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B.
# http://tools.ietf.org/html/rfc6749#appendix-B
# Note that the correct parameters to be added are automatically
# populated through the use of specific exceptions.
if request.response_type is None:
raise errors.InvalidRequestError(description='Missing response_type parameter.', request=request)
for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param, request=request)
if not self.request_validator.validate_response_type(request.client_id,
request.response_type, request.client, request):
log.debug('Client %s is not authorized to use response_type %s.',
request.client_id, request.response_type)
raise errors.UnauthorizedClientError(request=request)
# REQUIRED. Value MUST be set to "code".
if request.response_type != 'code':
raise errors.UnsupportedResponseTypeError(request=request)
# OPTIONAL. The scope of the access request as described by Section 3.3
# http://tools.ietf.org/html/rfc6749#section-3.3
self.validate_scopes(request)
return request.scopes, {
'client_id': request.client_id,
'redirect_uri': request.redirect_uri,
'response_type': request.response_type,
'state': request.state,
'request': request,
}
def validate_token_request(self, request):
# REQUIRED. Value MUST be set to "authorization_code".
if request.grant_type != 'authorization_code':
raise errors.UnsupportedGrantTypeError(request=request)
if request.code is None:
raise errors.InvalidRequestError(
description='Missing code parameter.', request=request)
for param in ('client_id', 'grant_type', 'redirect_uri'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
request=request)
if self.request_validator.client_authentication_required(request):
# If the client type is confidential or the client was issued client
# credentials (or assigned other authentication requirements), the
# client MUST authenticate with the authorization server as described
# in Section 3.2.1.
# http://tools.ietf.org/html/rfc6749#section-3.2.1
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
# REQUIRED, if the client is not authenticating with the
# authorization server as described in Section 3.2.1.
# http://tools.ietf.org/html/rfc6749#section-3.2.1
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
if not hasattr(request.client, 'client_id'):
raise NotImplementedError('Authenticate client must set the '
'request.client.client_id attribute '
'in authenticate_client.')
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
# REQUIRED. The authorization code received from the
# authorization server.
if not self.request_validator.validate_code(request.client_id,
request.code, request.client, request):
log.debug('Client, %r (%r), is not allowed access to scopes %r.',
request.client_id, request.client, request.scopes)
raise errors.InvalidGrantError(request=request)
for attr in ('user', 'state', 'scopes'):
if getattr(request, attr) is None:
log.debug('request.%s was not set on code validation.', attr)
# REQUIRED, if the "redirect_uri" parameter was included in the
# authorization request as described in Section 4.1.1, and their
# values MUST be identical.
if not self.request_validator.confirm_redirect_uri(request.client_id, request.code,
request.redirect_uri, request.client):
log.debug('Redirect_uri (%r) invalid for client %r (%r).',
request.redirect_uri, request.client_id, request.client)
raise errors.AccessDeniedError(request=request)
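# Hedged sketch of how a provider wires this grant type up: a toy validator
# overriding only hooks that are referenced in this module (a real provider
# must implement the full RequestValidator contract). The client id, redirect
# URI and in-memory storage are illustrative only; RequestValidator is the
# class imported at the top of this module.
class _InMemoryValidatorSketch(RequestValidator):

    def __init__(self):
        self.codes = {}

    def validate_client_id(self, client_id, request):
        return client_id == 'valid'

    def validate_redirect_uri(self, client_id, redirect_uri, request):
        return redirect_uri == 'http://client.com/'

    def validate_response_type(self, client_id, response_type, client, request):
        return response_type == 'code'

    def save_authorization_code(self, client_id, code, request):
        # 'code' is the grant dict built by create_authorization_code above
        self.codes[code['code']] = request.scopes

    def invalidate_authorization_code(self, client_id, code, request):
        self.codes.pop(code, None)

# grant = AuthorizationCodeGrant(_InMemoryValidatorSketch())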
|
insiderr/insiderr-app
|
app/modules/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py
|
Python
|
gpl-3.0
| 19,942
| 0.001254
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 11:30:24 2013
@author: Sat Kumar Tomer
@email: satkumartomer@gmail.com
@website: www.ambhas.com
"""
import numpy as np
import h5py
import os
import datetime as dt
def extract_smc(h5_file, lat, lon):
"""
Extract Soil Moisture Content from AMSR2 h5 products
Input:
h5_file: a single file name
lat: latitude, either a single value or min,max limits
eg.
lat = 12
lat = [10,15]
lon: longitude, either a single value or min,max limits
eg. as for lat
"""
res = 0.1
######### convert lat, lon into indices ##############
# min max are given
min_max = type(lat) is list
if min_max:
lat_min = lat[0]
lat_max = lat[1]
i_lat_min = int(np.floor((90-lat_min)/res))
i_lat_max = int(np.floor((90-lat_max)/res))
lon_min = lon[0]
lon_max = lon[1]
if lon_min<0: lon_min += 360
if lon_max<0: lon_max += 360
j_lon_min = int(np.floor(lon_min/res))
j_lon_max = int(np.floor(lon_max/res))
else: # if only single value of lat, lon is given
i_lat = np.floor((90-lat)/res)
i_lat = i_lat.astype(int)
lon1 = np.copy(lon)
if lon1<0:
lon1 += 360
j_lon = np.floor(lon1/res)
j_lon = j_lon.astype(int)
# read the data
if type(h5_file) is str:
f = h5py.File(h5_file, "r")
if min_max:
smc = f["Geophysical Data"][i_lat_max:i_lat_min+1, j_lon_min:j_lon_max+1,0]
else:
smc = f["Geophysical Data"][i_lat, j_lon,0]
elif type(h5_file) is list:
n = len(h5_file)
if min_max:
nlat = i_lat_min+1 - i_lat_max
nlon = j_lon_max+1 - j_lon_min
smc = np.empty((n, nlat, nlon))
for h5_f,i in zip(h5_file, range(n)):
f = h5py.File(h5_f, "r")
smc[i,:,:] = f["Geophysical Data"][i_lat_max:i_lat_min+1, j_lon_min:j_lon_max+1,0]
f.close()
else:
smc = np.empty(n,)
for h5_f,i in zip(h5_file, range(n)):
f = h5py.File(h5_f, "r")
smc[i] = f["Geophysical Data"][i_lat, j_lon,0]
f.close()
try:
smc[smc<0] = np.nan
except:
if smc <0: smc = np.nan
return smc
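# Worked example of the lat/lon -> grid-index mapping used above, as a
# standalone helper (the 0.1-degree resolution and the 0..360 longitude
# convention come from extract_smc; the sample coordinates are illustrative
# and this helper is not called anywhere):
def _latlon_to_indices(lat, lon, res=0.1):
    i_lat = int(np.floor((90 - lat) / res))  # row 0 sits at +90 deg latitude
    if lon < 0:
        lon += 360                           # grid longitudes run 0..360
    j_lon = int(np.floor(lon / res))
    return i_lat, j_lon

# _latlon_to_indices(12, 77) -> (780, 770)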
def extract_dates(h5_file):
h5_dates = []
for h5_f in h5_file:
foo = os.path.basename(h5_f)[7:15]
h5_dates.append(dt.datetime.strptime(foo, '%Y%m%d'))
return h5_dates
def extract_orbit(h5_file):
asc = []
for h5_f in h5_file:
f = h5py.File(h5_f, "r")
if f.attrs['OrbitDirection'][0] == 'Ascending':
asc.append(True)
elif f.attrs['OrbitDirection'][0] == 'Descending':
asc.append(False)
else:
asc.append(None)
f.close()
return asc
if __name__ == "__main__":
import glob
h5_file = '/home/tomer/amsr2/data/h5/GW1AM2_20130722_01D_EQMD_L3SGSMCHA1100100.h5'
h5_file = glob.glob('/home/tomer/amsr2/data/h5/GW1AM2_201?????_01D*.h5')
h5_file.sort()
h5_file = h5_file[:5]
lat = [8, 38]
lon = [68, 98]
sm = extract_smc(h5_file, lat, lon)
sm_dates = extract_dates(h5_file)
asc = extract_orbit(h5_file)
|
andreas-koukorinis/ambhas
|
ambhas/amsr2.py
|
Python
|
lgpl-2.1
| 3,408
| 0.010857
|
# Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from opsmgr.common import constants
from opsmgr.common import exceptions
from opsmgr.common.utils import entry_exit, execute_command
from opsmgr.inventory.interfaces import IManagerDevicePlugin
class PowerNodePlugin(IManagerDevicePlugin.IManagerDevicePlugin):
IPMI_TOOL = "/usr/local/bin/ipmitool"
def __init__(self):
self.host = None
self.userid = None
self.password = None
self.version = None
self.machine_type_model = ""
self.serial_number = ""
@staticmethod
def get_type():
return "PowerNode"
@staticmethod
def get_web_url(host):
return "https://" + host
@staticmethod
def get_capabilities():
return [constants.MONITORING_CAPABLE]
@entry_exit(exclude_index=[0, 3, 4], exclude_name=["self", "password", "ssh_key_string"])
def connect(self, host, userid, password=None, ssh_key_string=None):
"""connect to the BMC and store the mtm and serial number
"""
_method_ = "PowerNodePlugin.connect"
self.host = host
self.userid = userid
self.password = password
if ssh_key_string is not None:
raise exceptions.AuthenticationException("SSH Key Authentication "
"is not supported for PowerNode devices")
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H",
host, "-U", userid, "-P", password, "fru", "print"]
(_rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
logging.warning("%s::ipmi query standard error output %s", _method_, stderr)
for line in stderr:
if "Unable to establish IPMI" in line:
raise exceptions.ConnectionException(
"Unable to connect to the device using IPMI")
for line in stdout:
if "Chassis Part Number" in line:
self.machine_type_model = line.split(":")[1].strip()
elif "Chassis Serial" in line:
self.serial_number = line.split(":")[1].strip()
@entry_exit(exclude_index=[0], exclude_name=["self"])
def disconnect(self):
pass
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_machine_type_model(self):
return self.machine_type_model
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_serial_number(self):
return self.serial_number
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_version(self):
_method_ = "PowerNodePlugin.get_version"
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host,
"-U", self.userid, "-P", self.password, "mc", "info"]
(rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
if rc != 0:
logging.warning("%s::ipmi query failed with output %s", _method_, stderr)
raise exceptions.DeviceException("ipmi query failed with output %s" % stderr)
for line in stdout:
if "Firmware Revision" in line:
self.version = line.split(":")[1].strip()
break
return self.version
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_architecture(self):
return None
@entry_exit(exclude_index=[0, 1], exclude_name=["self", "new_password"])
def change_device_password(self, new_password):
"""Update the password of the ipmi default user on the BMC of the openpower server.
"""
_method_ = "PowerNodePlugin.change_device_password"
user_number = self._get_user_number()
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host, "-U", self.userid,
"-P", self.password, "user", "set", "password", user_number, new_password]
(rc, _stdout, stderr) = execute_command(" ".join(cmd_parms))
if rc != 0:
logging.error("%s::ipmi password change failed with output %s", _method_, stderr)
raise exceptions.DeviceException("ipmi password change failed with output %s" % stderr)
@entry_exit(exclude_index=[0], exclude_name=["self"])
def _get_user_number(self):
"""Each user in IPMI has a number associated with that is used on the command line
when modifying a user. This method will find the number associated with the userid
"""
_method_ = "PowerNodePlugin._get_user_number"
user_id = None
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host, "-U", self.userid,
"-P", self.password, "user", "list"]
(rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
if rc != 0:
logging.warning("%s::ipmi query failed with output %s", _method_, stderr)
raise exceptions.DeviceException("ipmi query failed with output %s" % stderr)
for line in stdout:
ids = line.split()[0]
user = line.split()[1]
if user == self.userid:
user_id = ids
break
if user_id:
return user_id
else:
raise exceptions.DeviceException("Failed to determine the id for the user: %s" %
self.userid)
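# Standalone illustration of the "Key : Value" parsing used by connect()
# above; the sample FRU lines are illustrative only and this helper is unused.
def _parse_fru_sketch(lines):
    mtm = serial = ""
    for line in lines:
        if "Chassis Part Number" in line:
            mtm = line.split(":")[1].strip()
        elif "Chassis Serial" in line:
            serial = line.split(":")[1].strip()
    return mtm, serial

# _parse_fru_sketch(["Chassis Part Number  : 8335-GTB",
#                    "Chassis Serial       : 0000001"]) -> ("8335-GTB", "0000001")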
|
open-power-ref-design/opsmgr
|
plugins/devices/powernode/opsmgr/plugins/devices/powernode/PowerNodePlugin.py
|
Python
|
apache-2.0
| 5,846
| 0.002737
|
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEST_ROOT = os.path.join(os.path.dirname(PROJECT_ROOT), "tests")
SINGLE_ORCA_RUN_FILE = os.path.join(PROJECT_ROOT, "code", "single_core_run_orca.py")
SINGLE_RUN_MOB_KMC_FILE = os.path.join(
PROJECT_ROOT, "code", "single_core_run_mob_KMC.py"
)
SINGLE_RUN_DEVICE_KMC_FILE = os.path.join(
PROJECT_ROOT, "code", "single_core_run_device_KMC.py"
)
|
matty-jones/MorphCT
|
morphct/definitions.py
|
Python
|
gpl-3.0
| 419
| 0.002387
|
import math
import config
import utils
import packets
import logbot
import fops
import blocks
import behavior_tree as bt
from axisbox import AABB
log = logbot.getlogger("BOT_ENTITY")
class BotObject(object):
def __init__(self):
self.velocities = utils.Vector(0.0, 0.0, 0.0)
self.direction = utils.Vector2D(0, 0)
self._x = 0
self._y = 0
self._z = 0
self.stance_diff = config.PLAYER_EYELEVEL
self.pitch = None
self.yaw = None
self.on_ground = False
self.is_collided_horizontally = False
self.horizontally_blocked = False
self.action = 2 # normal
self._action = self.action
self.is_jumping = False
self.hold_position_flag = True
def set_xyz(self, x, y, z):
self._x = x
self._y = y
self._z = z
self._aabb = AABB.from_player_coords(self.position)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
@property
def position(self):
return utils.Vector(self.x, self.y, self.z)
@property
def position_grid(self):
return utils.Vector(self.grid_x, self.grid_y, self.grid_z)
@property
def position_eyelevel(self):
return utils.Vector(self.x, self.y_eyelevel, self.z)
@property
def y_eyelevel(self):
return self.y + config.PLAYER_EYELEVEL
@property
def stance(self):
return self.y + self.stance_diff
@property
def grid_x(self):
return utils.grid_shift(self.x)
@property
def grid_y(self):
return utils.grid_shift(self.y)
@property
def grid_z(self):
return utils.grid_shift(self.z)
@property
def aabb(self):
return self._aabb
@aabb.setter
def aabb(self, v):
raise Exception('setting bot aabb')
class BotEntity(object):
def __init__(self, world, name):
self.world = world
self.name = name
self.bot_object = BotObject()
self.eid = None
self.chunks_ready = False
self.ready = False
self.i_am_dead = False
self.location_received = False
self.check_location_received = False
self.spawn_point_received = False
self.behavior_tree = bt.BehaviorTree(self.world, self)
def on_connection_lost(self):
if self.location_received:
self.location_received = False
self.chunks_ready = False
def new_location(self, x, y, z, stance, grounded, yaw, pitch):
self.bot_object.set_xyz(x, y, z)
self.bot_object.stance_diff = stance - y
self.bot_object.on_ground = grounded
self.bot_object.yaw = yaw
self.bot_object.pitch = pitch
self.bot_object.velocities = utils.Vector(0.0, 0.0, 0.0)
self.check_location_received = True
if self.location_received is False:
self.location_received = True
if not self.in_complete_chunks(self.bot_object):
log.msg("Server sent me into incomplete chunks, will wait until they load up.")
self.ready = False
def in_complete_chunks(self, b_obj):
return self.world.grid.aabb_in_complete_chunks(b_obj.aabb)
def tick(self):
if self.location_received is False:
return
if not self.ready:
self.ready = self.in_complete_chunks(self.bot_object) and self.spawn_point_received
if not self.ready:
return
self.move(self.bot_object)
self.bot_object.direction = utils.Vector2D(0, 0)
self.send_location(self.bot_object)
self.send_action(self.bot_object)
self.stop_sneaking(self.bot_object)
if not self.i_am_dead:
utils.do_now(self.behavior_tree.tick)
def send_location(self, b_obj):
self.world.send_packet("player position&look", {
"position": packets.Container(x=b_obj.x, y=b_obj.y, z=b_obj.z,
stance=b_obj.stance),
"orientation": packets.Container(yaw=b_obj.yaw, pitch=b_obj.pitch),
"grounded": packets.Container(grounded=b_obj.on_ground)})
def send_action(self, b_obj):
"""
sneaking, not sneaking, leave bed, start sprinting, stop sprinting
"""
if b_obj.action != b_obj._action:
b_obj.action = b_obj._action
self.world.send_packet("entity action", {"eid": self.eid, "action": b_obj._action})
def turn_to_point(self, b_obj, point):
if point.x == b_obj.x and point.z == b_obj.z:
return
yaw, pitch = utils.yaw_pitch_between(point, b_obj.position_eyelevel)
if yaw is None or pitch is None:
return
b_obj.yaw = yaw
b_obj.pitch = pitch
def turn_to_direction(self, b_obj, x, y, z):
if x == 0 and z == 0:
return
yaw, pitch = utils.vector_to_yaw_pitch(x, y, z)
b_obj.yaw = yaw
b_obj.pitch = pitch
def turn_to_vector(self, b_obj, vect):
if vect.x == 0 and vect.z == 0:
return
yaw, pitch = utils.vector_to_yaw_pitch(vect.x, vect.y, vect.z)
b_obj.yaw = yaw
b_obj.pitch = pitch
def clip_abs_velocities(self, b_obj):
if abs(b_obj.velocities.x) < 0.005: # minecraft value
b_obj.velocities.x = 0
if abs(b_obj.velocities.y) < 0.005: # minecraft value
b_obj.velocities.y = 0
if abs(b_obj.velocities.z) < 0.005: # minecraft value
b_obj.velocities.z = 0
def clip_ladder_velocities(self, b_obj):
if self.is_on_ladder(b_obj):
if b_obj.velocities.y < -0.15:
b_obj.velocities.y = -0.15
if abs(b_obj.velocities.x) > 0.15:
b_obj.velocities.x = math.copysign(0.15, b_obj.velocities.x)
if abs(b_obj.velocities.z) > 0.15:
b_obj.velocities.z = math.copysign(0.15, b_obj.velocities.z)
if self.is_sneaking(b_obj) and b_obj.velocities.y < 0:
b_obj.velocities.y = 0
def handle_water_movement(self, b_obj):
is_in_water = False
water_current = utils.Vector(0, 0, 0)
bb = b_obj.aabb.expand(-0.001, -0.401, -0.001)
top_y = utils.grid_shift(bb.max_y + 1)
for blk in self.world.grid.blocks_in_aabb(bb):
if isinstance(blk, blocks.BlockWater):
if top_y >= (blk.y + 1 - blk.height_percent):
is_in_water = True
water_current = blk.add_velocity_to(water_current)
if water_current.size > 0:
water_current.normalize()
wconst = 0.014
water_current = water_current * wconst
b_obj.velocities = b_obj.velocities + water_current
return is_in_water
def handle_lava_movement(self, b_obj):
for blk in self.world.grid.blocks_in_aabb(
b_obj.aabb.expand(-0.1,
-0.4,
-0.1)):
if isinstance(blk, blocks.BlockLava):
return True
return False
def move_collisions(self, b_obj, vx, vy, vz):
if self.is_in_web(b_obj):
vx *= 0.25
vy *= 0.05000000074505806
vz *= 0.25
b_obj.velocities.x = 0
b_obj.velocities.y = 0
b_obj.velocities.z = 0
aabbs = self.world.grid.collision_aabbs_in(b_obj.aabb.extend_to(vx, vy, vz))
b_bb = b_obj.aabb
dy = vy
if not fops.eq(vy, 0):
for bb in aabbs:
dy = b_bb.calculate_axis_offset(bb, dy, 1)
b_bb = b_bb.offset(dy=dy)
dx = vx
if not fops.eq(vx, 0):
for bb in aabbs:
dx = b_bb.calculate_axis_offset(bb, dx, 0)
b_bb = b_bb.offset(dx=dx)
dz = vz
if not fops.eq(vz, 0):
for bb in aabbs:
dz = b_bb.calculate_axis_offset(bb, dz, 2)
b_bb = b_bb.offset(dz=dz)
if vy != dy and vy < 0 and (dx != vx or dz != vz):
st = config.MAX_STEP_HEIGHT
aabbs = self.world.grid.collision_aabbs_in(b_obj.aabb.extend_to(vx, st, vz))
b_bbs = b_obj.aabb
dys = st
for bb in aabbs:
dys = b_bbs.calculate_axis_offset(bb, dys, 1)
b_bbs = b_bbs.offset(dy=dys)
dxs = vx
for bb in aabbs:
dxs = b_bbs.calculate_axis_offset(bb, dxs, 0)
b_bbs = b_bbs.offset(dx=dxs)
dzs = vz
for bb in aabbs:
dzs = b_bbs.calculate_axis_offset(bb, dzs, 2)
b_bbs = b_bbs.offset(dz=dzs)
if fops.gt(dxs * dxs + dzs * dzs, dx * dx + dz * dz):
dx = dxs
dy = dys
dz = dzs
b_bb = b_bbs
b_obj.on_ground = vy != dy and vy < 0
b_obj.is_collided_horizontally = dx != vx or dz != vz
b_obj.horizontally_blocked = not fops.eq(dx, vx) and not fops.eq(dz, vz)
if not fops.eq(vx, dx):
b_obj.velocities.x = 0
if not fops.eq(vy, dy):
b_obj.velocities.y = 0
if not fops.eq(vz, dz):
b_obj.velocities.z = 0
b_obj.set_xyz(b_bb.posx, b_bb.min_y, b_bb.posz)
self.do_block_collision(b_obj)
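    # Illustrative sketch of the per-axis clipping idea used above: motion is
    # resolved one axis at a time (y, then x, then z) against nearby AABBs.
    # This simplified helper works on 1-D intervals and omits the cross-axis
    # overlap test that calculate_axis_offset performs; it is not called by
    # the bot code.
    @staticmethod
    def _clip_axis_sketch(move, lo, hi, obstacles):
        for olo, ohi in obstacles:
            if move > 0 and hi <= olo:
                move = min(move, olo - hi)
            elif move < 0 and lo >= ohi:
                move = max(move, ohi - lo)
        return move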
def move(self, b_obj):
self.clip_abs_velocities(b_obj)
is_in_water = self.handle_water_movement(b_obj)
is_in_lava = self.handle_lava_movement(b_obj)
if b_obj.is_jumping:
if is_in_water or is_in_lava:
b_obj.velocities.y += config.SPEED_LIQUID_JUMP
elif b_obj.on_ground:
b_obj.velocities.y = config.SPEED_JUMP
elif self.is_on_ladder(b_obj):
b_obj.velocities.y = config.SPEED_CLIMB
b_obj.is_jumping = False
if is_in_water:
if b_obj.hold_position_flag:
b_obj.velocities.y = 0
orig_y = b_obj.y
self.update_directional_speed(b_obj, 0.02, balance=True)
self.move_collisions(b_obj, b_obj.velocities.x, b_obj.velocities.y, b_obj.velocities.z)
b_obj.velocities.x *= 0.8
b_obj.velocities.y *= 0.8
b_obj.velocities.z *= 0.8
b_obj.velocities.y -= 0.02
if b_obj.is_collided_horizontally and \
self.is_offset_in_liquid(b_obj, b_obj.velocities.x,
b_obj.velocities.y + 0.6 -
b_obj.y + orig_y,
b_obj.velocities.z):
b_obj.velocities.y = 0.3
elif is_in_lava:
if b_obj.hold_position_flag:
b_obj.velocities.y = 0
orig_y = b_obj.y
self.update_directional_speed(b_obj, 0.02)
self.move_collisions(b_obj, b_obj.velocities.x, b_obj.velocities.y, b_obj.velocities.z)
b_obj.velocities.x *= 0.5
b_obj.velocities.y *= 0.5
b_obj.velocities.z *= 0.5
b_obj.velocities.y -= 0.02
if b_obj.is_collided_horizontally and \
self.is_offset_in_liquid(b_obj, b_obj.velocities.x,
b_obj.velocities.y + 0.6 -
b_obj.y + orig_y,
b_obj.velocities.z):
b_obj.velocities.y = 0.3
else:
if self.is_on_ladder(b_obj) and b_obj.hold_position_flag:
self.start_sneaking(b_obj)
slowdown = self.current_slowdown(b_obj)
self.update_directional_speed(b_obj, self.current_speed_factor(b_obj))
self.clip_ladder_velocities(b_obj)
self.move_collisions(b_obj, b_obj.velocities.x, b_obj.velocities.y, b_obj.velocities.z)
if b_obj.is_collided_horizontally and self.is_on_ladder(b_obj):
b_obj.velocities.y = 0.2
b_obj.velocities.y -= config.BLOCK_FALL
b_obj.velocities.y *= config.DRAG
b_obj.velocities.x *= slowdown
b_obj.velocities.z *= slowdown
def directional_speed(self, direction, speedf):
dx = direction.x * speedf
dz = direction.z * speedf
return utils.Vector2D(dx, dz)
def update_directional_speed(self, b_obj, speedf, balance=False):
direction = self.directional_speed(b_obj.direction, speedf)
if balance and direction.size > 0:
perpendicular_dir = utils.Vector2D(- direction.z, direction.x)
dot = (b_obj.velocities.x * perpendicular_dir.x + b_obj.velocities.z * perpendicular_dir.z) / \
(perpendicular_dir.x * perpendicular_dir.x + perpendicular_dir.z * perpendicular_dir.z)
if dot < 0:
dot *= -1
perpendicular_dir = utils.Vector2D(direction.z, - direction.x)
direction = utils.Vector2D(direction.x - perpendicular_dir.x * dot, direction.z - perpendicular_dir.z * dot)
self.turn_to_direction(b_obj, direction.x, 0, direction.z)
if balance and b_obj.hold_position_flag:
self.turn_to_direction(b_obj, -b_obj.velocities.x, 0, -b_obj.velocities.z)
b_obj.velocities.x = 0
b_obj.velocities.z = 0
b_obj.velocities.x += direction.x
b_obj.velocities.z += direction.z
def current_slowdown(self, b_obj):
slowdown = 0.91
if b_obj.on_ground:
slowdown = 0.546
block = self.world.grid.get_block(b_obj.grid_x, b_obj.grid_y - 1, b_obj.grid_z)
if block is not None:
slowdown = block.slipperiness * 0.91
return slowdown
def current_speed_factor(self, b_obj):
if b_obj.on_ground:
slowdown = self.current_slowdown(b_obj)
modf = 0.16277136 / (slowdown * slowdown * slowdown)
factor = config.SPEED_ON_GROUND * modf
else:
factor = config.SPEED_IN_AIR
return factor * 0.98
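# Illustrative note (not from the original source): with the default block
# slipperiness of 0.6, slowdown = 0.6 * 0.91 = 0.546 and
# modf = 0.16277136 / 0.546**3 ~= 1.0, so on ordinary ground the factor
# reduces to roughly config.SPEED_ON_GROUND * 0.98; the magic constant is
# simply 0.546 cubed.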
def current_motion(self, b_obj):
self.clip_abs_velocities(b_obj)
vx = b_obj.velocities.x
vz = b_obj.velocities.z
return math.hypot(vx, vz) + self.current_speed_factor(b_obj)
def is_on_ladder(self, b_obj):
return self.world.grid.aabb_on_ladder(b_obj.aabb)
def is_in_water(self, b_obj):
is_in_water = False
bb = b_obj.aabb.expand(-0.001, -0.4010000059604645, -0.001)
top_y = utils.grid_shift(bb.max_y + 1)
for blk in self.world.grid.blocks_in_aabb(bb):
if isinstance(blk, blocks.BlockWater):
if top_y >= (blk.y + 1 - blk.height_percent):
is_in_water = True
return is_in_water
def is_in_web(self, b_obj):
bb = b_obj.aabb.expand(dx=-0.001, dy=-0.001, dz=-0.001)
for blk in self.world.grid.blocks_in_aabb(bb):
if isinstance(blk, blocks.Cobweb):
return True
return False
def head_inside_water(self, b_obj):
bb = b_obj.aabb
eye_y = bb.min_y + eye_height
ey = utils.grid_shift(eye_y)
blk = self.world.grid.get_block(bb.gridpos_x, ey, bb.gridpos_z)
if blk.is_water:
wh = blk.height_percent - 0.11111111
return eye_y < (ey + 1 - wh)
else:
return False
def do_block_collision(self, b_obj):
bb = b_obj.aabb.expand(-0.001, -0.001, -0.001)
for blk in self.world.grid.blocks_in_aabb(bb):
blk.on_entity_collided(b_obj)
def is_sneaking(self, b_obj):
return b_obj.action == 1
def start_sneaking(self, b_obj):
b_obj.action = 1
def stop_sneaking(self, b_obj):
b_obj.action = 2
def is_offset_in_liquid(self, b_obj, dx, dy, dz):
bb = b_obj.aabb.offset(dx, dy, dz)
if self.world.grid.aabb_collides(bb):
return False
else:
return not self.world.grid.is_any_liquid(bb)
def do_respawn(self):
self.world.send_packet("client statuses", {"status": 1})
def standing_on_block(self, b_obj):
return self.world.grid.standing_on_block(b_obj.aabb)
def is_standing(self, b_obj):
return self.standing_on_block(b_obj) is not None
|
lukleh/TwistedBot
|
twistedbot/botentity.py
|
Python
|
mit
| 16,437
| 0.000973
|
import Decimal_pb2
import Log_pb2
import uRPC_pb2
import client
import server
|
npalko/uRPC
|
python/urpc/__init__.py
|
Python
|
bsd-3-clause
| 78
| 0.012821
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Test cases for the TAAR Hybrid recommender
"""
from taar.recommenders.hybrid_recommender import CuratedRecommender
from taar.recommenders.hybrid_recommender import HybridRecommender
from taar.recommenders.ensemble_recommender import EnsembleRecommender
from taar.recommenders.s3config import TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY
# from taar.recommenders.hybrid_recommender import ENSEMBLE_WEIGHTS
from .test_ensemblerecommender import install_mock_ensemble_data
from .mocks import MockRecommenderFactory
import json
from moto import mock_s3
import boto3
def install_no_curated_data(ctx):
ctx = ctx.child()
conn = boto3.resource("s3", region_name="us-west-2")
conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(Body="")
return ctx
def install_mock_curated_data(ctx):
mock_data = []
for i in range(20):
mock_data.append(str(i) * 16)
ctx = ctx.child()
conn = boto3.resource("s3", region_name="us-west-2")
conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(
Body=json.dumps(mock_data)
)
return ctx
def install_ensemble_fixtures(ctx):
ctx = install_mock_ensemble_data(ctx)
factory = MockRecommenderFactory()
ctx["recommender_factory"] = factory
ctx["recommender_map"] = {
"collaborative": factory.create("collaborative"),
"similarity": factory.create("similarity"),
"locale": factory.create("locale"),
}
ctx["ensemble_recommender"] = EnsembleRecommender(ctx.child())
return ctx
@mock_s3
def test_curated_can_recommend(test_ctx):
ctx = install_no_curated_data(test_ctx)
r = CuratedRecommender(ctx)
# CuratedRecommender will always recommend something no matter
# what
assert r.can_recommend({})
assert r.can_recommend({"installed_addons": []})
@mock_s3
def test_curated_recommendations(test_ctx):
ctx = install_mock_curated_data(test_ctx)
r = CuratedRecommender(ctx)
# CuratedRecommender will always recommend something no matter
# what
for LIMIT in range(1, 5):
guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT)
# The curated recommendations should always return with some kind
# of recommendations
assert len(guid_list) == LIMIT
@mock_s3
def test_hybrid_recommendations(test_ctx):
# verify that the recommendations mix the curated and
# ensemble results
ctx = install_mock_curated_data(test_ctx)
ctx = install_ensemble_fixtures(ctx)
r = HybridRecommender(ctx)
# Test that we can generate lists of results
for LIMIT in range(4, 8):
guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT)
# The curated recommendations should always return with some kind
# of recommendations
assert len(guid_list) == LIMIT
# Test that the results are actually mixed
guid_list = r.recommend({"client_id": "000000"}, limit=4)
# A mixed list will have two recommendations with weight > 1.0
# (ensemble) and 2 with exactly weight 1.0 from the curated list
assert guid_list[0][1] > 1.0
assert guid_list[1][1] > 1.0
assert guid_list[2][1] == 1.0
assert guid_list[3][1] == 1.0
|
maurodoglio/taar
|
tests/test_hybrid_recommender.py
|
Python
|
mpl-2.0
| 3,508
| 0.000285
|
import unittest
import os
from flow.benchmarks.baselines.bottleneck0 import bottleneck0_baseline
from flow.benchmarks.baselines.bottleneck1 import bottleneck1_baseline
from flow.benchmarks.baselines.bottleneck2 import bottleneck2_baseline
from flow.benchmarks.baselines.figureeight012 import figure_eight_baseline
from flow.benchmarks.baselines.grid0 import grid0_baseline
from flow.benchmarks.baselines.grid1 import grid1_baseline
from flow.benchmarks.baselines.merge012 import merge_baseline
os.environ["TEST_FLAG"] = "True"
class TestBaselines(unittest.TestCase):
"""
Tests that the baselines in the benchmarks folder are running and
returning expected values (i.e. values that match those reported in the
CoRL paper, on the website, or in other references).
"""
def test_bottleneck0(self):
"""
Tests flow/benchmark/baselines/bottleneck0.py
"""
# run the bottleneck to make sure it runs
bottleneck0_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_bottleneck1(self):
"""
Tests flow/benchmark/baselines/bottleneck1.py
"""
# run the bottleneck to make sure it runs
bottleneck1_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_bottleneck2(self):
"""
Tests flow/benchmark/baselines/bottleneck2.py
"""
# run the bottleneck to make sure it runs
bottleneck2_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_figure_eight(self):
"""
Tests flow/benchmark/baselines/figureeight{0,1,2}.py
"""
# run the bottleneck to make sure it runs
figure_eight_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_grid0(self):
"""
Tests flow/benchmark/baselines/grid0.py
"""
# run the bottleneck to make sure it runs
grid0_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_grid1(self):
"""
Tests flow/benchmark/baselines/grid1.py
"""
# run the bottleneck to make sure it runs
grid1_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_merge(self):
"""
Tests flow/benchmark/baselines/merge{0,1,2}.py
"""
# run the bottleneck to make sure it runs
merge_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
if __name__ == '__main__':
unittest.main()
|
cathywu/flow
|
tests/slow_tests/test_baselines.py
|
Python
|
mit
| 2,809
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0009_dummyprovider'),
]
operations = [
migrations.CreateModel(
name='ExternalCredentials',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('provider_name', models.CharField(max_length=1024)),
('username', models.CharField(max_length=1024)),
('password', models.CharField(max_length=1024)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalForm',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
('template_name', models.CharField(max_length=1024)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalFormGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
('parent', models.ForeignKey(to='api.ExternalJobPortalFormGroup', null=True)),
('portal', models.ForeignKey(to='api.ExternalJobPortal')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalSubmission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('data', models.TextField()),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(to='api.ExternalJobPortal')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalSubmissionStateChange',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('state', models.CharField(max_length=256, choices=[(b'EXTERNAL_SUBMISSION_RUNNING', b'Running'), (b'EXTERNAL_SUBMISSION_FAILED', b'FAILED'), (b'EXTERNAL_SUBMISSION_PENDING', b'Pending'), (b'EXTERNAL_SUBMISSION_PENDING_SUBMISSION', b'Submission in progress'), (b'EXTERNAL_SUBMISSION_SUCCESS', b'Succeeded')])),
('external_submission', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='externaljobportalform',
name='parent',
field=models.ForeignKey(to='api.ExternalJobPortalFormGroup', null=True),
),
migrations.AddField(
model_name='externaljobportalform',
name='portal',
field=models.ForeignKey(to='api.ExternalJobPortal'),
),
]
|
h2020-westlife-eu/VRE
|
api/migrations/0010_auto_20160121_1536.py
|
Python
|
mit
| 4,758
| 0.002102
|
#Equals and hash
class Eq(object):
def __init__(self, data):
self.data = data
def __eq__(self, other):
return self.data == other.data
class Ne(object):
def __init__(self, data):
self.data = data
def __ne__(self, other):
return self.data != other.data
class Hash(object):
def __init__(self, data):
self.data = data
def __hash__(self):
return hash(self.data)
class Unhashable1(object):
__hash__ = None
class EqOK1(Unhashable1):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
class Unhashable2(object):
#Not the idiomatic way of doing it, but not uncommon either
def __hash__(self):
raise TypeError("unhashable object")
class EqOK2(Unhashable2):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
class ReflectiveNotEquals(object):
def __ne__(self, other):
return not self == other
class EqOK3(ReflectiveNotEquals, Unhashable1):
def __eq__(self, other):
return self.data == other.data
|
github/codeql
|
python/ql/test/3/query-tests/Classes/equals-hash/equals_hash.py
|
Python
|
mit
| 1,147
| 0.012206
|
# -*- coding: utf-8 -*-
# #
# #
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
import argparse
import cPickle
import fcntl
import logging
import logging.handlers
import os
import pprint
import signal
import SocketServer
import struct
import sys
import termios
import textwrap
from threading import Lock
import sqlparse
from pygments import highlight
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexers.agile import PythonLexer, PythonTracebackLexer
from pygments.lexers.sql import SqlLexer
output_lock = Lock()
help_text = textwrap.dedent("""
To use this script, you need to add the following to your logging.conf:
[logger_db]
level=DEBUG
handlers=db
qualname=indico.db
propagate=0
[handler_db]
class=handlers.SocketHandler
level=DEBUG
args=('localhost', 9020)
Also add your new logger/handler to the loggers/handlers lists, e.g. like this:
[loggers]
keys=root,db
[handlers]
keys=indico,db,other,smtp
""").strip()
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
def handle(self):
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
size = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(size)
while len(chunk) < size:
chunk = chunk + self.connection.recv(size - len(chunk))
obj = cPickle.loads(chunk)
self.handle_log(obj)
def handle_log(self, obj):
sql_log_type = obj.get('sql_log_type')
if sql_log_type == 'start':
source = prettify_source(obj['sql_source'], self.server.traceback_frames) if obj['sql_source'] else None
statement = prettify_statement(obj['sql_statement'])
params = prettify_params(obj['sql_params']) if obj['sql_params'] else None
with output_lock:
if source:
print prettify_caption('Source')
print source
print
print prettify_caption('Statement')
print statement
if params:
print
print prettify_caption('Params')
print params
elif sql_log_type == 'end':
with output_lock:
print
print prettify_caption('Duration')
print ' {:.06f}s'.format(obj['sql_duration'])
print_linesep()
class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
allow_reuse_address = True
def __init__(self, host, port, handler=LogRecordStreamHandler, traceback_frames=1):
SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
self.timeout = 1
self.traceback_frames = traceback_frames
def terminal_size():
h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
return w, h
def print_linesep():
print terminal_size()[0] * u'\N{BOX DRAWINGS LIGHT HORIZONTAL}'
def indent(msg, level=4):
indentation = level * ' '
return indentation + msg.replace('\n', '\n' + indentation)
def prettify_caption(caption):
return '\x1b[38;5;75;04m{}\x1b[0m'.format(caption)
def prettify_source(source, traceback_frames):
if not traceback_frames:
return None
msg = 'Traceback (most recent call last):\n'
frame_msg = textwrap.dedent("""
File "{}", line {}, in {}
{}\n""").strip()
msg += indent('\n'.join(frame_msg.format(*frame) for frame in source[:traceback_frames]), 2)
highlighted = highlight(msg, PythonTracebackLexer(), Terminal256Formatter(style='native'))
# Remove first line (just needed for PythonTracebackLexer)
highlighted = '\n'.join(highlighted.splitlines()[1:])
return indent(highlighted, 2).rstrip()
def prettify_statement(statement):
statement = sqlparse.format(statement, keyword_case='upper', reindent=True)
return indent(highlight(statement, SqlLexer(), Terminal256Formatter(style='native'))).rstrip()
def prettify_params(args):
args = pprint.pformat(args)
return indent(highlight(args, PythonLexer(), Terminal256Formatter(style='native'))).rstrip()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='port', type=int, default=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
help='The port to bind the TCP listener to')
parser.add_argument('-t', dest='traceback_frames', type=int, default=1,
help='Number of stack frames to show (max. 3)')
parser.add_argument('--setup-help', action='store_true', help='Explain how to enable logging for script')
return parser.parse_args()
def sigint(*unused):
print '\rTerminating'
os._exit(1)
def main():
args = parse_args()
if args.setup_help:
print help_text
sys.exit(1)
signal.signal(signal.SIGINT, sigint)
print 'Listening on 127.0.0.1:{}'.format(args.port)
server = LogRecordSocketReceiver('localhost', args.port, traceback_frames=args.traceback_frames)
try:
server.serve_forever()
except KeyboardInterrupt:
print
if __name__ == '__main__':
main()
|
pferreir/indico-backup
|
bin/utils/db_log.py
|
Python
|
gpl-3.0
| 5,979
| 0.004014
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import re
import shutil
import sys
import utils
BASENAME = "VTK"
GIT_REPO = "http://vtk.org/VTK.git"
GIT_TAG = "v5.8.0"
VTK_BASE_VERSION = "vtk-5.8"
# this patch does three things:
# 1. adds try/catch blocks to all python method calls in order
# to trap bad_alloc exceptions
# 2. implements my scheme for turning all VTK errors into Python exceptions
# by making use of a special output window class
# 3. gives up the GIL around all VTK calls. This is also necessary
# for 2 not to deadlock on multi-cores.
EXC_PATCH = "pyvtk580_tryexcept_and_pyexceptions.diff"
# fixes attributes in vtkproperty for shader use in python
VTKPRPRTY_PATCH = "vtkProperty_PyShaderVar.diff"
# recent segfault with vtk 5.6.1 and wxPython 2.8.11.0
# see here for more info:
# http://vtk.1045678.n5.nabble.com/wx-python-scripts-segfault-td1234471.html
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH = "wxvtkrwi_displayid_segfault.diff"
dependencies = ['CMake']
class VTK58(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
self.exc_patch_src = os.path.join(config.patches_dir, EXC_PATCH)
self.exc_patch_dst = os.path.join(config.archive_dir, EXC_PATCH)
self.vtkprprty_patch_filename = os.path.join(config.patches_dir,
VTKPRPRTY_PATCH)
self.wxvtkrwi_displayid_segfault_patch_filename = os.path.join(
config.patches_dir,
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH)
config.VTK_LIB = os.path.join(self.inst_dir, 'lib')
# whatever the case may be, we have to register VTK variables
if os.name == 'nt':
# on Win, inst/VTK/bin contains the so files
config.VTK_SODIR = os.path.join(self.inst_dir, 'bin')
# inst/VTK/lib/site-packages the VTK python package
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'site-packages')
else:
# on *ix, inst/VTK/lib contains DLLs
config.VTK_SODIR = os.path.join(
config.VTK_LIB, VTK_BASE_VERSION)
# on *ix, inst/lib/python2.5/site-packages contains the
# VTK python package
# sys.version is (2, 5, 0, 'final', 0)
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'python%d.%d/site-packages' % \
sys.version_info[0:2])
# this contains the VTK cmake config (same on *ix and Win)
config.VTK_DIR = os.path.join(config.VTK_LIB, VTK_BASE_VERSION)
def get(self):
if os.path.exists(self.source_dir):
utils.output("VTK already checked out, skipping step.")
else:
utils.goto_archive()
ret = os.system("git clone %s %s" % (GIT_REPO, BASENAME))
if ret != 0:
utils.error("Could not clone VTK repo. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("git checkout %s" % (GIT_TAG,))
if ret != 0:
utils.error("Could not checkout VTK %s. Fix and try again." % (GIT_TAG,))
if not os.path.exists(self.exc_patch_dst):
utils.output("Applying EXC patch")
# we do this copy so we can see if the patch has been done yet or not
shutil.copyfile(self.exc_patch_src, self.exc_patch_dst)
os.chdir(self.source_dir)
# default git-generated patch, so needs -p1
ret = os.system(
"%s -p1 < %s" % (config.PATCH, self.exc_patch_dst))
if ret != 0:
utils.error(
"Could not apply EXC patch. Fix and try again.")
# # VTKPRPRTY PATCH
# utils.output("Applying VTKPRPRTY patch")
# os.chdir(os.path.join(self.source_dir, 'Rendering'))
# ret = os.system(
# "%s -p0 < %s" % (config.PATCH, self.vtkprprty_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply VTKPRPRTY patch. Fix and try again.")
# # WXVTKRWI_DISPLAYID_SEGFAULT patch
# utils.output("Applying VTKWXRWI_DISPLAYID_SEGFAULT patch")
# os.chdir(self.source_dir)
# # default git-generated patch, so needs -p1
# ret = os.system(
# "%s -p1 < %s" % (config.PATCH,
# self.wxvtkrwi_displayid_segfault_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply WXVTKRWI_DISPLAYID_SEGFAULT patch. Fix and try again.")
def unpack(self):
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("VTK build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = "-DBUILD_SHARED_LIBS=ON " \
"-DBUILD_TESTING=OFF " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DVTK_USE_TK=NO " \
"-DVTK_USE_METAIO=ON " \
"-DVTK_USE_PARALLEL=ON " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " \
"-DVTK_WRAP_PYTHON=ON " % (self.inst_dir,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error("Could not configure VTK. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir,
'bin/libvtkWidgetsPython.so')
nt_file = os.path.join(self.build_dir, 'bin', config.BUILD_TARGET,
'vtkWidgetsPythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('VTK.sln')
if ret != 0:
utils.error("Error building VTK. Fix and try again.")
def install(self):
posix_file = os.path.join(self.inst_dir, 'bin/vtkpython')
nt_file = os.path.join(self.inst_dir, 'bin', 'vtkpython.exe')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already installed. Skipping build step.")
else:
# python 2.5.2 setup.py complains that this does not exist
# with VTK PV-3-2-1. This is only on installations with
# EasyInstall / Python Eggs, then the VTK setup.py uses
# EasyInstall and not standard distutils. gah!
# just tested with VTK 5.8.0 and Python 2.7.2
# it indeed installs VTK_PYTHON/VTK-5.8.0-py2.7.egg
# but due to the site.py and easy-install.pth magic in there,
# adding VTK_PYTHON to the PYTHONPATH still works. We can keep
# pip, yay!
if not os.path.exists(config.VTK_PYTHON):
os.makedirs(config.VTK_PYTHON)
os.chdir(self.build_dir)
# we save, set and restore the PP env variable, else
# stupid setuptools complains
save_env = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = config.VTK_PYTHON
ret = utils.make_command('VTK.sln', install=True)
os.environ['PYTHONPATH'] = save_env
if ret != 0:
utils.error("Could not install VTK. Fix and try again.")
# now do some surgery on VTKConfig.cmake and
# VTKLibraryDepends.cmake so builds of VTK-dependent libraries
# with only the DRE to link with Just Work(tm)
# on windows, we need to replace backslash with forward slash
# as that's the style used by the config files. On *ix mostly
# harmless
idp = re.sub(r'\\','/', config.inst_dir)
for fn in [os.path.join(config.VTK_DIR, 'VTKConfig.cmake'),
os.path.join(config.VTK_DIR, 'VTKLibraryDepends.cmake'),
os.path.join(config.VTK_DIR, 'VTKTargets-relwithdebinfo.cmake')]:
if os.path.exists(fn):
utils.re_sub_filter_file(
[(idp, '${VTK_INSTALL_PREFIX}/..')],
fn)
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
import vtk
return vtk.vtkVersion.GetVTKVersion()
|
nagyistoce/devide.johannes
|
install_packages/ip_vtk58.py
|
Python
|
bsd-3-clause
| 9,776
| 0.004603
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import retrying
from nova import exception
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp
from nova.virt.powervm import mgmt
LPAR_HTTPRESP_FILE = "lpar.txt"
class TestMgmt(test.TestCase):
def setUp(self):
super(TestMgmt, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
self.assertIsNotNone(
lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)
self.resp = lpar_http.response
@mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
def test_mgmt_uuid(self, mock_get_partition):
mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
adpt = mock.Mock()
# First run should call the partition only once
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
mock_get_partition.assert_called_once_with(adpt)
# But a subsequent call should effectively no-op
mock_get_partition.reset_mock()
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
self.assertEqual(mock_get_partition.call_count, 0)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
mock_glob):
scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# Realistically, first glob would return e.g. .../host0/.../host0/...
# but it doesn't matter for test purposes.
mock_glob.side_effect = [[scanpath], [devlink]]
mgmt.discover_vscsi_disk(mapping)
mock_glob.assert_has_calls(
[mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
mock_realpath.assert_called_with(devlink)
@mock.patch('retrying.retry', autospec=True)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
mock_glob, mock_retry):
"""Zero or more than one disk is found by discover_vscsi_disk."""
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(300000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_passthrough(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return _poll_for_dev
return wrapped
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return raiser
return wrapped
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# No disks found
mock_retry.side_effect = retry_timeout
mock_glob.side_effect = lambda path: []
self.assertRaises(exception.NoDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
# Multiple disks found
mock_retry.side_effect = retry_passthrough
mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
self.assertRaises(exception.UniqueDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
@mock.patch('time.sleep', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
@mock.patch('os.stat', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
mock_sleep):
link = '/dev/link/foo'
realpath = '/dev/sde'
delpath = '/sys/block/sde/device/delete'
mock_realpath.return_value = realpath
# Good path
mock_stat.side_effect = (None, None, OSError())
mgmt.remove_block_dev(link)
mock_realpath.assert_called_with(link)
mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
mock.call(realpath)])
mock_writefile.assert_called_once_with(delpath, 'a', '1')
self.assertEqual(0, mock_sleep.call_count)
# Device param not found
mock_writefile.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (OSError(), None, None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called once; exec was not called
self.assertEqual(1, mock_stat.call_count)
self.assertEqual(0, mock_writefile.call_count)
# Delete special file not found
mock_writefile.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (None, OSError(), None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called twice; exec was not called
self.assertEqual(2, mock_stat.call_count)
self.assertEqual(0, mock_writefile.call_count)
@mock.patch('retrying.retry')
@mock.patch('os.path.realpath')
@mock.patch('os.stat')
@mock.patch('nova.privsep.path.writefile')
def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
mock_realpath, mock_retry):
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(10000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_del):
return raiser
return wrapped
# Deletion was attempted, but device is still there
link = '/dev/link/foo'
delpath = '/sys/block/sde/device/delete'
realpath = '/dev/sde'
mock_realpath.return_value = realpath
mock_stat.side_effect = lambda path: 1
mock_retry.side_effect = retry_timeout
self.assertRaises(
exception.DeviceDeletionException, mgmt.remove_block_dev, link)
mock_realpath.assert_called_once_with(link)
mock_dacw.assert_called_with(delpath, 'a', '1')
|
rahulunair/nova
|
nova/tests/unit/virt/powervm/test_mgmt.py
|
Python
|
apache-2.0
| 7,781
| 0
|
# This file is formatted with black.
# https://github.com/psf/black
import os
import json
import subprocess
import sys
import simplekml
ALT_MODE = simplekml.AltitudeMode.absolute # Absolute altitude is measured from mean sea level
# Current commit
if os.environ.get("TRAVIS"):
COMMIT = os.environ["TRAVIS_COMMIT"]
else:
# For local dev
proc = subprocess.run(
["git", "rev-parse", "HEAD"], capture_output=True, cwd="../../../", text=True
)
if proc.returncode != 0:
print("Git command failed")
sys.exit(1)
COMMIT = proc.stdout.strip()
def get_desc(node):
"""Generate HTML description for a node."""
# Required keys
desc = f"<h1>{node['name']}</h1>"
desc += f"<h2>{node['status']}</h2>"
desc += f"Type: {node['type']}<br>"
desc += f"Altitude: {node['altitude']}<br>"
desc += f"Date Added: {node['dateAdded']}<br>"
desc += f"Group: {node['group']}<br>"
# Optional keys
desc += f"Model: {node.get('model')}<br>"
desc += f"IPv4: {node.get('ipv4')}<br>"
desc += f"IPv6: {node.get('ipv6')}<br>"
desc += f"Mode: {node.get('mode')}<br>"
if node["type"] != "router":
desc += f"Connected Router: {node.get('router')}<br>"
# Antenna specific keys
if node["type"] == "antenna":
desc += f"SSID: {node.get('ssid')}<br>"
desc += "<br>"
desc += f"Antenna Type: {node.get('antennaType')}<br>"
desc += f"Antenna Cone: {node.get('antennaCone')}<br>"
desc += f"Antenna Direction: {node.get('antennaDirection')}<br>"
desc += f"Antenna Distance: {node.get('antennaDistance')}<br>"
desc += f"Antenna Protocol: {node.get('antennaProtocol')}<br>"
desc += "<br>"
# Images
if node.get("images") is not None:
for image in node["images"]:
url = (
"https://raw.githubusercontent.com/tomeshnet/node-list/"
+ COMMIT
+ "/images/"
+ image
)
desc += f'<a href={url}><img alt={image} src={url} width="300"></a><br>'
return "<![CDATA[" + desc + "]]>"
with open("../../../tomeshnet-node-list.json", "r") as f:
nodes = json.load(f)["nodeList"]
kml = simplekml.Kml(name="Toronto Community Network")
active = kml.newfolder(name="Active Nodes", open=0, visibility=1)
proposed = kml.newfolder(name="Proposed Nodes", open=0, visibility=1)
inactive = kml.newfolder(name="Inactive Nodes", open=0, visibility=0)
for node in nodes:
if node["status"] == "active":
folder = active
vis = 1 # Active nodes always visible
# Yellow
icon_url = "http://maps.google.com/mapfiles/kml/pushpin/ylw-pushpin.png"
elif node["status"] == "proposed":
folder = proposed
vis = 1
# Light Blue
icon_url = "http://maps.google.com/mapfiles/kml/pushpin/ltblu-pushpin.png"
else:
# All other nodes are considered inactive
folder = inactive
vis = 0
# Red
icon_url = "http://maps.google.com/mapfiles/kml/pushpin/red-pushpin.png"
pnt = folder.newpoint(
name=node["name"],
altitudemode=ALT_MODE,
coords=[(node["longitude"], node["latitude"], node["altitude"])],
visibility=vis,
description=get_desc(node),
snippet=simplekml.Snippet(), # Empty snippet
)
pnt.style.iconstyle.icon.href = icon_url
kml.save("../../build/tomeshnet-node-list-kml.kml")
|
tomeshnet/node-list
|
ci/scripts/kml/main.py
|
Python
|
gpl-3.0
| 3,457
| 0.001736
|
#! /usr/bin/python
import sys, localconfig, platform, time
#OS Runtime comments
if platform.system() == "Windows":
sys.path.append(localconfig.winpath)
print "You are running the AnkitBot UAA Module for Windows. Sponsored by DQ. :)"
else:
sys.path.append(localconfig.linuxpath)
print "You are running the AnkitBot UAA Module for Linux. Sponsored by DQ. :)"
import wikipedia
import globalfunc as globe
override = False
if not globe.startAllowed(override):
print "Fatal - System Access Denied."
sys.exit(1)
print "System Alert - Program is still running."
globe.main()
globe.checkWait()
globe.pageCleanup()
wikipedia.stopme()
|
QEDK/AnkitBot
|
UAA/UAA.py
|
Python
|
epl-1.0
| 680
| 0.010294
|
from unittest import TestCase
from django.template import Context, Template, VariableNode
from django.test import override_settings
class NodelistTest(TestCase):
def test_for(self):
template = Template('{% for i in 1 %}{{ a }}{% endfor %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
template = Template('{% if x %}{{ a }}{% endif %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
template = Template('{% ifequal x y %}{{ a }}{% endifequal %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
template = Template('{% ifchanged x %}{{ a }}{% endifchanged %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
class ErrorIndexTest(TestCase):
"""
Checks whether index of error is calculated correctly in
template debugger in for loops. Refs ticket #5831
"""
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
def test_correct_exception_index(self):
tests = [
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
]
context = Context({
'range': range(5),
'five': 5,
})
for source, expected_error_source_index in tests:
template = Template(source)
try:
template.render(context)
except (RuntimeError, TypeError) as e:
error_source_index = e.django_template_source[1]
self.assertEqual(error_source_index,
expected_error_source_index)
|
oscaro/django
|
tests/template_tests/test_nodelist.py
|
Python
|
bsd-3-clause
| 2,315
| 0.00216
|
"""Test cases for STEREO Map subclasses.
This particular test file pertains to CORMap.
@Author: Pritish C. (VaticanCameos)
"""
import os
import glob
from sunpy.map.sources.stereo import CORMap
from sunpy.map import Map
import sunpy.data.test
path = sunpy.data.test.rootdir
fitspath = glob.glob(os.path.join(path, "cor1_20090615_000500_s4c1A.fts"))
cor = Map(fitspath)
# COR Tests
def test_fitstoCOR():
"""Tests the creation of CORMap using FITS."""
assert isinstance(cor, CORMap)
def test_is_datasource_for():
"""Test the is_datasource_for method of CORMap.
Note that header data to be provided as an argument
can be a MapMeta object."""
assert cor.is_datasource_for(cor.data, cor.meta)
def test_measurement():
"""Tests the measurement property of the CORMap object."""
assert cor.measurement == "white-light"
def test_observatory():
"""Tests the observatory property of the CORMap object."""
assert cor.observatory == "STEREO A"
|
Alex-Ian-Hamilton/sunpy
|
sunpy/map/sources/tests/test_cor_source.py
|
Python
|
bsd-2-clause
| 977
| 0.004094
|
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import uuid
from collections import MutableMapping, MutableSequence
from datetime import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.module_utils.six import iteritems, string_types, integer_types
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
'''
Simple encoder class to deal with JSON encoding of internal
types like HostVars
'''
def default(self, o):
if isinstance(o, HostVars):
return dict(o)
else:
return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
'''Make verbose, human readable JSON'''
# python-2.6's json encoder is buggy (can't encode hostvars)
if sys.version_info < (2, 7):
try:
import simplejson
except ImportError:
pass
else:
try:
major = int(simplejson.__version__.split('.')[0])
except:
pass
else:
if major >= 2:
return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
try:
return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%d-%m %H:%M:%S"):
return datetime.strptime(string, format)
def quote(a):
''' return its argument quoted for shell usage '''
return shlex_quote(a)
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [ g for g in glob.glob(pathname) if os.path.isfile(g) ]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise errors.AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
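# Usage sketch (illustrative, not part of the original file): backreference
# arguments select named or numbered groups from the match, e.g.
#   regex_search('foo=bar', r'foo=(?P<value>\w+)', '\\g<value>')  -> ['bar']
#   regex_search('foo=bar', r'foo=(\w+)')                         -> 'foo=bar'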
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def regex_escape(string):
'''Escape all regular expressions special characters from STRING.'''
return re.escape(string)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise errors.AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try: # see if hash is supported
h = hashlib.new(hashtype)
except:
return None
h.update(to_bytes(data, errors='surrogate_then_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
# TODO: find a way to construct dynamically from system
cryptmethod= {
'md5': '1',
'blowfish': '2a',
'sha256': '5',
'sha512': '6',
}
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
if hashtype in ['md5']:
saltsize = 8
else:
saltsize = 16
saltcharset = string.ascii_letters + string.digits + '/.'
salt = ''.join([r.choice(saltcharset) for _ in range(saltsize)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
encrypted = crypt.crypt(password, saltstring)
else:
if hashtype == 'blowfish':
cls = passlib.hash.bcrypt
else:
cls = getattr(passlib.hash, '%s_crypt' % hashtype)
encrypted = cls.encrypt(password, salt=salt)
return encrypted
return None
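# Usage sketch (illustrative, not part of the original file): with no salt
# given, a random one is generated and a crypt(3)-style string is returned,
# e.g. get_encrypted_password('secret', hashtype='sha512') yields something
# of the form '$6$<16-char salt>$<hash>'; unsupported hashtypes return None.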
def to_uuid(string):
return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
def mandatory(a):
from jinja2.runtime import Undefined
''' Make a variable mandatory '''
if isinstance(a, Undefined):
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
def combine(*terms, **kwargs):
recursive = kwargs.get('recursive', False)
if len(kwargs) > 1 or (len(kwargs) == 1 and 'recursive' not in kwargs):
raise errors.AnsibleFilterError("'recursive' is the only valid keyword argument")
for t in terms:
if not isinstance(t, dict):
raise errors.AnsibleFilterError("|combine expects dictionaries, got " + repr(t))
if recursive:
return reduce(merge_hash, terms)
else:
return dict(itertools.chain(*map(iteritems, terms)))
def comment(text, style='plain', **kw):
# Predefined comment types
comment_styles = {
'plain': {
'decoration': '# '
},
'erlang': {
'decoration': '% '
},
'c': {
'decoration': '// '
},
'cblock': {
'beginning': '/*',
'decoration': ' * ',
'end': ' */'
},
'xml': {
'beginning': '<!--',
'decoration': ' - ',
'end': '-->'
}
}
# Pointer to the right comment type
style_params = comment_styles[style]
if 'decoration' in kw:
prepostfix = kw['decoration']
else:
prepostfix = style_params['decoration']
# Default params
p = {
'newline': '\n',
'beginning': '',
'prefix': (prepostfix).rstrip(),
'prefix_count': 1,
'decoration': '',
'postfix': (prepostfix).rstrip(),
'postfix_count': 1,
'end': ''
}
# Update default params
p.update(style_params)
p.update(kw)
# Compose substrings for the final string
str_beginning = ''
if p['beginning']:
str_beginning = "%s%s" % (p['beginning'], p['newline'])
str_prefix = ''
if p['prefix']:
if p['prefix'] != p['newline']:
str_prefix = str(
"%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
else:
str_prefix = str(
"%s" % (p['newline'])) * int(p['prefix_count'])
str_text = ("%s%s" % (
p['decoration'],
# Prepend each line of the text with the decorator
text.replace(
p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
# Remove trailing spaces when only decorator is on the line
"%s%s" % (p['decoration'], p['newline']),
"%s%s" % (p['decoration'].rstrip(), p['newline']))
str_postfix = p['newline'].join(
[''] + [p['postfix'] for x in range(p['postfix_count'])])
str_end = ''
if p['end']:
str_end = "%s%s" % (p['newline'], p['end'])
# Return the final string
return "%s%s%s%s%s" % (
str_beginning,
str_prefix,
str_text,
str_postfix,
str_end)
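# Usage sketch (illustrative, not part of the original file): with the default
# 'plain' style the text is wrapped in decorated prefix/postfix lines, e.g.
#   comment('Testing') -> '#\n# Testing\n#'
# which renders as:
#   #
#   # Testing
#   #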
def extract(item, container, morekeys=None):
from jinja2.runtime import Undefined
value = container[item]
if value is not Undefined and morekeys is not None:
if not isinstance(morekeys, list):
morekeys = [morekeys]
try:
value = reduce(lambda d, k: d[k], morekeys, value)
except KeyError:
value = Undefined()
return value
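# Usage sketch (illustrative, not part of the original file): extract looks an
# item up in a container and can descend further via morekeys, e.g.
#   extract('a', {'a': {'b': 42}}, morekeys='b') -> 42
# In a template this reads as {{ 'a' | extract(mydict, morekeys='b') }}.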
def failed(*a, **kw):
''' Test if task result yields failed '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|failed expects a dictionary")
rc = item.get('rc', 0)
failed = item.get('failed', False)
if rc != 0 or failed:
return True
else:
return False
def success(*a, **kw):
''' Test if task result yields success '''
return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|changed expects a dictionary")
if 'changed' not in item:
changed = False
if ('results' in item # some modules return a 'results' key
and isinstance(item['results'], MutableSequence)
and isinstance(item['results'][0], MutableMapping)):
for result in item['results']:
changed = changed or result.get('changed', False)
else:
changed = item.get('changed', False)
return changed
def skipped(*a, **kw):
''' Test if task result yields skipped '''
item = a[0]
if not isinstance(item, MutableMapping):
raise errors.AnsibleFilterError("|skipped expects a dictionary")
skipped = item.get('skipped', False)
return skipped
@environmentfilter
def do_groupby(environment, value, attribute):
"""Overridden groupby filter for jinja2, to address an issue with
jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
has repr that prevents ansible.template.safe_eval.safe_eval from being
able to parse and eval the data.
jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
>=2.9.5 uses a standard tuple repr on the namedtuple.
The adaptation here, is to run the jinja2 `do_groupby` function, and
cast all of the namedtuples to a regular tuple.
See https://github.com/ansible/ansible/issues/20098
We may be able to remove this in the future.
"""
return [tuple(t) for t in _do_groupby(environment, value, attribute)]
def b64encode(string):
return to_text(base64.b64encode(to_bytes(string, errors='surrogate_then_strict')))
def b64decode(string):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_then_strict')))
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# jinja2 overrides
'groupby': do_groupby,
# base 64
'b64decode': b64decode,
'b64encode': b64encode,
# uuid
'to_uuid': to_uuid,
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': to_yaml,
'to_nice_yaml': to_nice_yaml,
'from_yaml': from_yaml,
#date
'to_datetime': to_datetime,
# path
'basename': partial(unicode_wrap, os.path.basename),
'dirname': partial(unicode_wrap, os.path.dirname),
'expanduser': partial(unicode_wrap, os.path.expanduser),
'realpath': partial(unicode_wrap, os.path.realpath),
'relpath': partial(unicode_wrap, os.path.relpath),
'splitext': partial(unicode_wrap, os.path.splitext),
'win_basename': partial(unicode_wrap, ntpath.basename),
'win_dirname': partial(unicode_wrap, ntpath.dirname),
'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
# value as boolean
'bool': to_bool,
# quote string for shell usage
'quote': quote,
# hash filters
# md5 hex digest of string
'md5': md5s,
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
'checksum': checksum_s,
# generic hashing
'password_hash': get_encrypted_password,
'hash': get_hash,
# file glob
'fileglob': fileglob,
# regex
'regex_replace': regex_replace,
'regex_escape': regex_escape,
'regex_search': regex_search,
'regex_findall': regex_findall,
# ? : ;
'ternary': ternary,
# list
# random stuff
'random': rand,
'shuffle': randomize_list,
# undefined
'mandatory': mandatory,
# merge dicts
'combine': combine,
# comment-style decoration
'comment': comment,
# array and dict lookups
'extract': extract,
# failure testing
'failed' : failed,
'failure' : failed,
'success' : success,
'succeeded' : success,
# changed testing
'changed' : changed,
'change' : changed,
# skip testing
'skipped' : skipped,
'skip' : skipped,
# debug
'type_debug': lambda o: o.__class__.__name__,
}
|
Sodki/ansible
|
lib/ansible/plugins/filter/core.py
|
Python
|
gpl-3.0
| 17,081
| 0.003396
|
from sa_tools.base.magic import MagicMixin
from sa_tools.inbox import Inbox
from sa_tools.session import SASession
from sa_tools.index import Index
import os
import pickle
import sys
def py_ver() -> str:
return str(sys.version_info.major)
class APSession(object):
def __init__(self, username: str, passwd: str=None, save_session: bool=False, *args, **kwargs):
self.username = username
self.passwd = passwd
self._session_bak = \
'.' + username.replace(' ', '_') + py_ver() + '.bak'
self.session = self._get_session(save_session=save_session)
del passwd
del self.passwd
def _get_session(self, save_session: bool=True) -> SASession:
backup_exists = os.path.exists(self._session_bak)
# session = None
if backup_exists:
session = self._load_session()
else:
session = SASession(self.username, self.passwd)
if save_session:
self._save_session(session)
return session
def _load_session(self) -> None:
with open(self._session_bak, 'rb') as old_session:
print("Loading from backup: " + self._session_bak)
session = pickle.load(old_session)
return session
def _save_session(self, session: SASession) -> None:
with open(self._session_bak, 'wb') as session_file:
pickle.dump(session, session_file)
class AwfulPy(APSession, MagicMixin):
def __init__(self, username, *args, **kwargs):
super().__init__(username, *args, **kwargs)
self.index = Index(self.session)
self.inbox = Inbox(self.session)
self.name = "awful.py"
self.version = "v0.2014.08.24"
def __repr__(self):
info = '[' + self.name + ' ' + self.version + '] '
acct = 'Logged in as ' + self.username
login_time = ' on ' + self.session.login_time
return info + acct + login_time
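# --- illustrative usage sketch (hypothetical credentials, not part of the original module) ---
# client = AwfulPy('some_user', passwd='some_password', save_session=True)
# print(client)   # -> "[awful.py v0.2014.08.24] Logged in as some_user on <login time>"
# client.index and client.inbox are then built from the authenticated session.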
|
thismachinechills/awful.py
|
awful.py
|
Python
|
gpl-3.0
| 1,950
| 0.004103
|
# Authors: Aaron Qiu <zqiu@ulg.ac.be>,
# Antonio Sutera <a.sutera@ulg.ac.be>,
# Arnaud Joly <a.joly@ulg.ac.be>,
# Gilles Louppe <g.louppe@ulg.ac.be>,
# Vincent Francois <v.francois@ulg.ac.be>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
from itertools import chain
import numpy as np
from sklearn.externals.joblib import Parallel, delayed, cpu_count
from utils import scale
def _partition_X(X, n_jobs):
"""Private function used to partition X between jobs."""
n_nodes = X.shape[1]
# Compute the number of jobs
n_jobs = min(cpu_count() if n_jobs == -1 else n_jobs, n_nodes)
# Partition estimators between jobs
n_node_per_job = (n_nodes // n_jobs) * np.ones(n_jobs, dtype=np.int)
n_node_per_job[:n_nodes % n_jobs] += 1
starts = np.cumsum(n_node_per_job)
return n_jobs, [0] + starts.tolist()
def _parallel_count(X, start, end):
"""Private function used to compute a batch of score within a job."""
count = np.zeros((end - start, X.shape[1]))
for index, jx in enumerate(range(start, end)):
X_jx_bot = X[:-1, jx] + 0.2
X_jx_top = X[:-1, jx] + 0.5
for j in range(X.shape[1]):
if j == jx:
continue
count[index, j] = ((X[1:, j] > X_jx_bot) &
(X[1:, j] < X_jx_top)).sum()
return count
def make_prediction_directivity(X, threshold=0.12, n_jobs=1):
"""Score neuron connectivity using a precedence measure
Parameters
----------
X : numpy array of shape (n_samples, n_nodes)
Fluorescence signals
threshold : float, (default=0.12)
Threshold value for hard thresholding filter:
x_new[i] = x[i] if x[i] >= threshold else 0.
n_jobs : integer, optional (default=1)
The number of jobs to run the algorithm in parallel.
If -1, then the number of jobs is set to the number of cores.
Returns
-------
score : numpy array of shape (n_nodes, n_nodes)
Pairwise neuron connectivity score.
"""
# Perform filtering
X_new = np.zeros((X.shape))
for i in range(1, X.shape[0] - 1):
for j in range(X.shape[1]):
X_new[i, j] = (X[i, j] + 1 * X[i - 1, j] + 0.8 * X[i - 2, j] +
0.4 * X[i - 3, j])
X_new = np.diff(X_new, axis=0)
thresh1 = X_new < threshold * 1
thresh2 = X_new >= threshold * 1
X_new[thresh1] = 0
X_new[thresh2] = pow(X_new[thresh2], 0.9)
# Score directivity
n_jobs, starts = _partition_X(X, n_jobs)
all_counts = Parallel(n_jobs=n_jobs)(
delayed(_parallel_count)(X_new, starts[i], starts[i + 1])
for i in range(n_jobs))
count = np.vstack(list(chain.from_iterable(all_counts)))
return scale(count - np.transpose(count))
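# --- illustrative usage sketch (synthetic data, not from the original submission) ---
# X must be shaped (n_samples, n_nodes); the returned matrix is scale() applied to
# the antisymmetric difference of the pairwise precedence counts.
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   scores = make_prediction_directivity(rng.rand(1000, 10), threshold=0.12, n_jobs=1)
#   scores.shape   # -> (10, 10)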
|
orlandi/connectomicsPerspectivesPaper
|
participants_codes/aaagv/directivity.py
|
Python
|
mit
| 2,853
| 0
|
# Copyright 2011 Kyriakos Zarifis
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is an L2 learning switch derived originally from NOX's pyswitch
example. It is now a demonstration of the ofcommand library for constructing
OpenFlow messages.
"""
from time import time
# TODO: mac_to_str and mac_to_int aren't currently defined in packet_utils...
#from pox.lib.packet.packet_utils import mac_to_str, mac_to_int
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.tcp import tcp
from pox.lib.packet.udp import udp
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.icmp import icmp
from pox.lib.packet.ethernet import ethernet
from pox.core import core
from pox.lib.revent import *
from pox.lib.addresses import EthAddr
log = core.getLogger()
import pox.openflow.ofcommand as ofcommand
class dumb_l2_switch (EventMixin):
def __init__ (self):
log.info("Starting")
self.listenTo(core)
self.st = {}
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
def _handle_PacketIn (self, event):
"""Packet entry method.
Drop LLDP packets (or we get confused) and attempt learning and forwarding
"""
con = event.connection
dpid = event.connection.dpid
inport = event.port
packet = event.parse()
buffer_id = event.ofp.buffer_id
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if not con in self.st:
log.info('registering new switch ' + str(dpid))
self.st[con] = {}
# don't forward lldp packets
if packet.type == ethernet.LLDP_TYPE:
return
# learn MAC on incoming port
self.do_l2_learning(con, inport, packet)
# forward packet
self.forward_l2_packet(con, inport, packet, packet.arr, buffer_id)
def do_l2_learning(self, con, inport, packet):
"""Given a packet, learn the source and peg to a switch/inport
"""
# learn MAC on incoming port
srcaddr = EthAddr(packet.src)
#if ord(srcaddr[0]) & 1:
# return
if self.st[con].has_key(srcaddr.toStr()): # change to raw?
# we had already heard from this switch
dst = self.st[con][srcaddr.toStr()] # raw?
if dst[0] != inport:
# but from a different port
log.info('MAC has moved from '+str(dst)+' to '+str(inport))
else:
return
else:
log.info('learned MAC '+srcaddr.toStr()+' on Switch %s, Port %d'% (con.dpid,inport))
# learn or update timestamp of entry
self.st[con][srcaddr.toStr()] = (inport, time(), packet) # raw?
# Replace any old entry for (switch,mac).
#mac = mac_to_int(packet.src)
def forward_l2_packet(self, con, inport, packet, buf, bufid):
"""If we've learned the destination MAC set up a flow and
send only out of its inport. Else, flood.
"""
dstaddr = EthAddr(packet.dst)
#if not ord(dstaddr[0]) & 1 and # what did this do?
if self.st[con].has_key(dstaddr.toStr()): # raw?
prt = self.st[con][dstaddr.toStr()] # raw?
if prt[0] == inport:
log.warning('**warning** learned port = inport')
ofcommand.floodPacket(con, inport, packet, buf, bufid)
else:
# We know the outport, set up a flow
log.info('installing flow for ' + str(packet))
match = ofcommand.extractMatch(packet)
actions = [ofcommand.Output(prt[0])]
ofcommand.addFlowEntry(con, inport, match, actions, bufid)
# Separate bufid, make addFlowEntry() only ADD the entry
# send/wait for Barrier
# sendBufferedPacket(bufid)
else:
# haven't learned destination MAC. Flood
ofcommand.floodPacket(con, inport, packet, buf, bufid)
'''
add arp cache timeout?
# Timeout for cached MAC entries
CACHE_TIMEOUT = 5
def timer_callback():
"""Responsible for timing out cache entries. Called every 1 second.
"""
global st
curtime = time()
for con in st.keys():
for entry in st[con].keys():
if (curtime - st[con][entry][1]) > CACHE_TIMEOUT:
con.msg('timing out entry '+mac_to_str(entry)+" -> "+str(st[con][entry][0])+' on switch ' + str(con))
st[con].pop(entry)
'''
|
lewischeng-ms/pox
|
pox/forwarding/l2_ofcommand_learning.py
|
Python
|
gpl-3.0
| 5,023
| 0.014334
|
import _plotly_utils.basevalidators
class DeltaValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="delta", parent_name="indicator", **kwargs):
super(DeltaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Delta"),
data_docs=kwargs.pop(
"data_docs",
"""
decreasing
:class:`plotly.graph_objects.indicator.delta.De
creasing` instance or dict with compatible
properties
font
Set the font used to display the delta
increasing
:class:`plotly.graph_objects.indicator.delta.In
creasing` instance or dict with compatible
properties
position
Sets the position of delta with respect to the
number.
reference
Sets the reference value to compute the delta.
By default, it is set to the current value.
relative
Show relative change
valueformat
Sets the value formatting rule using d3
formatting mini-language which is similar to
those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
""",
),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/indicator/_delta.py
|
Python
|
mit
| 1,527
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-11-06 11:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rdrf', '0081_clinicaldata_active'),
]
operations = [
migrations.AlterField(
model_name='surveyrequest',
name='survey_name',
field=models.CharField(max_length=80),
),
]
|
muccg/rdrf
|
rdrf/rdrf/migrations/0082_auto_20181106_1100.py
|
Python
|
agpl-3.0
| 461
| 0
|
import os
import chardet
from humanfriendly import format_size
import pygments
import pygments.lexers
import pygments.lexers.special
import pygments.formatters
from pygments.util import ClassNotFound
from mako.lookup import TemplateLookup
from mfr.core import extension
from mfr.extensions.codepygments import settings
from mfr.extensions.codepygments import exceptions
class CodePygmentsRenderer(extension.BaseRenderer):
DEFAULT_LEXER = pygments.lexers.special.TextLexer
TEMPLATE = TemplateLookup(
directories=[
os.path.join(os.path.dirname(__file__), 'templates')
]).get_template('viewer.mako')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metrics.add('pygments_version', pygments.__version__)
def render(self):
file_size = os.path.getsize(self.file_path)
if file_size > settings.MAX_SIZE:
raise exceptions.FileTooLargeError(
'Text files larger than {} are not rendered. Please download '
'the file to view.'.format(format_size(settings.MAX_SIZE, binary=True)),
file_size=file_size,
max_size=settings.MAX_SIZE,
extension=self.metadata.ext,
)
with open(self.file_path, 'rb') as fp:
body = self._render_html(fp, self.metadata.ext)
return self.TEMPLATE.render(base=self.assets_url, body=body)
@property
def file_required(self):
return True
@property
def cache_result(self):
return True
def _render_html(self, fp, ext, *args, **kwargs):
"""Generate an html representation of the file
:param fp: File pointer
:param ext: File name extension
:return: Content html
"""
formatter = pygments.formatters.HtmlFormatter()
data = fp.read()
content, encoding = None, 'utf-8'
try:
content = data.decode(encoding)
except UnicodeDecodeError:
detected_encoding = chardet.detect(data)
encoding = detected_encoding.get('encoding', None)
if encoding is None:
raise exceptions.FileDecodingError(
message='Unable to detect encoding of source file.',
extension=ext,
category='undetectable_encoding',
code=400,
)
try:
content = data.decode(encoding)
except UnicodeDecodeError as err:
raise exceptions.FileDecodingError(
message='Unable to decode file as {}.'.format(encoding),
extension=ext,
category='undecodable',
original_exception=err,
code=400,
)
if content is None:
raise exceptions.FileDecodingError(
message='File decoded to undefined using encoding "{}"'.format(encoding),
extension=ext,
category='decoded_to_undefined',
code=500,
)
self.metrics.merge({'encoding': encoding, 'default_lexer': False})
try:
# check if there is a lexer available for more obscure file types
if ext in settings.lexer_lib.keys():
lexer = pygments.lexers.get_lexer_by_name(settings.lexer_lib[ext])
else:
lexer = pygments.lexers.guess_lexer_for_filename(ext, content)
except ClassNotFound:
self.metrics.add('default_lexer', True)
lexer = self.DEFAULT_LEXER()
self.metrics.add('lexer', lexer.name)
return pygments.highlight(content, lexer, formatter)
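# --- illustrative sketch of the decode-then-highlight pattern used above ---
# (standalone, outside the MFR renderer; the file name is hypothetical)
#
#   import chardet, pygments, pygments.lexers, pygments.formatters
#   raw = open('example.py', 'rb').read()
#   enc = chardet.detect(raw).get('encoding') or 'utf-8'
#   text = raw.decode(enc)
#   lexer = pygments.lexers.guess_lexer_for_filename('example.py', text)
#   html = pygments.highlight(text, lexer, pygments.formatters.HtmlFormatter())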
|
CenterForOpenScience/modular-file-renderer
|
mfr/extensions/codepygments/render.py
|
Python
|
apache-2.0
| 3,749
| 0.0008
|
import sys
from math import ceil
def main():
AREA = 1.76
w = float(input())
h = float(input())
print(ceil(w * h / AREA))
if __name__ == "__main__":
sys.exit(int(main() or 0))
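# Worked example: w = 2.0 and h = 2.0 give an area of 4.0; 4.0 / 1.76 ~= 2.27,
# and ceil(2.27) = 3, so the script prints 3.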
|
ttitto/python
|
Basics/SampleExam/MostCommonCharacter/paint_bottles.py
|
Python
|
mit
| 196
| 0.015306
|
#!/usr/bin/env python
import json
import sys
import argparse
from clingo import Control, Number
class App:
def __init__(self, args):
self.control = Control()
self.args = args
self.horizon = 0
self.objects = 0
self.end = None
def show(self, model):
if not self.args.quiet:
print("Model: {}".format(model))
def ground(self, kind):
count = self.objects + self.horizon + 1
parts = [("expand", [Number(count)])]
if self.args.scratch and count > 1:
self.control = Control()
for source in self.args.file: self.control.load(source)
for i in range(0, self.objects): parts.append(("object", [Number(i + 1), Number(count)]))
for i in range(0, self.horizon): parts.append(("horizon", [Number(i + 1), Number(count)]))
if self.args.scratch or count == 1:
for option in self.args.option:
setattr(self.control.configuration, option[0], option[1])
parts.append(("base", []))
if kind:
self.objects += 1
parts.append(("object", [Number(self.objects), Number(count)]))
else:
self.horizon += 1
parts.append(("horizon", [Number(self.horizon), Number(count)]))
if self.args.verbose:
print("")
print("Objects: {}".format(Number(self.objects)))
print("Horizon: {}".format(Number(self.horizon)))
self.control.ground(parts)
if self.args.verbose:
print("Solving: {}".format(count))
def run(self):
for source in self.args.file:
self.control.load(source)
if self.args.maxobj is None:
self.end = self.control.get_const("n").number
else:
self.end = self.args.maxobj
while self.objects < self.end:
self.ground(True)
while True:
ret = self.control.solve(on_model=self.show)
if self.args.stats:
args = {"sort_keys": True, "indent": 0, "separators": (',', ': ')}
stats = {}
for x in ["step", "enumerated", "time_cpu", "time_solve", "time_sat", "time_unsat", "time_total"]:
stats[x] = self.control.statistics[x]
for x in ["lp", "ctx", "solvers"]:
for y in self.control.statistics[x]:
stats[y] = self.control.statistics[x][y]
print(json.dumps(stats, **args))
if ret.satisfiable:
break
self.ground(False)
parser = argparse.ArgumentParser(description="Gradually expand logic programs.", epilog="""Example: main.py -x -q -s -v -m 42 -o solve.models 0 encoding.lp instance.lp""")
parser.add_argument("-x", "--scratch", action='store_true', help="start each step from scratch (single-shot solving)")
parser.add_argument("-q", "--quiet", action='store_true', help="do not print models")
parser.add_argument("-s", "--stats", action='store_true', help="print solver statistics")
parser.add_argument("-v", "--verbose", action='store_true', help="print progress information")
parser.add_argument("-m", "--maxobj", type=int, metavar="NUM", default=None, help="maximum number of introduced objects")
parser.add_argument("-o", "--option", nargs=2, metavar=("OPT", "VAL"), action="append", default=[], help="set sover options")
parser.add_argument("file", nargs="*", default=[], help="gringo source files")
args = parser.parse_args()
if args.maxobj is not None and args.maxobj < 1:
parser.error("maximum number of objects must be positive")
App(args).run()
|
potassco/clingo
|
examples/clingo/expansion/main.py
|
Python
|
mit
| 3,702
| 0.006483
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""DoJSON model and rules for CDS to INSPIRE HEP MARC."""
from __future__ import absolute_import, division, print_function
from . import rules # noqa: F401
from .model import cds2hep_marc # noqa: F401
|
inspirehep/inspire-dojson
|
inspire_dojson/cds/__init__.py
|
Python
|
gpl-3.0
| 1,112
| 0
|
"""
Django settings for detest project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&^ck$n8qsz2e#s+z6%b%(f$r4)2!w4fvz7m9ks@blx=(hq*efu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'detest_ui',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'detest.urls'
WSGI_APPLICATION = 'detest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'detest',
'USER': 'detest',
'PASSWORD': 'detest',
'HOST': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
},
'detest': {
'handlers': ['console'],
'level': 'DEBUG',
},
'detest_ui': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
LOGGING = {}  # NOTE: this reassignment discards the LOGGING configuration defined above
|
tahpee/detest
|
detest/detest/settings.py
|
Python
|
mit
| 2,968
| 0.000337
|
from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import LoginManager, login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])
@login_manager.user_loader
def load_user(user_id):
user_entries = StaffUserEntry.objects(id = user_id)
if user_entries.count() != 1:
return None
currUser = user_entries[0]
user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
return user
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def get_user(email):
entries = StaffUserEntry.objects(email = email)
if entries.count() == 1:
return entries[0]
return None
def verify_user(email, password):
currUser = get_user(email)
if currUser is None:
return None
hashed = currUser.hashed
if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
return load_user(currUser.id)
else:
return None
def login(email):
user = load_user(get_user(email).id)
if user != None:
login_user(user)
identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid))
else:
raise UserDoesNotExistError
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())
def tokenize_email(email):
return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])
def detokenize_email(token):
return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)
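# --- illustrative round trip (assumes CONFIG is populated; max_age=86400 s is 24 h) ---
# token = tokenize_email("user@example.com")
# detokenize_email(token)   # -> "user@example.com" while the token is less than a day old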
def send_recovery_email(email):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
token = tokenize_email(email)
message = sendgrid.Mail()
message.add_to(email)
message.set_from("noreply@hackbca.com")
message.set_subject("hackBCA III - Account Recovery")
message.set_html("<p></p>")
message.add_filter("templates", "enable", "1")
message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
message.add_substitution("prefix", "staff")
message.add_substitution("token", token)
status, msg = sg.send(message)
def change_name(email, firstname, lastname):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
account.firstname = firstname
account.lastname = lastname
account.save()
login(email) #To update navbar
def change_password(email, password):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
account.hashed = hashed
account.save()
def get_user_attr(email, attr):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
return getattr(user, attr)
def set_user_attr(email, attr, value):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
setattr(user, attr, value)
user.save()
|
hackBCA/missioncontrol
|
application/mod_user/controllers.py
|
Python
|
mit
| 3,822
| 0.026688
|
from django.apps import AppConfig
class UserregistrationConfig(AppConfig):
name = 'userregistration'
|
Emilv2/mandarinrecording
|
registration/apps.py
|
Python
|
agpl-3.0
| 107
| 0
|
import utils
from flask import render_template, redirect, request, session, url_for, json, jsonify
from . import murmurbp
from .User import User
# User Views
@murmurbp.route("/users", methods = ['GET'])
def get_all_users():
u = User()
ul = utils.obj_to_dict(u.get_all())
data = [{'UserId': k, 'UserName': v} for k, v in ul.iteritems()]
resp = jsonify(users=data)
return resp, 200
@murmurbp.route("/users/<int:id>", methods = ['GET'])
def get_user(id):
u = User()
data = utils.obj_to_dict(u.get(id))
resp = jsonify(data)
return resp, 200
@murmurbp.route("/users", methods = ['POST'])
def add_user():
u = User()
user = json.loads('{"UserName": "TestUser7"}')
new_user = u.add(user)
data = utils.obj_to_dict(new_user)
resp = jsonify(data)
return resp, 200
@murmurbp.route("/users/<int:id>", methods = ['DELETE'])
def delete_user(id):
u = User()
u.delete(id)
return jsonify(), 201
from .Channel import Channel
# Channel Views
@murmurbp.route("/channels", methods = ['GET'])
def get_all_channels():
c = Channel()
cl = utils.obj_to_dict(c.get_all())
data = [ v for k, v in cl.iteritems()]
resp = jsonify(channels=data)
return resp, 200
@murmurbp.route("/channels", methods = ['POST'])
def add_channel():
c = Channel()
name = request.form['channelName']
parent = request.form['parent']
new_channel = c.add_channel(name, parent)
data = utils.obj_to_dict(new_channel)
resp = jsonify(data)
return resp, 200
@murmurbp.route("/channels/<int:id>", methods = ['DELETE'])
def delete_channel(id):
c = Channel()
c.delete(id)
return jsonify(), 201
from .ACLGroup import ACL, Group
# ACL and Group Views
@murmurbp.route("/acls/<int:channel_id>", methods = ['GET'])
def get_all_acls(channel_id):
a = ACL()
data = utils.obj_to_dict(a.get_all(channel_id))
resp = jsonify(acls=data)
return resp, 200
@murmurbp.route("/groups/<int:channel_id>", methods = ['GET'])
def get_all_groups(channel_id):
g = Group()
data = utils.obj_to_dict(g.get_all(channel_id))
resp = jsonify(groups=data)
return resp, 200
@murmurbp.route("/acls/<int:channel_id>", methods = ['POST'])
def add_acl_to_channel(channel_id):
# TODO: load json object
a = ACL()
acl = json.loads('{"applyHere": true,"applySubs": true,"userid": 1,"group": "admin","allow": 1024,"deny": 0}')
data = a.add(channel_id, acl)
resp = jsonify(data)
return resp, 200
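# --- illustrative HTTP usage of the routes above (assumes the blueprint is mounted at the root) ---
# GET    /users                 -> {"users": [{"UserId": ..., "UserName": ...}, ...]}
# GET    /users/<id>            -> a single user object
# POST   /users                 -> currently adds the hard-coded "TestUser7"
# DELETE /users/<id>            -> 201 with an empty JSON body
# GET/POST /channels, DELETE /channels/<id> (POST form fields: channelName, parent)
# GET    /acls/<channel_id>, GET /groups/<channel_id>, POST /acls/<channel_id>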
|
aqisnotliquid/minder2
|
app/murmur/views.py
|
Python
|
mit
| 2,492
| 0.014045
|
#
# This file is part of TSmells
#
# TSmells is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# TSmells is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with TSmells; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Copyright 2007-2008 Manuel Breugelmans <manuel.breugelmans@student.ua.ac.be>
#
from java.awt import Dimension, Rectangle
from javax.swing import JPanel
from com.hp.hpl.guess.ui import Dockable
class TDockable(JPanel, Dockable):
#
# Implementation of Dockable interface
#
def mouseEnterEdge(self, edge):
pass
def mouseLeaveNode(self, node):
pass
def mouseLeaveEdge(self, edge):
pass
def getPreferredSize(self):
return Dimension(200,600)
def getDefaultFrameBounds(self):
return Rectangle(50, 50, 300, 600)
def getDirectionPreference(self):
''' prefer vertical orientation '''
return 2 # vertical, see com.hp.hpl.guess.ui.MainUIWindow.java
def opening(self, state):
self.visible = state
def attaching(self, state):
pass
def getTitle(self):
return("")
def getWindow(self):
return self.myParent
def setWindow(self,gjf):
self.myParent = gjf
|
carvalhomb/tsmells
|
src/viz/gui/TDockable.py
|
Python
|
gpl-2.0
| 1,659
| 0.007836
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyWrapt(PythonPackage):
"""Module for decorators, wrappers and monkey patching."""
homepage = "https://github.com/GrahamDumpleton/wrapt"
url = "https://pypi.io/packages/source/w/wrapt/wrapt-1.10.10.tar.gz"
version('1.10.10', '97365e906afa8b431f266866ec4e2e18')
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/py-wrapt/package.py
|
Python
|
lgpl-2.1
| 1,546
| 0.000647
|
#encoding: utf-8
from __future__ import unicode_literals
TITLE_TO_SLUG = {
'איגוד הביטקוין': 'bitcoin-org-il',
'אליאב': 'eliav',
'* בדיקות פרודקשיין *': 'production-test',
'בי״ס עמית': 'amit',
'הבר קיימא': 'barkayma',
'הסדנא לידע ציבורי': 'hasadna',
'הפורום לממשל פתוח': 'open-government',
'התנועה לאיכות השלטון': 'mqg',
'מעיין ברוך': 'maayan-baruch',
'מרצ': 'meretz',
'נטף': 'nataf',
'נען': 'naan',
'קהילה פתוחה': 'open-community',
}
|
nonZero/OpenCommunity
|
src/communities/legacy_mapping.py
|
Python
|
bsd-3-clause
| 574
| 0.002227
|
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt
Freq=np.array([30,40,45,50,53,55,60,65,70,80,90,95,98,100,110,120])
Db=np.array([70.5,78.6,83.2,88.4,87.5,86.7,85.2,83.9,85.1,88,95.7,100.4,100.4,99.2,94.7,94.9])
plt.xlabel('Frecuencia')
plt.ylabel('Decibel')
plt.title('DecibelvsFreq a 0.1volts')
#for i in range(len(Freq)):
# plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))
plt.axis([0, 330, 50, 130])
plt.plot(Freq,Db,'bo',Freq,Db,'k')
plt.grid(True)
plt.show()
|
P1R/cinves
|
TrabajoFinal/tubo350cm/2-DbvsFreq/tubo2huecos/DbvsFreq-Ampde0.1v-2huequitos.py
|
Python
|
apache-2.0
| 511
| 0.078278
|
#import os
import pickle
import glob
#import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
#from data_control_no_images.read import Read
listing = glob.glob('F:/Project_Cars_Data/1lap-fullspeed/Watkins Glen International - Short Circuit' + '/*.pkl')
x = []
y = []
throttle = []
raw_throttle = []
brake = []
raw_brake = []
steering = []
raw_steering = []
xy = []
for filename in tqdm(listing):
with open(filename, 'rb') as file_data:
project_cars_state = pickle.load(file_data)
controller_state = pickle.load(file_data)
# remove non-flying-lap data
if project_cars_state.mParticipantInfo[0].mCurrentLapDistance == 0.0:
continue
position = project_cars_state.mParticipantInfo[0].mWorldPosition
x.append(round(position[0]))
y.append(round(position[2]))
throttle.append(controller_state['right_trigger']/255)# 0 - 255
brake.append(controller_state['left_trigger']/255) #0 - 255
steering.append(controller_state['thumb_lx']/32767) #-32768 - 32767
#steering.append(project_cars_state.mSteering)
raw_steering.append(project_cars_state.mUnfilteredSteering)
raw_brake.append(project_cars_state.mUnfilteredBrake)
raw_throttle.append(project_cars_state.mUnfilteredThrottle)
xy.append([position[0], position[2]])
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=steering)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller steering')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_steering)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw steering')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=throttle)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller throttle')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_throttle)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw throttle')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=brake)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller brake')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_brake)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw brake')
plt.show()
plt.close()
# get_data = Read(True)
# mean, std = get_data.load_mean_and_std('F:/Project_Cars_Data/Full_Speed_Training_none_image')
# print(mean)
# print(std)
# xy = (xy - mean) / std
# print(np.array(xy[:,0]).shape)
# plt.scatter(xy[:,0], xy[:,1])
# plt.axis('equal')
# plt.show()
|
ThisIsSoSteve/Project-Tensorflow-Cars
|
plot_course_data.py
|
Python
|
mit
| 2,616
| 0.006116
|
"""
Dec 10, 2015 Developed by Y.G.@CHX
yuzhang@bnl.gov
This module is for the necessary packages for the XPCS analysis
"""
from IPython.core.magics.display import Javascript
from skbeam.core.utils import multi_tau_lags
from skimage.draw import line_aa, line, polygon, ellipse, circle
from modest_image import ModestImage, imshow
from databroker import DataBroker as db, get_images, get_table, get_events, get_fields
from filestore.api import register_handler, deregister_handler
#from filestore.retrieve import _h_registry, _HANDLER_CACHE, HandlerBase
from eiger_io.pims_reader import EigerImages
from chxtools import handlers
from filestore.path_only_handlers import RawHandler
## Import all the required packages for Data Analysis
#* scikit-beam - data analysis tools for X-ray science
# - https://github.com/scikit-beam/scikit-beam
#* xray-vision - plotting helper functions for X-ray science
# - https://github.com/Nikea/xray-vision
import xray_vision
import matplotlib.cm as mcm
import copy
import xray_vision.mpl_plotting as mpl_plot
from xray_vision.mpl_plotting import speckle
from xray_vision.mask.manual_mask import ManualMask
import skbeam.core.roi as roi
import skbeam.core.correlation as corr
import skbeam.core.utils as utils
import numpy as np
from datetime import datetime
import h5py
import pims
from pandas import DataFrame
import os, sys, time
import getpass
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import pickle
from lmfit import Model
from lmfit import minimize, Parameters, Parameter, report_fit
from matplotlib.figure import Figure
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
import collections
import itertools
import random
from PIL import Image
import warnings
from eiger_io.fs_handler2 import EigerHandler2
from eiger_io.fs_handler import LazyEigerHandler
fs = db.event_sources[0].fs
fs.deregister_handler('AD_EIGER')
fs.register_handler('AD_EIGER', LazyEigerHandler)
fs.deregister_handler('AD_EIGER2')
fs.register_handler('AD_EIGER2', EigerHandler2)
mcolors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k','darkgoldenrod','oldlace', 'brown','dodgerblue' ])
markers = itertools.cycle(list(plt.Line2D.filled_markers))
lstyles = itertools.cycle(['-', '--', '-.','.',':'])
colors = itertools.cycle(["blue", "darkolivegreen", "brown", "m", "orange", "hotpink", "darkcyan", "red",
"gray", "green", "black", "cyan", "purple" , "navy"])
colors_copy = itertools.cycle(["blue", "darkolivegreen", "brown", "m", "orange", "hotpink", "darkcyan", "red",
"gray", "green", "black", "cyan", "purple" , "navy"])
markers = itertools.cycle( ["o", "2", "p", "1", "s", "*", "4", "+", "8", "v","3", "D", "H", "^",])
markers_copy = itertools.cycle( ["o", "2", "p", "1", "s", "*", "4", "+", "8", "v","3", "D", "H", "^",])
RUN_GUI = False #if True for gui setup; else for notebook; the main code difference is the Figure() or plt.figure(figsize=(8, 6))
markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H',
'h', '*', 'd',
'$I$','$L$', '$O$','$V$','$E$',
'$c$', '$h$','$x$','$b$','$e$','$a$','$m$','$l$','$i$','$n$', '$e$',
'8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',]
markers = np.array( markers *100 )
markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H',
'h', '*', 'd',
'8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',]
markers = np.array( markers *100 )
colors = np.array( ['darkorange', 'mediumturquoise', 'seashell', 'mediumaquamarine', 'darkblue',
'yellowgreen', 'mintcream', 'royalblue', 'springgreen', 'slategray',
'yellow', 'slateblue', 'darkslateblue', 'papayawhip', 'bisque', 'firebrick',
'burlywood', 'dodgerblue', 'dimgrey', 'chartreuse', 'deepskyblue', 'honeydew',
'orchid', 'teal', 'steelblue', 'limegreen', 'antiquewhite',
'linen', 'saddlebrown', 'grey', 'khaki', 'hotpink', 'darkslategray',
'forestgreen', 'lightsalmon', 'turquoise', 'navajowhite',
'darkgrey', 'darkkhaki', 'slategrey', 'indigo',
'darkolivegreen', 'aquamarine', 'moccasin', 'beige', 'ivory', 'olivedrab',
'whitesmoke', 'paleturquoise', 'blueviolet', 'tomato', 'aqua', 'palegoldenrod',
'cornsilk', 'navy', 'mediumvioletred', 'palevioletred', 'aliceblue', 'azure',
'orangered', 'lightgrey', 'lightpink', 'orange', 'wheat',
'darkorchid', 'mediumslateblue', 'lightslategray', 'green', 'lawngreen',
'mediumseagreen', 'darksalmon', 'pink', 'oldlace', 'sienna', 'dimgray', 'fuchsia',
'lemonchiffon', 'maroon', 'salmon', 'gainsboro', 'indianred', 'crimson',
'mistyrose', 'lightblue', 'darkgreen', 'lightgreen', 'deeppink',
'palegreen', 'thistle', 'lightcoral', 'lightgray', 'lightskyblue', 'mediumspringgreen',
'mediumblue', 'peru', 'lightgoldenrodyellow', 'darkseagreen', 'mediumorchid',
'coral', 'lightyellow', 'chocolate', 'lavenderblush', 'darkred', 'lightseagreen',
'darkviolet', 'lightcyan', 'cadetblue', 'blanchedalmond', 'midnightblue',
'lightsteelblue', 'darkcyan', 'floralwhite', 'darkgray',
'lavender', 'sandybrown', 'cornflowerblue', 'gray',
'mediumpurple', 'lightslategrey', 'seagreen',
'silver', 'darkmagenta', 'darkslategrey', 'darkgoldenrod', 'rosybrown',
'goldenrod', 'darkturquoise', 'plum',
'purple', 'olive', 'gold','powderblue', 'peachpuff','violet', 'lime', 'greenyellow', 'tan', 'skyblue',
'magenta', 'black', 'brown', 'green', 'cyan', 'red','blue'] *100 )
colors = colors[::-1]
colors_ = itertools.cycle( colors )
#colors_ = itertools.cycle(sorted_colors_ )
markers_ = itertools.cycle( markers )
import matplotlib as mpl
# Custom colormaps
################################################################################
# ROYGBVR but with Cyan-Blue instead of Blue
color_list_cyclic_spectrum = [
[ 1.0, 0.0, 0.0 ],
[ 1.0, 165.0/255.0, 0.0 ],
[ 1.0, 1.0, 0.0 ],
[ 0.0, 1.0, 0.0 ],
[ 0.0, 0.2, 1.0 ],
[ 148.0/255.0, 0.0, 211.0/255.0 ],
[ 1.0, 0.0, 0.0 ]
]
cmap_cyclic_spectrum = mpl.colors.LinearSegmentedColormap.from_list('cmap_cyclic_spectrum', color_list_cyclic_spectrum)
# classic jet, slightly tweaked
# (bears some similarity to mpl.cm.nipy_spectral)
color_list_jet_extended = [
[0, 0, 0],
[0.18, 0, 0.18],
[0, 0, 0.5],
[0, 0, 1],
[ 0. , 0.38888889, 1. ],
[ 0. , 0.83333333, 1. ],
[ 0.3046595 , 1. , 0.66308244],
[ 0.66308244, 1. , 0.3046595 ],
[ 1. , 0.90123457, 0. ],
[ 1. , 0.48971193, 0. ],
[ 1. , 0.0781893 , 0. ],
[1, 0, 0],
[ 0.5 , 0. , 0. ],
]
cmap_jet_extended = mpl.colors.LinearSegmentedColormap.from_list('cmap_jet_extended', color_list_jet_extended)
# Tweaked version of "view.gtk" default color scale
color_list_vge = [
[ 0.0/255.0, 0.0/255.0, 0.0/255.0],
[ 0.0/255.0, 0.0/255.0, 254.0/255.0],
[ 188.0/255.0, 2.0/255.0, 107.0/255.0],
[ 254.0/255.0, 55.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 254.0/255.0]
]
cmap_vge = mpl.colors.LinearSegmentedColormap.from_list('cmap_vge', color_list_vge)
# High-dynamic-range (HDR) version of VGE
color_list_vge_hdr = [
[ 255.0/255.0, 255.0/255.0, 255.0/255.0],
[ 0.0/255.0, 0.0/255.0, 0.0/255.0],
[ 0.0/255.0, 0.0/255.0, 255.0/255.0],
[ 188.0/255.0, 0.0/255.0, 107.0/255.0],
[ 254.0/255.0, 55.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 254.0/255.0]
]
cmap_vge_hdr = mpl.colors.LinearSegmentedColormap.from_list('cmap_vge_hdr', color_list_vge_hdr)
# Simliar to Dectris ALBULA default color-scale
color_list_hdr_albula = [
[ 255.0/255.0, 255.0/255.0, 255.0/255.0],
[ 0.0/255.0, 0.0/255.0, 0.0/255.0],
[ 255.0/255.0, 0.0/255.0, 0.0/255.0],
[ 255.0/255.0, 255.0/255.0, 0.0/255.0],
#[ 255.0/255.0, 255.0/255.0, 255.0/255.0],
]
cmap_hdr_albula = mpl.colors.LinearSegmentedColormap.from_list('cmap_hdr_albula', color_list_hdr_albula)
cmap_albula = cmap_hdr_albula
# Ugly color-scale, but good for highlighting many features in HDR data
color_list_cur_hdr_goldish = [
[ 255.0/255.0, 255.0/255.0, 255.0/255.0], # white
[ 0.0/255.0, 0.0/255.0, 0.0/255.0], # black
[ 100.0/255.0, 127.0/255.0, 255.0/255.0], # light blue
[ 0.0/255.0, 0.0/255.0, 127.0/255.0], # dark blue
#[ 0.0/255.0, 127.0/255.0, 0.0/255.0], # dark green
[ 127.0/255.0, 60.0/255.0, 0.0/255.0], # orange
[ 255.0/255.0, 255.0/255.0, 0.0/255.0], # yellow
[ 200.0/255.0, 0.0/255.0, 0.0/255.0], # red
[ 255.0/255.0, 255.0/255.0, 255.0/255.0], # white
]
cmap_hdr_goldish = mpl.colors.LinearSegmentedColormap.from_list('cmap_hdr_goldish', color_list_cur_hdr_goldish)
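# --- illustrative use of the custom colormaps defined above (synthetic data) ---
# plt.imshow(np.random.rand(64, 64), cmap=cmap_albula); plt.colorbar(); plt.show()
# cmap_vge, cmap_vge_hdr, cmap_jet_extended and cmap_hdr_goldish are passed the same way.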
|
yugangzhang/chxanalys
|
chxanalys/chx_libs.py
|
Python
|
bsd-3-clause
| 9,246
| 0.025741
|
#!/usr/bin/env python
import sys,re,time,argparse
def main(args):
# print >>sys.stdout, "Start analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
convert(args.input,args.output)
# print >>sys.stdout, "Finish analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
def extract_exon_length_from_cigar(cigar):
cigar_m = ["0"] + re.findall(r"(\d+)M",cigar)
cigar_d = ["0"] + re.findall(r"(\d+)D",cigar)
cigar_m_s,cigar_d_s = [0,0]
for m in cigar_m:
cigar_m_s += int(m)
for d in cigar_d:
cigar_d_s += int(d)
exon_length = cigar_m_s+cigar_d_s
return exon_length
def extract_soft_clip_from_cigar(cigar):
cigar_5 = ["0"] + re.findall(r"^(\d+)S",cigar)
cigar_3 = ["0"] + re.findall(r"(\d+)S$",cigar)
cigar_5_s,cigar_3_s = [0,0]
for s5 in cigar_5:
cigar_5_s += int(s5)
for s3 in cigar_3:
cigar_3_s += int(s3)
return cigar_5_s,cigar_3_s
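# Worked examples of the two CIGAR helpers above:
# extract_exon_length_from_cigar("10M2D5M") -> 10 + 5 (M) + 2 (D) = 17 reference bases
# extract_soft_clip_from_cigar("3S10M2S")   -> (3, 2): 3 bases clipped at the read start, 2 at the end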
def convert(sam_file,gpd_file):
for line in sam_file:
if line[0] != "@":
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq = line.strip().split("\t")[:10]
tag = "\t".join(line.strip().split("\t")[11:])
if rname != "*" and re.search(r"XS:A:(\S)",tag):
s5,s3 = extract_soft_clip_from_cigar(cigar)
sf = str(s5)+"_"+str(s3)
strand = (re.search(r"XS:A:(\S)",tag)).group(1)
cigar_n_l = 0
exon_length = 0
exon_start = int(pos)-1
exon_end = 0
exon_start_list = []
exon_end_list = []
if "N" in cigar:
for exon in cigar.split("N"):
exon = exon + "N"
exon_start = exon_start + exon_length + cigar_n_l
exon_length = extract_exon_length_from_cigar(exon)
exon_end = exon_start + exon_length
if re.search(r"(\d+)N",exon):
cigar_n_l = int((re.search(r"(\d+)N",exon)).group(1))
exon_start_list.append(str(exon_start))
exon_end_list.append(str(exon_end))
else:
exon_start = exon_start
exon_length = extract_exon_length_from_cigar(cigar)
exon_end = exon_start + exon_length
exon_start_list.append(str(exon_start))
exon_end_list.append(str(exon_end))
exon_start_list.append("")
exon_end_list.append("")
print >>gpd_file, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (qname,qname,rname,strand,str(int(pos)-1),str(exon_end),mapq,sf,str(len(exon_start_list)-1),",".join(exon_start_list),",".join(exon_end_list))
sam_file.close()
gpd_file.close()
def do_inputs():
output_gpd_format = '''
1. read id
2. read id
3. chromosome id
4. strand
5. start site of alignment
6. end site of alignment
7. MAPQ
8. Number of nucleotides that are softly-clipped by aligner; left_right
9. exon count
10. exon start set
11. exon end set'''
parser = argparse.ArgumentParser(description="Function: convert sam to gpd.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i','--input',type=argparse.FileType('r'),required=True,help="Input: sam file")
parser.add_argument('-o','--output',type=argparse.FileType('w'),required=True,help="Output: gpd file")
args = parser.parse_args()
return args
if __name__=="__main__":
args = do_inputs()
main(args)
|
yunhaowang/IDP-APA
|
utilities/py_idpapa_sam2gpd.py
|
Python
|
apache-2.0
| 3,045
| 0.042365
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ecp.py part of cellery (ceRNAs linking inference)
#
# Copyright 2016 Oscar Bedoya Reina <obedoya@igmm-linux-005>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
Methods to calculate ECP values (Endogenous Competition Potential)
"""
########################################################
#~ Import libraries.
########################################################
from cellery import exceptions
from itertools import product
from multiprocessing import Queue,Process
from numpy import array,empty,float32,float64,nan,zeros
from clcECP import rtrnECP,rtrnECPMskd,rtrnECPDnsty,rtrnECPDnstyMskd
import os
import sqlite3
########################################################
#~ Compute ECP values for all combinations of two arrays of arrays with
# values.
########################################################
def cmpECP(aMrnVlsDtA,aMrnVlsDtB,aANmsA,aANmsB,fldrOutECPPrws, \
aALenA=False,aALenB=False,aMskRef=False,nThrds=10,intrvlSz=700, \
sqlFl=False,pntrCnts=True):
"""
Input: aMrnVlsDtA is an array A of arrays with values for miRNAs.
aMrnVlsDtB is an array B of arrays with values for miRNAs. aANmsA is
the array of variable names in the same position as the numbers in
vrblAPos. aANmsB is the array of variable names in the same order as
vrblBPos. fldrOutECPPrws is a folder to store partial ECP results.
Optionally, aALenA is an array of object lengths in the same order
that aAVlsA. aALenB is an array of object lengths in the same order
that aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays
within array A and B). nThrds is the number of threads to run in
parallel. intrvlSz is the size of the interval to run in multithread.
sqlFl is a sql database to save the ECP values. If pntrCnts is True
aAVlsA and aAVlsB are counts so 0 values shall be considered
(excluded in shared counts).
Output: aECPVlsAVlsB is an array with the ECP values for all
combinations of array A and B.
NOTE: The subarrays in arrays A and B must have the same dimensions
(i.e. all the miRNA arrays must have the same size.).
NOTE: Null values shall be numpy.nan.
NOTE: aECPVlsAVlsB has arrays in A as rows and in B as columns.
NOTE: if aALenA and aALenB ECP density is going to be calculated.
NOTE: if aMskRef miRNA is going to be masked.
"""
def mltECPclc(qInJobs,qOutRslts,mthdECPclc,aMrnVlsDtA,aMrnVlsDtB, \
fldrOutECPPrws,aALenA,aALenB,aMskRef,pntrCnts):
"""
Input: qInJobs is a queue with pairs of intervals. qOutRslts is
the queue to store position in arrayA, position in arrayB, and
ECP value. mthdECPclc is the method to calculate the ECP value.
aMrnVlsDtA is an array A of arrays with values for miRNAs.
aMrnVlsDtB is an array B of arrays with values for miRNAs.
fldrOutECPPrws is a folder to store partial ECP results. aALenA
is an array of object lengths in the same order that aAVlsA.
aALenB is an array of object lengths in the same order that
aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays
within array A and B). If pntrCnts is True aAVlsA and aAVlsB
are counts so 0 values shall be considered (excluded in shared
counts).
Output: qOutRslts is the queue to store position in arrayA,
position in arrayB, and ECP values.
"""
for intrvlA,intrvB in iter(qInJobs.get,'STOP'):
lECPVlsAVlsB = mthdECPclc(aMrnVlsDtA,aMrnVlsDtB, \
fldrOutECPPrws,intrvlA,intrvB,pntrCnts,aMskRef,aALenA, \
aALenB)
qOutRslts.put(lECPVlsAVlsB)
#--------------------------
#~ Check if there is mask for miRNAs
if dir(aMskRef)[0]=='T': # crude ndarray test: sorted dir() of a numpy array starts with 'T'
assert len(aMskRef) == len(aALenB[0]) == len(aALenA[0])
if dir(aALenB)[0]=='T':
assert dir(aALenA)[0]=='T'
mthdECPclc = rtrnECPDnstyMskd
else:
assert not aALenA and not aALenB
mthdECPclc = rtrnECPMskd
else:
if dir(aALenB)[0]=='T':
assert dir(aALenA)[0]=='T'
mthdECPclc = rtrnECPDnsty
else:
assert not aALenA and not aALenB
mthdECPclc = rtrnECP
#--------------------------
#~ Create list of intervals for multithreading
lenaMrnVlsDtA = len(aMrnVlsDtA)
lenaMrnVlsDtB = len(aMrnVlsDtB)
intrvlsMrnVlsA = []
for strt in xrange(0,lenaMrnVlsDtA,intrvlSz):
cEnd = strt+intrvlSz
if cEnd<lenaMrnVlsDtA:
end = cEnd
else:
end = lenaMrnVlsDtA
intrvlsMrnVlsA.append([strt,end])
intrvlsMrnVlsB = []
for strt in xrange(0,lenaMrnVlsDtB,intrvlSz):
cEnd = strt+intrvlSz
if cEnd<lenaMrnVlsDtB:
end = cEnd
else:
end = lenaMrnVlsDtB
intrvlsMrnVlsB.append([strt,end])
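# Worked example of the partition above: 1500 rows with intrvlSz=700 yield the
# intervals [[0, 700], [700, 1400], [1400, 1500]]; every A/B interval pair is then
# dispatched to a worker process below.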
#--------------------------
#~ Run in parallel.
aECPVlsAVlsB = zeros((lenaMrnVlsDtA,lenaMrnVlsDtB),dtype=float32)
aECPVlsAVlsB.fill(nan)#fill all ECP with nan to start
qInJobs = Queue()
qOutRslts = Queue()
cntVlABPrs=0
for intrvlA,intrvB in product(intrvlsMrnVlsA,intrvlsMrnVlsB):
qInJobs.put((intrvlA,intrvB))
cntVlABPrs += 1
for t in xrange(nThrds):
Process(target = mltECPclc,args=(qInJobs,qOutRslts,mthdECPclc, \
aMrnVlsDtA,aMrnVlsDtB,fldrOutECPPrws,aALenA,aALenB, \
aMskRef,pntrCnts)).start()
lECPVlsAVlsBGlbl = []#store global results
for cnt in range(cntVlABPrs):
if cnt%50==0:
print 'Running calculations on pair %s out of %s'%(cnt, \
cntVlABPrs)
lECPVlsAVlsB = qOutRslts.get()
lECPVlsAVlsBGlbl.extend(lECPVlsAVlsB)
for t in xrange(nThrds):
qInJobs.put('STOP')
#--------------------------
#~ create array: aMrnVlsDtA in rows, aMrnVlsDtB in columns.
for vlsAPos,vlsBPos,ECP in lECPVlsAVlsBGlbl:
aECPVlsAVlsB[vlsAPos,vlsBPos] = ECP
if sqlFl:
mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB)
return aECPVlsAVlsB
########################################################
#~ Make a sqlite3 database for ECP values between genes/lncRNAs of
# interest.
########################################################
def mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB):
"""
Input: lECPVlsAVlsBGlbl is a list of tuples (vrblAPos,vrblBPos,ECP).
vrblAPos is the position of the first variables, vrblBPos is the
position of the second variable, ECP is the ECP value between
vrblAPos and vrblBPos. A sqlite3 database will be created for the
input list. aANmsA is the array of variable names in the same
position as the numbers in vrblAPos. aANmsB is the array of variable
names in the same order as vrblBPos.
Output: A sqlite3 database will be created for the input list in the
file sqlFl.
"""
conn = sqlite3.connect(sqlFl)
c = conn.cursor()
c.execute \
('''CREATE TABLE records (id TEXT, vrblANm TEXT, vrblBNm TEXT, ECP REAL)''')
lCnt = 0
for vrblAPos,vrblBPos,ECP in lECPVlsAVlsBGlbl:
vrblANm,vrblBNm = aANmsA[vrblAPos],aANmsB[vrblBPos]
lCnt+=1
c.execute('insert into records VALUES (?,?,?,?)', (str(lCnt), \
vrblANm,vrblBNm,float64(ECP)))
# create indexes. Decrease complexity of querying
c.execute("CREATE INDEX index_records on records (id);")
conn.commit()
conn.close()
return 0
########################################################
#~ Read a sqlite3 database for correlations between genes/lncRNAs of
# interest.
########################################################
def rtrnSqlFlECP(sqlFl,srtdVrblANms,srtdVrblBNms,rtrnECPSgnd=False):
"""
Input: sqlFl is a sqlite3 database with the fields id, vrblANm,
vrblBNm, and ECP. srtdVrblANms is a sorted lists of names
present in the field vrblANm. srtdVrblBNms is a sorted lists of
names present in the field vrblBNm. Optionally, rtrnECPSgnd can have
values 'negative' or 'positive', in those cases only 'negative' or
'positive' ECP values are going to be retrieved respectively.
Output: aECPVlsAVlsB is an array of size len(srtdVrblANms) x
len(srtdVrblBNms) with correlation values ECP. In case the value is
not present nan is going to be incldued in the cell.
NOTE: If a name is not present in a database, nan values are going
to be returned.
NOTE: srtdVrblANms are going to be in rows, and srtdVrblBNms in
columns.
"""
if rtrnECPSgnd:
try:
if rtrnECPSgnd not in {'negative','positive'}:
raise exceptions.CelleryWarningObjct \
('"negative" or "positive" are values, not recognized', \
rtrnECPSgnd)
except exceptions.CelleryWarningObjct as err:
print err
#--------------------------
#~ make a dictionary of names and positions
lenaAVlsA = len(srtdVrblANms)
lenaAVlsB = len(srtdVrblBNms)
dVrblANmPos = dict([(vrblANm,pos) for pos,vrblANm in \
enumerate(srtdVrblANms)])
dVrblBNmPos = dict([(vrblBNm,pos) for pos,vrblBNm in \
enumerate(srtdVrblBNms)])
#--------------------------
#~ make a output array
aECPVlsAVlsB = zeros((lenaAVlsA,lenaAVlsB),dtype=float32)
aECPVlsAVlsB.fill(nan)#fill all correlations with nan to start
#--------------------------
#~ retrieve variable names
conn = sqlite3.connect(sqlFl)
c = conn.cursor()
sVrblANmsInSql = set([str(vrblANm[0]) for vrblANm in \
c.execute('SELECT vrblANm FROM records')])
sVrblBNmsInSql = set([str(vrblBNm[0]) for vrblBNm in \
c.execute('SELECT vrblBNm FROM records')])
lVrblANmInSql = list(set(srtdVrblANms).intersection(sVrblANmsInSql))
lVrblBNmInSql = list(set(srtdVrblBNms).intersection(sVrblBNmsInSql))
try:
lenSrtdVrblANms = len(srtdVrblANms)
lenlVrblANmInSql = len(lVrblANmInSql)
if lenSrtdVrblANms!=lenlVrblANmInSql:
raise exceptions.CelleryWarningObjct \
('Expression for %s variable A names were retrieved out of'% \
lenlVrblANmInSql,lenSrtdVrblANms)
except exceptions.CelleryWarningObjct as err:
print err
pass
try:
lenSrtdVrblBNms = len(srtdVrblBNms)
lenlVrblBNmInSql = len(lVrblBNmInSql)
if lenSrtdVrblBNms!=lenlVrblBNmInSql:
raise exceptions.CelleryWarningObjct \
('Expression for %s variable B names were retrieved out of'% \
lenlVrblBNmInSql,lenSrtdVrblBNms)
except exceptions.CelleryWarningObjct as err:
print err
pass
#--------------------------
#~ retrieve data
if rtrnECPSgnd == 'negative':
cmmnd = \
'SELECT * FROM records WHERE ECP<0 AND vrblANm in (%s) AND vrblBNm in (%s)' \
%(','.join(['"%s"'%v for v in lVrblANmInSql]),','.join(['"%s"'%v \
for v in lVrblBNmInSql]))
elif rtrnECPSgnd == 'positive':
cmmnd = \
'SELECT * FROM records WHERE ECP>0 AND vrblANm in (%s) AND vrblBNm in (%s)' \
%(','.join(['"%s"'%v for v in lVrblANmInSql]),','.join(['"%s"'%v \
for v in lVrblBNmInSql]))
else:
cmmnd = 'SELECT * FROM records WHERE vrblANm in (%s) AND vrblBNm in (%s)' \
%(','.join(['"%s"'%v for v in lVrblANmInSql]),','.join(['"%s"'%v \
for v in lVrblBNmInSql]))
for idX,vrblANm,vrblBNm,ECP in c.execute(cmmnd):
ECP = float32(ECP)
aECPVlsAVlsB[dVrblANmPos[vrblANm],dVrblBNmPos[vrblBNm]] = ECP
conn.close()
return aECPVlsAVlsB
|
oscarcbr/cellery
|
cellery/ecp.py
|
Python
|
gpl-3.0
| 11,283
| 0.042808
|
#!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import ptp
module = 'ptp_clock_cdc'
testbench = 'test_%s_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
TS_WIDTH = 64
NS_WIDTH = 4
FNS_WIDTH = 16
INPUT_PERIOD_NS = 0x6
INPUT_PERIOD_FNS = 0x6666
OUTPUT_PERIOD_NS = 0x6
OUTPUT_PERIOD_FNS = 0x6666
USE_SAMPLE_CLOCK = 1
LOG_FIFO_DEPTH = 3
LOG_RATE = 3
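# Note: the period constants above encode 6 ns + 0x6666/2**16 ns ~= 6.4 ns per tick
# (a nominal 156.25 MHz clock); the /2**16*1e-9 conversions in the checks below rely
# on the same 16 fractional-ns bits.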
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_clk = Signal(bool(0))
input_rst = Signal(bool(0))
output_clk = Signal(bool(0))
output_rst = Signal(bool(0))
sample_clk = Signal(bool(0))
input_ts = Signal(intbv(0)[96:])
# Outputs
output_ts = Signal(intbv(0)[96:])
output_ts_step = Signal(bool(0))
output_pps = Signal(bool(0))
# PTP clock
ptp_clock = ptp.PtpClock(period_ns=INPUT_PERIOD_NS, period_fns=INPUT_PERIOD_FNS)
ptp_logic = ptp_clock.create_logic(
input_clk,
input_rst,
ts_64=input_ts
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_clk=input_clk,
input_rst=input_rst,
output_clk=output_clk,
output_rst=output_rst,
sample_clk=sample_clk,
input_ts=input_ts,
output_ts=output_ts,
output_ts_step=output_ts_step,
output_pps=output_pps
)
@always(delay(3200))
def clkgen():
clk.next = not clk
input_clk.next = not input_clk
output_clk_hp = Signal(int(3200))
@instance
def clkgen_output():
while True:
yield delay(int(output_clk_hp))
output_clk.next = not output_clk
@always(delay(5000))
def clkgen_sample():
sample_clk.next = not sample_clk
@instance
def check():
yield delay(100000)
yield clk.posedge
rst.next = 1
input_rst.next = 1
output_rst.next = 1
yield clk.posedge
yield clk.posedge
yield clk.posedge
input_rst.next = 0
output_rst.next = 0
yield clk.posedge
yield delay(100000)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: Same clock speed")
current_test.next = 1
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 2: Slightly faster")
current_test.next = 2
output_clk_hp.next = 3100
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 3: Slightly slower")
current_test.next = 3
output_clk_hp.next = 3300
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 4: Significantly faster")
current_test.next = 4
output_clk_hp.next = 2000
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 5: Significantly slower")
current_test.next = 5
output_clk_hp.next = 5000
yield clk.posedge
for i in range(30000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
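# Illustrative helper, not part of the original testbench: the checks inside
# bench() convert the raw 64-bit timestamp to seconds on the assumption
# (implied by the division by 2**16) that the low 16 bits are fractional
# nanoseconds and the upper 48 bits are whole nanoseconds.
def ts64_to_seconds(ts):
    """Convert a 48.16 fixed-point nanosecond timestamp to float seconds."""
    return ts / 2.0**16 * 1e-9
# e.g. ts64_to_seconds(0x10000) == 1e-9 (exactly one nanosecond)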
if __name__ == '__main__':
print("Running test...")
test_bench()
|
alexforencich/verilog-ethernet
|
tb/test_ptp_clock_cdc_64.py
|
Python
|
mit
| 5,957
| 0.000671
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-19 06:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('condensed_urls', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='condensedurl',
name='visited_count',
field=models.IntegerField(default=0),
),
]
|
ezarowny/url-condenser
|
url_condenser/condensed_urls/migrations/0002_condensedurl_visited_count.py
|
Python
|
mit
| 456
| 0
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cyborg common internal object model"""
import netaddr
from oslo_utils import versionutils
from oslo_versionedobjects import base as object_base
from cyborg import objects
from cyborg.objects import fields as object_fields
class CyborgObjectRegistry(object_base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
# NOTE(jroll): blatantly stolen from nova
# NOTE(danms): This is called when an object is registered,
# and is responsible for maintaining cyborg.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
class CyborgObject(object_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'cyborg_object'
OBJ_PROJECT_NAMESPACE = 'cyborg'
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
}
def as_dict(self):
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k))
@staticmethod
def _from_db_object(obj, db_obj):
"""Converts a database entity to a formal object.
:param obj: An object of the class.
:param db_obj: A DB model of the object
:return: The object of the class with the database entity added
"""
for field in obj.fields:
obj[field] = db_obj[field]
obj.obj_reset_changes()
return obj
@classmethod
def _from_db_object_list(cls, db_objs, context):
"""Converts a list of database entities to a list of formal objects."""
objs = []
for db_obj in db_objs:
objs.append(cls._from_db_object(cls(context), db_obj))
return objs
class CyborgObjectSerializer(object_base.VersionedObjectSerializer):
# Base class to use for object hydration
OBJ_BASE_CLASS = CyborgObject
CyborgObjectDictCompat = object_base.VersionedObjectDictCompat
class CyborgPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
'deleted_at': object_fields.DateTimeField(nullable=True),
'deleted': object_fields.BooleanField(default=False),
}
class ObjectListBase(object_base.ObjectListBase):
@classmethod
def _obj_primitive_key(cls, field):
return 'cyborg_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=object_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == object_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A CyborgObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, CyborgObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
    :param obj_1: The first object in the comparison
    :param obj_2: The second object in the comparison
    :param ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['cyborg_object.changes'] + ignore
else:
keys = ['cyborg_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
class DriverObjectBase(CyborgObject):
@staticmethod
def _from_db_object(obj, db_obj):
fields = obj.fields
fields.pop("updated_at")
fields.pop("created_at")
for field in fields:
obj[field] = db_obj[field]
obj.obj_reset_changes()
return obj
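# Illustrative sketch, not part of cyborg: a standalone illustration of the
# ignore-keys comparison performed by obj_equal_prims() above. The two
# primitives below are hypothetical hand-written dicts rather than real
# serialized CyborgObjects.
def _example_equal_ignoring_changes():
    prim_1 = {'cyborg_object.name': 'Accelerator',
              'cyborg_object.data': {'uuid': 'abc'},
              'cyborg_object.changes': ['uuid']}
    prim_2 = {'cyborg_object.name': 'Accelerator',
              'cyborg_object.data': {'uuid': 'abc'},
              'cyborg_object.changes': []}
    # Only the tracked-changes list differs, so once 'cyborg_object.changes'
    # is stripped (as obj_equal_prims does) the primitives compare equal.
    def _strip_changes(prim):
        return {k: v for k, v in prim.items()
                if k != 'cyborg_object.changes'}
    return _strip_changes(prim_1) == _strip_changes(prim_2)  # True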
|
openstack/nomad
|
cyborg/objects/base.py
|
Python
|
apache-2.0
| 6,515
| 0
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Tag editing module for Gramps.
"""
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..managedwindow import ManagedWindow
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..listmodel import ListModel, TOGGLE
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Entering_and_Editing_Data:_Detailed_-_part_3' % \
URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Tags')
#-------------------------------------------------------------------------
#
# EditTagList
#
#-------------------------------------------------------------------------
class EditTagList(ManagedWindow):
"""
Dialog to allow the user to edit a list of tags.
"""
def __init__(self, tag_list, full_list, uistate, track):
"""
Initiate and display the dialog.
"""
ManagedWindow.__init__(self, uistate, track, self)
self.namemodel = None
top = self._create_dialog()
self.set_window(top, None, _('Tag selection'))
for tag in full_list:
self.namemodel.add([tag[0], tag in tag_list, tag[1]])
self.namemodel.connect_model()
# The dialog is modal. We don't want to have several open dialogs of
        # this type, since then the user will lose track of which is which.
self.return_list = None
self.show()
while True:
response = self.window.run()
if response == Gtk.ResponseType.HELP:
display_help(webpage=WIKI_HELP_PAGE,
section=WIKI_HELP_SEC)
elif response == Gtk.ResponseType.DELETE_EVENT:
break
else:
if response == Gtk.ResponseType.OK:
self.return_list = [(row[0], row[2])
for row in self.namemodel.model
if row[1]]
self.close()
break
def _create_dialog(self):
"""
Create a dialog box to select tags.
"""
# pylint: disable-msg=E1101
title = _("%(title)s - Gramps") % {'title': _("Edit Tags")}
top = Gtk.Dialog(title)
top.set_default_size(360, 400)
top.set_modal(True)
top.vbox.set_spacing(5)
columns = [('', -1, 300),
(' ', -1, 25, TOGGLE, True, None),
(_('Tag'), -1, 300)]
view = Gtk.TreeView()
self.namemodel = ListModel(view, columns)
slist = Gtk.ScrolledWindow()
slist.add_with_viewport(view)
slist.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
top.vbox.pack_start(slist, 1, 1, 5)
top.add_button(Gtk.STOCK_HELP, Gtk.ResponseType.HELP)
top.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
top.add_button(Gtk.STOCK_OK, Gtk.ResponseType.OK)
top.show_all()
return top
def build_menu_names(self, obj):
"""
Define the menu entry for the ManagedWindows.
"""
return (_("Tag selection"), None)
|
pmghalvorsen/gramps_branch
|
gramps/gui/editors/edittaglist.py
|
Python
|
gpl-2.0
| 4,483
| 0.003792
|
from unittest import TestCase
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import trtools.core.topper as topper
import imp
imp.reload(topper)
arr = np.random.randn(10000)
s = pd.Series(arr)
df = tm.makeDataFrame()
class TestTopper(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_topn_largest(self):
# get the n largest
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10]
np.testing.assert_almost_equal(bn_res, pd_res)
# change result to biggest to smallest
bn_res = topper.bn_topn(arr, 10, ascending=True)
assert bn_res[-1] == max(arr) # sanity check
pd_res = s.order(ascending=True)[-10:] # grab from end since we reversed
np.testing.assert_almost_equal(bn_res, pd_res)
def test_topn_big_N(self):
"""
When calling topn where N is greater than the number of non-nan values.
This can happen if you're tracking a Frame of returns where not all series start at the same time.
        It's possible that in the beginning or end, or anytime for that matter, you might not have enough
values. This screws up the logic.
"""
# test data
arr = np.random.randn(100)
        arr[5:] = np.nan # only the first five values are non-nan
s = pd.Series(arr)
# top
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10].dropna()
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10].dropna() # grab from end since we reversed
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_smallest(self):
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# change ordering
bn_res = topper.bn_topn(arr, -10, ascending=False)
assert bn_res[-1] == min(arr) # sanity check
pd_res = s.order(ascending=False)[-10:] # grab from end since we reversed
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_arg(self):
# get the nlargest
bn_res = topper.bn_topn(arr, 10)
bn_args = topper.bn_topargn(arr, 10)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
bn_args = topper.bn_topargn(arr, -10)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
# get the nsmallest
bn_res = topper.bn_topn(arr, -10, ascending=False)
bn_args = topper.bn_topargn(arr, -10, ascending=False)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
def test_nans(self):
"""
bottleneck.partsort doesn't handle nans. We need to correct for them.
        The arg version is trickier since we need to make sure to
        translate back into the nan-filled array.
"""
nanarr = np.arange(10).astype(float)
nanarr[nanarr % 2 == 0] = np.nan
test = topper.topn(nanarr, 3)
correct = [9,7,5]
tm.assert_almost_equal(test, correct)
test = topper.topn(nanarr, -3)
correct = [1,3,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, 3)
correct = [9,7,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, -3)
correct = [1,3,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, -3, ascending=False)
correct = [5,3,1]
tm.assert_almost_equal(test, correct)
def test_df_topn(self):
# long way of getting the topn
tops = df.apply(lambda s: s.topn(2, ascending=False), axis=1)
correct = pd.DataFrame(tops, index=df.index)
test = topper.topn_df(df, 2, ascending=False)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order()[-1]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
# bottom 2
tops = df.apply(lambda s: s.topn(-2), axis=1)
correct = pd.DataFrame(tops, index=df.index)
test = topper.topn_df(df, -2)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order()[0]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
def test_df_topindexn(self):
# long way of getting the topindexn
top_pos = df.apply(lambda s: s.topargn(2, ascending=False), axis=1)
correct = df.columns[top_pos.values]
correct = pd.DataFrame(correct, index=df.index)
test = topper.topindexn_df(df, 2, ascending=False)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order().index[-1]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
# bottom 2
top_pos = df.apply(lambda s: s.topargn(-2), axis=1)
correct = df.columns[top_pos.values]
correct = pd.DataFrame(correct, index=df.index)
test = topper.topindexn_df(df, -2)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order().index[0]
t = test.iloc[0][0]
        tm.assert_almost_equal(t, c)
def test_df_topargn(self):
# really this is tested via topindexn indirectly
pass
def test_default_ascending(self):
"""
Changed ascending to change based on N
        More intuitive: by default you'd expect the greatest or lowest
        value to come first, depending on which side you are looking for.
"""
# top should default to asc=False
bn_res = topper.bn_topn(arr, 10)
pd_res = s.order(ascending=False)[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# make sure ascending is still respected
bn_res = topper.bn_topn(arr, 10, ascending=True)
pd_res = s.order(ascending=True)[-10:]
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom defaults asc=True
bn_res = topper.bn_topn(arr, -10)
pd_res = s.order()[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# make sure ascending is still respected
bn_res = topper.bn_topn(arr, -10, ascending=False)
pd_res = s.order()[:10][::-1]
tm.assert_almost_equal(bn_res, pd_res.values)
def test_test_ndim(self):
"""
Make sure topn and topargn doesn't accept DataFrame
"""
try:
topper.topn(df, 1)
except:
pass
else:
assert False
try:
topper.topargn(df, 1)
except:
pass
else:
assert False
def test_too_big_n_df(self):
df = pd.DataFrame(np.random.randn(100, 10))
df[df > 0] = np.nan
testdf = topper.topn_df(df, 10)
for x in range(len(df)):
correct = df.iloc[x].order(ascending=False).reset_index(drop=True)
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
testdf = topper.topn_df(df, 2)
for x in range(len(df)):
correct = df.iloc[x].order(ascending=False).reset_index(drop=True)[:2]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
# bottom
testdf = topper.topn_df(df, -2)
for x in range(len(df)):
correct = df.iloc[x].order().reset_index(drop=True)[:2]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
# bottom
testdf = topper.topn_df(df, -20)
for x in range(len(df)):
correct = df.iloc[x].order().reset_index(drop=True)[:20]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
|
dalejung/trtools
|
trtools/core/tests/test_topper.py
|
Python
|
mit
| 8,499
| 0.004118
|
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('An email with instructions to confirm your new email '
'address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
|
RTHMaK/RPGOne
|
circleci-demo-python-flask-master/app/auth/views.py
|
Python
|
apache-2.0
| 6,044
| 0
|
#!/usr/bin/python
import sys
from LAPS.MsgBus.Bus import Bus
# Create queue with a unique name
# insert message
# receive msg
# delete queue
if __name__ == "__main__":
# If invoked directly, parse command line arguments for logger information
# and pass the rest to the run() method defined above
# --------------------------------------------------------------------------
try:
unique_queue_name = sys.argv[1]
except:
print "Not enough command line arguments: this test needs a unique queue name"
exit(1)
#msgbus = Bus(broker="lhd002", address=unique_queue_name)
#parset = """
#key=value
#"""
#msgbus.send(parset,"Observation123456")
|
jjdmol/LOFAR
|
SubSystems/LAPS_CEP/test/startPythonFromMsg.py
|
Python
|
gpl-3.0
| 725
| 0.015172
|
"""distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
# This module should be kept compatible with Python 1.5.2.
__revision__ = "$Id: cmd.py,v 1.34 2003/02/20 02:10:08 gvanrossum Exp $"
import sys, os, string, re
from types import *
from distutils.errors import *
from distutils import util, dir_util, file_util, archive_util, dep_util
from distutils import log
class Command:
"""Abstract base class for defining command classes, the "worker bees"
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called "options". The options
are "declared" in 'initialize_options()' and "defined" (given their
final values, aka "finalized") in 'finalize_options()', both of which
must be defined by every command class. The distinction between the
two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed *after* these outside influences have
been processed -- hence 'finalize_options()'. The "body" of the
subroutine, where it does all its work based on the values of its
options, is the 'run()' method, which must also be implemented by every
command class.
"""
# 'sub_commands' formalizes the notion of a "family" of commands,
# eg. "install" as the parent with sub-commands "install_lib",
# "install_headers", etc. The parent of a family of commands
# defines 'sub_commands' as a class attribute; it's a list of
# (command_name : string, predicate : unbound_method | string | None)
# tuples, where 'predicate' is a method of the parent command that
# determines whether the corresponding command is applicable in the
    # current situation. (Eg. "install_headers" is only applicable if
# we have any C header files to install.) If 'predicate' is None,
# that command is always applicable.
#
# 'sub_commands' is usually defined at the *end* of a class, because
# predicates can be unbound methods, so they must already have been
# defined. The canonical example is the "install" command.
sub_commands = []
# -- Creation/initialization methods -------------------------------
def __init__ (self, dist):
"""Create and initialize a new Command object. Most importantly,
invokes the 'initialize_options()' method, which is the real
initializer and depends on the actual command being
instantiated.
"""
# late import because of mutual dependence between these classes
from distutils.dist import Distribution
if not isinstance(dist, Distribution):
raise TypeError, "dist must be a Distribution instance"
if self.__class__ is Command:
raise RuntimeError, "Command is an abstract class"
self.distribution = dist
self.initialize_options()
# Per-command versions of the global flags, so that the user can
# customize Distutils' behaviour command-by-command and let some
# commands fallback on the Distribution's behaviour. None means
# "not defined, check self.distribution's copy", while 0 or 1 mean
# false and true (duh). Note that this means figuring out the real
# value of each flag is a touch complicated -- hence "self._dry_run"
# will be handled by __getattr__, below.
# XXX This needs to be fixed.
self._dry_run = None
# verbose is largely ignored, but needs to be set for
# backwards compatibility (I think)?
self.verbose = dist.verbose
# Some commands define a 'self.force' option to ignore file
# timestamps, but methods defined *here* assume that
# 'self.force' exists for all commands. So define it here
# just to be safe.
self.force = None
# The 'help' flag is just used for command-line parsing, so
# none of that complicated bureaucracy is needed.
self.help = 0
# 'finalized' records whether or not 'finalize_options()' has been
# called. 'finalize_options()' itself should not pay attention to
# this flag: it is the business of 'ensure_finalized()', which
# always calls 'finalize_options()', to respect/update it.
self.finalized = 0
# __init__ ()
# XXX A more explicit way to customize dry_run would be better.
def __getattr__ (self, attr):
if attr == 'dry_run':
myval = getattr(self, "_" + attr)
if myval is None:
return getattr(self.distribution, attr)
else:
return myval
else:
raise AttributeError, attr
def ensure_finalized (self):
if not self.finalized:
self.finalize_options()
self.finalized = 1
# Subclasses must define:
# initialize_options()
# provide default values for all options; may be customized by
# setup script, by options from config file(s), or by command-line
# options
# finalize_options()
# decide on the final values for all options; this is called
# after all possible intervention from the outside world
# (command-line, option file, etc.) has been processed
# run()
# run the command: do whatever it is we're here to do,
# controlled by the command's various option values
def initialize_options (self):
"""Set default values for all the options that this command
supports. Note that these defaults may be overridden by other
commands, by the setup script, by config files, or by the
command-line. Thus, this is not the place to code dependencies
between options; generally, 'initialize_options()' implementations
are just a bunch of "self.foo = None" assignments.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def finalize_options (self):
"""Set final values for all the options that this command supports.
This is always called as late as possible, ie. after any option
assignments from the command-line or from other commands have been
        done. Thus, this is the place to code option dependencies: if
'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
long as 'foo' still has the same value it was assigned in
'initialize_options()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def dump_options (self, header=None, indent=""):
from distutils.fancy_getopt import longopt_xlate
if header is None:
header = "command options for '%s':" % self.get_command_name()
print indent + header
indent = indent + " "
for (option, _, _) in self.user_options:
option = string.translate(option, longopt_xlate)
if option[-1] == "=":
option = option[:-1]
value = getattr(self, option)
print indent + "%s = %s" % (option, value)
def run (self):
"""A command's raison d'etre: carry out the action it exists to
perform, controlled by the options initialized in
'initialize_options()', customized by other commands, the setup
script, the command-line, and config files, and finalized in
'finalize_options()'. All terminal output and filesystem
interaction should be done by 'run()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def announce (self, msg, level=1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
"""
log.log(level, msg)
def debug_print (self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print msg
sys.stdout.flush()
# -- Option validation methods -------------------------------------
# (these are very handy in writing the 'finalize_options()' method)
#
# NB. the general philosophy here is to ensure that a particular option
# value meets certain type and value constraints. If not, we try to
# force it into conformance (eg. if we expect a list but have a string,
# split the string on comma and/or whitespace). If we can't force the
# option into conformance, raise DistutilsOptionError. Thus, command
# classes need do nothing more than (eg.)
# self.ensure_string_list('foo')
# and they can be guaranteed that thereafter, self.foo will be
# a list of strings.
def _ensure_stringlike (self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif type(val) is not StringType:
raise DistutilsOptionError, \
"'%s' must be a %s (got `%s`)" % (option, what, val)
return val
def ensure_string (self, option, default=None):
"""Ensure that 'option' is a string; if not defined, set it to
'default'.
"""
self._ensure_stringlike(option, "string", default)
def ensure_string_list (self, option):
"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif type(val) is StringType:
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if type(val) is ListType:
types = map(type, val)
ok = (types == [StringType] * len(val))
else:
ok = 0
if not ok:
raise DistutilsOptionError, \
"'%s' must be a list of strings (got %s)" % \
(option, `val`)
def _ensure_tested_string (self, option, tester,
what, error_fmt, default=None):
val = self._ensure_stringlike(option, what, default)
if val is not None and not tester(val):
raise DistutilsOptionError, \
("error in '%s' option: " + error_fmt) % (option, val)
def ensure_filename (self, option):
"""Ensure that 'option' is the name of an existing file."""
self._ensure_tested_string(option, os.path.isfile,
"filename",
"'%s' does not exist or is not a file")
def ensure_dirname (self, option):
self._ensure_tested_string(option, os.path.isdir,
"directory name",
"'%s' does not exist or is not a directory")
# -- Convenience methods for commands ------------------------------
def get_command_name (self):
if hasattr(self, 'command_name'):
return self.command_name
else:
return self.__class__.__name__
def set_undefined_options (self, src_cmd, *option_pairs):
"""Set the values of any "undefined" options from corresponding
option values in some other command object. "Undefined" here means
"is None", which is the convention used to indicate that an option
has not been changed between 'initialize_options()' and
'finalize_options()'. Usually called from 'finalize_options()' for
options that depend on some other command rather than another
option of the same command. 'src_cmd' is the other command from
which option values will be taken (a command object will be created
for it if necessary); the remaining arguments are
'(src_option,dst_option)' tuples which mean "take the value of
'src_option' in the 'src_cmd' command object, and copy it to
'dst_option' in the current command object".
"""
# Option_pairs: list of (src_option, dst_option) tuples
src_cmd_obj = self.distribution.get_command_obj(src_cmd)
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
setattr(self, dst_option,
getattr(src_cmd_obj, src_option))
def get_finalized_command (self, command, create=1):
"""Wrapper around Distribution's 'get_command_obj()' method: find
(create if necessary and 'create' is true) the command object for
'command', call its 'ensure_finalized()' method, and return the
finalized command object.
"""
cmd_obj = self.distribution.get_command_obj(command, create)
cmd_obj.ensure_finalized()
return cmd_obj
# XXX rename to 'get_reinitialized_command()'? (should do the
# same in dist.py, if so)
def reinitialize_command (self, command, reinit_subcommands=0):
return self.distribution.reinitialize_command(
command, reinit_subcommands)
def run_command (self, command):
"""Run some other command: uses the 'run_command()' method of
Distribution, which creates and finalizes the command object if
necessary and then invokes its 'run()' method.
"""
self.distribution.run_command(command)
def get_sub_commands (self):
"""Determine the sub-commands that are relevant in the current
distribution (ie., that need to be run). This is based on the
'sub_commands' class attribute: each tuple in that list may include
a method that we call to determine if the subcommand needs to be
run for the current distribution. Return a list of command names.
"""
commands = []
for (cmd_name, method) in self.sub_commands:
if method is None or method(self):
commands.append(cmd_name)
return commands
# -- External world manipulation -----------------------------------
def warn (self, msg):
sys.stderr.write("warning: %s: %s\n" %
(self.get_command_name(), msg))
def execute (self, func, args, msg=None, level=1):
util.execute(func, args, msg, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
dir_util.mkpath(name, mode, dry_run=self.dry_run)
def copy_file (self, infile, outfile,
preserve_mode=1, preserve_times=1, link=None, level=1):
"""Copy a file respecting verbose, dry-run and force flags. (The
former two default to whatever is in the Distribution object, and
the latter defaults to false for commands that don't define it.)"""
return file_util.copy_file(
infile, outfile,
preserve_mode, preserve_times,
not self.force,
link,
dry_run=self.dry_run)
def copy_tree (self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
"""
return dir_util.copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run)
def move_file (self, src, dst, level=1):
"""Move a file respectin dry-run flag."""
return file_util.move_file(src, dst, dry_run = self.dry_run)
def spawn (self, cmd, search_path=1, level=1):
"""Spawn an external command respecting dry-run flag."""
from distutils.spawn import spawn
spawn(cmd, search_path, dry_run= self.dry_run)
def make_archive (self, base_name, format,
root_dir=None, base_dir=None):
return archive_util.make_archive(
base_name, format, root_dir, base_dir, dry_run=self.dry_run)
def make_file (self, infiles, outfile, func, args,
exec_msg=None, skip_msg=None, level=1):
"""Special case of 'execute()' for operations that process one or
more input files and generate one output file. Works just like
'execute()', except the operation is skipped and a different
message printed if 'outfile' already exists and is newer than all
files listed in 'infiles'. If the command defined 'self.force',
and it is true, then the command is unconditionally run -- does no
timestamp checks.
"""
if exec_msg is None:
exec_msg = "generating %s from %s" % \
(outfile, string.join(infiles, ', '))
if skip_msg is None:
skip_msg = "skipping %s (inputs unchanged)" % outfile
# Allow 'infiles' to be a single string
if type(infiles) is StringType:
infiles = (infiles,)
elif type(infiles) not in (ListType, TupleType):
raise TypeError, \
"'infiles' must be a string, or a list or tuple of strings"
# If 'outfile' must be regenerated (either because it doesn't
# exist, is out-of-date, or the 'force' flag is true) then
# perform the action that presumably regenerates it
if self.force or dep_util.newer_group (infiles, outfile):
self.execute(func, args, exec_msg, level)
# Otherwise, print the "skip" message
else:
log.debug(skip_msg)
# make_file ()
# class Command
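# An illustrative sketch, not part of distutils: a minimal command class
# following the initialize_options()/finalize_options()/run() protocol
# described in the Command docstring above. The command name and its
# option are hypothetical.
class show_name (Command):
    """Toy command that just announces the distribution name."""
    description = "print the distribution name"
    user_options = [('upper', 'u', "print the name in upper case")]
    def initialize_options (self):
        # every option gets a dummy default here ...
        self.upper = 0
    def finalize_options (self):
        # ... and its final value is decided here (nothing to do for this
        # command, since 'upper' does not depend on any other option)
        pass
    def run (self):
        name = self.distribution.get_name()
        if self.upper:
            name = name.upper()
        self.announce("distribution name: %s" % name, level=2)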
# XXX 'install_misc' class not currently used -- it was the base class for
# both 'install_scripts' and 'install_data', but they outgrew it. It might
# still be useful for 'install_headers', though, so I'm keeping it around
# for the time being.
class install_misc (Command):
"""Common base class for installing some files in a subdirectory.
Currently used by install_data and install_scripts.
"""
user_options = [('install-dir=', 'd', "directory to install the files to")]
def initialize_options (self):
self.install_dir = None
self.outfiles = []
def _install_dir_from (self, dirname):
self.set_undefined_options('install', (dirname, 'install_dir'))
def _copy_files (self, filelist):
self.outfiles = []
if not filelist:
return
self.mkpath(self.install_dir)
for f in filelist:
self.copy_file(f, self.install_dir)
self.outfiles.append(os.path.join(self.install_dir, f))
def get_outputs (self):
return self.outfiles
if __name__ == "__main__":
print "ok"
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/distutils/cmd.py
|
Python
|
mit
| 19,279
| 0.004201
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
ConfigSet: a special dict
The values put in :py:class:`ConfigSet` must be lists
"""
import copy, re, os
from waflib import Logs, Utils
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
class ConfigSet(object):
"""
A dict that honor serialization and parent relationships. The serialization format
is human-readable (python-like) and performed by using eval() and repr().
For high performance prefer pickle. Do not store functions as they are not serializable.
The values can be accessed by attributes or by keys::
from waflib.ConfigSet import ConfigSet
env = ConfigSet()
env.FOO = 'test'
env['FOO'] = 'test'
"""
__slots__ = ('table', 'parent')
def __init__(self, filename=None):
self.table = {}
"""
Internal dict holding the object values
"""
#self.parent = None
if filename:
self.load(filename)
def __contains__(self, key):
"""
Enable the *in* syntax::
if 'foo' in env:
print(env['foo'])
"""
if key in self.table: return True
try: return self.parent.__contains__(key)
except AttributeError: return False # parent may not exist
def keys(self):
"""Dict interface (unknown purpose)"""
keys = set()
cur = self
while cur:
keys.update(cur.table.keys())
cur = getattr(cur, 'parent', None)
keys = list(keys)
keys.sort()
return keys
def __str__(self):
"""Text representation of the ConfigSet (for debugging purposes)"""
return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()])
def __getitem__(self, key):
"""
Dictionary interface: get value from key::
def configure(conf):
conf.env['foo'] = {}
print(env['foo'])
"""
try:
while 1:
x = self.table.get(key, None)
if not x is None:
return x
self = self.parent
except AttributeError:
return []
def __setitem__(self, key, value):
"""
		Dictionary interface: set the value for a key
"""
self.table[key] = value
def __delitem__(self, key):
"""
		Dictionary interface: reset the value for a key (set it to an empty list)
"""
self[key] = []
def __getattr__(self, name):
"""
Attribute access provided for convenience. The following forms are equivalent::
def configure(conf):
conf.env.value
conf.env['value']
"""
if name in self.__slots__:
return object.__getattr__(self, name)
else:
return self[name]
def __setattr__(self, name, value):
"""
Attribute access provided for convenience. The following forms are equivalent::
def configure(conf):
conf.env.value = x
env['value'] = x
"""
if name in self.__slots__:
object.__setattr__(self, name, value)
else:
self[name] = value
def __delattr__(self, name):
"""
Attribute access provided for convenience. The following forms are equivalent::
def configure(conf):
del env.value
del env['value']
"""
if name in self.__slots__:
object.__delattr__(self, name)
else:
del self[name]
def derive(self):
"""
Returns a new ConfigSet deriving from self. The copy returned
will be a shallow copy::
from waflib.ConfigSet import ConfigSet
env = ConfigSet()
env.append_value('CFLAGS', ['-O2'])
child = env.derive()
child.CFLAGS.append('test') # warning! this will modify 'env'
child.CFLAGS = ['-O3'] # new list, ok
child.append_value('CFLAGS', ['-O3']) # ok
Use :py:func:`ConfigSet.detach` to detach the child from the parent.
"""
newenv = ConfigSet()
newenv.parent = self
return newenv
def detach(self):
"""
Detach self from its parent (if existing)
Modifying the parent :py:class:`ConfigSet` will not change the current object
Modifying this :py:class:`ConfigSet` will not modify the parent one.
"""
tbl = self.get_merged_dict()
try:
delattr(self, 'parent')
except AttributeError:
pass
else:
keys = tbl.keys()
for x in keys:
tbl[x] = copy.deepcopy(tbl[x])
self.table = tbl
return self
def get_flat(self, key):
"""
Return a value as a string. If the input is a list, the value returned is space-separated.
:param key: key to use
:type key: string
"""
s = self[key]
if isinstance(s, str): return s
return ' '.join(s)
def _get_list_value_for_modification(self, key):
"""
Return a list value for further modification.
The list may be modified inplace and there is no need to do this afterwards::
self.table[var] = value
"""
try:
value = self.table[key]
except KeyError:
try: value = self.parent[key]
except AttributeError: value = []
if isinstance(value, list):
value = value[:]
else:
value = [value]
else:
if not isinstance(value, list):
value = [value]
self.table[key] = value
return value
def append_value(self, var, val):
"""
Appends a value to the specified config key::
def build(bld):
bld.env.append_value('CFLAGS', ['-O2'])
The value must be a list or a tuple
"""
if isinstance(val, str): # if there were string everywhere we could optimize this
val = [val]
current_value = self._get_list_value_for_modification(var)
current_value.extend(val)
def prepend_value(self, var, val):
"""
Prepends a value to the specified item::
def configure(conf):
conf.env.prepend_value('CFLAGS', ['-O2'])
The value must be a list or a tuple
"""
if isinstance(val, str):
val = [val]
self.table[var] = val + self._get_list_value_for_modification(var)
def append_unique(self, var, val):
"""
Append a value to the specified item only if it's not already present::
def build(bld):
bld.env.append_unique('CFLAGS', ['-O2', '-g'])
The value must be a list or a tuple
"""
if isinstance(val, str):
val = [val]
current_value = self._get_list_value_for_modification(var)
for x in val:
if x not in current_value:
current_value.append(x)
def get_merged_dict(self):
"""
		Compute the merged dictionary from the fusion of self and all its parents
		:rtype: dict
"""
table_list = []
env = self
while 1:
table_list.insert(0, env.table)
try: env = env.parent
except AttributeError: break
merged_table = {}
for table in table_list:
merged_table.update(table)
return merged_table
def store(self, filename):
"""
Write the :py:class:`ConfigSet` data into a file. See :py:meth:`ConfigSet.load` for reading such files.
:param filename: file to use
:type filename: string
"""
try:
os.makedirs(os.path.split(filename)[0])
except OSError:
pass
buf = []
merged_table = self.get_merged_dict()
keys = list(merged_table.keys())
keys.sort()
try:
fun = ascii
except NameError:
fun = repr
for k in keys:
if k != 'undo_stack':
buf.append('%s = %s\n' % (k, fun(merged_table[k])))
Utils.writef(filename, ''.join(buf))
def load(self, filename):
"""
Retrieve the :py:class:`ConfigSet` data from a file. See :py:meth:`ConfigSet.store` for writing such files
:param filename: file to use
:type filename: string
"""
tbl = self.table
code = Utils.readf(filename, m='rU')
for m in re_imp.finditer(code):
g = m.group
tbl[g(2)] = eval(g(3))
Logs.debug('env: %s' % str(self.table))
def update(self, d):
"""
Dictionary interface: replace values from another dict
:param d: object to use the value from
:type d: dict-like object
"""
for k, v in d.items():
self[k] = v
def stash(self):
"""
Store the object state, to provide a kind of transaction support::
env = ConfigSet()
env.stash()
try:
env.append_value('CFLAGS', '-O3')
call_some_method(env)
finally:
env.revert()
The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store`
"""
orig = self.table
tbl = self.table = self.table.copy()
for x in tbl.keys():
tbl[x] = copy.deepcopy(tbl[x])
self.undo_stack = self.undo_stack + [orig]
def revert(self):
"""
Reverts the object to a previous state. See :py:meth:`ConfigSet.stash`
"""
self.table = self.undo_stack.pop(-1)
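# Illustrative usage sketch, not part of waf: exercises the derive()/detach()
# and stash()/revert() behaviour documented above. Running it directly
# assumes the waflib package is importable; the flag values are arbitrary.
if __name__ == '__main__':
	env = ConfigSet()
	env.append_value('CFLAGS', ['-O2'])
	child = env.derive()
	child.append_unique('CFLAGS', ['-g', '-O2'])  # '-O2' already present, not duplicated
	child.detach()                                # child now owns a deep copy of its data
	env.stash()
	env.append_value('CFLAGS', ['-O3'])
	env.revert()                                  # drops the '-O3' added after stash()
	assert env.get_flat('CFLAGS') == '-O2'
	assert child.CFLAGS == ['-O2', '-g']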
|
evancich/apm_motor
|
modules/waf/waflib/ConfigSet.py
|
Python
|
gpl-3.0
| 8,007
| 0.037842
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import smoothness
import page_sets
@test.Disabled('linux') # crbug.com/368767
class SchedulerToughSchedulingCases(test.Test):
"""Measures rendering statistics while interacting with pages that have
challenging scheduling properties.
https://docs.google.com/a/chromium.org/document/d/
17yhE5Po9By0sCdM1yZT3LiUECaUr_94rQt9j-4tOQIM/view"""
test = smoothness.Smoothness
page_set = page_sets.ToughSchedulingCasesPageSet
# Pepper plugin is not supported on android.
@test.Disabled('android', 'win') # crbug.com/384733
class SchedulerToughPepperCases(test.Test):
"""Measures rendering statistics while interacting with pages that have
pepper plugins"""
test = smoothness.Smoothness
page_set = page_sets.ToughPepperCasesPageSet
def CustomizeBrowserOptions(self, options):
# This is needed for testing pepper plugin.
options.AppendExtraBrowserArgs('--enable-pepper-testing')
|
TeamEOS/external_chromium_org
|
tools/perf/benchmarks/scheduler.py
|
Python
|
bsd-3-clause
| 1,116
| 0.006272
|
import numpy as np
import pylab as pl
import sys
sys.path.append('../../lablib')
import ourgui
def smoothList(list,strippedXs=False,degree=10):
if strippedXs==True: return Xs[0:-(len(list)-(len(list)-degree+1))]
smoothed=[0]*(len(list)-degree+1)
for i in range(len(smoothed)):
smoothed[i]=sum(list[i:i+degree])/float(degree)
return smoothed
def smoothListGaussian(list,strippedXs=False,degree=5):
window=degree*2-1
weight=np.array([1.0]*window)
weightGauss=[]
for i in range(window):
i=i-degree+1
frac=i/float(window)
gauss=1/(np.exp((4*(frac))**2))
weightGauss.append(gauss)
weight=np.array(weightGauss)*weight
smoothed=[0.0]*(len(list)-window)
for i in range(len(smoothed)):
smoothed[i]=sum(np.array(list[i:i+window])*weight)/sum(weight)
return smoothed
filename = ourgui.openFile()
dtypes = {'names': ['date', 'value', 'unit'],
'formats': ['f8', 'f4', 'S1']}
data = np.loadtxt(filename,
delimiter=",",
dtype=dtypes,
)
date = data['date']
date -= date[0]
scale = 60
date /= scale
pl.plot(date, data['value'], '.')
degree = 200
sdataG = smoothListGaussian(data['value'], degree=degree)
sdateG = date[degree:(-degree+1)]
sdata = smoothList(data['value'], degree=degree)
sdate = date[degree/2:-degree/2+1]
pl.plot(sdate, sdata, 'g-')
pl.plot(sdateG, sdataG, 'r-')
pl.xlabel("time (min)")
pl.ylabel("thermistor resistance")
pl.show()
|
UltracoldAtomsLab/labhardware
|
projects/gwmultilog/quickplot.py
|
Python
|
mit
| 1,511
| 0.023825
|
"""passlib.bcrypt -- implementation of OpenBSD's BCrypt algorithm.
TODO:
* support 2x and altered-2a hashes?
http://www.openwall.com/lists/oss-security/2011/06/27/9
* deal with lack of PY3-compatible c-ext implementation
"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement, absolute_import
# core
import os
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
try:
from bcrypt import hashpw as pybcrypt_hashpw
except ImportError: # pragma: no cover
pybcrypt_hashpw = None
try:
from bcryptor.engine import Engine as bcryptor_engine
except ImportError: # pragma: no cover
bcryptor_engine = None
# pkg
from passlib.exc import PasslibHashWarning
from passlib.utils import bcrypt64, safe_crypt, repeat_string, \
classproperty, rng, getrandstr, test_crypt
from passlib.utils.compat import bytes, b, u, uascii_to_str, unicode, str_to_uascii
import passlib.utils.handlers as uh
# local
__all__ = [
"bcrypt",
]
#=============================================================================
# support funcs & constants
#=============================================================================
_builtin_bcrypt = None
def _load_builtin():
global _builtin_bcrypt
if _builtin_bcrypt is None:
from passlib.utils._blowfish import raw_bcrypt as _builtin_bcrypt
IDENT_2 = u("$2$")
IDENT_2A = u("$2a$")
IDENT_2X = u("$2x$")
IDENT_2Y = u("$2y$")
_BNULL = b('\x00')
#=============================================================================
# handler
#=============================================================================
class bcrypt(uh.HasManyIdents, uh.HasRounds, uh.HasSalt, uh.HasManyBackends, uh.GenericHandler):
"""This class implements the BCrypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 22 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 12, must be between 4 and 31, inclusive.
This value is logarithmic, the actual number of iterations used will be :samp:`2**{rounds}`
-- increasing the rounds by +1 will double the amount of time taken.
:type ident: str
:param ident:
Specifies which version of the BCrypt algorithm will be used when creating a new hash.
Typically this option is not needed, as the default (``"2a"``) is usually the correct choice.
If specified, it must be one of the following:
* ``"2"`` - the first revision of BCrypt, which suffers from a minor security flaw and is generally not used anymore.
* ``"2a"`` - latest revision of the official BCrypt algorithm, and the current default.
* ``"2y"`` - format specific to the *crypt_blowfish* BCrypt implementation,
identical to ``"2a"`` in all but name.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
This class now supports ``"2y"`` hashes, and recognizes
(but does not support) the broken ``"2x"`` hashes.
(see the :ref:`crypt_blowfish bug <crypt-blowfish-bug>`
for details).
.. versionchanged:: 1.6
Added a pure-python backend.
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "bcrypt"
setting_kwds = ("salt", "rounds", "ident")
checksum_size = 31
checksum_chars = bcrypt64.charmap
#--HasManyIdents--
default_ident = u("$2a$")
ident_values = (u("$2$"), IDENT_2A, IDENT_2X, IDENT_2Y)
ident_aliases = {u("2"): u("$2$"), u("2a"): IDENT_2A, u("2y"): IDENT_2Y}
#--HasSalt--
min_salt_size = max_salt_size = 22
salt_chars = bcrypt64.charmap
# NOTE: 22nd salt char must be in bcrypt64._padinfo2[1], not full charmap
#--HasRounds--
default_rounds = 12 # current passlib default
min_rounds = 4 # bcrypt spec specified minimum
max_rounds = 31 # 32-bit integer limit (since real_rounds=1<<rounds)
rounds_cost = "log2"
#===================================================================
# formatting
#===================================================================
@classmethod
def from_string(cls, hash):
ident, tail = cls._parse_ident(hash)
if ident == IDENT_2X:
raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
"currently supported")
rounds_str, data = tail.split(u("$"))
rounds = int(rounds_str)
if rounds_str != u('%02d') % (rounds,):
raise uh.exc.MalformedHashError(cls, "malformed cost field")
salt, chk = data[:22], data[22:]
return cls(
rounds=rounds,
salt=salt,
checksum=chk or None,
ident=ident,
)
def to_string(self):
hash = u("%s%02d$%s%s") % (self.ident, self.rounds, self.salt,
self.checksum or u(''))
return uascii_to_str(hash)
def _get_config(self, ident=None):
"internal helper to prepare config string for backends"
if ident is None:
ident = self.ident
if ident == IDENT_2Y:
ident = IDENT_2A
else:
assert ident != IDENT_2X
config = u("%s%02d$%s") % (ident, self.rounds, self.salt)
return uascii_to_str(config)
#===================================================================
# specialized salt generation - fixes passlib issue 25
#===================================================================
@classmethod
def _bind_needs_update(cls, **settings):
return cls._needs_update
@classmethod
def _needs_update(cls, hash, secret):
if isinstance(hash, bytes):
hash = hash.decode("ascii")
# check for incorrect padding bits (passlib issue 25)
if hash.startswith(IDENT_2A) and hash[28] not in bcrypt64._padinfo2[1]:
return True
# TODO: try to detect incorrect $2x$ hashes using *secret*
return False
@classmethod
def normhash(cls, hash):
"helper to normalize hash, correcting any bcrypt padding bits"
if cls.identify(hash):
return cls.from_string(hash).to_string()
else:
return hash
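    # Illustrative example of normhash() above (the hash literal is a placeholder,
    # not a real vector): bcrypt.normhash("$2a$12$<salt-with-bad-padding><checksum>")
    # returns the same hash with the salt's trailing padding bits cleared, while
    # non-bcrypt strings are passed through unchanged.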
def _generate_salt(self, salt_size):
        # override parent to clear the unused padding bits of the generated salt
salt = super(bcrypt, self)._generate_salt(salt_size)
return bcrypt64.repair_unused(salt)
def _norm_salt(self, salt, **kwds):
salt = super(bcrypt, self)._norm_salt(salt, **kwds)
assert salt is not None, "HasSalt didn't generate new salt!"
changed, salt = bcrypt64.check_repair_unused(salt)
if changed:
# FIXME: if salt was provided by user, this message won't be
# correct. not sure if we want to throw error, or use different warning.
warn(
"encountered a bcrypt salt with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; see Passlib 1.5.3 changelog.",
PasslibHashWarning)
return salt
def _norm_checksum(self, checksum):
checksum = super(bcrypt, self)._norm_checksum(checksum)
if not checksum:
return None
changed, checksum = bcrypt64.check_repair_unused(checksum)
if changed:
warn(
"encountered a bcrypt hash with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; see Passlib 1.5.3 changelog.",
PasslibHashWarning)
return checksum
#===================================================================
# primary interface
#===================================================================
backends = ("pybcrypt", "bcryptor", "os_crypt", "builtin")
@classproperty
def _has_backend_pybcrypt(cls):
return pybcrypt_hashpw is not None
@classproperty
def _has_backend_bcryptor(cls):
return bcryptor_engine is not None
@classproperty
def _has_backend_builtin(cls):
if os.environ.get("PASSLIB_BUILTIN_BCRYPT") not in ["enable","enabled"]:
return False
# look at it cross-eyed, and it loads itself
_load_builtin()
return True
@classproperty
def _has_backend_os_crypt(cls):
# XXX: what to do if only h2 is supported? h1 is *very* rare.
h1 = '$2$04$......................1O4gOrCYaqBG3o/4LnT2ykQUt1wbyju'
h2 = '$2a$04$......................qiOQjkB8hxU8OzRhS.GhRMa4VUnkPty'
return test_crypt("test",h1) and test_crypt("test", h2)
@classmethod
def _no_backends_msg(cls):
return "no bcrypt backends available - please install py-bcrypt"
def _calc_checksum_os_crypt(self, secret):
config = self._get_config()
hash = safe_crypt(secret, config)
if hash:
assert hash.startswith(config) and len(hash) == len(config)+31
return hash[-31:]
else:
# NOTE: it's unlikely any other backend will be available,
# but checking before we bail, just in case.
for name in self.backends:
if name != "os_crypt" and self.has_backend(name):
func = getattr(self, "_calc_checksum_" + name)
return func(secret)
raise uh.exc.MissingBackendError(
"password can't be handled by os_crypt, "
"recommend installing py-bcrypt.",
)
def _calc_checksum_pybcrypt(self, secret):
# py-bcrypt behavior:
# py2: unicode secret/hash encoded as ascii bytes before use,
# bytes taken as-is; returns ascii bytes.
# py3: not supported (patch submitted)
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
raise uh.exc.NullPasswordError(self)
config = self._get_config()
hash = pybcrypt_hashpw(secret, config)
assert hash.startswith(config) and len(hash) == len(config)+31
return str_to_uascii(hash[-31:])
def _calc_checksum_bcryptor(self, secret):
# bcryptor behavior:
# py2: unicode secret/hash encoded as ascii bytes before use,
# bytes taken as-is; returns ascii bytes.
# py3: not supported
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
# NOTE: especially important to forbid NULLs for bcryptor,
# since it happily accepts them, and then silently truncates
# the password at first one it encounters :(
raise uh.exc.NullPasswordError(self)
if self.ident == IDENT_2:
# bcryptor doesn't support $2$ hashes; but we can fake $2$ behavior
# using the $2a$ algorithm, by repeating the password until
# it's at least 72 chars in length.
if secret:
secret = repeat_string(secret, 72)
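                # (illustrative) repeat_string() is assumed to be the passlib.utils
                # helper that repeats its input to exactly the requested length,
                # e.g. repeat_string(b"abc", 7) -> b"abcabca"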
config = self._get_config(IDENT_2A)
else:
config = self._get_config()
hash = bcryptor_engine(False).hash_key(secret, config)
assert hash.startswith(config) and len(hash) == len(config)+31
return str_to_uascii(hash[-31:])
def _calc_checksum_builtin(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
raise uh.exc.NullPasswordError(self)
chk = _builtin_bcrypt(secret, self.ident.strip("$"),
self.salt.encode("ascii"), self.rounds)
return chk.decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
| ioram7/keystone-federado-pgid2013 | build/passlib/passlib/handlers/bcrypt.py | Python | apache-2.0 | 13,180 | 0.003794 |
import collections
import jinja2
import requests
from flask import (
current_app,
Blueprint,
render_template,
request,
jsonify,
abort
)
frontend = Blueprint('frontend', __name__, template_folder='templates')
headers = {"Content-type": "application/json"}
@jinja2.contextfilter
@frontend.app_template_filter()
def format_link(context, value):
items = value.split(':')
register = current_app.config['POAO_SECTION_REGISTER']
return "<a href='%s/products-of-animal-origin-section/%s'>%s</a> %s" % (register, items[0],items[0],items[1])
@frontend.route('/')
def index():
premises_url = current_app.config['PREMISES_REGISTER']
url = "%s/search?_representation=json" % premises_url
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
abort(resp.status_code)
return render_template('index.html', data=resp.json())
@frontend.route('/search')
def search():
query = request.args.get('query', '')
page = request.args.get('page', 0)
premises_url = current_app.config['PREMISES_REGISTER']
url = "%s/search?_query=%s&_page=%s&_representation=json" % (premises_url, query, page)
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
abort(resp.status_code)
current_app.logger.info(resp.json())
return jsonify(resp.json())
@frontend.route('/premises/<int:id>')
def premises(id):
premises_register = current_app.config['PREMISES_REGISTER']
poao_premises_register = current_app.config['POAO_PREMISES_REGISTER']
address_register = current_app.config['ADDRESS_REGISTER']
food_category_register = current_app.config['FOOD_ESTABLISHMENT_CATEGORY_REGISTER']
try:
premises_url = '%s/premises/%d.json' % (premises_register, id)
resp = requests.get(premises_url, headers=headers)
resp.raise_for_status()
premises = resp.json()
poao_premises_url = '%s/premises/%d.json' % (poao_premises_register, id)
resp = requests.get(poao_premises_url, headers=headers)
resp.raise_for_status()
poao_premises = resp.json()
category_details = _get_category_details(poao_premises)
address_url = '%s/address/%d.json' % (address_register, id)
resp = requests.get(address_url, headers=headers)
resp.raise_for_status()
address = resp.json()
except requests.exceptions.HTTPError as e:
current_app.logger.info(e)
abort(resp.status_code)
return render_template('premises.html',
poao_premises_register=poao_premises_register,
premises=premises, poao_premises=poao_premises,
address=address,
category_details=category_details,
food_category_register=food_category_register)
Category = collections.namedtuple('Category', 'category_key, section_name, activity_name')
# This sort of stuff is a mess.
def _get_category_details(premises):
category_details = []
try:
for category in premises['entry']['food-establishment-categories']:
section_key, activity_key = category.split(':')
section_url = "%s/products-of-animal-origin-section/%s.json" % (current_app.config['POAO_SECTION_REGISTER'], section_key)
activity_url = "%s/products-of-animal-origin-activity/%s.json" % (current_app.config['POAO_ACTIVITY_REGISTER'], activity_key)
section_resp = requests.get(section_url, headers=headers)
activity_resp = requests.get(activity_url, headers=headers)
section_resp.raise_for_status()
activity_resp.raise_for_status()
section = section_resp.json()['entry']
activity = activity_resp.json()['entry']
category = Category(category_key=category,
section_name=section['name'],
activity_name=activity['name'])
category_details.append(category)
current_app.logger.info(category_details)
except requests.exceptions.HTTPError as e:
current_app.logger.info(e)
current_app.logger.info('Not much we can do at this point but return empty category_details')
return category_details
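# ---------------------------------------------------------------------------
# Illustrative wiring sketch (not part of the original module): how this
# blueprint could be registered on a Flask app.  Every register URL below is
# an assumed placeholder; only the config key names are taken from the views
# above.
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     app.config.update(
#         PREMISES_REGISTER="https://premises.example",
#         POAO_PREMISES_REGISTER="https://poao-premises.example",
#         POAO_SECTION_REGISTER="https://poao-section.example",
#         POAO_ACTIVITY_REGISTER="https://poao-activity.example",
#         ADDRESS_REGISTER="https://address.example",
#         FOOD_ESTABLISHMENT_CATEGORY_REGISTER="https://food-category.example",
#     )
#     app.register_blueprint(frontend)
# ---------------------------------------------------------------------------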
| openregister/food-premises-demo | fsa_approved_premises/frontend/views.py | Python | mit | 4,293 | 0.002795 |
from math import log10, floor
# N = 12
N = 101
# N = 1000001
def n_squares(n):
return [i**2 for i in range(2, n)]
# print(n_squares(11))
# print(n_squares(100))
##### This block from stackoverflow:
# https://stackoverflow.com/questions/37023774/all-ways-to-partition-a-string
import itertools
memo = {}
def multiSlice(s,cutpoints):
k = len(cutpoints)
if k == 0:
return [s]
else:
multislices = [s[:cutpoints[0]]]
multislices.extend(s[cutpoints[i]:cutpoints[i+1]] for i in range(k-1))
multislices.append(s[cutpoints[k-1]:])
return multislices
def allPartitions(s):
# if s in memo:
# return memo[s]
n = len(s)
cuts = list(range(1,n))
for k in range(1, n):
for cutpoints in itertools.combinations(cuts,k):
yield multiSlice(s,cutpoints)
##### End block
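# Illustrative output of the block above: allPartitions([1, 2, 3]) yields
#   [[1], [2, 3]], [[1, 2], [3]], [[1], [2], [3]]
# (the single-piece partition is never produced because k starts at 1).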
# print(list(allPartitions([int(i) for i in str(1234)])))
def list_sum(num_list):
outer_sum = 0
for sub_list in num_list:
inner_sum = 0
power = 1
for digit in sub_list[::-1]:
inner_sum += power * digit
power *= 10
outer_sum += inner_sum
return outer_sum
# print(list_sum([[1, 2], [3, 4]]))
# print(list_sum([[1, 2, 3, 4]]))
# print(list_sum([[1], [2], [3], [4]]))
def is_s_num(num):
    # use an integer root so the equality check against list_sum() is exact
    sqrt = round(num ** 0.5)
for part in allPartitions([int(i) for i in str(num)]):
if sqrt == list_sum(part):
return True
return False
# print(81, is_s_num(81))
# print(64, is_s_num(64))
# print(8281, is_s_num(8281))
# print(9801, is_s_num(9801))
def T(N):
    squares = n_squares(N)
    total = 0  # avoid shadowing the built-in sum()
    for n in squares:
        if is_s_num(n):
            print(n, "is true")
            total += n
    return total
print(T(N))
| kylebegovich/ProjectEuler | Python/Progress/Problem719.py | Python | gpl-3.0 | 1,757 | 0.007968 |