| repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| waseem18/oh-mainline | vendor/packages/twisted/twisted/internet/test/test_sigchld.py | Python | agpl-3.0 | 6,400 | 0.002344 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
monitoring API.
"""
import os, signal, errno
from twisted.python.log import msg
from twisted.trial.unittest import TestCase
from twisted.internet.fdesc import setNonBlocking
from twisted.internet._signals import installHandler, isDefaultHandler
from twisted.internet._signals import _extInstallHandler, _extIsDefaultHandler
from twisted.internet._signals import _installHandlerUsingSetWakeup, \
_installHandlerUsingSignal, _isDefaultHandler
class SIGCHLDTestsMixin:
"""
Mixin for L{TestCase} subclasses which defines several tests for
I{installHandler} and I{isDefaultHandler}. Subclasses are expected to
define C{self.installHandler} and C{self.isDefaultHandler} to invoke the
implementation to be tested.
"""
if getattr(signal, 'SIGCHLD', None) is None:
skip = "Platform does not have SIGCHLD"
def installHandler(self, fd):
"""
Override in a subclass to install a SIGCHLD handler which writes a byte
to the given file descriptor. Return the previously registered file
descriptor.
"""
raise NotImplementedError()
def isDefaultHandler(self):
"""
Override in a subclass to determine if the current SIGCHLD handler is
SIG_DFL or not. Return True if it is SIG_DFL, False otherwise.
"""
raise NotImplementedError()
def pipe(self):
"""
Create a non-blocking pipe which will be closed after the currently
running test.
"""
read, write = os.pipe()
self.addCleanup(os.close, read)
self.addCleanup(os.close, write)
setNonBlocking(read)
setNonBlocking(write)
return read, write
def setUp(self):
"""
Save the current SIGCHLD handler as reported by L{signal.signal} and
the current file descriptor registered with L{installHandler}.
"""
handler = signal.getsignal(signal.SIGCHLD)
if handler != signal.SIG_DFL:
self.signalModuleHandler = handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
else:
self.signalModuleHandler = None
self.oldFD = self.installHandler(-1)
if self.signalModuleHandler is not None and self.oldFD != -1:
msg("SIGCHLD setup issue: %r %r" % (self.signalModuleHandler, self.oldFD))
raise RuntimeError("You used some signal APIs wrong! Try again.")
def tearDown(self):
"""
Restore whatever signal handler was present when setUp ran.
"""
# If tests set up any kind of handlers, clear them out.
self.installHandler(-1)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Now restore whatever the setup was before the test ran.
if self.signalModuleHandler is not None:
signal.signal(signal.SIGCHLD, self.signalModuleHandler)
elif self.oldFD != -1:
self.installHandler(self.oldFD)
def test_isDefaultHandler(self):
"""
L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
false otherwise.
"""
self.assertTrue(self.isDefaultHandler())
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
self.assertFalse(self.isDefaultHandler())
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
self.assertTrue(self.isDefaultHandler())
signal.signal(signal.SIGCHLD, lambda *args: None)
self.assertFalse(self.isDefaultHandler())
def test_returnOldFD(self):
"""
L{installHandler} returns the previously registered file descriptor.
"""
read, write = self.pipe()
oldFD = self.installHandler(write)
self.assertEqual(self.installHandler(oldFD), write)
def test_uninstallHandler(self):
"""
C{installHandler(-1)} removes the SIGCHLD handler completely.
"""
read, write = self.pipe()
self.assertTrue(self.isDefaultHandler())
self.installHandler(write)
self.assertFalse(self.isDefaultHandler())
self.installHandler(-1)
self.assertTrue(self.isDefaultHandler())
def test_installHandler(self):
"""
The file descriptor passed to L{installHandler} has a byte written to
it when SIGCHLD is delivered to the process.
"""
read, write = self.pipe()
self.installHandler(write)
exc = self.assertRaises(OSError, os.read, read, 1)
self.assertEqual(exc.errno, errno.EAGAIN)
os.kill(os.getpid(), signal.SIGCHLD)
self.assertEqual(len(os.read(read, 5)), 1)
class DefaultSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for whatever implementation is selected for the L{installHandler}
and L{isDefaultHandler} APIs.
"""
installHandler = staticmethod(installHandler)
isDefaultHandler = staticmethod(isDefaultHandler)
class ExtensionSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for the L{twisted.internet._sigchld} implementation of the
L{installHandler} and L{isDefaultHandler} APIs.
"""
try:
import twisted.internet._sigchld
except ImportError:
skip = "twisted.internet._sigchld is not available"
installHandler = _extInstallHandler
isDefaultHandler = _extIsDefaultHandler
class SetWakeupSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for the L{signal.set_wakeup_fd} implementation of the
L{installHandler} and L{isDefaultHandler} APIs.
"""
# Check both of these. On Ubuntu 9.10 (to take an example completely at
# random), Python 2.5 has set_wakeup_fd but not siginterrupt.
if (getattr(signal, 'set_wakeup_fd', None) is None
or getattr(signal, 'siginterrupt', None) is None):
skip = "signal.set_wakeup_fd is not available"
installHandler = staticmethod(_installHandlerUsingSetWakeup)
isDefaultHandler = staticmethod(_isDefaultHandler)
class PlainSignalModuleSIGCHLDTests(SIGCHLDTestsMixin, TestCase):
"""
Tests for the L{signal.signal} implementation of the L{installHandler}
and L{isDefaultHandler} APIs.
"""
installHandler = staticmethod(_installHandlerUsingSignal)
isDefaultHandler = staticmethod(_isDefaultHandler)
|
| rwl/PyCIM | CIM15/IEC61970/Generation/Production/FossilFuel.py | Python | mit | 7,612 | 0.004992 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class FossilFuel(IdentifiedObject):
"""The fossil fuel consumed by the non-nuclear thermal generating units, e.g., coal, oil, gasThe fossil fuel consumed by the non-nuclear thermal generating units, e.g., coal, oil, gas
"""
def __init__(self, fuelSulfur=0.0, fuelCost=0.0, fossilFuelType="oil", lowBreakpointP=0.0, fuelDispatchCost=0.0, fuelHandlingCost=0.0, fuelHeatContent=0.0, fuelEffFactor=0.0, fuelMixture=0.0, highBreakpointP=0.0, ThermalGeneratingUnit=None, FuelAllocationSchedules=None, *args, **kw_args):
"""Initialises a new 'FossilFuel' instance.
@param fuelSulfur: The fuel's fraction of pollution credit per unit of heat content
@param fuelCost: The cost in terms of heat value for the given type of fuel
@param fossilFuelType: The type of fossil fuel, such as coal, oil, or gas. Values are: "oil", "coal", "lignite", "gas"
@param lowBreakpointP: The active power output level of the unit at which the given type of fuel is switched off. This fuel (e.g., oil) is sometimes used to stabilize the base fuel (e.g., coal) at low active power output levels.
@param fuelDispatchCost: The cost of fuel used for economic dispatching which includes: fuel cost, transportation cost, and incremental maintenance cost
@param fuelHandlingCost: Handling and processing cost associated with this fuel
@param fuelHeatContent: The amount of heat per weight (or volume) of the given type of fuel
@param fuelEffFactor: The efficiency factor for the fuel (per unit) in terms of the effective energy absorbed
@param fuelMixture: Relative amount of the given type of fuel, when multiple fuels are being consumed.
@param highBreakpointP: The active power output level of the unit at which the given type of fuel is switched on. This fuel (e.g., oil) is sometimes used to supplement the base fuel (e.g., coal) at high active power output levels.
@param ThermalGeneratingUnit: A thermal generating unit may have one or more fossil fuels
@param FuelAllocationSchedules: A fuel allocation schedule must have a fossil fuel
"""
#: The fuel's fraction of pollution credit per unit of heat content
self.fuelSulfur = fuelSulfur
#: The cost in terms of heat value for the given type of fuel
self.fuelCost = fuelCost
#: The type of fossil fuel, such as coal, oil, or gas. Values are: "oil", "coal", "lignite", "gas"
self.fossilFuelType = fossilFuelType
#: The active power output level of the unit at which the given type of fuel is switched off. This fuel (e.g., oil) is sometimes used to stabilize the base fuel (e.g., coal) at low active power output levels.
self.lowBreakpointP = lowBreakpointP
#: The cost of fuel used for economic dispatching which includes: fuel cost, transportation cost, and incremental maintenance cost
self.fuelDispatchCost = fuelDispatchCost
#: Handling and processing cost associated with this fuel
self.fuelHandlingCost = fuelHandlingCost
#: The amount of heat per weight (or volume) of the given type of fuel
self.fuelHeatContent = fuelHeatContent
#: The efficiency factor for the fuel (per unit) in terms of the effective energy absorbed
self.fuelEffFactor = fuelEffFactor
#: Relative amount of the given type of fuel, when multiple fuels are being consumed.
self.fuelMixture = fuelMixture
#: The active power output level of the unit at which the given type of fuel is switched on. This fuel (e.g., oil) is sometimes used to supplement the base fuel (e.g., coal) at high active power output levels.
self.highBreakpointP = highBreakpointP
self._ThermalGeneratingUnit = None
self.ThermalGeneratingUnit = ThermalGeneratingUnit
self._FuelAllocationSchedules = []
self.FuelAllocationSchedules = [] if FuelAllocationSchedules is None else FuelAllocationSchedules
super(FossilFuel, self).__init__(*args, **kw_args)
_attrs = ["fuelSulfur", "fuelCost", "fossilFuelType", "lowBreakpointP", "fuelDispatchCost", "fuelHandlingCost", "fuelHeatContent", "fuelEffFactor", "fuelMixture", "highBreakpointP"]
_attr_types = {"fuelSulfur": float, "fuelCost": float, "fossilFuelType": str, "lowBreakpointP": float, "fuelDispatchCost": float, "fuelHandlingCost": float, "fuelHeatContent": float, "fuelEffFactor": float, "fuelMixture": float, "highBreakpointP": float}
_defaults = {"fuelSulfur": 0.0, "fuelCost": 0.0, "fossilFuelType": "oil", "lowBreakpointP": 0.0, "fuelDispatchCost": 0.0, "fuelHandlingCost": 0.0, "fuelHeatContent": 0.0, "fuelEffFactor": 0.0, "fuelMixture": 0.0, "highBreakpointP": 0.0}
_enums = {"fossilFuelType": "FuelType"}
_refs = ["ThermalGeneratingUnit", "FuelAllocationSchedules"]
_many_refs = ["FuelAllocationSchedules"]
def getThermalGeneratingUnit(self):
"""A thermal generating unit may have one or more fossil fuels
"""
return self._ThermalGeneratingUnit
def setThermalGeneratingUnit(self, value):
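# Keep the two-way association consistent: detach this fuel from the old
# unit's FossilFuels list before registering it with the new unit.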
if self._ThermalGeneratingUnit is not None:
filtered = [x for x in self.ThermalGeneratingUnit.FossilFuels if x != self]
self._ThermalGeneratingUnit._FossilFuels = filtered
self._ThermalGeneratingUnit = value
if self._ThermalGeneratingUnit is not None:
if self not in self._ThermalGeneratingUnit._FossilFuels:
self._ThermalGeneratingUnit._FossilFuels.append(self)
ThermalGeneratingUnit = property(getThermalGeneratingUnit, setThermalGeneratingUnit)
def getFuelAllocationSchedules(self):
"""A fuel allocation schedule must have a fossil fuel
"""
return self._FuelAllocationSchedules
def setFuelAllocationSchedules(self, value):
for x in self._FuelAllocationSchedules:
x.FossilFuel = None
for y in value:
y._FossilFuel = self
self._FuelAllocationSchedules = value
FuelAllocationSchedules = property(getFuelAllocationSchedules, setFuelAllocationSchedules)
def addFuelAllocationSchedules(self, *FuelAllocationSchedules):
for obj in FuelAllocationSchedules:
obj.FossilFuel = self
def removeFuelAllocationSchedules(self, *FuelAllocationSchedules):
for obj in FuelAllocationSchedules:
obj.FossilFuel = None
|
| sserrot/champion_relationships | venv/Lib/site-packages/win32/Demos/service/pipeTestServiceClient.py | Python | mit | 4,134 | 0.008224 |
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.
from win32pipe import *
from win32file import *
from win32event import *
import pywintypes
import win32api
import winerror
import sys, os, traceback
verbose = 0
#def ReadFromPipe(pipeName):
# Could (Should?) use CallNamedPipe, but this technique allows variable size
# messages (whereas you must supply a buffer size for CallNamedPipe!
# hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
# more = 1
# while more:
# hr = ReadFile(hPipe, 256)
# if hr==0:
# more = 0
# except win32api.error (hr, fn, desc):
# if hr==winerror.ERROR_MORE_DATA:
# data = dat
#
def CallPipe(fn, args):
ret = None
retryCount = 0
while retryCount < 8: # Retry up to 8 times, sleeping 5 seconds whenever the pipe is busy.
retryCount = retryCount + 1
try:
return fn(*args)
except win32api.error as exc:
if exc.winerror==winerror.ERROR_PIPE_BUSY:
win32api.Sleep(5000)
continue
else:
raise
raise RuntimeError("Could not make a connection to the server")
def testClient(server,msg):
if verbose:
print("Sending", msg)
data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 256, NMPWAIT_WAIT_FOREVER))
if verbose:
print("Server sent back '%s'" % data)
print("Sent and received a message!")
def testLargeMessage(server, size = 4096):
if verbose:
print("Sending message of size %d" % (size))
msg = "*" * size
data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER))
if len(data)-size:
print("Sizes are all wrong - send %d, got back %d" % (si
|
ze, len(data)))
def stressThread(server, numMessages, wait):
try:
try:
for i in range(numMessages):
r = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, "#" * 512, 1024, NMPWAIT_WAIT_FOREVER))
except:
traceback.print_exc()
print("Failed after %d messages" % i)
finally:
SetEvent(wait)
def stressTestClient(server, numThreads, numMessages):
import _thread
thread_waits = []
for t_num in range(numThreads):
# Note I could just wait on thread handles (after calling DuplicateHandle)
# See the service itself for an example of waiting for the clients...
wait = CreateEvent(None, 0, 0, None)
thread_waits.append(wait)
_thread.start_new_thread(stressThread, (server,numMessages, wait))
# Wait for all threads to finish.
WaitForMultipleObjects(thread_waits, 1, INFINITE)
def main():
import sys, getopt
server = "."
thread_count = 0
msg_count = 500
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:m:vl')
for o,a in opts:
if o=='-s':
server = a
if o=='-m':
msg_count = int(a)
if o=='-t':
thread_count = int(a)
if o=='-v':
global verbose
verbose = 1
if o=='-l':
testLargeMessage(server)
msg = " ".join(args).encode("mbcs")
except getopt.error as msg:
print(msg)
my_name = os.path.split(sys.argv[0])[1]
print("Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..." % my_name)
print(" -v = verbose")
print(" Specifying a value for -t will stress test using that many threads.")
return
testClient(server, msg)
if thread_count > 0:
print("Spawning %d threads each sending %d messages..." % (thread_count, msg_count))
stressTestClient(server, thread_count, msg_count)
if __name__=='__main__':
main()
|
| abilian/abilian-core | demo/config.py | Python | lgpl-2.1 | 1,105 | 0 |
class Config:
# specific (for this development instance)
# SERVER_NAME = 'localhost:5000'
SQLALCHEMY_DATABASE_URI = "sqlite:///data.db"
ANTIVIRUS_CHECK_REQUIRED = False
SECRET_KEY = "toto"
# develop settings
DEBUG = True
ASSETS_DEBUG = True
DEBUG_TB_ENABLED = True
# TEMPLATE_DEBUG = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
DEBUG_TB_PROFILER_ENABLED = False
# Generic for this project
SITE_NAME = "Abilian Core Demo"
MAIL_SENDER = "sender@example.com"
SESSION_COOKIE_NAME = "abilian-core-session"
PRIVATE_SITE = True
MAIL_ASCII_ATTACHMENTS = True
BABEL_ACCEPT_LANGUAGES = ("fr", "en", "es", "tr", "zh")
# celery settings
REDIS_URI = "redis://localhost/0"
BROKER_URL = REDIS_URI
CELERY_RESULT_BACKEND = REDIS_URI
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ALWAYS_EAGER = False # True: run tasks locally, no async
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# uncomment if you don't want to use system timezone
# CELERY_TIMEZONE = 'Europe/Paris'
|
| f-prettyland/angr | angr/sim_state.py | Python | bsd-2-clause | 30,608 | 0.003921 |
#!/usr/bin/env python
import functools
import itertools
import contextlib
import weakref
import logging
l = logging.getLogger("angr.sim_state")
import claripy
import ana
from archinfo import arch_from_id
from .misc.ux import deprecated
def arch_overrideable(f):
@functools.wraps(f)
def wrapped_f(self, *args, **kwargs):
if hasattr(self.arch, f.__name__):
arch_f = getattr(self.arch, f.__name__)
return arch_f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return wrapped_f
from .state_plugins import default_plugins
# This is a counter for the state-merging symbolic variables
merge_counter = itertools.count()
class SimState(ana.Storable): # pylint: disable=R0904
"""
The SimState represents the state of a program, including its memory, registers, and so forth.
:ivar regs: A convenient view of the state's registers, where each register is a property
:ivar mem: A convenient view of the state's memory, a :class:`angr.state_plugins.view.SimMemView`
:ivar registers: The state's register file as a flat memory region
:ivar memory: The state's memory as a flat memory region
:ivar solver: The symbolic solver and variable manager for this state
:ivar inspect: The breakpoint manager, a :class:`angr.state_plugins.inspect.SimInspector`
:ivar log: Information about the state's history
:ivar scratch: Information about the current execution step
:ivar posix: MISNOMER: information about the operating system or environment model
:ivar libc: Information about the standard library we are emulating
:ivar cgc: Information about the cgc environment
:ivar uc_manager: Control of under-constrained symbolic execution
:ivar unicorn: Control of the Unicorn Engine
"""
def __init__(self, project=None, arch=None, plugins=None, memory_backer=None, permissions_backer=None, mode=None, options=None,
add_options=None, remove_options=None, special_memory_filler=None, os_name=None):
self.project = project
self.arch = arch if arch is not None else project.arch.copy() if project is not None else None
if type(self.arch) is str:
self.arch = arch_from_id(self.arch)
# the options
if options is None:
if mode is None:
l.warning("SimState defaulting to symbolic mode.")
mode = "symbolic"
options = o.modes[mode]
options = set(options)
if add_options is not None:
options |= add_options
if remove_options is not None:
options -= remove_options
self.options = options
self.mode = mode
# plugins
self.plugins = { }
if plugins is not None:
for n,p in plugins.iteritems():
self.register_plugin(n, p)
if not self.has_plugin('memory'):
# we don't set the memory endness because, unlike registers, it's hard to understand
# which endness the data should be read
if o.ABSTRACT_MEMORY in self.options:
# We use SimAbstractMemory in static mode
# Convert memory_backer into 'global' region
if memory_backer is not None:
memory_backer = {'global': memory_backer}
# TODO: support permissions backer in SimAbstractMemory
self.register_plugin('memory', SimAbstractMemory(memory_backer=memory_backer, memory_id="mem"))
elif o.FAST_MEMORY in self.options:
self.register_plugin('memory', SimFastMemory(memory_backer=memory_backer, memory_id="mem"))
else:
self.register_plugin('memory', SimSymbolicMemory(memory_backer=memory_backer, permissions_backer=permissions_backer, memory_id="mem"))
if not self.has_plugin('registers'):
if o.FAST_REGISTERS in self.options:
self.register_plugin('registers', SimFastMemory(memory_id="reg", endness=self.arch.register_endness))
else:
self.register_plugin('registers', SimSymbolicMemory(memory_id="reg", endness=self.arch.register_endness))
# OS name
self.os_name = os_name
# This is used in static mode as we don't have any constraints there
self._satisfiable = True
# states are big, so let's give them UUIDs for ANA right away to avoid
# extra pickling
self.make_uuid()
self.uninitialized_access_handler = None
self._special_memory_filler = special_memory_filler
# this is a global condition, applied to all added constraints, memory reads, etc
self._global_condition = None
self.ip_constraints = []
def _ana_getstate(self):
s = dict(ana.Storable._ana_getstate(self))
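# Drop the inspector and the regs/mem convenience views from the plugin
# dict before pickling; only the substantive plugins are serialized.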
s['plugins'] = { k:v for k,v in s['plugins'].iteritems() if k not in ('inspector', 'regs', 'mem') }
return s
def _ana_setstate(self, s):
ana.Storable._ana_setstate(self, s)
for p in self.plugins.values():
p.set_state(self._get_weakref() if not isinstance(p, SimAbstractMemory) else self)
if p.STRONGREF_STATE:
p.set_strongref_state(self)
def _get_weakref(self):
return weakref.proxy(self)
def _get_strongref(self):
return self
def __repr__(self):
try:
ip_str = "%#x" % self.addr
except (SimValueError, SimSolverModeError):
ip_str = repr(self.regs.ip)
return "<SimState @ %s>" % ip_str
#
# Easier access to some properties
#
@property
def ip(self):
"""
Get the instruction pointer expression, trigger SimInspect breakpoints, and generate SimActions.
Use ``_ip`` to not trigger breakpoints or generate actions.
:return: an expression
"""
return self.regs.ip
@ip.setter
def ip(self, val):
self.regs.ip = val
@property
def _ip(self):
"""
Get the instruction pointer expression without triggering SimInspect breakpoints or generating SimActions.
:return: an expression
"""
return self.regs._ip
@_ip.setter
def _ip(self, val):
"""
Set the instruction pointer without triggering SimInspect breakpoints or generating SimActions.
:param val: The new instruction pointer.
:return: None
"""
self.regs._ip = val
@property
def addr(self):
"""
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
"""
return self.se.eval_one(self.regs._ip)
#
# Plugin accessors
#
def __getattr__(self, v):
try:
return self.get_plugin(v)
except KeyError:
raise AttributeError(v)
@property
def memory(self):
return self.get_plugin('memory')
@property
def registers(self):
return self.get_plugin('registers')
@property
def se(self):
return self.get_plugin('solver_engine')
@property
def solver(self):
return self.get_plugin('solver_engine')
@property
def inspect(self):
return self.get_plugin('inspector')
@property
def log(self):
return self.get_plugin('log')
@property
def scratch(self):
return self.get_plugin('scratch')
@property
def history(self):
return self.get_plugin('history')
@property
def posix(self):
return self.get_plugin('posix')
@property
def libc(self):
return self.get_plugin('libc')
@property
def cgc(self):
return self.get_plugin('cgc')
@property
def regs(self):
return self.get_plugin('regs')
@property
def mem(self):
return self.get_plugin('mem')
@property
def gdb(self):
|
| addgene/research | toolkit/parameters.py | Python | gpl-3.0 | 2,405 | 0.003742 |
########## recombination.py parameters
class Recombination_Parameters(object):
# Change these two values to the folders you prefer - use an absolute path e.g. /Users/Harry/fastq-data and
# /Users/Harry/csv-data or a path relative to the tools directory.
# You may use the same folder for input and output.
input_folder = "data"
output_folder = "data"
# The number of bases to retrieve before the seed sequence
HEAD = 10
# The number of bases to retrieve after the seed sequences
TAIL = 10
seed_sequences = {
"loxP": "ATAACTTCGTATAGCATACATTATAC
|
GAAGTTAT",
"lox2272": "ATAACTTCGTATAGGATACTTTATACGAAGTTAT",
}
########## serotypes.py parameters
class Serotypes_Parameters(object):
# Change these two values to the folders you prefer - use an absolute path e.g. /Users/Harry/fastq-data and
# /Users/Harry/csv-data or a path relative to the tools directory.
# You may use the same folder for input and output.
input_folder = "data"
output_folder = "data"
# These are the signatures that will be matched. The first part is the name, the part in brackets contains the
# actual signatures, separated by a comma (each serotype can have multiple signatures)
signatures = {
"AAV1": [
"AGTGCTTCAACGGGGGCCAG",
"GGGCGTGAATCCATCATCAACCCTGG",
"CCGGAGCTTCAAACACTGCATTGGACAAT"
],
"AAV2": [
"AGGCAACAGACAAGCAGCTACC",
"AACAGACAAGCAGCTACCGCA"
],
"AAV5": [
"TCCAAGCCTTCCACCTCGTCAGACGCCGAA",
"CACCAACAACCAGAGCTCCACCACTG",
"GCCCGTCAGCAGCTTCATC"
],
"AAV7": [
"AGTGAAACTGCAGGTAGTACC"
],
"AAV8": [
"GCAAAACACGGCTCCTCAAAT",
"CAGCAAGCGCTGGAACCCCGAGATCCAGTA",
"AAATACCATCTGAATGGAAGAAATTCATTG",
"CGTGGCAGATAACTTGCAGC",
"ATCCTCCGACCACCTTCAACC"
],
"AAV9": [
"AGTGCCCAAGCACAGGCGCA",
"ATCTCTCAAAGACTATTAAC",
"GGCGAGCAGTCTTCCAGGCA"
],
"AAVrh10": [
"CTACAAATCTACAAATGTGGACTTTG"
],
"PHPeB": [
"CTTTGGCGGTGCCTTTTAAGGCACAGGCGCAGA"
],
"PHPs": [
"AGGCGGTTAGGACGTCTTTGGCACAGGCGCAGA"
],
"AAVrg": [
"TAGCAGACCAAGACTACACAAAAACTGCT"
],
}
|
| frreiss/tensorflow-fred | tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py | Python | apache-2.0 | 16,160 | 0.008168 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for the ResNet50 model, executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import tempfile
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.client import device_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager.benchmarks.resnet50 import resnet50
from tensorflow.python.eager.benchmarks.resnet50 import resnet50_test_util
from tensorflow.python.framework import test_util
def compute_gradients(model, images, labels, num_replicas=1):
with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.compat.v1.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.compat.v2.summary.write('loss', loss)
if num_replicas != 1:
loss /= num_replicas
# TODO(b/110991947): We can mistakenly trace the gradient call in
# multi-threaded environment. Explicitly disable recording until
# this is fixed.
with tape.stop_recording():
grads = grad_tape.gradient(loss, model.variables)
return grads
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
def _events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.compat.v1.Event protos in the event file.
"""
records = list(tf.compat.v1.python_io.tf_record_iterator(filepath))
result = []
for r in records:
event = tf.compat.v1.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.compat.v1.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert tf.io.gfile.exists(logdir)
files = tf.io.gfile.listdir(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return _events_from_file(os.path.join(logdir, files[0]))
class ResNet50Test(tf.test.TestCase):
def _apply(self, defun=False, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
with tf.device(device), context.execution_mode(execution_mode):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
context.async_wait()
self.assertEqual((2, 1000), output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply(self):
self._apply(defun=False)
def test_apply_async(self):
self._apply(defun=False, execution_mode=context.ASYNC)
def test_apply_with_defun(self):
self._apply(defun=True)
def test_apply_with_defun_async(self):
self._apply(defun=True, execution_mode=context.ASYNC)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_no_top(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1)
if data_format == 'channels_first' else (2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_with_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False, pooling='avg')
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
self.assertEqual((2, 2048), output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_no_average_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, average_pooling=False, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 7, 7) if data_format == 'channels_first' else
(2, 7, 7, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_block3_strides(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_retrieve_intermediates(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
intermediates_dict = {}
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False,
intermediates_dict=intermediates_dict)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
if data_format == 'channels_first':
block_shapes = {
'block0': (2, 64, 112, 112),
'block0mp': (2, 64, 55, 55),
'block1': (2, 256, 55, 55),
'block2': (2, 512, 28, 28),
'block3': (2, 1024, 7, 7),
'block4': (2, 2048, 1, 1),
}
else:
block_shapes = {
'block0': (2, 112, 112, 64),
'block0mp': (2, 55, 55, 64),
'block1': (2, 55, 55, 256),
'block2': (2, 28, 28, 512),
'block3': (2, 7, 7, 1024),
'block4': (2, 1, 1, 2048),
}
for (block_name, block) in intermediates_dict.items():
self.assertEqual(block_shapes[block_name], block.shape)
def _test_train(self, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
tf.compat.v2.summary.experimental.set_step(
tf.compat.v1.train.get_or_create_global_step())
logdir = tempfile.mkdtemp()
with tf.compat.v2.summary.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), tf.compat.v2.summary.record_if(True):
with tf.device(device), context.execution_mode(execution_mode):
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
images, labels = resnet50_test_util.random_batch(2, data_format)
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
self.assertEqual(320, len(model.variables))
context.async_wait()
events = events_from_logdir(logdir)
self.
|
| snsokolov/contests | codeforces/574A_bear.py | Python | unlicense | 2,847 | 0.000702 |
#!/usr/bin/env python3
# 574A_bear.py - Codeforces.com/problemset/problem/574/A Bear program by Sergey 2015
import unittest
import sys
###############################################################################
# Bear Class
###############################################################################
class Bear:
""" Bear representation """
def __init__(self, test_inputs=None):
""" Default constructor """
it = iter(test_inputs.split("\n")) if test_inputs else None
def uinput():
return next(it) if it else sys.stdin.readline().rstrip()
# Reading single elements
self.n = int(uinput())
# Reading a single line of multiple elements
self.nums = list(map(int, uinput().split()))
def calculate(self):
""" Main calcualtion function of the class """
lamak = self.nums[0]
srt = sorted(self.nums[1:])
result = 0
while lamak <= srt[-1]:
srt[-1] -= 1
lamak += 1
result += 1
srt = sorted(srt)
return str(result)
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_single_test(self):
""" Bear class testing """
# Constructor test
test = "5\n5 1 11 2 8"
d = Bear(test)
self.assertEqual(d.n, 5)
self.assertEqual(d.nums, [5, 1, 11, 2, 8])
# Sample test
self.assertEqual(Bear(test).calculate(), "4")
# Sample test
test = "4\n1 8 8 8"
self.assertEqual(Bear(test).calculate(), "6")
# Sample test
test = "2\n7 6"
self.assertEqual(Bear(test).calculate(), "0")
# My tests
test = "4\n0 1 1 1"
self.assertEqual(Bear(test).calculate(), "2")
# Time limit test
self.time_limit_test(100)
def time_limit_test(self, nmax):
""" Timelimit testing """
import random
import timeit
# Random inputs
test = str(nmax) + "\n"
test += "0 "
nums = [1000 for i in range(nmax-1)]
test += " ".join(map(str, nums)) + "\n"
# Run the test
start = timeit.default_timer()
d = Bear(test)
calc = timeit.default_timer()
d.calculate()
stop = timeit.default_timer()
print("\nTimelimit Test: " +
"{0:.3f}s (init {1:.3f}s calc {2:.3f}s)".
format(stop-start, calc-start, stop-calc))
if __name__ == "__main__":
# Avoiding recursion limitations
sys.setrecursionlimit(100000)
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
# Print the result string
sys.stdout.write(Bear().calculate())
|
| gentoo/webapp-config | WebappConfig/ebuild.py | Python | gpl-2.0 | 6,937 | 0.010379 |
#!/usr/bin/python -O
#
# /usr/sbin/webapp-config
# Python script for managing the deployment of web-based
# applications
#
# Originally written for the Gentoo Linux distribution
#
# Copyright (c) 1999-2007 Authors
# Released under v2 of the GNU GPL
#
# Author(s) Stuart Herbert
# Renat Lumpau <rl03@gentoo.org>
# Gunnar Wrobel <wrobel@gentoo.org>
#
# ========================================================================
''' Provides a class that handles ebuild related tasks. '''
# ========================================================================
# Dependencies
# ------------------------------------------------------------------------
import os.path, re, pwd, grp
from WebappConfig.debug import OUT
import WebappConfig.wrapper as wrapper
from WebappConfig.sandbox import Sandbox
# ========================================================================
# Handler for ebuild related tasks
# ------------------------------------------------------------------------
class Ebuild:
'''
This class handles all ebuild related task. Currently this includes
displaying the post install instruction as well as running hooks
provided by the ebuild.
'''
def __init__(self, config):
self.config = config
self.__root = wrapper.get_root(self.config)
self.__re = re.compile('/+')
self.__sourced = self.__re.sub('/', self.__root
+ self.get_config('my_appdir'))
self.__hooksd = self.__re.sub('/', self.__root
+ self.get_config('my_hookscriptsdir'))
def get_config(self, option):
''' Return a config option.'''
return self.config.config.get('USER', option)
def run_hooks(self, type, server):
'''
Run the hook scripts - if there are any
'''
if self.config.pretend():
return
sandbox = Sandbox(self.config)
# save list of environment variables to set
env_map = self.run_vars(server)
if os.path.isdir(self.__hooksd):
for x in os.listdir(self.__hooksd):
if (os.path.isfile(self.__hooksd + '/' + x) and
os.access(self.__hooksd + '/' + x, os.X_OK)):
OUT.debug('Running hook script', 7)
sandbox.spawn(self.__hooksd + '/' + x + ' ' + type, env_map)
def show_post(self, filename, ptype, server = None):
'''
Display one of the post files.
'''
post_file = self.__sourced + '/' + filename
OUT.debug('Check for instruction file', 7)
if not os.path.isfile(post_file):
return
self.run_vars(server)
post_instructions = open(post_file).readlines()
OUT.debug('Read post instructions', 7)
post = [
'',
'=================================================================',
'POST-' + ptype.upper() + ' INSTRUCTIONS',
'=================================================================',
'']
for i in post_instructions:
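# Run each line through printf (via a shell) so escape sequences in the
# instruction file are expanded before display.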
i = i.replace('"', '\\"')
post.append(os.popen('printf "' + i + '"\n').read()[:-1])
post = post + [
'',
'=================================================================',
'']
for i in post:
OUT.notice(i)
def show_postinst(self, server = None):
'''
Display any post-installation instructions, if there are any.
'''
OUT.debug('Running show_postinst', 6)
self.show_post(filename = 'postinst-en.txt', ptype = 'install', server = server)
def show_postupgrade(self, server = None):
'''
Display any post-upgrade instructions, if there are any.
'''
OUT.debug('Running show_postupgrade', 6)
self.show_post(filename = 'postupgrade-en.txt', ptype = 'upgrade', server = server)
def run_vars(self, server = None):
'''
This function exports the necessary variables to the shell
environment so that they are accessible within the shell scripts
and/or files provided by the ebuild.
'''
v_root = self.get_config('vhost_root')
v_cgi = self.get_config('g_cgibindir')
v_conf = self.get_config('vhost_config_dir')
v_err = v_root + '/' + self.get_config('my_errorsbase')
v_icon = v_root + '/' + self.get_config('my_iconsbase')
g_inst = self.get_config('g_installdir')
g_htd = self.get_config('g_htdocsdir')
g_orig = self.get_config('g_orig_installdir')
vsu = None
vsg = None
if server:
vsu = pwd.getpwuid(server.vhost_server_uid)[0]
vsg = grp.getgrgid(server.vhost_server_gid)[0]
OUT.debug('Exporting variables', 7)
export_map = {'MY_HOSTROOTDIR' : None,
'MY_HTDOCSDIR' : None,
'MY_CGIBINDIR' : None,
'MY_INSTALLDIR' : g_inst,
'MY_ICONSDIR' : None,
'MY_SERVERCONFIGDIR' : None,
'MY_ERRORSDIR' : None,
'MY_SQLSCRIPTSDIR' : None,
'VHOST_ROOT' : None,
'VHOST_HTDOCSDIR' : g_htd,
'VHOST_CGIBINDIR' : v_cgi,
'VHOST_CONFDIR' : v_conf,
'VHOST_ERRORSDIR' : v_err,
'VHOST_ICONSDIR' : v_icon,
'VHOST_HOSTNAME' : None,
'VHOST_SERVER' : None,
'VHOST_APPDIR' : g_orig,
'VHOST_CONFIG_UID' : None,
'VHOST_CONFIG_GID' : None,
'VHOST_SERVER_UID' : vsu,
'VHOST_SERVER_GID' : vsg,
'VHOST_DEFAULT_UID' : None,
'VHOST_DEFAULT_GID' : None,
'VHOST_PERMS_SERVEROWNED_DIR' : None,
'VHOST_PERMS_SERVEROWNED_FILE' : None,
'VHOST_PERMS_CONFIGOWNED_DIR' : None,
'VHOST_PERMS_CONFIGOWNED_FILE' : None,
'VHOST_PERMS_DEFAULTOWNED_DIR' : None,
'VHOST_PERMS_VIRTUALOWNED_FILE': None,
'VHOST_PERMS_INSTALLDIR' : None,
'ROOT' : self.__root,
'PN' : None,
'PVR': None}
result = {}
for i in list(export_map.keys()):
value = export_map[i]
if not value:
value = self.get_config(i.lower())
os.putenv(i, str(value))
result[i] = str(value)
return result
|
| capybaralet/Sequential-Generation | TestImpGPSI_MNIST.py | Python | mit | 18,331 | 0.004582 |
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
# basic python
import numpy as np
import numpy.random as npr
import cPickle
# theano business
import theano
import theano.tensor as T
# phil's sweetness
import utils
from NetLayers import relu_actfun, softplus_actfun, tanh_actfun
from InfNet import InfNet
from HydraNet import HydraNet
from GPSImputer import GPSImputer, load_gpsimputer_from_file
from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX
RESULT_PATH = "IMP_MNIST_GPSI/"
###############################
###############################
## TEST GPS IMPUTER ON MNIST ##
###############################
###############################
def test_mnist(step_type='add',
imp_steps=6,
occ_dim=15,
drop_prob=0.0):
#########################################
# Format the result tag more thoroughly #
#########################################
dp_int = int(100.0 * drop_prob)
result_tag = "{}RELU_GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
Xtr = np.vstack((Xtr, Xva))
Xva = Xte
#del Xte
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
##########################
# Get some training data #
##########################
# rng = np.random.RandomState(1234)
# dataset = 'data/mnist.pkl.gz'
# datasets = load_udm(dataset, as_shared=False, zero_mean=False)
# Xtr = datasets[0][0]
# Xva = datasets[1][0]
# Xte = datasets[2][0]
# # Merge validation set and training set, and test on test set.
# #Xtr = np.concatenate((Xtr, Xva), axis=0)
# #Xva = Xte
# Xtr = to_fX(shift_and_scale_into_01(Xtr))
# Xva = to_fX(shift_and_scale_into_01(Xva))
# tr_samples = Xtr.shape[0]
# va_samples = Xva.shape[0]
batch_size = 200
batch_reps = 1
all_pix_mean = np.mean(np.mean(Xtr, axis=1))
data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
x_dim = Xtr.shape[1]
s_dim = x_dim
#s_dim = 300
z_dim = 100
init_scale = 0.6
x_in_sym = T.matrix('x_in_sym')
x_out_sym = T.matrix('x_out_sym')
x_mask_sym = T.matrix('x_mask_sym')
#################
# p_zi_given_xi #
#################
params = {}
shared_config = [(x_dim + x_dim), 500, 500]
top_config = [shared_config[-1], z_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_zi_given_xi.init_biases(0.0)
###################
# p_sip1_given_zi #
###################
params = {}
shared_config = [z_dim, 500, 500]
output_config = [s_dim, s_dim, s_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_sip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_sip1_given_zi.init_biases(0.0)
################
# p_x_given_si #
################
params = {}
shared_config = [s_dim]
output_config = [x_dim, x_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_x_given_si = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_x_given_si.init_biases(0.0)
#################
# q_zi_given_xi #
#################
params = {}
shared_config = [(x_dim + x_dim), 500, 500]
top_config = [shared_config[-1], z_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
q_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
q_zi_given_xi.init_biases(0.0)
###########################################################
# Define parameters for the GPSImputer, and initialize it #
###########################################################
print("Building the GPSImputer...")
gpsi_params = {}
gpsi_params['x_dim'] = x_dim
gpsi_params['z_dim'] = z_dim
gpsi_params['s_dim'] = s_dim
# switch between direct construction and construction via p_x_given_si
gpsi_params['use_p_x_given_si'] = False
gpsi_params['imp_steps'] = imp_steps
gpsi_params['step_type'] = step_type
gpsi_params['x_type'] = 'bernoulli'
gpsi_params['obs_transform'] = 'sigmoid'
GPSI = GPSImputer(rng=rng,
x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, \
p_zi_given_xi=p_zi_given_xi, \
p_sip1_given_zi=p_sip1_given_zi, \
p_x_given_si=p_x_given_si, \
q_zi_given_xi=q_zi_given_xi, \
params=gpsi_params, \
shared_param_dicts=None)
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
log_name = "{}_RESULTS.txt".format(result_tag)
out_file = open(log_name, 'wb')
costs = [0. for i in range(10)]
learn_rate = 0.0002
momentum = 0.5
batch_idx = np.arange(batch_size) + tr_samples
for i in range(250000):
scale = min(1.0, ((i+1) / 5000.0))
lam_scale = 1.0 - min(1.0, ((i+1) / 100000.0)) # decays from 1.0->0.0
if (((i + 1) % 15000) == 0):
learn_rate = learn_rate * 0.93
if (i > 10000):
momentum = 0.90
else:
momentum = 0.75
# get the indices of training samples for this batch update
batch_idx += batch_size
if (np.max(batch_idx) >= tr_samples):
# we finished an "epoch", so we rejumble the training set
Xtr = row_shuffle(Xtr)
batch_idx = np.arange(batch_size)
# set sgd and objective function hyperparams for this update
GPSI.set_sgd_params(lr=scale*learn_rate, \
mom_1=scale*momentum, mom_2=0.98)
GPSI.set_train_switch(1.0)
GPSI.set_lam_nll(lam_nll=1.0)
GPSI.set_lam_kld(lam_kld_p=0.05, lam_kld_q=0.95, lam_kld_g=(0.1 * lam_scale))
GPSI.set_lam_l2w(1e-5)
# perform a minibatch update and record the cost for this batch
xb = to_fX( Xtr.take(batch_idx, axis=0) )
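# construct_masked_data (from HelperFuncs) is assumed here to corrupt each
# batch by occluding an occ_dim patch and/or dropping pixels with
# probability drop_prob, filling them with data_mean; xi is the corrupted
# input, xo the clean target, and xm the corresponding mask.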
xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
result = GPSI.train_joint(xi, xo, xm, batch_reps)
# do diagnostics and
|
| adamcharnock/django-hordak | hordak/migrations/0008_auto_20161209_0129.py | Python | mit | 353 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-09 01:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hordak", "0007_auto_20161209_0111")]
operations = [
migrations.RenameField("Account", "has_statements", "is_bank_account")
]
|
| sadolit/pafy | pafy.py | Python | gpl-3.0 | 8,660 | 0.000577 |
#!/usr/bin/python
''' Python API for YouTube
Copyright (C) 2013 nagev
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. '''
__version__ = "0.2"
__author__ = "nagev"
__license__ = "GPLv3"
import re
import sys
import time
import json
import urllib
import urllib2
from urlparse import parse_qs
def _decrypt_signature(s):
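# Each encrypted-signature length has its own fixed character permutation;
# lengths not listed below fall through to the NameError at the end.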
if len(s) == 92:
return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + \
s[80:83]
elif len(s) == 90:
return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + \
s[78:81]
elif len(s) == 88:
return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1]\
+ s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
elif len(s) == 87:
return s[4:23] + s[86] + s[24:85]
elif len(s) == 86:
return s[83:85] + s[26] + s[79:46:-1] + s[85] + s[45:36:-1] + s[30] + \
s[35:30:-1] + s[46] + s[29:26:-1] + s[82] + s[25:1:-1]
elif len(s) == 85:
return s[2:8] + s[0] + s[9:21] + s[65] + s[22:65] + s[84] + s[66:82] +\
s[21]
elif len(s) == 84:
return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26]
elif len(s) == 83:
return s[:15] + s[80] + s[16:80] + s[15]
elif len(s) == 82:
return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1]\
+ s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
elif len(s) == 81:
return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1]\
+ s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
elif len(s) == 79:
return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1]\
+ s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
else:
raise NameError("Unable to decode video url - sig len %s" % len(s))
class Stream():
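# Maps YouTube itag codes to (resolution, container/format) pairs.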
resolutions = {
'5': ('240x400', 'flv'),
'17': ('144x176', '3gp'),
'18': ('360x640', 'mp4'),
'22': ('720x1280', 'mp4'),
'34': ('360x640', 'flv'),
'35': ('480x854', 'flv'),
'36': ('320x240', '3gp'),
'37': ('1080x1920', 'mp4'),
'38': ('3072x4096', 'superHD'),
'43': ('360x640', 'webm'),
'44': ('480x854', 'webm'),
'45': ('720x1280', 'webm'),
'46': ('1080x1920', 'webm'),
'82': ('640x360-3D', 'mp4'),
'84': ('1280x720-3D', 'mp4'),
'100': ('640x360-3D', 'webm'),
'102': ('1280x720-3D', 'webm')}
def __init__(self, streammap, opener, title="ytvid"):
if not streammap.get("sig", ""):
streammap['sig'] = [_decrypt_signature(streammap['s'][0])]
self.url = streammap['url'][0] + '&signature=' + streammap['sig'][0]
self.vidformat = streammap['type'][0].split(';')[0]
self.resolution = self.resolutions[streammap['itag'][0]][0]
self.extension = self.resolutions[streammap['itag'][0]][1]
self.itag = streammap['itag'][0]
self.title = title
self.filename = self.title + "." + self.extension
self._opener = opener
def get_filesize(self):
opener = self._opener
return int(opener.open(self.url).headers['content-length'])
def download(self, progress=True, filepath=""):
response = self._opener.open(self.url)
total = int(response.info().getheader('Content-Length').strip())
print u"-Downloading '{}' [{:,} Bytes]".format(self.filename, total)
status_string = (' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} '
'kbps]. ETA: [{:.0f} secs]')
chunksize, bytesdone, t0 = 16834, 0, time.time()
outfh = open(filepath or self.filename, 'wb')
while 1:
chunk = response.read(chunksize)
elapsed = time.time() - t0
outfh.write(chunk)
bytesdone += len(chunk)
if not chunk:
outfh.close()
break
if progress:
rate = (bytesdone / 1024) / elapsed
eta = (total - bytesdone) / (rate * 1024)
display = (bytesdone, bytesdone * 1.0 / total, rate, eta)
status = status_string.format(*display)
sys.stdout.write("\r" + status + ' ' * 4 + "\r")
sys.stdout.flush()
print "\nDone"
class Pafy():
def __len__(self):
return self.length
def __repr__(self):
out = ""
keys = "Title Author ID Duration Rating Views Thumbnail Keywords"
keys = keys.split(" ")
keywords = ", ".join(self.keywords).decode("utf8")
length = time.strftime('%H:%M:%S', time.gmtime(self.length))
info = dict(Title=self.title,
Author=self.author,
Views=self.viewcount,
Rating=self.rating,
Duration=length,
ID=self.videoid,
Thumbnail=self.thumb,
Keywords=keywords)
for k in keys:
try:
out += "%s: %s\n" % (k, info[k])
except KeyError:
pass
return out.encode("utf8", "ignore")
def __init__(self, video_url):
infoUrl = 'https://www.youtube.com/get_video_info?video_id='
vidid = re.search(r'v=([a-zA-Z0-9-_]*)', video_url).group(1)
infoUrl += vidid + "&asv=3&el=detailpage&hl=en_US"
self.urls = []
opener = urllib2.build_opener()
ua = ("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64;"
"Trident/5.0)")
opener.addheaders = [('User-Agent', ua)]
self.keywords = ""
self.rawinfo = opener.open(infoUrl).read()
self.allinfo = parse_qs(self.rawinfo)
self.title = self.allinfo['title'][0].decode('utf-8')
self.author = self.allinfo['author'][0]
self.videoid = self.allinfo['video_id'][0]
if 'keywords' in self.allinfo:
self.keywords = self.allinfo['keywords'][0].split(',')
self.rating = float(self.allinfo['avg_rating'][0])
self.length = int(self.allinfo['length_seconds'][0])
self.duration = time.strftime('%H:%M:%S', time.gmtime(self.length))
self.viewcount = int(self.allinfo['view_count'][0])
self.thumb = urllib.unquote_plus(self.allinfo['thumbnail_url'][0])
self.formats = self.allinfo['fmt_list'][0].split(",")
self.formats = [x.split("/") for x in self.formats]
if self.allinfo.get('iurlsd'):
self.bigthumb = self.allinfo['iurlsd'][0]
if self.allinfo.get('iurlmaxres'):
self.bigthumbhd = self.allinfo['iurlmaxres'][0]
streamMap = self.allinfo['url_encoded_fmt_stream_map'][0].split(',')
smap = [parse_qs(sm) for sm in streamMap]
if not smap[0].get("sig", ""): # vevo!
watchurl = "https://www.youtube.com/watch?v=" + vidid
watchinfo = opener.open(watchurl).read()
match = re.search(r';ytplayer.config = ({.*?});', watchinfo)
try:
myjson = json.loads(match.group(1))
except:
raise NameError('Problem handling this video')
args = myjson['args']
streamMap = args['url_encoded_fmt_stream_map'].split(",")
smap = [parse_qs(sm) for sm in streamMap]
self.streams = [Stream(sm, opener, self.title) for sm in smap]
def getbest(self, preftype="any", ftypestrict=True):
# set ftype
|
| makos/sshchan-oop | chan_mark.py | Python | gpl-3.0 | 1,058 | 0.004726 |
"""
Markup class allows the use of easy-to-write characters to style the text
instead of using escape codes.
==text== --> reverse video
'''text''' --> bold
~~text~~ --> strikethrough
Copyright (c) 2015
makos <https://github.com/makos>, chibi <http://neetco.de/chibi>
under GNU GPL v3, see LICENSE for details
"""
import re
class Marker():
def esc(self, input_text):
input_text = input_text.replace('\033', '\\033')
return input_text
def demarkify(self, input_text):
"""Prints out a marked-up piece of text."""
output_text = self.esc(input_text)
# strikethrough
output_text = re.sub(
'~~(?P<substring>.*?)~~', '\033[0;9m\g<substring>\033[0m',
output_text)
# bold
output_text = re.sub(
'\'\'\'(?P<substring>.*?)\'\'\'', '\033[0;1m\g<substring>\033[0m',
output_text)
# rv
output_text = re.sub(
'==(?P<substring>.*?)==', '\033[0;7m\g<substring>\033[0m',
output_text)
return output_text
|
| nharsch/django-scheduler | schedule/feeds/atom.py | Python | bsd-3-clause | 22,525 | 0.002042 |
from django.utils.six.moves.builtins import str
#
# django-atompub by James Tauber <http://jtauber.com/>
# http://code.google.com/p/django-atompub/
# An implementation of the Atom format and protocol for Django
#
# For instructions on how to use this module to generate Atom feeds,
# see http://code.google.com/p/django-atompub/wiki/UserGuide
#
#
# Copyright (c) 2007, James Tauber
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
from xml.sax.saxutils import XMLGenerator
from django.utils import timezone
GENERATOR_TEXT = 'django-atompub'
GENERATOR_ATTR = {
'uri': 'http://code.google.com/p/django-atompub/',
'version': 'r33'
}
# based on django.utils.xmlutils.SimplerXMLGenerator
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None:
attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
# based on django.utils.feedgenerator.rfc3339_date
def rfc3339_date(date):
return '%s-%s-%sT%s:%s:%sZ' % (date.year, date.month, date.day, date.hour, date.minute, date.second)
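# Illustrative note (added comment, not in the original source): for
# datetime(2007, 5, 28, 12, 5, 0) the format string above yields
# '2007-5-28T12:5:0Z'; the components are not zero-padded.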
# based on django.utils.feedgenerator.get_tag_uri
def get_tag_uri(url, date):
"Creates a TagURI. See http://diveintomark.org/archives/2
|
004/05/28/howto-atom-id"
tag = re.sub('^http://', '', url)
if date is not None:
tag = re.sub('/', ',%s-%s-%s:/' % (date.year, date.month, date.day), tag, 1)
tag = re.sub('#', '/', tag)
return 'tag:' + tag
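# Illustrative example (added comment, not in the original source):
# get_tag_uri('http://example.com/feed/1', date(2007, 5, 28)) returns
# 'tag:example.com,2007-5-28:/feed/1'.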
# based on django.contrib.syndication.feeds.Feed
class Feed(object):
VALIDATE = True
def __init__(self, slug, feed_url):
# @@@ slug and feed_url are not used yet
pass
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
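# Added explanatory comment (not in the original): this helper lets a Feed
# subclass define e.g. feed_title either as a plain attribute, a zero-argument
# method, or a one-argument method that receives the object resolved from the
# URL; the argument count is inspected up front so that a TypeError raised
# inside the method is not mistaken for a signature mismatch.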
def get_feed(self, extra_params=None):
if extra_params:
try:
obj = self.get_object(extra_params.split('/'))
except (AttributeError, LookupError):
raise LookupError('Feed does not exist')
else:
obj = None
feed = AtomFeed(
atom_id=self.__get_dynamic_attr('feed_id', obj),
title=self.__get_dynamic_attr('feed_title', obj),
updated=self.__get_dynamic_attr('feed_updated', obj),
icon=self.__get_dynamic_attr('feed_icon', obj),
logo=self.__get_dynamic_attr('feed_logo', obj),
rights=self.__get_dynamic_attr('feed_rights', obj),
subtitle=self.__get_dynamic_attr('feed_subtitle', obj),
authors=self.__get_dynamic_attr('feed_authors', obj, default=[]),
categories=self.__get_dynamic_attr('feed_categories', obj, default=[]),
contributors=self.__get_dynamic_attr('feed_contributors', obj, default=[]),
links=self.__get_dynamic_attr('feed_links', obj, default=[]),
extra_attrs=self.__get_dynamic_attr('feed_extra_attrs', obj),
hide_generator=self.__get_dynamic_attr('hide_generator', obj, default=False)
)
items = self.__get_dynamic_attr('items', obj)
if items is None:
raise LookupError('Feed has no items field')
for item in items:
feed.add_item(
atom_id=self.__get_dynamic_attr('item_id', item),
title=self.__get_dynamic_attr('item_title', item),
updated=self.__get_dynamic_attr('item_updated', item),
content=self.__get_dynamic_attr('item_content', item),
published=self.__get_dynamic_attr('item_published', item),
rights=self.__get_dynamic_attr('item_rights', item),
source=self.__get_dynamic_attr('item_source', item),
summary=self.__get_dynamic_attr('item_summary', item),
authors=self.__get_dynamic_attr('item_authors', item, default=[]),
categories=self.__get_dynamic_attr('item_categories', item, default=[]),
contributors=self.__get_dynamic_attr('item_contributors', item, default=[]),
links=self.__get_dynamic_attr('item_links', item, default=[]),
extra_attrs=self.__get_dynamic_attr('item_extra_attrs', None, default={}),
)
if self.VALIDATE:
feed.validate()
return feed
class ValidationError(Exception):
pass
# based on django.utils.feedgenerator.SyndicationFeed and django.utils.feedgenerator.Atom1Feed
class AtomFeed(object):
mime_type = 'application/atom+xml'
ns = u'http://www.w3.org/2005/Atom'
def __init__(self, atom_id, title, updated=None, icon=None, logo=None, rights=None, subtitle=None, authors=None, categories=None, contributors=None, links=None, extra_attrs={}, hide_generator=False):
if atom_id is None:
raise LookupError('Feed has no feed_id field')
if title is None:
raise LookupError('Feed has no feed_title field')
# if updated == None, we'll calculate it
self.feed = {
'id': atom_id,
'title': title,
'updated': updated,
'icon': icon,
'logo': logo,
'rights': rights,
'subtitle': subtitle,
'authors': authors or [],
'categories': categories or [],
'contributors': contributors or [],
'links': links or [],
'extra_attrs': extra_attrs,
'hide_generator': hide_generator,
}
self.items = []
def add_item(self, atom_id, title, updated, content=None, published=None, rights=None, source=None, summary=None, authors=None, categories=None, contributors=None, links=None, extra_attrs={}):
if atom_id is None:
raise LookupError('Feed has no item_id method')
if title is None:
raise LookupError('Feed has no item_title method')
if updated is None:
raise LookupError('Feed has no item_updated method')
self.items.append({
'id': atom_id,
'title': title,
'updated': updated,
'content': content,
'published': published,
'rights': rights,
'source': source,
'summary': summary,
'authors': authors or [],
'categories': categories or [],
'contributors':
|
cgchemlab/chemlab
|
src/tests/test_topology_reader.py
|
Python
|
gpl-3.0
| 2,938
| 0.003744
|
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ChemLab
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import chemlab.gromacs_topology
class TestTopologyReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.topol_file = 'topol.top'
cls.gt = chemlab.gromacs_topology.GromacsTopology(cls.topol_file, generate_exclusions=True)
cls.gt.read()
def test_replicated_molecules(self):
"""Test the molecule replication"""
total_nr_atoms = len(self.gt.atoms)
expected_nr_atoms = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_atoms = len(self.gt.gt.molecules_data[mol_name]['atoms'])
expected_nr_atoms += nmols * mol_atoms
self.assertEqual(total_nr_atoms, expected_nr_atoms)
total_nr_bonds = len(self.gt.bonds)
expected_nr_bonds = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_bonds = len(self.gt.gt.molecules_data[mol_name].get('bonds', []))
expected_nr_bonds += nmols * mol_bonds
self.assertEqual(total_nr_bonds, expected_nr_bonds)
total_nr_angles = len(self.gt.angles)
expected_nr_angles = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_angles = len(self.gt.gt.molecules_data[mol_name].get('angles', []))
expected_nr_angles += nmols * mol_angles
self.assertEqual(total_nr_angles, expected_nr_angles)
total_nr_dihedrals = len(self.gt.dihedrals)
expected_nr_dihedrals = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_dihedrals = len(self.gt.gt.molecules_data[mol_name].get('dihedrals',[]))
expected_nr_dihedrals += nmols * mol_dihedrals
self.assertEqual(total_nr_dihedrals, expected_nr_dihedrals)
total_nr_pairs = len(self.gt.pairs)
expected_nr_pairs = 0
for mol_name, nmols in self.gt.gt.molecules:
mol_pairs = len(self.gt.gt.molecules_data[mol_name].get('pairs', []))
expected_nr_pairs += nmols * mol_pairs
self.assertEqual(total_nr_pairs, expected_nr_pairs)
if __name__ == '__main__':
unittest.main()
|
samn/spectral-workbench
|
webserver/public/lib/bespin-0.9a2/lib/dryice/path.py
|
Python
|
gpl-3.0
| 33,721
| 0.000919
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
#
# The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.5 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 9 Mar 2007
Slightly modified to eliminate the deprecationwarning for the md5 module.
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib
__version__ = '2.2'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
|
lino-framework/xl
|
lino_xl/lib/humanlinks/__init__.py
|
Python
|
bsd-2-clause
| 1,211
| 0.004129
|
# Copyright 2014-2015 Rumma & Ko Ltd
#
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Defines "parency links" between two "persons", and a user interface
to manage them.
This module is probably useful in combination with
:mod:`lino_xl.lib.households`.
.. autosummary::
:toctree:
choicelists
models
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
"Extends :class:`lino.core.plugin.Plugin`."
verbose_name = _("Parency links")
## settings
person_model = 'contacts.Person'
"""
A string referring to the model which represents a human in your
application. Default value is ``'contacts.Person'`` (referring to
:class:`lino_xl.lib.contacts.Person`).
"""
def on_site_startup(self, site):
self.person_model = site.models.resolve(self.person_model)
super(Plugin, self).on_site_startup(site)
def setup_explorer_menu(self, site, user_type, m):
# mg = site.plugins.contacts
mg = site.plugins[self.person_model._meta.app_label]
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('humanlinks.Links')
m.add_action('humanlinks.LinkTypes')
|
moehle/investor_lifespan_model
|
investor_lifespan_model/__init__.py
|
Python
|
mit
| 282
| 0
|
from investor_lifespan_model.investor import Investor
from investor_lifespan_model.market import Market
from investor_lifespan_model.insurer import Insurer
from investor_lifespan_model.lifespan_model import LifespanModel
from investor_lifespan_model.mortality_data import π, G, tf
|
ActiveState/code
|
recipes/Python/137551_Using_RegObj_Automatiaccess_MSW/recipe-137551.py
|
Python
|
mit
| 1,024
| 0.03125
|
RegObj.dll is an ActiveX server--and, hence, has an automation interface--that is available with documentation in
the distribution file known as RegObji.exe, from the following page:
http://msdn.microsoft.com/vbasic/downloads/addins.asp
To provide early binding for RegObj use
>>> from win32com.client import gencache
>>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0)
or the MakePy utility within PythonWin, referring to "Regstration Manipulation Classes (1.0)" (Please notice
the spelling error.)
Sample use, to determine what command is associated with a Python file:
>>> from win32com.client import Dispatch, gencache
>>> from win32con import HKEY_CLASSES_ROOT
>>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0)
>>> regobj = Dispatch ( 'RegObj.Registry' )
>>> HKCR = regobj.RegKeyFromHKey ( HKEY_CLASSES_ROOT )
>>> PythonFileKey = HKCR.ParseKeyName('Python.File\Shell\Open\command')
>>> PythonFileKey.Value
u'J:\\Python22\\pythonw.exe "%1" %*'
|
VitalPet/c2c-rd-addons
|
hr_timesheet_product/__openerp__.py
|
Python
|
agpl-3.0
| 1,555
| 0.020579
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'sequence': 500,
'name' : 'Displays product in hr analytic timesheet'
, 'version' : '0.7'
, 'category' : 'HR'
, 'description' : """
This module displays the hidden field product_id
"""
, 'author' : 'ChriCar Beteiligungs- und Beratungs- GmbH'
, 'depends' : ['hr_timesheet' ]
, 'data' : ['hr_timesheet_product.xml']
, 'demo_xml' : []
, 'installable': False
, 'active' : False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
slaweet/autoskola
|
main/geography/management/commands/add_places.py
|
Python
|
mit
| 985
| 0.00203
|
from django.core.management.base import BaseCommand, CommandError
from geography.models import Place
class Command(BaseCommand):
help = u"""Add new places"""
usage_str = 'USAGE: ./manage.py add_places map_name STATE|CITY|RIVER|LAKE|... [difficulty]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(self.usage_str)
if not args[1] in Place.PLACE_TYPE_SLUGS:
raise CommandError(self.usage_str)
place_type = Place.PLACE_TYPE_SLUGS[args[1]]
map_name = args[0]
state_file = open(map_name.lower() + ".txt")
states = state_file.read()
ss = states.split("\n")
for s in ss:
place = s.split("\t")
if(len(place) == 2):
name = place[1]
code = place[0]
p = Place(code=code, name=name, difficulty=500, type=place_type)
p.save()
self.stdout.write(name + " added")
|
floemker/django-wiki
|
src/wiki/plugins/notifications/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 881
| 0.00227
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0006_auto_20141229_1630'),
('wiki', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ArticleSubscription',
fields=[
('articleplugin_ptr', models.OneToOneField(auto_created=True, to='wiki.ArticlePlugin', primary_key=True, parent_link=True, serialize=False, on_delete=models.CASCADE)),
('subscription', models.OneToOneField(to='django_nyt.Subscription', on_delete=models.CASCADE)),
],
options={
},
bases=('wiki.articleplugin',),
),
migrations.AlterUniqueTogether(
name='articlesubscription',
unique_together=set([('subscription', 'articleplugin_ptr')]),
),
]
|
Smart-Torvy/torvy-home-assistant
|
homeassistant/components/cover/vera.py
|
Python
|
mit
| 1,970
| 0
|
"""
Support for Vera cover - curtains, rollershutters etc.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.vera/
"""
import logging
from homeassistant.components.cover import CoverDevice
from homeassistant.components.vera import (
VeraDevice, VERA_DEVICES, VERA_CONTROLLER)
DEPENDENCIES = ['vera']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Find and return Vera covers."""
add_devices_callback(
VeraCover(device, VERA_CONTROLLER) for
device in VERA_DEVICES['cover'])
# pylint: disable=abstract-method
class VeraCover(VeraDevice, CoverDevice):
"""Represents a Vera Cover in Home Assistant."""
def __init__(self, vera_device, controller):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller)
@property
def current_cover_position(self):
"""
Return current position of cover.
0 is closed, 100 is fully open.
"""
position = self.vera_device.get_level()
if position <= 5:
return 0
if position >= 95:
return 100
return position
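# Added clarifying comment (not in the original): the Vera level is snapped to
# the extremes, so readings of 5 or below report as fully closed (0) and 95 or
# above as fully open (100).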
def set_cover_position(self, position, **kwargs):
"""Move the cover to a specific position."""
self.vera_device.set_level(position)
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is not None:
if self.current_cover_position > 0:
return False
else:
return True
def open_cover(self, **kwargs):
"""Open the cover."""
self.vera_device.open()
def close_cover(self, **kwargs):
"""Close the cover."""
self.vera_device.close()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.vera_device.stop()
|
702nADOS/sumo
|
tools/assign/costMemory.py
|
Python
|
gpl-3.0
| 7,283
| 0.00151
|
# -*- coding: utf-8 -*-
"""
@file costMemory.py
@author Jakob Erdmann
@author Michael Behrisch
@date 2012-03-14
@version $Id: costMemory.py 22608 2017-01-17 06:28:54Z behrisch $
Perform smoothing of edge costs across successive iterations of duaIterate
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2012-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from xml.sax import saxutils, make_parser, handler
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from sumolib.net import readNet
class EdgeMemory:
def __init__(self, cost):
self.cost = cost
self.seen = True
def update(self, cost, memory_weight, new_weight, pessimism):
p = (cost / self.cost) ** pessimism if self.cost > 0 else 1
memory_factor = memory_weight / (memory_weight + new_weight * p)
self.cost = self.cost * memory_factor + cost * (1 - memory_factor)
self.seen = True
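# Worked example (added comment, not in the original source): with pessimism=0
# the factor p is 1, so calling update(60, memory_weight=2, new_weight=1,
# pessimism=0) on an EdgeMemory holding cost 30 gives memory_factor = 2/3 and
# a new cost of 30 * 2/3 + 60 * 1/3 = 40, a weighted average of old and new.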
class CostMemory(handler.ContentHandler):
# memorize the weighted average of edge costs
def __init__(self, cost_attribute, pessimism=0, network_file=None):
# the cost attribute to parse (i.e. 'traveltime')
self.cost_attribute = cost_attribute.encode('utf8')
# the duaIterate iteration index
self.iteration = None
# the main data store: for every interval and edge id we store costs and
# whether data was seen in the last call of load_costs()
# start -> (edge_id -> EdgeMemory)
self.intervals = defaultdict(dict)
# the interval length (only known for certain if multiple intervals
# have been seen)
self.interval_length = 214748 # SUMOTIME_MAXSTRING
# the interval currently being parsed
self.current_interval = None
# the combined weight of all previously loaded costs
self.memory_weight = 0.0
# update is done according to: memory * memory_factor + new * (1 -
# memory_factor)
self.memory_factor = None
# differences between the previously loaded costs and the memorized
# costs
self.errors = None
# some statistics
self.num_loaded = 0
self.num_decayed = 0
# travel times without obstructing traffic
# XXX could use the minimum known traveltime
self.traveltime_free = defaultdict(lambda: 0)
if network_file is not None:
# build a map of default weights for decaying edges assuming the
# attribute is traveltime
self.traveltime_free = dict([(e.getID(), e.getLength() / e.getSpeed())
for e in readNet(network_file).getEdges()])
self.pessimism = pessimism
def startElement(self, name, attrs):
if name == 'interval':
self.current_interval = self.intervals[float(attrs['begin'])]
if name == 'edge':
id = attrs['id']
# may be missing for some
if self.cost_attribute.decode('utf-8') in attrs:
self.num_loaded += 1
cost = float(attrs[self.cost_attribute.decode('utf-8')])
if id in self.current_interval:
edgeMemory = self.current_interval[id]
self.errors.append(edgeMemory.cost - cost)
edgeMemory.update(
cost, self.memory_weight, self.new_weight, self.pessimism)
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, edgeMemory.cost, file=f)
else:
self.errors.append(0)
self.current_interval[id] = EdgeMemory(cost)
def load_costs(self, dumpfile, iteration, weight):
# load costs from dumpfile and update memory according to weight and
# iteration
if weight <= 0:
sys.stderr.write(
"Skipped loading of costs because the weight was %s but should have been > 0\n" % weight)
return
assert(weight > 0)
if self.iteration == None and iteration != 0:
print("Warning: continuing with empty memory")
# update memory weights. memory is a weighted average across all runs
self.new_weight = float(weight)
self.iteration = iteration
self.errors = []
# mark all edges as unseen
for edges in self.intervals.values():
for edgeMemory in edges.values():
edgeMemory.seen = False
# parse costs
self.num_loaded = 0
parser = make_parser()
parser.setContentHandler(self)
parser.parse(dumpfile)
# decay costs of unseen edges
self.num_decayed = 0
for edges in self.intervals.values():
for id, edgeMemory in edges.items():
if not edgeMemory.seen:
edgeMemory.update(
self.traveltime_free[id], self.memory_weight, self.new_weight, self.pessimism)
self.num_decayed += 1
# if id == "4.3to4.4":
# with open('debuglog', 'a') as f:
# print(self.memory_factor, 'decay', edgeMemory.cost, file=f)
# figure out the interval length
if len(self.intervals.keys()) > 1:
sorted_begin_times = sorted(self.intervals.keys())
self.interval_length = sorted_begin_times[
1] - sorted_begin_times[0]
self.memory_weight += self.new_weight
def write_costs(self, weight_file):
with open(weight_file, 'w') as f:
f.write('<netstats>\n')
for start, edge_costs in self.intervals.items():
f.write(' <interval begin="%d" end="%d">\n' %
(start, start + self.interval_length))
for id, edgeMemory in edge_costs.items():
f.write(' <edge id="%s" %s="%s"/>\n' %
(id, self.cost_attribute.decode('utf-8'), edgeMemory.cost))
f.write(' </interval>\n')
f.write('</netstats>\n')
def avg_error(self, values=None):
if not values:
values = self.errors
l = len(list(values))
if l > 0:
return (sum(list(values)) / l)
else:
return 0
def avg_abs_error(self):
return self.avg_error(list(map(abs, self.errors)))
def mean_error(self, values=None):
if not values:
values = self.errors
values.sort()
if values:
return values[len(values) // 2]
def mean_abs_error(self):
return self.mean_error(list(map(abs, self.errors)))
def loaded(self):
return self.num_loaded
def decayed(self):
return self.num_decayed
|
Ledoux/ShareYourSystem
|
Pythonlogy/ShareYourSystem/Standards/Tutorials/Sumer/Test.py
|
Python
|
mit
| 388
| 0.025773
|
#<ImportSpecificModules>
import ShareYourSystem as SYS
#</ImportSpecificModules>
#print(SYS.SumClass().insert('Parameter').hdfview().HdformatedConsoleStr)
#print(SYS.SumClass().insert('Result').hdfview().HdformatedConsoleStr)
#print(SYS.Sum.attest_insert())
#print(SYS.Sum.attest_retrieve())
#print(SYS.Sum.attest_find())
#print(SYS.Sum.attest_recover())
#print(SYS.Sum.attest_scan())
|
Digilent/u-boot-digilent
|
tools/dtoc/test_dtoc.py
|
Python
|
gpl-2.0
| 28,025
| 0.000178
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2012 The Chromium OS Authors.
#
"""Tests for the dtb_platdata module
This includes unit tests for some functions and functional tests for the dtoc
tool.
"""
import collections
import os
import struct
import sys
import tempfile
import unittest
from dtoc import dtb_platdata
from dtb_platdata import conv_name_to_c
from dtb_platdata import get_compat_name
from dtb_platdata import get_value
from dtb_platdata import tab_to
from dtoc import fdt
from dtoc import fdt_util
from patman import test_util
from patman import tools
our_path = os.path.dirname(os.path.realpath(__file__))
HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
#include <stdbool.h>
#include <linux/libfdt.h>'''
C_HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
/* Allow use of U_BOOT_DEVICE() in this file */
#define DT_PLATDATA_C
#include <common.h>
#include <dm.h>
#include <dt-structs.h>
'''
C_EMPTY_POPULATE_PHANDLE_DATA = '''void dm_populate_phandle_data(void) {
}
'''
def get_dtb_file(dts_fname, capture_stderr=False):
"""Compile a .dts file to a .dtb
Args:
dts_fname: Filename of .dts file in the current directory
capture_stderr: True to capture and discard stderr output
Returns:
Filename of compiled file in output directory
"""
return fdt_util.EnsureCompiled(os.path.join(our_path, dts_fname),
capture_stderr=capture_stderr)
class TestDtoc(unittest.TestCase):
"""Tests for dtoc"""
@classmethod
def setUpClass(cls):
tools.PrepareOutputDir(None)
cls.maxDiff = None
@classmethod
def tearDownClass(cls):
tools._RemoveOutputDir()
def _WritePythonString(self, fname, data):
"""Write a string with tabs expanded as done in this Python file
Args:
fname: Filename to write to
data: Raw string to convert
"""
data = data.replace('\t', '\\t')
with open(fname, 'w') as fd:
fd.write(data)
def _CheckStrings(self, expected, actual):
"""Check that a string matches its expected value
If the strings do not match, they are written to the /tmp directory in
the same Python format as is used here in the test. This allows for
easy comparison and update of the tests.
Args:
expected: Expected string
actual: Actual string
"""
if expected != actual:
self._WritePythonString('/tmp/binman.expected', expected)
self._WritePythonString('/tmp/binman.actual', actual)
print('Failures written to /tmp/binman.{expected,actual}')
self.assertEquals(expected, actual)
def run_test(self, args, dtb_file, output):
dtb_platdata.run_steps(args, dtb_file, False, output, True)
def test_name(self):
"""Test conversion of device tree names to C identifiers"""
self.assertEqual('serial_at_0x12', conv_name_to_c('serial@0x12'))
self.assertEqual('vendor_clock_frequency',
conv_name_to_c('vendor,clock-frequency'))
self.assertEqual('rockchip_rk3399_sdhci_5_1',
conv_name_to_c('rockchip,rk3399-sdhci-5.1'))
def test_tab_to(self):
"""Test operation of tab_to() function"""
self.assertEqual('fred ', tab_to(0, 'fred'))
self.assertEqual('fred\t', tab_to(1, 'fred'))
self.assertEqual('fred was here ', tab_to(1, 'fred was here'))
self.assertEqual('fred was here\t\t', tab_to(3, 'fred was here'))
self.assertEqual('exactly8 ', tab_to(1, 'exactly8'))
self.assertEqual('exactly8\t', tab_to(2, 'exactly8'))
def test_get_value(self):
"""Test operation of get_value() function"""
self.assertEqual('0x45',
get_value(fdt.TYPE_INT, struct.pack('>I', 0x45)))
self.assertEqual('0x45',
get_value(fdt.TYPE_BYTE, struct.pack('<I', 0x45)))
self.assertEqual('0x0',
get_value(fdt.TYPE_BYTE, struct.pack('>I', 0x45)))
self.assertEqual('"test"', get_value(fdt.TYPE_STRING, 'test'))
self.assertEqual('true', get_value(fdt.TYPE_BOOL, None))
def test_get_compat_name(self):
"""Test operation of get_compat_name() function"""
Prop = collections.namedtuple('Prop', ['value'])
Node = collections.namedtuple('Node', ['props'])
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1', 'arasan_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1', 'third'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1',
'arasan_sdhci_5_1', 'third']),
get_compat_name(node))
def test_empty_file(self):
"""Test output from a device tree file with no nodes"""
dtb_file = get_dtb_file('dtoc_test_empty.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(HEADER.splitlines(), lines)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(C_HEADER.splitlines() + [''] +
C_EMPTY_POPULATE_PHANDLE_DATA.splitlines(), lines)
def test_simple(self):
"""Test output from some simple nodes with various types of data"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_i2c_test {
};
struct dtd_sandbox_pmic_test {
\tbool\t\tlow_power;
\tfdt64_t\t\treg[2];
};
struct dtd_sandbox_spl_test {
\tconst char * acpi_name;
\tbool\t\tboolval;
\tunsigned char\tbytearray[3];
\tunsigned char\tbyteval;
\tfdt32_t\t\tintarray[4];
\tfdt32_t\t\tintval;
\tunsigned char\tlongbytearray[9];
\tunsigned char\tnotstring[5];
\tconst char *\tstringarray[3];
\tconst char *\tstringval;
};
struct dtd_sandbox_spl_test_2 {
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /i2c@0 index 0 */
static struct dtd_sandbox_i2c_test dtv_i2c_at_0 = {
};
U_BOOT_DEVICE(i2c_at_0) = {
\t.name\t\t= "sandbox_i2c_test",
\t.platdata\t= &dtv_i2c_at_0,
\t.platdata_size\t= sizeof(dtv_i2c_at_0),
\t.parent_idx\t= -1,
};
/* Node /i2c@0/pmic@9 index 1 */
static struct dtd_sandbox_pmic_test dtv_pmic_at_9 = {
\t.low_power\t\t= true,
\t.reg\t\t\t= {0x9, 0x0},
};
U_BOOT_DEVICE(pmic_at_9) = {
\t.name\t\t= "sandbox_pmic_test",
\t.platdata\t= &dtv_pmic_at_9,
\t.platdata_size\t= sizeof(dtv_pmic_at_9),
\t.parent_idx\t= 0,
};
/* Node /spl-test index 2 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.boolval\t\t= true,
\t.bytearray\t\t= {0x6, 0x0, 0x0},
\t.byteval\t\t= 0x5,
\t.intarray\t\t= {0x2, 0x3, 0x4, 0x0},
\t.intval\t\t\t= 0x1,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x11},
\t.notstring\t\t= {0x20, 0x21, 0x22, 0x10, 0x0},
\t.stringarray\t\t= {"multi-word", "message", ""},
\t.stringval\t\t= "message",
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 3 */
static struct dtd_sandbox_spl
|
monikagrabowska/osf.io
|
kinto/tests/test_config.py
|
Python
|
apache-2.0
| 6,371
| 0
|
import codecs
import mock
import os
import tempfile
import unittest
from time import strftime
import six
from kinto import config
from kinto import __version__
class ConfigTest(unittest.TestCase):
def test_transpose_parameters_into_template(self):
self.maxDiff = None
template = "kinto.tpl"
dest = tempfile.mktemp()
config.render_template(template, dest,
secret='secret',
storage_backend='storage_backend',
cache_backend='cache_backend',
permission_backend='permission_backend',
storage_url='storage_url',
cache_url='cache_url',
permission_url='permission_url',
kinto_version='kinto_version',
config_file_timestamp='config_file_timestamp')
with codecs.open(dest, 'r', encoding='utf-8') as d:
destination_temp = d.read()
sample_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
"test_configuration/test.ini")
with codecs.open(sample_path, 'r', encoding='utf-8') as c:
sample = c.read()
self.assertEqual(destination_temp, sample)
def test_create_destination_directory(self):
dest = os.path.join(tempfile.mkdtemp(), 'config', 'kinto.ini')
config.render_template("kinto.tpl", dest,
secret='secret',
storage_backend='storage_backend',
cache_backend='cache_backend',
permission_backend='permission_backend',
storage_url='storage_url',
cache_url='cache_url',
permission_url='permission_url',
kinto_version='kinto_version',
config_file_timestamp='config_file_timestamp')
self.assertTrue(os.path.exists(dest))
@mock.patch('kinto.config.render_template')
def test_hmac_secret_is_text(self, mocked_render_template):
config.init('kinto.ini', 'postgresql')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(type(kwargs['secret']), six.text_type)
@mock.patch('kinto.config.render_template')
def test_init_postgresql_values(self, mocked_render_template):
config.init('kinto.ini', 'postgresql')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
postgresql_url = "postgres://postgres:postgres@localhost/postgres"
self.assertDictEqual(kwargs, {
'secret': kwargs['secret'],
'storage_backend': 'kinto.core.storage.postgresql',
'cache_backend': 'kinto.core.cache.postgresql',
'permission_backend': 'kinto.core.permission.postgresql',
'storage_url': postgresql_url,
'cache_url': postgresql_url,
'permission_url': postgresql_url,
'kinto_version': __version__,
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
@mock.patch('kinto.config.render_template')
def test_init_redis_values(self, mocked_render_template):
config.init('kinto.ini', 'redis')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
redis_url = "redis://localhost:6379"
self.maxDiff = None # See the full diff in case of error
self.assertDictEqual(kwargs, {
'secret': kwargs['secret'],
'storage_backend': 'kinto_redis.storage',
'cache_backend': 'kinto_redis.cache',
'permission_backend': 'kinto_redis.permission',
'storage_url': redis_url + '/1',
'cache_url': redis_url + '/2',
'permission_url': redis_url + '/3',
'kinto_version': __version__,
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
@mock.patch('kinto.config.render_template')
def test_init_memory_values(self, mocked_render_template):
config.init('kinto.ini', 'memory')
args, kwargs = list(mocked_render_template.call_args)
self.assertEquals(args, ('kinto.tpl', 'kinto.ini'))
self.assertDictEqual(kwargs, {
'secret': kwargs['secret'],
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
'storage_url': '',
'cache_url': '',
'permission_url': '',
'kinto_version': __version__,
'config_file_timestamp': strftime('%a, %d %b %Y %H:%M:%S %z')
})
def test_render_template_creates_directory_if_necessary(self):
temp_path = tempfile.mkdtemp()
destination = os.path.join(temp_path, 'config/kinto.ini')
config.render_template('kinto.tpl', destination, **{
'secret': "abcd-ceci-est-un-secret",
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
'storage_url': '',
'cache_url': '',
'permission_url': '',
'kinto_version': '',
'config_file_timestamp': ''
})
self.assertTrue(os.path.exists(destination))
def test_render_template_works_with_file_in_cwd(self):
temp_path = tempfile.mkdtemp()
os.chdir(temp_path)
config.render_template('kinto.tpl', 'kinto.ini', **{
'secret': "abcd-ceci-est-un-secret",
'storage_backend': 'kinto.core.storage.memory',
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
'storage_url': '',
'cache_url': '',
'permission_url': '',
'kinto_version': '',
'config_file_timestamp': ''
})
self.assertTrue(os.path.exists(
os.path.join(temp_path, 'kinto.ini')
))
|
shenfei/oj_codes
|
leetcode/python/n85_Maximal_Rectangle.py
|
Python
|
mit
| 1,123
| 0.001781
|
def max_rectangle(heights):
res = 0
heights.append(0)
stack = [0]
for i in range(1, len(heights)):
while stack and heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i if not stack else i - stack[-1] - 1
res = max(res, h * w)
stack.append(i)
return res
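# Sanity-check example (added for illustration, not part of the original file):
# max_rectangle computes the largest rectangle under a histogram with a
# monotonic stack, e.g. max_rectangle([2, 1, 5, 6, 2, 3]) returns 10 (the
# bars of height 5 and 6 give min(5, 6) * 2 = 10). Note the helper appends a
# sentinel 0 to the list it is given.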
class Solution:
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix or not matrix[0]:
return 0
m = len(matrix)
n = len(matrix[0])
heights = [1 if x == '1' else 0 for x in matrix[0]]
ans = max_rectangle(heights)
for i in range(1, m):
for j in range(n):
heights[j] = 0 if matrix[i][j] == '0' else heights[j] + 1
ans = max(ans, max_rectangle(heights))
return ans
if __name__ == "__main__":
sol = Solution()
M = [['1', '0', '1', '0', '0'],
['1', '0', '1', '1', '1'],
['1', '1', '1', '1', '1'],
['1', '0', '0', '1', '0']]
print(sol.maximalRectangle(M))
|
Alignak-monitoring-contrib/alignak-checks-nrpe
|
alignak_checks_nrpe/__init__.py
|
Python
|
agpl-3.0
| 200
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
#
"""
Alignak - Checks pack for NRPE monitored Linux hosts/services
""
|
"
|
puttarajubr/commcare-hq
|
custom/tdh/sqldata.py
|
Python
|
bsd-3-clause
| 25,506
| 0.004822
|
from sqlagg.columns import SimpleColumn
from sqlagg.filters import BETWEEN, IN, EQ
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import SqlData, DataFormatter, TableDataFormat, DatabaseColumn
from custom.tdh.reports import UNNECESSARY_FIELDS, CHILD_HEADERS_MAP, INFANT_HEADERS_MAP, NEWBORN_HEADERS_MAP
def merge_rows(classification_sql_data, enroll_sql_data, treatment_sql_data):
result = []
classification_case_id_index = [id for id, column in enumerate(classification_sql_data.columns)
if column.slug == 'case_id'][0]
enroll_case_id_index = [id for id, column in enumerate(enroll_sql_data.columns)
if column.slug == 'case_id'][0]
treatment_case_id_index = [id for id, column in enumerate(treatment_sql_data.columns)
if column.slug == 'case_id'][0]
enroll_map = {row[enroll_case_id_index]: row for row in enroll_sql_data.rows}
treatment_map = {row[treatment_case_id_index]: row[:treatment_case_id_index]
+ row[treatment_case_id_index + 1:] for row in treatment_sql_data.rows}
for classification_row in classification_sql_data.rows:
row = classification_row[:classification_case_id_index] + classification_row[
classification_case_id_index + 1:]
classification_case_id = classification_row[classification_case_id_index]
if classification_case_id in enroll_map:
row = enroll_map[classification_case_id] + row
else:
row = [classification_case_id] + ['' for i in range(len(enroll_sql_data.headers) - 1)] + row
if classification_case_id in treatment_map:
row.extend(treatment_map[classification_case_id])
else:
row.extend(['' for i in range(len(treatment_sql_data.headers))])
result.append(row)
return result
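# Added explanatory comment (not in the original): merge_rows joins the three
# report tables on case_id, anchored on the classification rows. Each
# classification row (minus its case_id column) is prefixed with the matching
# enrollment row, or with the case_id plus blanks when no enrollment row
# exists, and suffixed with the matching treatment row minus its case_id
# column, or blanks when there is no treatment row.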
class BaseSqlData(SqlData):
datatables = True
no_value = {'sort_key': 0, 'html': 0}
def header(self, header):
if self.__class__.__name__[0] == 'N':
return NEWBORN_HEADERS_MAP[header] if header in NEWBORN_HEADERS_MAP else header
elif self.__class__.__name__[0] == 'I':
return INFANT_HEADERS_MAP[header] if header in INFANT_HEADERS_MAP else header
else:
return CHILD_HEADERS_MAP[header] if header in CHILD_HEADERS_MAP else header
@property
def filters(self):
filters = [BETWEEN("date", "startdate", "enddate"), EQ('domain', 'domain')]
if self.config['emw']:
filters.append(IN('user_id', 'emw'))
return filters
@property
def group_by(self):
return []
@property
def columns(self):
columns = []
for k in self.group_by:
if k in ['zscore_hfa', 'zscore_wfa', 'zscore_wfh', 'mean_hfa', 'mean_wfa', 'mean_wfh']:
columns.append(DatabaseColumn(k, SimpleColumn(k),
format_fn=lambda x: "%.2f" % float(x if x else 0)))
else:
columns.append(DatabaseColumn(k, SimpleColumn(k)))
return columns
@property
def headers(self):
return [DataTablesColumn(self.header(k)) for k in self.group_by[1:]]
@property
def rows(self):
formatter = DataFormatter(TableDataFormat(self.columns, no_value=self.no_value))
return list(formatter.format(self.data, keys=self.keys, group_by=self.group_by))
class InfantConsultationHistory(BaseSqlData):
table_name = "fluff_TDHInfantClassificationFluff"
slug = 'infant_consultation_history'
title = 'Infant Consultation History'
@property
def columns(self):
return EnrollChild().columns + InfantClassification(config=self.config).columns + InfantTreatment().columns
@property
def headers(self):
return DataTablesHeader(
*EnrollChild().headers + InfantClassification(config=self.config).headers + InfantTreatment().headers)
@property
def group_by(self):
return EnrollChild().group_by + InfantClassification(
config=self.config).group_by + InfantTreatment().group_by
@property
def rows(self):
return merge_rows(InfantClassification(config=self.config), EnrollChild(), InfantTreatment())
class InfantConsultationHistoryComplete(BaseSqlData):
table_name = "fluff_TDHInfantClassificationFluff"
slug = 'infant_consultation_history'
title = 'Infant Consultation History'
@property
def columns(self):
return EnrollChild().columns + InfantClassificationExtended(
config=self.config).columns + InfantTreatmentExtended().columns
@property
def headers(self):
return DataTablesHeader(*EnrollChild().headers + InfantClassificationExtended(
config=self.config).headers + InfantTreatmentExtended().headers)
@property
def group_by(self):
return EnrollChild().group_by + InfantClassificationExtended(
config=self.config).group_by + InfantTreatmentExtended().group_by
@property
def rows(self):
return merge_rows(InfantClassificationExtended(config=self.config), EnrollChild(),
InfantTreatmentExtended())
class NewbornConsultationHistory(BaseSqlData):
table_name = "fluff_TDHNewbornClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + NewbornClassification(
config=self.config).columns + NewbornTreatment().columns
@property
def headers(self):
return DataTablesHeader(*EnrollChild().headers + NewbornClassification(
config=self.config).headers + NewbornTreatment().headers)
@property
def group_by(self):
return EnrollChild().group_by + NewbornClassification(
config=self.config).group_by + NewbornTreatment().group_by
@property
def rows(self):
return merge_rows(NewbornClassification(config=self.config), EnrollChild(), NewbornTreatment())
class NewbornConsultationHistoryComplete(BaseSqlData):
table_name = "fluff_TDHNewbornClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + NewbornClassificationExtended(
config=self.config).columns + NewbornTreatmentExtended().columns
@property
def headers(self):
return DataTablesHeader(*EnrollChild().headers + NewbornClassificationExtended(
config=self.config).headers + NewbornTreatmentExtended().headers)
@property
def group_by(self):
return EnrollChild().group_by + NewbornClassificationExtended(
config=self.config).group_by + NewbornTreatmentExtended().group_by
@property
def rows(self):
return merge_rows(NewbornClassificationExtended(config=self.config), EnrollChild(),
NewbornTreatmentExtended())
class ChildConsultationHistory(BaseSqlData):
table_name = "fluff_TDHChildClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return EnrollChild().columns + ChildClassification(config=self.config).columns + ChildTreatment().columns
@property
def headers(self):
return DataTablesHeader(
*EnrollChild().headers + ChildClassification(config=self.config).headers + ChildTreatment().headers)
@property
def group_by(self):
return EnrollChild().group_by + ChildClassification(
config=self.config).group_by + ChildTreatment().group_by
@property
def rows(self):
return merge_rows(ChildClassification(config=self.config), EnrollChild(), ChildTreatment())
class ChildConsultationHistoryComplete(BaseSqlData):
table_name = "fluff_TDHChildClassificationFluff"
slug = 'newborn_consultation_history'
title = 'Newborn Consultation History'
@property
def columns(self):
return E
|
RoyNexus/python
|
homework6.py
|
Python
|
unlicense
| 3,193
| 0.003758
|
import numpy as np
import copy
import datetime as dt
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkstudy.EventProfiler as ep
from bollinger import Bollinger
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit(positively confirms the event occurence)
"""
def find_events(ls_symbols, d_data):
df_close = d_data['close']
ts_market = df_close['SPY']
print "Finding Events"
# Creating an empty dataframe
df_events = copy.deepcopy(df_close)
df_events = df_events * np.NAN
# Time stamps for the event range
ldt_timestamps = df_close.index
for s_sym in ls_symbols:
for i in range(1, len(ldt_timestamps)):
# Calculating the returns for this timestamp
f_symprice_today = df_close[s_sym].ix[ldt_timestamps[i]]
f_symprice_yest = df_close[s_sym].ix[ldt_timestamps[i - 1]]
#f_marketprice_today = ts_market.ix[ldt_timestamps[i]]
#f_marketprice_yest = ts_market.ix[ldt_timestamps[i - 1]]
#f_symreturn_today = (f_symprice_today / f_symprice_yest) - 1
#f_marketreturn_today = (f_marketprice_today / f_marketprice_yest) - 1
'''
Bollinger value of equity today < -2.0
Bollinger value of equity yesterday >= -2.0
Bollinger value of SPY today >= 1.5
'''
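# Added explanatory comment (not in the original): the event fires when the
# equity's Bollinger value drops below -2.0 today after being >= -2.0
# yesterday, while SPY's Bollinger value is still >= 1.5, i.e. the stock
# breaks down while the broad market remains strong. Since df_close never
# changes inside these loops, the Bollinger object below could in principle
# be built once outside them.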
bollinger_obj = Bollinger(df_close)
equity_today = bollinger_obj.get_value(ldt_timestamps[i], s_sym)
equity_yesterday = bollinger_obj.get_value(ldt_timestamps[i - 1], s_sym)
mkt_today = bollinger_obj.get_value(ldt_timestamps[i], 'SPY')
if equity_today < -2.0 and equity_yesterday >= -2.0 and mkt_today >= 1.5:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
return df_events
if __name__ == '__main__':
dt_start = dt.datetime(2008, 1, 1)
dt_end = dt.datetime(2009, 12, 31)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
dataobj = da.DataAccess('Yahoo')
ls_symbols = dataobj.get_symbols_from_list('sp5002012')
ls_symbols.append('SPY')
ls_keys = ['close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_events = find_events(ls_symbols, d_data)
print "Creating Study"
ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
s_filename='BollingerStudy.pdf', b_market_neutral=True, b_errorbars=True,
s_market_sym='SPY')
|
matthew-brett/pyblio
|
Pyblio/Style/Generic.py
|
Python
|
gpl-2.0
| 5,843
| 0.023105
|
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
''' Generic XML bibliographic style handler '''
import string
from Pyblio.Style import Parser
from Pyblio import Autoload, recode
def author_desc (group, coding, initials = 0, reverse = 0):
""" Create a nice string describing a group of authors.
coding : name of the output coding (as requested for recode)
initials : if = 1, uses initials instead of complete first names
reverse :
-1 use First Last format
0 use Last, First, excepted for the first entry
1 use Last, First for all the authors, not only the first
"""
l = len (group)
fulltext = ""
for i in range (0, l):
(honorific, first, last, lineage) = group [i].format (coding)
if initials:
first = group [i].initials (coding)
text = ""
if reverse == 1 or (i == 0 and reverse == 0):
if last: text = text + last
if lineage: text = text + ", " + lineage
if first: text = text + ", " + first
else:
if first: text = first + " "
if last: text = text + last
if lineage: text = text + ", " + lineage
if text:
if i < l - 2:
text = text + ", "
elif i == l - 2:
text = text + " and "
fulltext = fulltext + text
# avoid a dot at the end of the author list
if fulltext [-1] == '.':
fulltext = fulltext [0:-1]
return fulltext
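# Illustrative behaviour (added comment, not in the original source): for a
# hypothetical two-author group whose format() calls return
# ("", "Ada", "Lovelace", "") and ("", "Alan", "Turing", ""), reverse=0 gives
# "Lovelace, Ada and Alan Turing", reverse=1 gives
# "Lovelace, Ada and Turing, Alan", and reverse=-1 gives
# "Ada Lovelace and Alan Turing".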
def string_key (entry, fmt, table):
""" Generates an alphabetical key for an entry. fmt is the
output coding """
rc = recode.recode ("latin1.." + fmt)
if entry.has_key ('author'): aut = entry ['author']
elif entry.has_key ('editor'): aut = entry ['editor']
else: aut = ()
if len (aut) > 0:
if len (aut) > 1:
key = ''
for a in aut:
honorific, first, last, lineage = a.format (fmt)
key = key + string.join (map (lambda x:
x [0], string.split (last, ' ')), '')
if len (key) >= 3:
if len (aut) > 3:
key = key + '+'
break
else:
honorific, first, last, lineage = aut [0].format (fmt)
parts = string.split (last, ' ')
if len (parts) == 1:
key = parts [0][0:3]
else:
key = string.join (map (lambda x: x [0], parts), '')
else:
key = rc (entry.key.key [0:3])
if entry.has_key ('date'):
year = entry ['date'].format (fmt) [0]
if year:
key = key + year [2:]
if table.has_key (key) or table.has_key (key + 'a'):
if table.has_key (key):
# rename the old entry
new = key + 'a'
table [new] = table [key]
del table [key]
base = key
suff = ord ('b')
key = base + chr (suff)
while table.has_key (key):
suff = suff + 1
key = base + chr (suff)
return key
def numeric_key (entry, fmt, table):
count = 1
while table.has_key (str (count)):
count = count + 1
return str (count)
def create_string_key (database, keys, fmt):
table = {}
for key in keys:
s = string_key (database [key], fmt, table)
table [s] = key
skeys = table.keys ()
skeys.sort ()
return table, skeys
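# For example (illustrative only): a single-author entry by "Knuth" dated 1984 would
# receive the key "Knu84" from string_key above; a second entry mapping to the same
# key causes the first to be renamed "Knu84a" while the new one is stored as "Knu84b",
# and further collisions continue through the alphabet.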
def create_numeric_key (database, keys, fmt):
table = {}
skeys = []
for key in keys:
        s = numeric_key (database [key], fmt, table)
table [s] = key
skeys.append (s)
return table, skeys
def standard_date (entry, coding):
(text, month, day) = entry.format (coding)
if month: text = "%s/%s" % (month, text)
if day : text = "%s/%s" % (day, text)
return text
def last_first_full_authors (entry, coding):
return author_desc (entry, coding, 0, 1)
def first_last_full_authors (entry, coding):
return author_desc (entry, coding, 0, -1)
def full_authors (entry, coding):
return author_desc (entry, coding, 0, 0)
def initials_authors (entry, coding):
return author_desc (entry, coding, 1, 0)
def first_last_initials_authors (entry, coding):
return author_desc (entry, coding, 1, -1)
def last_first_initials_authors (entry, coding):
return author_desc (entry, coding, 1, 1)
Autoload.register ('style', 'Generic', {
'first_last_full_authors' : first_last_full_authors,
'last_first_full_authors' : last_first_full_authors,
'full_authors' : full_authors,
'first_last_initials_authors' : first_last_initials_authors,
'last_first_initials_authors' : last_first_initials_authors,
'initials_authors' : initials_authors,
'string_keys' : create_string_key,
'numeric_keys' : create_numeric_key,
'european_date' : standard_date,
})
|
FOSSEE/eSim
|
src/frontEnd/DockArea.py
|
Python
|
gpl-3.0
| 13,847
| 0
|
from PyQt5 import QtCore, QtWidgets
from ngspiceSimulation.pythonPlotting import plotWindow
from ngspiceSimulation.NgspiceWidget import NgspiceWidget
from configuration.Appconfig import Appconfig
from modelEditor.ModelEditor import ModelEditorclass
from subcircuit.Subcircuit import Subcircuit
from maker.makerchip import makerchip
from kicadtoNgspice.KicadtoNgspice import MainWindow
from browser.Welcome import Welcome
from browser.UserManual import UserManual
from ngspicetoModelica.ModelicaUI import OpenModelicaEditor
import os
dockList = ['Welcome']
count = 1
dock = {}
class DockArea(QtWidgets.QMainWindow):
"""
This class contains function for designing UI of all the editors
in dock area part:
- Test Editor.
    - Model Editor.
- Python Plotting.
- Ngspice Editor.
- Kicad to Ngspice Editor.
- Subcircuit Editor.
- Modelica editor.
"""
def __init__(self):
"""This act as constructor for class DockArea."""
QtWidgets.QMainWindow.__init__(self)
self.obj_appconfig = Appconfig()
for dockName in dockList:
dock[dockName] = QtWidgets.QDockWidget(dockName)
self.welcomeWidget = QtWidgets.QWidget()
self.welcomeLayout = QtWidgets.QVBoxLayout()
self.welcomeLayout.addWidget(Welcome()) # Call browser
# Adding to main Layout
self.welcomeWidget.setLayout(self.welcomeLayout)
dock[dockName].setWidget(self.welcomeWidget)
# CSS
dock[dockName].setStyleSheet(" \
QWidget { border-radius: 15px; border: 1px solid gray;\
padding: 5px; width: 200px; height: 150px; } \
")
self.addDockWidget(QtCore.Qt.TopDockWidgetArea, dock[dockName])
# self.tabifyDockWidget(dock['Notes'],dock['Blank'])
self.show()
def createTestEditor(self):
"""This function create widget for Library Editor"""
global count
self.testWidget = QtWidgets.QWidget()
self.testArea = QtWidgets.QTextEdit()
self.testLayout = QtWidgets.QVBoxLayout()
self.testLayout.addWidget(self.testArea)
# Adding to main Layout
self.testWidget.setLayout(self.testLayout)
dock['Tips-' + str(count)] = \
QtWidgets.QDockWidget('Tips-' + str(count))
dock['Tips-' + str(count)].setWidget(self.testWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Tips-' + str(count)])
self.tabifyDockWidget(
dock['Welcome'], dock['Tips-' + str(count)])
dock['Tips-' + str(count)].setVisible(True)
dock['Tips-' + str(count)].setFocus()
dock['Tips-' + str(count)].raise_()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['Tips-' + str(count)]
)
count = count + 1
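    # Note (descriptive, added for orientation): every editor below follows the same
    # pattern as createTestEditor: build a QWidget, wrap it in a QDockWidget named
    # '<Editor>-<count>', tabify it with the 'Welcome' dock, and (for most editors)
    # record it in obj_appconfig.dock_dict for the currently open project.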
def plottingEditor(self):
"""This function create widget for interactive PythonPlotting."""
self.projDir = self.obj_appconfig.current_project["ProjectName"]
self.projName = os.path.basename(self.projDir)
# self.project = os.path.join(self.projDir, self.projName)
global count
self.plottingWidget = QtWidgets.QWidget()
self.plottingLayout = QtWidgets.QVBoxLayout()
self.plottingLayout.addWidget(plotWindow(self.projDir, self.projName))
# Adding to main Layout
self.plottingWidget.setLayout(self.plottingLayout)
dock['Plotting-' + str(count)
] = QtWidgets.QDockWidget('Plotting-' + str(count))
dock['Plotting-' + str(count)].setWidget(self.plottingWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Plotting-' + str(count)])
self.tabifyDockWidget(dock['Welcome'], dock['Plotting-' + str(count)])
dock['Plotting-' + str(count)].setVisible(True)
dock['Plotting-' + str(count)].setFocus()
dock['Plotting-' + str(count)].raise_()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['Plotting-' + str(count)]
)
count = count + 1
def ngspiceEditor(self, projDir):
""" This function creates widget for Ngspice window."""
self.projDir = projDir
self.projName = os.path.basename(self.projDir)
self.ngspiceNetlist = os.path.join(
self.projDir, self.projName + ".cir.out")
# Edited by Sumanto Kar 25/08/2021
if os.path.isfile(self.ngspiceNetlist) is False:
return False
global count
self.ngspiceWidget = QtWidgets.QWidget()
self.ngspiceLayout = QtWidgets.QVBoxLayout()
self.ngspiceLayout.addWidget(
NgspiceWidget(self.ngspiceNetlist, self.projDir)
)
# Adding to main Layout
self.ngspiceWidget.setLayout(self.ngspiceLayout)
dock['NgSpice-' + str(count)
] = QtWidgets.QDockWidget('NgSpice-' + str(count))
dock['NgSpice-' + str(count)].setWidget(self.ngspiceWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['NgSpice-' + str(count)])
self.tabifyDockWidget(dock['Welcome'], dock['NgSpice-' + str(count)])
# CSS
dock['NgSpice-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray; padding: 0px;\
width: 200px; height: 150px; } \
")
dock['NgSpice-' + str(count)].setVisible(True)
dock['NgSpice-' + str(count)].setFocus()
dock['NgSpice-' + str(count)].raise_()
temp = self.obj_appconfig.current_project['ProjectName']
if temp:
self.obj_appconfig.dock_dict[temp].append(
dock['NgSpice-' + str(count)]
)
count = count + 1
def modelEditor(self):
"""This function defines UI for model editor."""
print("in model editor")
global count
self.modelwidget = QtWidgets.QWidget()
self.modellayout = QtWidgets.QVBoxLayout()
self.modellayout.addWidget(ModelEditorclass())
# Adding to main Layout
self.modelwidget.setLayout(self.modellayout)
dock['Model Editor-' +
str(count)] = QtWidgets.QDockWidget('Model Editor-' + str(count))
dock['Model Editor-' + str(count)].setWidget(self.modelwidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['Model Editor-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['Model Editor-' + str(count)])
# CSS
dock['Model Editor-' + str(count)].setStyleSheet(" \
.QWidget { border-radius: 15px; border: 1px solid gray; \
padding: 5px; width: 200px; height: 150px; } \
")
dock['Model Editor-' + str(count)].setVisible(True)
dock['Model Editor-' + str(count)].setFocus()
dock['Model Editor-' + str(count)].raise_()
count = count + 1
def kicadToNgspiceEditor(self, clarg1, clarg2=None):
"""
This function is creating Editor UI for Kicad to Ngspice conversion.
"""
global count
self.kicadToNgspiceWidget = QtWidgets.QWidget()
self.kicadToNgspiceLayout = QtWidgets.QVBoxLayout()
self.kicadToNgspiceLayout.addWidget(MainWindow(clarg1, clarg2))
self.kicadToNgspiceWidget.setLayout(self.kicadToNgspiceLayout)
dock['kicadToNgspice-' + str(count)] = \
QtWidgets.QDockWidget('kicadToNgspice-' + str(count))
dock['kicadToNgspice-' +
str(count)].setWidget(self.kicadToNgspiceWidget)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea,
dock['kicadToNgspice-' + str(count)])
self.tabifyDockWidget(dock['Welcome'],
dock['kicadToNgspice-' + str(count)])
# CSS
dock['kicadToNgspice-' + str(count)].
|
ismailsunni/inasafe
|
safe/gis/vector/union.py
|
Python
|
gpl-3.0
| 12,362
| 0
|
# coding=utf-8
"""Clip and mask a hazard layer."""
import logging
from qgis.core import (
QgsGeometry,
QgsFeatureRequest,
QgsWkbTypes,
QgsFeature,
)
from safe.definitions.fields import hazard_class_field, aggregation_id_field
from safe.definitions.hazard_classifications import not_exposed_class
from safe.definitions.processing_steps import union_steps
from safe.gis.sanity_check import check_layer
from safe.gis.vector.clean_geometry import geometry_checker
from safe.gis.vector.tools import (
create_memory_layer, wkb_type_groups, create_spatial_index)
from safe.utilities.i18n import tr
from safe.utilities.profiling import profile
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
@profile
def union(union_a, union_b):
"""Union of two vector layers.
Issue https://github.com/inasafe/inasafe/issues/3186
Note : This algorithm is copied from :
https://github.com/qgis/QGIS/blob/master/python/plugins/processing/algs/
qgis/Union.py
    :param union_a: The first vector layer for the union.
    :type union_a: QgsVectorLayer
    :param union_b: The second vector layer for the union.
    :type union_b: QgsVectorLayer
    :return: The union result layer.
    :rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = union_steps['output_layer_name']
output_layer_name = output_layer_name % (
union_a.keywords['layer_purpose'],
union_b.keywords['layer_purpose']
)
fields = union_a.fields()
fields.extend(union_b.fields())
writer = create_memory_layer(
output_layer_name,
union_a.geometryType(),
union_a.crs(),
fields
)
keywords_union_1 = union_a.keywords
keywords_union_2 = union_b.keywords
inasafe_fields_union_1 = keywords_union_1['inasafe_fields']
inasafe_fields_union_2 = keywords_union_2['inasafe_fields']
inasafe_fields = inasafe_fields_union_1
inasafe_fields.update(inasafe_fields_union_2)
# use to avoid modifying original source
writer.keywords = dict(union_a.keywords)
writer.keywords['inasafe_fields'] = inasafe_fields
writer.keywords['title'] = output_layer_name
writer.keywords['layer_purpose'] = 'aggregate_hazard'
writer.keywords['hazard_keywords'] = keywords_union_1.copy()
writer.keywords['aggregation_keywords'] = keywords_union_2.copy()
skip_field = inasafe_fields_union_2[aggregation_id_field['key']]
not_null_field_index = writer.fields().lookupField(skip_field)
writer.startEditing()
# Begin copy/paste from Processing plugin.
# Please follow their code as their code is optimized.
# The code below is not following our coding standards because we want to
# be able to track any diffs from QGIS easily.
index_a = create_spatial_index(union_b)
index_b = create_spatial_index(union_a)
count = 0
n_element = 0
# Todo fix callback
# nFeat = len(union_a.getFeatures())
for in_feat_a in union_a.getFeatures():
# progress.setPercentage(nElement / float(nFeat) * 50)
n_element += 1
list_intersecting_b = []
geom = geometry_checker(in_feat_a.geometry())
at_map_a = in_feat_a.attributes()
intersects = index_a.intersects(geom.boundingBox())
if len(intersects) < 1:
try:
_write_feature(at_map_a, geom, writer, not_null_field_index)
except BaseException:
# This really shouldn't happen, as we haven't
# edited the input geom at all
LOGGER.debug(
tr('Feature geometry error: One or more output features '
'ignored due to invalid geometry.'))
else:
request = QgsFeatureRequest().setFilterFids(intersects)
engine = QgsGeometry.createGeometryEngine(geom.constGet())
engine.prepareGeometry()
for in_feat_b in union_b.getFeatures(request):
count += 1
at_map_b = in_feat_b.attributes()
tmp_geom = geometry_checker(in_feat_b.geometry())
if engine.intersects(tmp_geom.constGet()):
int_geom = geometry_checker(geom.intersection(tmp_geom))
list_intersecting_b.append(QgsGeometry(tmp_geom))
if not int_geom:
# There was a problem creating the intersection
# LOGGER.debug(
# tr('GEOS geoprocessing error: One or more input '
# 'features have invalid geometry.'))
pass
int_geom = QgsGeometry()
else:
int_geom = QgsGeometry(int_geom)
if int_geom.wkbType() == QgsWkbTypes.UnknownGeometry \
or QgsWkbTypes.flatType(
int_geom.constGet().wkbType()) == \
QgsWkbTypes.GeometryCollection:
# Intersection produced different geometry types
temp_list = int_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
int_geom = QgsGeometry(geometry_checker(i))
try:
_write_feature(
at_map_a + at_map_b,
int_geom,
writer,
not_null_field_index,
)
except BaseException:
LOGGER.debug(
tr('Feature geometry error: One or '
'more output features ignored due '
'to invalid geometry.'))
else:
# Geometry list: prevents writing error
# in geometries of different types
# produced by the intersection
# fix #3549
if int_geom.wkbType() in wkb_type_groups[
wkb_type_groups[int_geom.wkbType()]]:
try:
_write_feature(
at_map_a + at_map_b,
int_geom,
writer,
not_null_field_index)
except BaseException:
LOGGER.debug(
tr('Feature geometry error: One or more '
                                   'output features ignored due to '
'invalid geometry.'))
# the remaining bit of inFeatA's geometry
# if there is nothing left, this will just silently fail and we
# are good
diff_geom = QgsGeometry(geom)
if len(list_intersecting_b) != 0:
int_b = QgsGeometry.unaryUnion(list_intersecting_b)
diff_geom = geometry_checker(diff_geom.difference(int_b))
        if diff_geom is None or \
diff_geom.isEmpty() or not diff_geom.isGeosValid():
# LOGGER.debug(
# tr('GEOS geoprocessing error: One or more input '
# 'features have invalid geometry.'))
pass
if diff_geom is not None and (
diff_geom.wkbType() == 0 or QgsWkbTypes.flatType(
diff_geom.constGet().wkbType()) ==
QgsWkbTypes.GeometryCollection):
temp_list = diff_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
|
cwlinkem/linkuce
|
modeltest_runner.py
|
Python
|
gpl-2.0
| 2,993
| 0.033077
|
import os
import glob
import subprocess
import argparse  # needed by arg_is_file below
def expand_path(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def is_file(path):
if not path:
return False
if not os.path.isfile(path):
return False
return True
def arg_is_file(path):
try:
if not is_file(path):
raise
except:
msg = '{0!r} is not a file'.format(path)
raise argparse.ArgumentTypeError(msg)
return expand_path(path)
def run_jmodeltest(name):
jmodel_proc=subprocess.Popen('java -jar ~/phylo_tools/jmodeltest-2.1.5/jModelTest.jar -d '+str(name)+' -s 3 -f -i -g 4 -BIC -c 0.95 > '+str(name)+'.results.txt', shell=True, executable='/bin/bash')
jmodel_proc.wait()
def get_models(f, gene_name, out):
fl=file(f)
for line in fl:
line=line.strip()
if "the 95% confidence interval" in line:
model=line.split(': ')[1]
out.write(str(gene_name)+'\t'+str(model)+'\n')
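# Illustrative note: get_models expects jModelTest result lines of the form
# "... the 95% confidence interval: <MODEL>" (exact wording is an assumption here)
# and writes one tab-separated "<gene>\t<MODEL>" row per input file.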
def main():
for f in glob.glob('*.nex'):
run_jmodeltest(f)
out=open('models.txt','w')
for f in glob.glob('*.results.txt'):
gene_name=f.split('.')[0]
get_models(f, gene_name,out)
''' description = ('This program will run jModelTest on a single file or set '
'of files in nexus format. User can choose the set of models'
        'and type of summary using flags. The standard 24 models used'
'in MrBayes and BIC summary with 95% credible set are defaults.')
    FILE_FORMATS = ['nex']
parser = argparse.ArgumentParser(description = description)
parser.add_argument('input_files', metavar='INPUT-SEQ-FILE',
nargs = '+',
type = arg_is_file,
help = ('Input sequence file(s) name '))
parser.add_argument('-o', '--out-format',
type = str,
choices = ['nex', 'fasta', 'phy'],
help = ('The format of the output sequence file(s). Valid options '))
parser.add_argument('-j', '--path-to-jModelTest',
type = str,
help=('The full path to the jModelTest executable'))
parser.add_argument('-s', '--substitution-models',
type = str,
            choices = ['3','5','7','11'],
            default = ['3'],
            help = ('Number of substitution schemes to test. Default is all GTR models "-s 3".'))
parser.add_argument('-g', '--gamma',
type = str,
            default = ['4'],
            help = ('Include models with rate variation among sites and number of categories (e.g., -g 8)'))
parser.add_argument('-i', '--invar',
type = str,
            default = ['false'],
            help = ('Include models with a proportion of invariable sites (e.g., -i)'))
args = parser.parse_args()
for f in args.input_files:
in_type=os.path.splitext(f)[1]
filename=os.path.splitext(f)[0]
if in_type == '.nex' or in_type == '.nexus':
dict=in_nex(f)
elif in_type == '.fa' or in_type == '.fas' or in_type == '.fasta':
dict=in_fasta(f)
elif in_type == '.phy' or in_type == '.phylip':
dict=in_phy(f)
if args.out_format == 'nex':
out_nex(dict, filename)
elif args.out_format == 'fasta':
out_fasta(dict, filename)
elif args.out_format == 'phy':
out_phy(dict, filename)'''
if __name__ == '__main__':
main()
|
renebentes/Python4Zumbis
|
Exercícios/Lista IV/questao01.py
|
Python
|
mit
| 350
| 0
|
# coding=utf-8
import random
lista = []
for x in range(10):
numero = random.randint(1, 100)
if x == 0:
maior, menor = numero, numero
elif numero > maior:
maior = numero
elif numero < menor:
menor = numero
lista.append(numero)
lista.sort()
print(lista)
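# Note (illustrative): since lista is sorted, lista[0] and lista[-1] hold the same
# values as menor and maior computed above.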
print("Maior: %d" %
|
maior)
print("Menor: %d" % menor)
|
centrologic/django-codenerix-products
|
codenerix_products/migrations/0012_productunique_caducity.py
|
Python
|
apache-2.0
| 516
| 0.001938
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-26 12:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('codenerix_products', '0011_auto_20180202_0826'),
]
operations = [
migrations.AddField(
model_name='productunique',
name='caducity',
field=models.DateField(blank=True, default=None, null=True, verbose_name='Caducity'),
),
]
|
gistic/PublicSpatialImpala
|
tests/benchmark/report-benchmark-results.py
|
Python
|
apache-2.0
| 35,471
| 0.013307
|
#!/usr/bin/env python
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# This script provides help with parsing and reporting of perf results. It currently
# provides three main capabilities:
# 1) Printing perf results to console in 'pretty' format
# 2) Comparing two perf result sets together and displaying comparison results to console
# 3) Outputting the perf results in JUnit format which is useful for plugging in to
# Jenkins perf reporting.
# By default in Python if you divide an int by another int (5 / 2), the result will also
# be an int (2). The following line changes this behavior so that float will be returned
# if necessary (2.5).
from __future__ import division
import difflib
import json
import math
import os
import prettytable
from collections import defaultdict
from datetime import date, datetime
from optparse import OptionParser
from tests.util.calculation_util import calculate_tval, calculate_avg, calculate_stddev
from time import gmtime, strftime
# String constants
AVG = 'avg'
AVG_TIME = 'avg_time'
AVG_TIME_CHANGE = 'avg_time_change'
AVG_TIME_CHANGE_TOTAL = 'avg_time_change_total'
CLIENT_NAME = 'client_name'
COMPRESSION_CODEC = 'compression_codec'
COMPRESSION_TYPE = 'compression_type'
DETAIL = 'detail'
EST_NUM_ROWS = 'est_num_rows'
EST_PEAK_MEM = 'est_peak_mem'
EXECUTOR_NAME = 'executor_name'
EXEC_SUMMARY = 'exec_summary'
FILE_FORMAT = 'file_format'
ITERATIONS = 'iterations'
MAX_TIME = 'max_time'
MAX_TIME_CHANGE = 'max_time_change'
NAME = 'name'
NUM_CLIENTS = 'num_clients'
NUM_HOSTS = 'num_hosts'
NUM_ROWS = 'num_rows'
OPERATOR = 'operator'
PEAK_MEM = 'peak_mem'
PEAK_MEM_CHANGE = 'peak_mem_change'
PREFIX = 'prefix'
QUERY = 'query'
QUERY_STR = 'query_str'
RESULT_LIST = 'result_list'
RUNTIME_PROFILE = 'runtime_profile'
SCALE_FACTOR = 'scale_factor'
STDDEV = 'stddev'
STDDEV_TIME = 'stddev_time'
TEST_VECTOR = 'test_vector'
TIME_TAKEN = 'time_taken'
TOTAL = 'total'
WORKLOAD_NAME = 'workload_name'
parser = OptionParser()
parser.add_option("--input_result_file", dest="result_file",
default=os.environ['IMPALA_HOME'] + '/benchmark_results.json',
help="The input JSON file with benchmark results")
parser.add_option("--reference_result_file", dest="reference_result_file",
default=os.environ['IMPALA_HOME'] + '/reference_benchmark_results.json',
help="The input JSON file with reference benchmark results")
parser.add_option("--junit_output_file", dest="junit_output_file", default='',
help='If set, outputs results in Junit format to the specified file')
parser.add_option("--no_output_table", dest="no_output_table", action="store_true",
default= False, help='Outputs results in table format to the console')
parser.add_option("--report_description", dest="report_description", default=None,
help='Optional description for the report.')
parser.add_option("--cluster_name", dest="cluster_name", default='UNKNOWN',
help="Name of the cluster the results are from (ex. Bolt)")
parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
                  default= False, help='Outputs to console with increased verbosity')
parser.add_option("--output_all_summary_nodes", dest="output_all_summary_nodes",
action="store_true", default= False,
help='Print all execution summary nodes')
parser.add_option("--build_version", dest="build_version", default='UNKNOWN',
help="Build/version info about the Impalad instance results are from.")
parser.add_option("--lab_run_info", dest="lab_run_info", default='UNKNOWN',
help="Information about the lab run (name/id) that published "\
"the results.")
parser.add_option("--tval_threshold", dest="tval_threshold", default=None,
type="float", help="The ttest t-value at which a performance change "\
"will be flagged as sigificant.")
parser.add_option("--min_percent_change_threshold",
dest="min_percent_change_threshold", default=5.0,
type="float", help="Any performance changes below this threshold" \
" will not be classified as significant. If the user specifies an" \
" empty value, the threshold will be set to 0")
parser.add_option("--max_percent_change_threshold",
dest="max_percent_change_threshold", default=20.0,
type="float", help="Any performance changes above this threshold"\
" will be classified as significant. If the user specifies an" \
" empty value, the threshold will be set to the system's maxint")
parser.add_option("--allowed_latency_diff_secs",
dest="allowed_latency_diff_secs", default=0.0, type="float",
help="If specified, only a timing change that differs by more than\
this value will be considered significant.")
# These parameters are specific to recording results in a database. This is optional
parser.add_option("--save_to_db", dest="save_to_db", action="store_true",
default= False, help='Saves results to the specified database.')
parser.add_option("--is_official", dest="is_official", action="store_true",
default= False, help='Indicates this is an official perf run result')
parser.add_option("--db_host", dest="db_host", default='localhost',
help="Machine hosting the database")
parser.add_option("--db_name", dest="db_name", default='perf_results',
help="Name of the perf database.")
parser.add_option("--db_username", dest="db_username", default='hiveuser',
help="Username used to connect to the database.")
parser.add_option("--db_password", dest="db_password", default='password',
help="Password used to connect to the the database.")
options, args = parser.parse_args()
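# A hypothetical invocation sketch (file names below are placeholders, not script
# defaults beyond the ones defined above):
#   report-benchmark-results.py --input_result_file new.json \
#       --reference_result_file baseline.json --tval_threshold 3.0 --verbose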
def get_dict_from_json(filename):
"""Given a JSON file, return a nested dictionary.
Everything in this file is based on the nested dictionary data structure. The dictionary
is structured as follows: Top level maps to workload. Each workload maps to queries.
  Each query maps to file_format. Each file format contains a key "result_list" that
  maps to a list of QueryResult (look at query.py) dictionaries. The compute stats method
  adds additional keys such as "avg" or "stddev" here.
  Here's how the keys are structured:
To get a workload, the key looks like this:
(('workload_name', 'tpch'), ('scale_factor', '300gb'))
Each workload has a key that looks like this:
(('name', 'TPCH_Q10'))
Each Query has a key like this:
(('file_format', 'text'), ('compression_codec', 'zip'),
('compression_type', 'block'))
This is useful for finding queries in a certain category and computing stats
Args:
filename (str): path to the JSON file
returns:
    dict: a nested dictionary with grouped queries
"""
def add_result(query_result):
"""Add query to the dictionary.
Automatically finds the path in the nested dictionary and adds the result to the
appropriate list.
TODO: This method is hard to reason about, so it needs to be made more streamlined.
"""
def get_key(level_num):
""
|
"Build a key for a particular nesting level.
The key is built by extracting the appropriate values from query_result.
"""
level = list()
# In the outer layer, we group by workload name and scale factor
level.append([('query', 'workload_name'), ('query', 'scale_factor')])
# In the middle layer, we group by file format and compression type
level.append([('query', 'test_vector', 'file_format'),
('query', 'test_vector', 'compression_codec'),
('query', 'test_vector', 'compression_type')])
# In the bottom layer, we group by query name
level.append([('query', 'name')])
key = []
def get_nested_val(path):
"""given a path to a variable in query result, extract the value.
For example, to extract compression_type from the query_result, we need
|
mdrumond/tensorflow
|
tensorflow/contrib/eager/python/network_test.py
|
Python
|
apache-2.0
| 3,454
| 0.002606
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.eager.python import network
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import core
# pylint: disable=not-callable
class MyNetwork(network.Network):
def __init__(self):
super(MyNetwork, self).__init__(name="abcd")
self.l1 = self.add_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.l1(x)
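# Minimal usage sketch mirroring the tests below (eager execution is assumed to be
# enabled, as it is for these test cases):
#   net = MyNetwork()
#   out = net(constant_op.constant([[3.0]]))  # first call creates l1's variables
#   print(out.numpy())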
class NetworkTest(test.TestCase):
def testTrainableAttribute(self):
net = network.Network()
self.assertTrue(net.trainable)
with self.assertRaises(AttributeError):
net.trainable = False
self.assertTrue(net.trainable)
def testNetworkCall(self):
net = MyNetwork()
net(constant_op.constant([[2.0]])) # Force variables to be created.
self.assertEqual(1, len(net.trainable_variables))
net.trainable_variables[0].assign([[17.0]])
# TODO(josh11b): Support passing Python values to networks.
result = net(constant_op.constant([[2.0]]))
self.assertEqual(34.0, result.numpy())
def testNetworkAsAGraph(self):
self.skipTest("TODO(ashankar,josh11b): FIX THIS")
# Verify that we're using ResourceVariables
def testNetworkVariablesDoNotInterfere(self):
self.skipTest("TODO: FIX THIS")
net1 = MyNetwork()
net2 = MyNetwork()
one = constant_op.constant([[1.]])
print(type(net1(one)))
net2(one)
net1.trainable_weights[0].assign(constant_op.constant([[1.]]))
net2.trainable_weights[0].assign(constant_op.constant([[2.]]))
print("NET1")
print(net1.name)
print(net1.variables)
print(net1(one))
print("NET2")
print(net2.name)
print(net2.variables)
print(net2(one))
class SequentialTest(test.TestCase):
def testTwoLayers(self):
# Create a sequential network with one layer.
net = network.Sequential([core.Dense(1, use_bias=False)])
# Set that layer's weights so it multiplies by 3
l1 = net.get_layer(index=0)
net(constant_op.constant([[2.0]])) # Create l1's variables
self.assertEqual(1, len(l1.trainable_variables))
l1.trainable_variables[0].assign([[3.0]])
self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())
# Add a second layer to the network.
l2 = core.Dense(1, use_bias=False)
net.add_layer(l2)
# Set the second layer's weights so it multiplies by 11
net(constant_op.constant([[2.0]])) # Create l2's variables
self.assertEqual(1, len(l2.trainable_variables))
l2.trainable_variables[0].assign([[11.0]])
self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())
if __name__ == "__main__":
test.main()
|
thelabnyc/django-oscar-wfrs
|
src/wellsfargo/connector/client.py
|
Python
|
isc
| 6,228
| 0.000963
|
from datetime import timedelta
from requests.auth import HTTPBasicAuth
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.core.cache import cache
from ..settings import (
WFRS_GATEWAY_COMPANY_ID,
WFRS_GATEWAY_ENTITY_ID,
WFRS_GATEWAY_API_HOST,
WFRS_GATEWAY_CONSUMER_KEY,
WFRS_GATEWAY_CONSUMER_SECRET,
WFRS_GATEWAY_CLIENT_CERT_PATH,
WFRS_GATEWAY_PRIV_KEY_PATH,
)
from ..security import encrypt_pickle, decrypt_pickle
import requests
import logging
import uuid
logger = logging.getLogger(__name__)
class BearerTokenAuth(requests.auth.AuthBase):
def __init__(self, api_key):
self.api_key = api_key
def __call__(self, request):
request.headers["Authorization"] = "Bearer %s" % self.api_key
return request
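# Minimal usage sketch (the URL is a placeholder): passing this auth object to
# requests attaches an "Authorization: Bearer <api_key>" header to the outgoing call.
#   requests.get("https://api.example.com/ping", auth=BearerTokenAuth("my-key"))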
class WFRSAPIKey:
def __init__(self, api_key, expires_on):
self.api_key = api_key
self.expires_on = expires_on
@property
def is_expired(self):
# Force key rotation 10 minutes before it actually expires
expires_on = self.expires_on - timedelta(minutes=10)
now = timezone.now()
return now >= expires_on
@property
def ttl(self):
return int((self.expires_on - timezone.now()).total_seconds())
def __str__(self):
return "<WFRSAPIKey expires_on=[%s]>" % self.expires_on
class WFRSGatewayAPIClient:
company_id = WFRS_GATEWAY_COMPANY_ID
entity_id = WFRS_GATEWAY_ENTITY_ID
api_host = WFRS_GATEWAY_API_HOST
consumer_key = WFRS_GATEWAY_CONSUMER_KEY
consumer_secret = WFRS_GATEWAY_CONSUMER_SECRET
client_cert_path = WFRS_GATEWAY_CLIENT_CERT_PATH
priv_key_path = WFRS_GATEWAY_PRIV_KEY_PATH
scopes = [
"PLCCA-Prequalifications",
"PLCCA-Applications",
"PLCCA-Payment-Calculations",
"PLCCA-Transactions-Authorization",
"PLCCA-Transactions-Charge",
"PLCCA-Transactions-Authorization-Charge",
"PLCCA-Transactions-Return",
"PLCCA-Transactions-Cancel-Authorization",
"PLCCA-Transactions-Void-Return",
"PLCCA-Transactions-Void-Sale",
"PLCCA-Transactions-Timeout-Authorization-Charge",
"PLCCA-Transactions-Timeout-Return",
"PLCCA-Account-Details",
]
cache_version = 1
@property
def cache_key(self):
return "wfrs-gateway-api-key-{api_host}-{consumer_key}".format(
api_host=self.api_host, consumer_key=self.consumer_key
)
def api_get(self, path, **kwargs):
return self.make_api_request("get", path, **kwargs)
def api_post(self, path, **kwargs):
return self.make_api_request("post", path, **kwargs)
def make_api_request(self, method, path, client_request_id=None, **kwargs):
url = "https://{host}{path}".format(host=self.api_host, path=path)
# Setup authentication
auth = BearerTokenAuth(self.get_api_key().api_key)
cert = None
if self.client_cert_path and self.priv_key_path:
cert = (self.client_cert_path, self.priv_key_path)
# Build headers
request_id = (
str(uuid.uuid4()) if client_request_id is None else str(client_request_id)
)
headers = {
"request-id": request_id,
"gateway-company-id": self.company_id,
"gateway-entity-id": self.entity_id,
}
if client_request_id is not None:
headers["client-request-id"] = str(client_request_id)
# Send request
logger.info(
"Sending WFRS Gateway API request. URL=[%s], RequestID=[%s]",
url,
request_id,
)
request_fn = getattr(requests, method)
resp = request_fn(url, auth=auth, cert=cert, headers=headers, **kwargs)
logger.info(
"WFRS Gateway API request returned. URL=[%s], RequestID=[%s], Status=[%s]",
url,
request_id,
resp.status_code,
)
# Check response for errors
if resp.status_code == 400:
resp_data = resp.json()
errors = []
for err in resp_data.get("errors", []):
exc = ValidationError(err["description"], code=err["error_code"])
errors.append(exc)
raise ValidationError(errors)
# Return response
return resp
def get_api_key(self):
# Check for a cached key
key_obj = self.get_cached_api_key()
if key_obj is None:
key_obj = self.generate_api_key()
self.store_cached_api_key(key_obj)
return key_obj
def get_cached_api_key(self):
# Try to get an API key from cache
encrypted_obj = cache.get(self.cache_key, version=self.cache_version)
if encrypted_obj is None:
return None
# Try to decrypt the object we got from cache
try:
key_obj = decrypt_pickle(encrypted_obj)
except Exception as e:
logger.exception(e)
return None
# Check if the key is expired
if key_obj.is_expired:
return None
# Return the key
return key_obj
def store_cached_api_key(self, key_obj):
# Pickle and encrypt the key object
        encrypted_obj = encrypt_pickle(key_obj)
# Store it in Django's cache for later
cache.set(
self.cache_key, encrypted_obj, key_obj.ttl, version=self.cache_version
)
def generate_api_key(self):
url = "https://{host}/token".format(host=self.api_host)
auth = HTTPBasicAuth(self.consumer_key, self.consumer_secret)
cert = (self.client_cert_path, self.priv_key_path)
req_data = {
"grant_type": "client_credentials",
"scope": " ".join(self.scopes),
}
resp = requests.post(url, auth=auth, cert=cert, data=req_data)
resp.raise_for_status()
resp_data = resp.json()
expires_on = timezone.now() + timedelta(seconds=resp_data["expires_in"])
logger.info("Generated new WFRS API Key. ExpiresIn=[%s]", expires_on)
key_obj = WFRSAPIKey(api_key=resp_data["access_token"], expires_on=expires_on)
return key_obj
|
PythonProgramming/Pandas-Basics-with-2.7
|
pandas 8 - Standard Deviation.py
|
Python
|
mit
| 486
| 0.010288
|
import pandas as pd
from pandas import DataFrame
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
#print df.head()
df['STD'] = pd.rolling_std(df['Close'], 25, min_periods=1)
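# Note: pd.rolling_std was deprecated and later removed; on current pandas the
# equivalent (assuming the same window and min_periods) is:
# df['STD'] = df['Close'].rolling(25, min_periods=1).std()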
ax1 = plt.subplot(2, 1, 1)
df['Close'].plot()
plt.ylabel('Close')
# do not do sharex first
ax2 = plt.subplot(2, 1, 2, sharex = ax1)
df['STD'].plot()
plt.ylabel('Standard Deviation')
plt.show()
|
camptocamp/QGIS
|
python/plugins/processing/ProcessingPlugin.py
|
Python
|
gpl-2.0
| 5,606
| 0.004281
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingPlugin.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing import interface
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import shutil
import inspect
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from processing.commander.CommanderWindow import CommanderWindow
from processing.core.Processing import Processing
from processing.tools import dataobjects
from processing.tools.system import *
from processing.gui.ProcessingToolbox import ProcessingToolbox
from processing.gui.HistoryDialog import HistoryDialog
from processing.gui.ConfigDialog import ConfigDialog
from processing.gui.ResultsDialog import ResultsDialog
from processing.modeler.ModelerDialog import ModelerDialog
import processing.resources_rc
cmd_folder = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
class ProcessingPlugin:
def __init__(self, iface):
interface.iface = iface
Processing.initialize()
def initGui(self):
self.commander = None
self.toolbox = ProcessingToolbox()
interface.iface.addDockWidget(Qt.RightDockWidgetArea, self.toolbox)
self.toolbox.hide()
Processing.addAlgListListener(self.toolbox)
self.menu = QMenu(interface.iface.mainWindow())
self.menu.setTitle(QCoreApplication.translate("Processing", "Processing"))
self.toolboxAction = self.toolbox.toggleViewAction()
self.toolboxAction.setIcon(QIcon(":/processing/images/alg.png"))
self.toolboxAction.setText(QCoreApplication.translate("Processing", "Toolbox"))
self.menu.addAction(self.toolboxAction)
self.modelerAction = QAction(QIcon(":/processing/images/model.png"),
QCoreApplication.translate("Processing", "Graphical modeler"),
interface.iface.mainWindow())
self.modelerAction.triggered.connect(self.openModeler)
self.menu.addAction(self.modelerAction)
self.historyAction = QAction(QIcon(":/processing/images/history.gif"),
QCoreApplication.translate("Processing", "History and log"),
interface.iface.mainWindow())
self.historyAction.triggered.connect(self.openHistory)
self.menu.addAction(self.historyAction)
        self.configAction = QAction(QIcon(":/processing/images/config.png"),
QCoreApplication.translate("Processing", "Options and configuration"),
interface.iface.mainWindow())
self.configAction.triggered.connect(self.openConfig)
self.menu.addAction(self.configAction)
        self.resultsAction = QAction(QIcon(":/processing/images/results.png"),
QCoreApplication.translate("Processing", "&Results viewer"),
interface.iface.mainWindow())
self.resultsAction.triggered.connect(self.openResults)
self.menu.addAction(self.resultsAction)
menuBar = interface.iface.mainWindow().menuBar()
menuBar.insertMenu(interface.iface.firstRightStandardMenu().menuAction(), self.menu)
self.commanderAction = QAction(QIcon(":/processing/images/commander.png"),
QCoreApplication.translate("Processing", "&Commander"),
interface.iface.mainWindow())
self.commanderAction.triggered.connect(self.openCommander)
self.menu.addAction(self.commanderAction)
interface.iface.registerMainWindowAction(self.commanderAction, "Ctrl+Alt+M")
def unload(self):
self.toolbox.setVisible(False)
self.menu.deleteLater()
#delete temporary output files
folder = tempFolder()
if QDir(folder).exists():
shutil.rmtree(folder, True)
interface.iface.unregisterMainWindowAction(self.commanderAction)
def openCommander(self):
if self.commander is None:
self.commander = CommanderWindow(interface.iface.mainWindow(), interface.iface.mapCanvas())
Processing.addAlgListListener(self.commander)
self.commander.prepareGui()
self.commander.show()
#dlg.exec_()
def openToolbox(self):
if self.toolbox.isVisible():
self.toolbox.hide()
else:
self.toolbox.show()
def openModeler(self):
dlg = ModelerDialog()
dlg.exec_()
if dlg.update:
self.toolbox.updateTree()
def openResults(self):
dlg = ResultsDialog()
dlg.exec_()
def openHistory(self):
dlg = HistoryDialog()
dlg.exec_()
def openConfig(self):
dlg = ConfigDialog(self.toolbox)
dlg.exec_()
|
zhaogaolong/oneFinger
|
alarm/admin.py
|
Python
|
apache-2.0
| 150
| 0
|
from django.contrib import admin
import models
# Register your models here.
admin.site.register(models.UserProfile)
admin.site.register(models.Event)
|
dotmanila/pyxbackup
|
tests/all_test.py
|
Python
|
gpl-2.0
| 835
| 0.019162
|
#!/usr/bin/python
import sys
import pyxbackup as pxb
import pytest
def test__parse_port_param():
assert(pxb._parse_port_param('27017,27019')) == True
assert(pxb.xb_opt_remote_nc_port_min) == 27017
assert(pxb.xb_opt_remote_nc_port_max) == 27019
assert(pxb._parse_port_param('27017, 27019')) == True
assert(pxb._parse_port_param('abcde, 27019')) == False
    assert(pxb._parse_port_param('abcde, ')) == False
assert(pxb._parse_port_param('9999, ')) == False
assert(pxb._parse_port_param('9999 ')) == False
assert(pxb._parse_port_param('9999')) == True
assert(pxb.xb_opt_remote_nc_port_min) == 9999
assert(pxb.xb_opt_remote_nc_port_max) == 9999
def test__xb_version():
assert(pxb._xb_version(verstr = '2.2.13')) == [2, 2, 13]
    assert(pxb._xb_version(verstr = '2.2.13', tof = True)) == 2.2
|
sileht/pifpaf
|
pifpaf/drivers/ceph.py
|
Python
|
apache-2.0
| 5,134
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import pkg_resources
from pifpaf import drivers
class CephDriver(drivers.Driver):
DEFAULT_PORT = 6790
def __init__(self, port=DEFAULT_PORT,
**kwargs):
"""Create a new Ceph cluster."""
super(CephDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for Ceph Monitor"},
]
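    # Illustrative only: from the command line this driver is typically started through
    # the pifpaf launcher, e.g. (port value and command are placeholders):
    #   pifpaf run ceph --port 6790 -- <your test command>
    # which exports CEPH_CONF and URL (see the putenv calls below) for the child process.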
def _setUp(self):
super(CephDriver, self)._setUp()
self._ensure_xattr_support()
fsid = str(uuid.uuid4())
conffile = os.path.join(self.tempdir, "ceph.conf")
mondir = os.path.join(self.tempdir, "mon", "ceph-a")
osddir = os.path.join(self.tempdir, "osd", "ceph-0")
os.makedirs(mondir)
os.makedirs(osddir)
_, version = self._exec(["ceph", "--version"], stdout=True)
version = version.decode("ascii").split()[2]
version = pkg_resources.parse_version(version)
if version < pkg_resources.parse_version("12.0.0"):
extra = """
mon_osd_nearfull_ratio = 1
mon_osd_full_ratio = 1
osd_failsafe_nearfull_ratio = 1
osd_failsafe_full_ratio = 1
"""
else:
extra = """
mon_allow_pool_delete = true
"""
        # FIXME(sileht): check available space on /dev/shm
# if os.path.exists("/dev/shm") and os.access('/dev/shm', os.W_OK):
# journal_path = "/dev/shm/$cluster-$id-journal"
# else:
journal_path = "%s/osd/$cluster-$id/journal" % self.tempdir
with open(conffile, "w") as f:
f.write("""[global]
fsid = %(fsid)s
# no auth for now
auth cluster required = none
auth service required = none
auth client required = none
## no replica
osd pool default size = 1
osd pool default min size = 1
osd crush chooseleaf type = 0
## some default path change
run dir = %(tempdir)s
pid file = %(tempdir)s/$type.$id.pid
admin socket = %(tempdir)s/$cluster-$name.asok
mon data = %(tempdir)s/mon/$cluster-$id
osd data = %(tempdir)s/osd/$cluster-$id
osd journal = %(journal_path)s
log file = %(tempdir)s/$cluster-$name.log
mon cluster log file = %(tempdir)s/$cluster.log
# Only omap to have same behavior for all filesystems
filestore xattr use omap = True
# workaround for ext4 and last Jewel version
osd max object name len = 256
osd max object namespace len = 64
osd op threads = 10
filestore max sync interval = 10001
filestore min sync interval = 10000
%(extra)s
journal_aio = false
journal_dio = false
journal zero on create = false
journal block align = false
# run as file owner
setuser match path = %(tempdir)s/$type/$cluster-$id
[mon.a]
host = localhost
mon addr = 127.0.0.1:%(port)d
""" % dict(fsid=fsid, tempdir=self.tempdir, port=self.port,
journal_path=journal_path, extra=extra)) # noqa
ceph_opts = ["ceph", "-c", conffile]
mon_opts = ["ceph-mon", "-c", conffile, "--id", "a", "-d"]
osd_opts = ["ceph-osd", "-c", conffile, "--id", "0", "-d",
"-m", "127.0.0.1:%d" % self.port]
# Create and start monitor
self._exec(mon_opts + ["--mkfs"])
self._touch(os.path.join(mondir, "done"))
mon, _ = self._exec(
mon_opts,
wait_for_line=r"mon.a@0\(leader\).mds e1 print_map")
# Create and start OSD
self._exec(ceph_opts + ["osd", "create"])
self._exec(ceph_opts + ["osd", "crush", "add", "osd.0", "1",
"root=default"])
self._exec(osd_opts + ["--mkfs", "--mkjournal"])
if version < pkg_resources.parse_version("0.94.0"):
wait_for_line = "journal close"
else:
wait_for_line = "done with init"
osd, _ = self._exec(osd_opts, wait_for_line=wait_for_line)
if version >= pkg_resources.parse_version("12.0.0"):
self._exec(ceph_opts + ["osd", "set-full-ratio", "0.95"])
            self._exec(ceph_opts + ["osd", "set-backfillfull-ratio", "0.95"])
self._exec(ceph_opts + ["osd", "set-nearfull-ratio", "0.95"])
# Wait it's ready
out = b""
while b"HEALTH_OK" not in out:
ceph, out = self._exec(ceph_opts + ["health"], stdout=True)
if b"HEALTH_ERR" in out:
raise RuntimeError("Fail to deploy ceph")
self.putenv("CEPH_CONF", conffile, True)
self.putenv("CEPH_CONF", conffile)
self.putenv("URL", "ceph://localhost:%d" % self.port)
|
codemedic/retext
|
retext.py
|
Python
|
gpl-3.0
| 1,988
| 0.017606
|
#!/usr/bin/env python3
# ReText
# Copyright 2011-2012 Dmitry Shachnev
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import sys
import signal
from ReText import *
from ReText.window import ReTextWindow
def main():
app = QApplication(sys.argv)
app.setOrganizationName("ReText project")
app.setApplicationName("ReText")
RtTranslator = QTranslator()
for path in datadirs:
if RtTranslator.load('retext_'+QLocale.system().name(), path+'/locale'):
break
QtTranslator = QTranslator()
QtTranslator.load("qt_"+QLocale.system().name(), QLibraryInfo.location(QLibraryInfo.TranslationsPath))
app.installTranslator(RtTranslator)
app.installTranslator(QtTranslator)
if settings.contains('appStyleSheet'):
stylename = readFromSettings('appStyleSheet', str)
sheetfile = QFile(stylename)
sheetfile.open(QIODevice.ReadOnly)
app.setStyleSheet(QTextStream(sheetfile).readAll())
sheetfile.close()
window = ReTextWindow()
window.show()
fileNames = [QFileInfo(arg).canonicalFilePath() for arg in sys.argv[1:]]
for fileName in fileNames:
try:
fileName = QString.fromUtf8(fileName)
except:
# Not needed for Python 3
pass
if QFile.exists(fileName):
window.openFileWrapper(fileName)
signal.signal(signal.SIGINT, lambda sig, frame: window.close())
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
SKIRT/PTS
|
modeling/build/construct.py
|
Python
|
agpl-3.0
| 25,266
| 0.00661
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.build.construct Contains functions to construct ski files from model definitions.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.log import log
from ...core.filter.filter import parse_filter
from ...core.tools.introspection import skirt_main_version, has_skirt
from ...core.tools.stringify import tostr
from ...core.filter.filter import Filter
from ...core.tools import types
# -----------------------------------------------------------------
# Check SKIRT version
if not has_skirt(): version_number = 8
else: version_number = skirt_main_version()
# Set flags
if version_number == 7:
skirt7 = True
skirt8 = False
elif version_number == 8:
skirt7 = False
skirt8 = True
else: raise RuntimeError("Invalid SKIRT version number")
# -----------------------------------------------------------------
def add_stellar_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adding stellar component '" + name + "' to the ski file ...")
# THIS HAS TO COME FIRST!!
# If an input map is required
if "map_path" in component: filename = set_stellar_input_map(name, component)
else: filename = None
# NEW COMPONENT OR ADJUST EXISTING
if title is not None and not ski.has_stellar_component(title): add_new_stellar_component(ski, name, component, title=title)
else: adjust_stellar_component(ski, name, component, title=title)
# Return the input filename
return filename
# -----------------------------------------------------------------
def add_new_stellar_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
# Debugging
log.debug("Adding new stellar component '" + name + "' to the ski file ...")
# From properties
if component.properties is not None:
# Check title
if title is None: log.warning("Title for the component '" + name + "' is not given")
# Add component
ski.add_stellar_component(component.properties, title=title)
return
# Initialize properties
geometry = None
geometry_type = None
geometry_properties = None
sed_type = None
sed_properties = None
normalization_type = None
normalization_properties = None
luminosities = [1]
sed_template = None
age = None
metallicity = None
compactness = None
pressure = None
covering_factor = None
luminosity = None
filter_or_wavelength = None
# Set properties of the component
if "model" in component: geometry = component.model
elif "deprojection" in component: geometry = component.deprojection
# Parameters are defined
if component.parameters is not None:
# Check if this is a new component (geometry not defined above): add geometry, SED and normalization all at once
if "geometry" in component.parameters:
# Get class names
geometry_type = component.parameters.geometry
sed_type = component.parameters.sed
normalization_type = component.parameters.normalization
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
sed_properties = component.properties["sed"]
        normalization_properties = component.properties["normalization"]
# Component with MAPPINGS template (geometry defined above)
elif "sfr" in component.parameters: #set_stellar_component_mappings(ski, component)
# Set template for MAPPINGS
            sed_template = "Mappings"
# Get SED properties
metallicity = component.parameters.metallicity
compactness = component.parameters.compactness
pressure = component.parameters.pressure
covering_factor = component.parameters.covering_factor
# Get normalization
fltr = parse_filter(component.parameters.filter)
luminosity = component.parameters.luminosity
# Determine the normalization wavelength
filter_or_wavelength = fltr.center
# Existing component, no MAPPINGS
else: # set_stellar_component(ski, component)
# Get SED properties
sed_template = component.parameters.template
age = component.parameters.age
metallicity = component.parameters.metallicity
# Get normalization
luminosity = component.parameters.luminosity
# Determine the normalization wavelength
if "wavelength" in component.parameters: wavelength = component.parameters.wavelength
elif "filter" in component.parameters:
fltr = parse_filter(component.parameters.filter)
wavelength = fltr.wavelength
else: raise ValueError("Neither wavelength nor filter is defined in the component parameters")
# Set the normalization to the wavelength
filter_or_wavelength = wavelength
# Check whether title is defined
if title is None: log.warning("Title for the component '" + name + "' is not defined")
# Set normalization type
if normalization_type is None:
if filter_or_wavelength is None: raise ValueError("Cannot determine normalization type")
if isinstance(filter_or_wavelength, Filter): normalization_type = "LuminosityStellarCompNormalization"
elif types.is_length_quantity(filter_or_wavelength): normalization_type = "SpectralLuminosityStellarCompNormalization"
else: normalization_type = "BolLuminosityStellarCompNormalization" #raise ValueError("Unrecognized filter of wavelength of type '" + str(type(filter_or_wavelength)))
# Set stellar component properties
properties = dict()
properties["geometry"] = geometry
properties["geometry_type"] = geometry_type
properties["geometry_properties"] = geometry_properties
properties["sed_type"] = sed_type
properties["sed_properties"] = sed_properties
properties["normalization_type"] = normalization_type
properties["normalization_properties"] = normalization_properties
properties["luminosities"] = luminosities
properties["sed_template"] = sed_template
properties["age"] = age
properties["metallicity"] = metallicity
properties["compactness"] = compactness
properties["pressure"] = pressure
properties["covering_factor"] = covering_factor
properties["luminosity"] = luminosity
properties["filter_or_wavelength"] = filter_or_wavelength
# Show properties
log.debug("")
log.debug("Stellar component properties:")
log.debug("")
for label in properties:
if label == "geometry":
log.debug(" - geometry:")
for parameter in properties[label]:
value = properties[label][parameter]
if value is None: continue
log.debug(" * " + parameter + ": " + tostr(value))
else:
value = properties[label]
if value is None: continue
log.debug(" - " + label + ": " + tostr(value))
log.debug("")
# Create new component
ski.create_new_stellar_component(title, **properties)
# -----------------------------------------------------------------
def adjust_stellar_component(ski, name, component, title=None):
"""
This function ...
:param ski:
:param name:
:param component:
:param title:
:return:
"""
    # Debugging
|
insiderr/insiderr-app
|
app/modules/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py
|
Python
|
gpl-3.0
| 19,942
| 0.001254
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from modules.oauthlib import common
from modules.oauthlib.uri_validate import is_absolute_uri
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class AuthorizationCodeGrant(GrantTypeBase):
"""`Authorization Code Grant`_
The authorization code grant type is used to obtain both access
tokens and refresh tokens and is optimized for confidential clients.
Since this is a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI ---->| |
    | User- | | Authorization |
| Agent -+----(B)-- User authenticates --->| Server |
| | | |
| -+----(C)-- Authorization Code ---<| |
+-|----|---+ +---------------+
| | ^ v
    (A) (C) | |
| | | |
^ v | |
+---------+ | |
| |>---(D)-- Authorization Code ---------' |
| Client | & Redirection URI |
| | |
| |<---(E)----- Access Token -------------------'
+---------+ (w/ Optional Refresh Token)
Note: The lines illustrating steps (A), (B), and (C) are broken into
two parts as they pass through the user-agent.
Figure 3: Authorization Code Flow
The flow illustrated in Figure 3 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier (in the request or during
client registration). The redirection URI includes an
authorization code and any local state provided by the client
earlier.
(D) The client requests an access token from the authorization
server's token endpoint by including the authorization code
received in the previous step. When making the request, the
client authenticates with the authorization server. The client
includes the redirection URI used to obtain the authorization
code for verification.
(E) The authorization server authenticates the client, validates the
authorization code, and ensures that the redirection URI
received matches the URI used to redirect the client in
step (C). If valid, the authorization server responds back with
an access token and, optionally, a refresh token.
.. _`Authorization Code Grant`: http://tools.ietf.org/html/rfc6749#section-4.1
"""
def __init__(self, request_validator=None):
self.request_validator = request_validator or RequestValidator()
def create_authorization_code(self, request):
"""Generates an authorization grant represented as a dictionary."""
grant = {'code': common.generate_token()}
if hasattr(request, 'state') and request.state:
grant['state'] = request.state
log.debug('Created authorization code grant %r for request %r.',
grant, request)
return grant
def create_authorization_response(self, request, token_handler):
"""
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "code".
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The client directs the resource owner to the constructed URI using an
HTTP redirection response, or by other means available to it via the
user-agent.
        :param request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
:returns: headers, body, status
:raises: FatalClientError on invalid redirect URI or client id.
ValueError if scopes are not set on the request object.
A few examples::
>>> from your_validator import your_validator
>>> request = Request('https://example.com/authorize?client_id=valid'
... '&redirect_uri=http%3A%2F%2Fclient.com%2F')
>>> from oauthlib.common import Request
>>> from oauthlib.oauth2 import AuthorizationCodeGrant, BearerToken
>>> token = BearerToken(your_validator)
>>> grant = AuthorizationCodeGrant(your_validator)
>>> grant.create_authorization_response(request, token)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/grant_types.py", line 513, in create_authorization_response
raise ValueError('Scopes must be set on post auth.')
ValueError: Scopes must be set on post auth.
>>> request.scopes = ['authorized', 'in', 'some', 'form']
>>> grant.create_authorization_response(request, token)
(u'http://client.com/?error=invalid_request&error_description=Missing+response_type+parameter.', None, None, 400)
>>> request = Request('https://example.com/authorize?client_id=valid'
... '&redirect_uri=http%3A%2F%2Fclient.com%2F'
... '&response_type=code')
>>> request.scopes = ['authorized', 'in', 'some', 'form']
>>> grant.create_authorization_response(request, token)
(u'http://client.com/?code=u3F05aEObJuP2k7DordviIgW5wl52N', None, None, 200)
>>> # If the client id or redirect uri fails validation
>>> grant.create_authorization_response(request, token)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/
|
andreas-koukorinis/ambhas
|
ambhas/amsr2.py
|
Python
|
lgpl-2.1
| 3,408
| 0.010857
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 11:30:24 2013
@author: Sat Kumar Tomer
@email: satkumartomer@gmail.com
@website: www.ambhas.com
"""
import numpy as np
import h5py
import os
import datetime as dt
def extract_smc(h5_file, lat, lon):
"""
Extract Soil Moisture Content from AMSR2 h5 products
Input:
h5_file: a single file name
lat: latitude, either a single value or min,max limits
eg.
lat = 12
lat = [10,15]
lon: longitude, either a single value or min,max limits
eg. as for lat
"""
res = 0.1
######### convert lat, lon into indices ##############
# min max are given
min_max = type(lat) is list
if min_max:
lat_min = lat[0]
lat_max = lat[1]
i_lat_min = int(np.floor((90-lat_min)/res))
i_lat_max = int(np.floor((90-lat_max)/res))
lon_min = lon[0]
lon_max = lon[1]
if lon_min<0: lon_min += 360
if lon_max<0: lon_max += 360
j_lon_min = int(np.floor(lon_min/res))
j_lon_max = int(np.floor(lon_max/res))
else: # if only single value of lat, lon is given
i_lat = np.floor((90-lat)/res)
i_lat = i_lat.astype(int)
lon1 = np.copy(lon)
if lon1<0:
lon1 += 360
j_lon = np.floor(lon1/res)
j_lon = j_lon.astype(int)
# read the data
if type(h5_file) is str:
f = h5py.File(h5_file, "r")
if min_max:
smc = f["Geophysical Data"][i_lat_max:i_lat_min+1, j_lon_min:j_lon_max+1,0]
else:
smc = f["Geophysical Data"][i_lat, j_lon,0]
elif type(h5_file) is list:
n = len(h5_file)
if min_max:
nlat = i_lat_min+1 - i_lat_max
nlon = j_lon_max+1 - j_lon_min
smc = np.empty((n, nlat, nlon))
for h5_f,i in zip(h5_file, range(n)):
f = h5py.File(h5_f, "r")
smc[i,:,:] = f["Geophysical Data"][i_lat_max:i_lat_min+1, j_lon_min:j_lon_max+1,0]
f.close()
else:
smc = np.empty(n,)
for h5_f,i in zip(h5_file, range(n)):
f = h5py.File(h5_f, "r")
smc[i] = f["Geophysical Data"][i_lat, j_lon,0]
f.close()
try:
smc[smc<0] = np.nan
except:
if smc <0: smc = np.nan
return smc
def extract_dates(h5_file):
h5_dates = []
for h5_f in h5_file:
foo = os.path.basename(h5_f)[7:15]
        h5_dates.append(dt.datetime.strptime(foo, '%Y%m%d'))
return h5_dates
def extract_orbit(h5_file):
asc = []
for h5_f in h5_file:
f = h5py.File(h5_f, "r")
if f.attrs['OrbitDirection'][0] == 'Ascending':
asc.append(True)
elif f.attrs['OrbitDirection'][0] == 'Descending':
asc.append(False)
else:
asc.append(None)
f.close()
    return asc
if __name__ == "__main__":
import glob
h5_file = '/home/tomer/amsr2/data/h5/GW1AM2_20130722_01D_EQMD_L3SGSMCHA1100100.h5'
h5_file = glob.glob('/home/tomer/amsr2/data/h5/GW1AM2_201?????_01D*.h5')
h5_file.sort()
h5_file = h5_file[:5]
lat = [8, 38]
lon = [68, 98]
sm = extract_smc(h5_file, lat, lon)
sm_dates = extract_dates(h5_file)
asc = extract_orbit(h5_file)
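The __main__ block above only exercises the bounding-box form of extract_smc; the docstring also allows a single lat/lon value, so a minimal usage sketch of that form follows (the import path is assumed from the repository layout, and the file path and coordinates are placeholders):

from ambhas.amsr2 import extract_smc, extract_dates  # import path assumed from ambhas/amsr2.py

h5_files = ['/path/to/GW1AM2_20130722_01D_EQMD_L3SGSMCHA1100100.h5']  # placeholder path
sm_point = extract_smc(h5_files, 12.97, 77.59)  # single lat/lon -> one soil moisture value per file
sm_dates = extract_dates(h5_files)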
|
open-power-ref-design/opsmgr
|
plugins/devices/powernode/opsmgr/plugins/devices/powernode/PowerNodePlugin.py
|
Python
|
apache-2.0
| 5,846
| 0.002737
|
# Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from opsmgr.common import constants
from opsmgr.common import exceptions
from opsmgr.common.utils import entry_exit, execute_command
from opsmgr.inventory.interfaces import IManagerDevicePlugin
class PowerNodePlugin(IManagerDevicePlugin.IManagerDevicePlugin):
IPMI_TOOL = "/usr/local/bin/ipmitool"
def __init__(self):
self.host = None
self.userid = None
self.password = None
self.version = None
self.machine_type_model = ""
self.serial_number = ""
@staticmethod
def get_type():
return "PowerNode"
@staticmethod
def get_web_url(host):
return "https://" + host
@staticmethod
def get_capabilities():
return [constants.MONITORING_CAPABLE]
@entry_exit(exclude_index=[0, 3, 4], exclude_name=["self", "password", "ssh_key_string"])
def connect(self, host, userid, password=None, ssh_key_string=None):
"""connect to the BMC and store the mtm and serial number
"""
_method_ = "PowerNodePlugin.connect"
self.host = host
self.userid = userid
self.password = password
if ssh_key_string is not None:
raise exceptions.AuthenticationException("SSH Key Authentication "
"is not supported for PowerNode devices")
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H",
host, "-U", userid, "-P", password, "fru", "print"]
(_rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
logging.warning("%s::ipmi query standard error output %s", _method_, stderr)
for line in stderr:
if "Unable to establish IPMI" in line:
raise exceptions.ConnectionException(
"Unable to connect to the device using IPMI")
for line in stdout:
if "Chassis Part Number" in line:
self.machine_type_model = line.split(":")[1].strip()
elif "Chassis Serial" in line:
self.serial_number = line.split(":")[1].strip()
@entry_exit(exclude_index=[0], exclude_name=["self"])
def disconnect(self):
pass
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_machine_type_model(self):
return self.machine_type_model
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_serial_number(self):
return self.serial_number
@entry_exit(exclude_index=[0], exclude_name=["self"])
def get_version(self):
_method_ = "PowerNodePlugin.get_version"
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host,
"-U", self.userid, "-P", self.password, "mc", "info"]
(rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
if rc != 0:
logging.warning("%s::ipmi query failed with output %s", _method_, stderr)
raise exceptions.DeviceException("ipmi query failed with output %s" % stderr)
for line in stdout:
if "Firmware Revision" in line:
self.version = line.split(":")[1].strip()
break
return self.version
    @entry_exit(exclude_index=[0], exclude_name=["self"])
def get_architecture(self):
return None
@entry_exit(exclude_index=[0, 1], exclude_name=["self", "new_password"])
def change_device_password(self, new_password):
"""Update the password of the ipmi default user on the BMC of the openpower server.
"""
_method_ = "PowerNodePlugin.change_device_password"
        user_number = self._get_user_number()
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host, "-U", self.userid,
"-P", self.password, "user", "set", "password", user_number, new_password]
(rc, _stdout, stderr) = execute_command(" ".join(cmd_parms))
if rc != 0:
logging.error("%s::ipmi password change failed with output %s", _method_, stderr)
raise exceptions.DeviceException("ipmi password change failed with output %s" % stderr)
@entry_exit(exclude_index=[0], exclude_name=["self"])
def _get_user_number(self):
"""Each user in IPMI has a number associated with that is used on the command line
when modifying a user. This method will find the number associated with the userid
"""
_method_ = "PowerNodePlugin._get_user_number"
user_id = None
cmd_parms = [self.IPMI_TOOL, "-I", "lanplus", "-H", self.host, "-U", self.userid,
"-P", self.password, "user", "list"]
(rc, stdout, stderr) = execute_command(" ".join(cmd_parms))
if rc != 0:
logging.warning("%s::ipmi query failed with output %s", _method_, stderr)
raise exceptions.DeviceException("ipmi query failed with output %s" % stderr)
for line in stdout:
ids = line.split()[0]
user = line.split()[1]
if user == self.userid:
user_id = ids
break
if user_id:
return user_id
else:
raise exceptions.DeviceException("Failed to determine the id for the user: %s" %
self.userid)
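A hedged usage sketch of the plugin above, driven directly rather than through the opsmgr inventory framework; the import path is assumed from the file's location, and the BMC address and credentials are placeholders:

from opsmgr.plugins.devices.powernode.PowerNodePlugin import PowerNodePlugin  # import path assumed

plugin = PowerNodePlugin()
plugin.connect("10.0.0.42", "ADMIN", password="secret")  # placeholder BMC host and credentials
print(plugin.get_machine_type_model(), plugin.get_serial_number())
print(plugin.get_version())  # parsed from "ipmitool ... mc info"
plugin.disconnect()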
|
matty-jones/MorphCT
|
morphct/definitions.py
|
Python
|
gpl-3.0
| 419
| 0.002387
|
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEST_ROOT = os.path.join(os.path.dirname(PROJECT_ROOT), "tests")
SINGLE_ORCA_RUN_FILE = os.path.join(PROJECT_ROOT, "code", "single_core_run_orca.py")
SINGLE_RUN_MOB_KMC_FILE = os.path.join(
PROJECT_ROOT, "code", "single_core_run_
|
mob_KMC.py"
)
SINGLE_RUN_DEVICE_KMC_FILE = os.path.join(
PROJECT_ROOT, "code", "single_core_run_device_KMC.py"
)
|
lukleh/TwistedBot
|
twistedbot/botentity.py
|
Python
|
mit
| 16,437
| 0.000973
|
import math
import config
import utils
import packets
import logbot
import fops
import blocks
import behavior_tree as bt
from axisbox import AABB
log = logbot.getlogger("BOT_ENTITY")
class BotObject(object):
def __init__(self):
self.velocities = utils.Vector(0.0, 0.0, 0.0)
self.direction = utils.Vector2D(0, 0)
self._x = 0
self._y = 0
self._z = 0
self.stance_diff = config.PLAYER_EYELEVEL
self.pitch = None
self.yaw = None
self.on_ground = False
self.is_collided_horizontally = False
self.horizontally_blocked = False
        self.action = 2  # normal
self._action = self.action
self.is_jumping = False
self.hold_position_flag = True
def set_xyz(self, x, y, z):
self._x = x
self._y = y
self._z = z
self._aabb = AABB.from_player_coords(self.position)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
@property
def position(self):
        return utils.Vector(self.x, self.y, self.z)
@property
def position_grid(self):
return utils.Vector(self.grid_x, self.grid_y, self.grid_z)
@property
def position_eyelevel(self):
return utils.Vector(self.x, self.y_eyelevel, self.z)
@property
def y_eyelevel(self):
return self.y + config.PLAYER_EYELEVEL
@property
def stance(self):
return self.y + self.stance_diff
@property
def grid_x(self):
return utils.grid_shift(self.x)
@property
def grid_y(self):
return utils.grid_shift(self.y)
@property
def grid_z(self):
return utils.grid_shift(self.z)
@property
def aabb(self):
return self._aabb
@aabb.setter
def aabb(self, v):
raise Exception('setting bot aabb')
class BotEntity(object):
def __init__(self, world, name):
self.world = world
self.name = name
self.bot_object = BotObject()
self.eid = None
self.chunks_ready = False
self.ready = False
self.i_am_dead = False
self.location_received = False
self.check_location_received = False
self.spawn_point_received = False
self.behavior_tree = bt.BehaviorTree(self.world, self)
def on_connection_lost(self):
if self.location_received:
self.location_received = False
self.chunks_ready = False
def new_location(self, x, y, z, stance, grounded, yaw, pitch):
self.bot_object.set_xyz(x, y, z)
self.bot_object.stance_diff = stance - y
self.bot_object.on_ground = grounded
self.bot_object.yaw = yaw
self.bot_object.pitch = pitch
self.bot_object.velocities = utils.Vector(0.0, 0.0, 0.0)
self.check_location_received = True
if self.location_received is False:
self.location_received = True
if not self.in_complete_chunks(self.bot_object):
log.msg("Server sent me into incomplete chunks, will wait until they load up.")
self.ready = False
def in_complete_chunks(self, b_obj):
return self.world.grid.aabb_in_complete_chunks(b_obj.aabb)
def tick(self):
if self.location_received is False:
return
if not self.ready:
self.ready = self.in_complete_chunks(self.bot_object) and self.spawn_point_received
if not self.ready:
return
self.move(self.bot_object)
self.bot_object.direction = utils.Vector2D(0, 0)
self.send_location(self.bot_object)
self.send_action(self.bot_object)
self.stop_sneaking(self.bot_object)
if not self.i_am_dead:
utils.do_now(self.behavior_tree.tick)
def send_location(self, b_obj):
self.world.send_packet("player position&look", {
"position": packets.Container(x=b_obj.x, y=b_obj.y, z=b_obj.z,
stance=b_obj.stance),
"orientation": packets.Container(yaw=b_obj.yaw, pitch=b_obj.pitch),
"grounded": packets.Container(grounded=b_obj.on_ground)})
def send_action(self, b_obj):
"""
sneaking, not sneaking, leave bed, start sprinting, stop sprinting
"""
if b_obj.action != b_obj._action:
b_obj.action = b_obj._action
self.world.send_packet("entity action", {"eid": self.eid, "action": b_obj._action})
def turn_to_point(self, b_obj, point):
if point.x == b_obj.x and point.z == b_obj.z:
return
yaw, pitch = utils.yaw_pitch_between(point, b_obj.position_eyelevel)
if yaw is None or pitch is None:
return
b_obj.yaw = yaw
b_obj.pitch = pitch
def turn_to_direction(self, b_obj, x, y, z):
if x == 0 and z == 0:
return
yaw, pitch = utils.vector_to_yaw_pitch(x, y, z)
b_obj.yaw = yaw
b_obj.pitch = pitch
def turn_to_vector(self, b_obj, vect):
if vect.x == 0 and vect.z == 0:
return
yaw, pitch = utils.vector_to_yaw_pitch(vect.x, vect.y, vect.z)
b_obj.yaw = yaw
b_obj.pitch = pitch
def clip_abs_velocities(self, b_obj):
if abs(b_obj.velocities.x) < 0.005: # minecraft value
b_obj.velocities.x = 0
if abs(b_obj.velocities.y) < 0.005: # minecraft value
b_obj.velocities.y = 0
if abs(b_obj.velocities.z) < 0.005: # minecraft value
b_obj.velocities.z = 0
def clip_ladder_velocities(self, b_obj):
if self.is_on_ladder(b_obj):
if b_obj.velocities.y < -0.15:
b_obj.velocities.y = -0.15
if abs(b_obj.velocities.x) > 0.15:
b_obj.velocities.x = math.copysign(0.15, b_obj.velocities.x)
if abs(b_obj.velocities.z) > 0.15:
b_obj.velocities.z = math.copysign(0.15, b_obj.velocities.z)
if self.is_sneaking(b_obj) and b_obj.velocities.y < 0:
b_obj.velocities.y = 0
def handle_water_movement(self, b_obj):
is_in_water = False
water_current = utils.Vector(0, 0, 0)
bb = b_obj.aabb.expand(-0.001, -0.401, -0.001)
top_y = utils.grid_shift(bb.max_y + 1)
for blk in self.world.grid.blocks_in_aabb(bb):
if isinstance(blk, blocks.BlockWater):
if top_y >= (blk.y + 1 - blk.height_percent):
is_in_water = True
water_current = blk.add_velocity_to(water_current)
if water_current.size > 0:
water_current.normalize()
wconst = 0.014
water_current = water_current * wconst
b_obj.velocities = b_obj.velocities + water_current
return is_in_water
def handle_lava_movement(self, b_obj):
for blk in self.world.grid.blocks_in_aabb(
b_obj.aabb.expand(-0.1,
-0.4,
-0.1)):
if isinstance(blk, blocks.BlockLava):
return True
return False
def move_collisions(self, b_obj, vx, vy, vz):
if self.is_in_web(b_obj):
vx *= 0.25
vy *= 0.05000000074505806
vz *= 0.25
b_obj.velocities.x = 0
b_obj.velocities.y = 0
b_obj.velocities.z = 0
aabbs = self.world.grid.collision_aabbs_in(b_obj.aabb.extend_to(vx, vy, vz))
b_bb = b_obj.aabb
dy = vy
if not fops.eq(vy, 0):
for bb in aabbs:
dy = b_bb.calculate_axis_offset(bb, dy, 1)
b_bb = b_bb.offset(dy=dy)
dx = vx
if not fops.eq(vx, 0):
for bb in aabbs:
dx = b_bb.calculate_axis_offset(bb, dx, 0)
b_bb = b_bb.offset(dx=dx)
dz = vz
if not fops.eq(vz, 0):
for bb in aabbs:
dz = b_bb.calculate_axis_offset(bb, dz, 2)
b_bb = b_bb.offset(dz=dz)
if vy != dy and vy < 0 and (dx != vx or dz != vz):
|
npalko/uRPC
|
python/urpc/__init__.py
|
Python
|
bsd-3-clause
| 78
| 0.012821
|
import Decimal_pb2
import Log_pb2
import uRPC_pb2
import client
import server
|
maurodoglio/taar
|
tests/test_hybrid_recommender.py
|
Python
|
mpl-2.0
| 3,508
| 0.000285
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Test cases for the TAAR Hybrid recommender
"""
from taar.recommenders.hybrid_recommender import CuratedRecommender
from taar.recommenders.hybrid_recommender import HybridRecommender
from taar.recommenders.ensemble_recommender import EnsembleRecommender
from taar.recommenders.s3config import TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY
# from taar.recommenders.hybrid_recommender import ENSEMBLE_WEIGHTS
from .test_ensemblerecommender import install_mock_ensemble_data
from .mocks import MockRecommenderFactory
import json
from moto import mock_s3
import boto3
def install_no_curated_data(ctx):
ctx = ctx.child()
conn = boto3.resource("s3", region_name="us-west-2")
conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(Body="")
return ctx
def install_mock_curated_data(ctx):
mock_data = []
for i in range(20):
mock_data.append(str(i) * 16)
ctx = ctx.child()
conn = boto3.resource("s3", region_name="us-west-2")
    conn.create_bucket(Bucket=TAAR_WHITELIST_BUCKET)
conn.Object(TAAR_WHITELIST_BUCKET, TAAR_WHITELIST_KEY).put(
Body=json.dumps(mock_data)
)
return ctx
def install_ensemble_fixtures(ctx):
ctx = install_mock_ensemble_data(ctx)
factory = MockRecommenderFactory()
ctx["recommender_factory"] = factory
ctx["recommender_map"] = {
"colla
|
borative": factory.create("collaborative"),
"similarity": factory.create("similarity"),
"locale": factory.create("locale"),
}
ctx["ensemble_recommender"] = EnsembleRecommender(ctx.child())
return ctx
@mock_s3
def test_curated_can_recommend(test_ctx):
ctx = install_no_curated_data(test_ctx)
r = CuratedRecommender(ctx)
# CuratedRecommender will always recommend something no matter
# what
assert r.can_recommend({})
assert r.can_recommend({"installed_addons": []})
@mock_s3
def test_curated_recommendations(test_ctx):
ctx = install_mock_curated_data(test_ctx)
r = CuratedRecommender(ctx)
# CuratedRecommender will always recommend something no matter
# what
for LIMIT in range(1, 5):
guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT)
# The curated recommendations should always return with some kind
# of recommendations
assert len(guid_list) == LIMIT
@mock_s3
def test_hybrid_recommendations(test_ctx):
# verify that the recommendations mix the curated and
# ensemble results
ctx = install_mock_curated_data(test_ctx)
ctx = install_ensemble_fixtures(ctx)
r = HybridRecommender(ctx)
# Test that we can generate lists of results
for LIMIT in range(4, 8):
guid_list = r.recommend({"client_id": "000000"}, limit=LIMIT)
# The curated recommendations should always return with some kind
# of recommendations
assert len(guid_list) == LIMIT
# Test that the results are actually mixed
guid_list = r.recommend({"client_id": "000000"}, limit=4)
# A mixed list will have two recommendations with weight > 1.0
# (ensemble) and 2 with exactly weight 1.0 from the curated list
assert guid_list[0][1] > 1.0
assert guid_list[1][1] > 1.0
assert guid_list[2][1] == 1.0
assert guid_list[3][1] == 1.0
|
cathywu/flow
|
tests/slow_tests/test_baselines.py
|
Python
|
mit
| 2,809
| 0
|
import unittest
import os
from flow.benchmarks.baselines.bottleneck0 import bottleneck0_baseline
from flow.benchmarks.baselines.bottleneck1 import bottleneck1_baseline
from flow.benchmarks.baselines.bottleneck2 import bottleneck2_baseline
from flow.benchmarks.baselines.figureeight012 import figure_eight_baseline
from flow.benchmarks.baselines.grid0 import grid0_baseline
from flow.benchmarks.baselines.grid1 import grid1_baseline
from flow.benchmarks.baselines.merge012 import merge_baseline
os.environ["TEST_FLAG"] = "True"
class TestBaselines(unittest.TestCase):
"""
Tests that the baselines in the benchmarks folder are running and
returning expected values (i.e. values that match those in the CoRL paper
reported on the website, or other).
"""
def test_bottleneck0(self):
"""
Tests flow/benchmark/baselines/bottleneck0.py
"""
# run the bottleneck to make sure it runs
        bottleneck0_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_bottleneck1(self):
"""
Tests flow/benchmark/baselines/bottleneck1.py
"""
# run the bottleneck to make sure it runs
bottleneck1_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_bottleneck2(self):
"""
Tests flow/benchmark/baselines/bottleneck2.py
"""
# run the bottleneck to make sure it runs
bottleneck2_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_figure_eight(self):
"""
Tests flow/benchmark/baselines/figureeight{0,1,2}.py
"""
# run the bottleneck to make sure it runs
figure_eight_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_grid0(self):
"""
Tests flow/benchmark/baselines/grid0.py
"""
# run the bottleneck to make sure it runs
grid0_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_grid1(self):
"""
Tests flow/benchmark/baselines/grid1.py
"""
# run the bottleneck to make sure it runs
grid1_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
def test_merge(self):
"""
Tests flow/benchmark/baselines/merge{0,1,2}.py
"""
# run the bottleneck to make sure it runs
merge_baseline(num_runs=1, render=False)
# TODO: check that the performance measure is within some range
if __name__ == '__main__':
unittest.main()
|
h2020-westlife-eu/VRE
|
api/migrations/0010_auto_20160121_1536.py
|
Python
|
mit
| 4,758
| 0.002102
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0009_dummyprovider'),
]
operations = [
migrations.CreateModel(
name='ExternalCredentials',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('provider_name', models.CharField(max_length=1024)),
('username', models.CharField(max_length=1024)),
('password', models.CharField(max_length=1024)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalForm',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
('template_name', models.CharField(max_length=1024)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalFormGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=1024)),
('parent', models.ForeignKey(to='api.ExternalJobPortalFormGroup', null=True)),
('portal', models.ForeignKey(to='api.ExternalJobPortal')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalSubmission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('data', models.TextField()),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(to='api.ExternalJobPortal')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExternalJobPortalSubmissionStateChange',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
                ('state', models.CharField(max_length=256, choices=[(b'EXTERNAL_SUBMISSION_RUNNING', b'Running'), (b'EXTERNAL_SUBMISSION_FAILED', b'FAILED'), (b'EXTERNAL_SUBMISSION_PENDING', b'Pending'), (b'EXTERNAL_SUBMISSION_PENDING_SUBMISSION', b'Submission in progress'), (b'EXTERNAL_SUBMISSION_SUCCESS', b'Succeeded')])),
('external_submission', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='externaljobportalform',
name='parent',
field=models.ForeignKey(to='api.ExternalJobPortalFormGroup', null=True),
),
migrations.AddField(
model_name='externaljobportalform',
name='portal',
field=models.ForeignKey(to='api.ExternalJobPortal'),
),
]
|
github/codeql
|
python/ql/test/3/query-tests/Classes/equals-hash/equals_hash.py
|
Python
|
mit
| 1,147
| 0.012206
|
#Equals and hash
class Eq(object):
def __init__(self, data):
self.data = data
def __eq__(self, other):
return self.data == other.data
class Ne(object):
def __init__(self, data):
self.data = data
def __ne__(self, other):
return self.data != other.data
class Hash(object):
def __init__(self, data):
self.data = data
def __hash__(self):
return hash(self.data)
class Unhashable1(object):
__hash__ = None
class EqOK1(Unhashable1):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
class Unhashable2(object):
#Not the idiomatic way of doing it, but not uncommon either
def __hash__(self):
raise TypeError("unhashable object")
class EqOK2(Unhashable2):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
class ReflectiveNotEquals(object):
def __ne__(self, other):
return not self == other
class EqOK3(ReflectiveNotEquals, Unhashable1):
def __eq__(self, other):
return self.data == other.data
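The classes above exist to exercise the equals/hash consistency queries; as a reminder of why the __hash__ = None idiom matters, in Python 3 defining __eq__ without __hash__ already makes instances unhashable, as this small sketch (not part of the test file) shows:

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

try:
    hash(Point(1, 2))  # __eq__ without __hash__ sets __hash__ to None in Python 3
except TypeError as exc:
    print(exc)  # unhashable type: 'Point'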
|
pferreir/indico-backup
|
bin/utils/db_log.py
|
Python
|
gpl-3.0
| 5,979
| 0.004014
|
# -*- coding: utf-8 -*-
# #
# #
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
import argparse
import cPickle
import fcntl
import logging
import logging.handlers
import os
import pprint
import signal
import SocketServer
import struct
import sys
import termios
import textwrap
from threading import Lock
import sqlparse
from pygments import highlight
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexers.agile import PythonLexer, PythonTracebackLexer
from pygments.lexers.sql import SqlLexer
output_lock = Lock()
help_text = textwrap.dedent("""
To use this script, you need to add the following to your logging.conf:
[logger_db]
level=DEBUG
handlers=db
qualname=indico.db
propagate=0
[handler_db]
class=handlers.SocketHandler
level=DEBUG
args=('localhost', 9020)
Also add your new logger/handler to the loggers/handlers lists, e.g. like this:
[loggers]
keys=root,db
[handlers]
keys=indico,db,other,smtp
""").strip()
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
def handle(self):
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
size = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(size)
while len(chunk) < size:
chunk = chunk + self.connection.recv(size - len(chunk))
obj = cPickle.loads(chunk)
self.handle_log(obj)
def handle_log(self, obj):
sql_log_type = obj.get('sql_log_type')
if sql_log_type == 'start':
source = prettify_source(obj['sql_source'], self.server.traceback_frames) if obj['sql_source'] else None
statement = prettify_statement(obj['sql_statement'])
params = prettify_params(obj['sql_params']) if obj['sql_params'] else None
with output_lock:
if source:
print prettify_caption('Source')
print source
print
print prettify_caption('Statement')
print statement
if params:
print
print prettify_caption('Params')
print params
elif sql_log_type == 'end':
with output_lock:
print
print prettify_caption('Duration')
print ' {:.06f}s'.format(obj['sql_duration'])
print_linesep()
class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
allow_reuse_address = True
def __init__(self, host, port, handler=LogRecordStreamHandler, traceback_frames=1):
SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
self.timeout = 1
self.traceback_frames = traceback_frames
def terminal_size():
h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
return w, h
def print_linesep():
print terminal_size()[0] * u'\N{BOX DRAWINGS LIGHT HORIZONTAL}'
def indent(msg, level=4):
indentation = level * ' '
return indentation + msg.replace('\n', '\n' + indentation)
def prettify_caption(caption):
return '\x1b[38;5;75;04m{}\x1b[0m'.format(caption)
def prettify_source(source, traceback_frames):
if not traceback_frames:
return None
msg = 'Traceback (most recent call last):\n'
frame_msg = textwrap.dedent("""
File "{}", line {}, in {}
{}\n""").strip()
msg += indent('\n'.join(frame_msg.format(*frame) for frame in source[:traceback_frames]), 2)
highlighted = highlight(msg, PythonTracebackLexer(), Terminal256Formatter(style='native'))
# Remove first line (just needed for PythonTracebackLexer)
highlighted = '\n'.join(highlighted.splitlines()[1:])
return indent(highlighted, 2).rstrip()
def prettify_statement(statement):
statement = sqlparse.format(statement, keyword_case='upper', reindent=True)
    return indent(highlight(statement, SqlLexer(), Terminal256Formatter(style='native'))).rstrip()
def prettify_params(args):
args = pprint.pformat(args)
return indent(highlight(args, PythonLexer(), Terminal256Formatter(style='native'))).rstrip()
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('-p', dest='port', type=int, default=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                        help='The port to bind the TCP listener to')
parser.add_argument('-t', dest='traceback_frames', type=int, default=1,
help='Number of stack frames to show (max. 3)')
parser.add_argument('--setup-help', action='store_true', help='Explain how to enable logging for script')
return parser.parse_args()
def sigint(*unused):
print '\rTerminating'
os._exit(1)
def main():
args = parse_args()
if args.setup_help:
print help_text
sys.exit(1)
signal.signal(signal.SIGINT, sigint)
print 'Listening on 127.0.0.1:{}'.format(args.port)
server = LogRecordSocketReceiver('localhost', args.port, traceback_frames=args.traceback_frames)
try:
server.serve_forever()
except KeyboardInterrupt:
print
if __name__ == '__main__':
main()
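To exercise the listener above without a running Indico instance, a client only needs the standard library SocketHandler plus the sql_* attributes that handle_log reads from each record; a minimal sketch, with host, port and query as placeholders matching the help_text defaults:

import logging
import logging.handlers

logger = logging.getLogger('indico.db')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SocketHandler('localhost', 9020))
logger.debug('query start', extra={
    'sql_log_type': 'start',
    'sql_source': None,           # or a list of (filename, line, function, code) frames
    'sql_statement': 'SELECT 1',  # placeholder query
    'sql_params': None,
})
logger.debug('query end', extra={'sql_log_type': 'end', 'sql_duration': 0.000123})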
|
nagyistoce/devide.johannes
|
install_packages/ip_vtk58.py
|
Python
|
bsd-3-clause
| 9,776
| 0.004603
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import re
import shutil
import sys
import utils
BASENAME = "VTK"
GIT_REPO = "http://vtk.org/VTK.git"
GIT_TAG = "v5.8.0"
VTK_BASE_VERSION = "vtk-5.8"
# this patch does three things:
# 1. adds try/catch blocks to all python method calls in order
# to trap bad_alloc exceptions
# 2. implements my scheme for turning all VTK errors into Python exceptions
# by making use of a special output window class
# 3. gives up the GIL around all VTK calls. This is also necessary
# for 2 not to deadlock on multi-cores.
EXC_PATCH = "pyvtk580_tryexcept_and_pyexceptions.diff"
# fixes attributes in vtkproperty for shader use in python
VTKPRPRTY_PATCH = "vtkProperty_PyShaderVar.diff"
# recent segfault with vtk 5.6.1 and wxPython 2.8.11.0
# see here for more info:
# http://vtk.1045678.n5.nabble.com/wx-python-scripts-segfault-td1234471.html
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH = "wxvtkrwi_displayid_segfault.diff"
dependencies = ['CMake']
class VTK58(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
self.exc_patch_src = os.path.join(config.patches_dir, EXC_PATCH)
self.exc_patch_dst = os.path.join(config.archive_dir, EXC_PATCH)
self.vtkprprty_patch_filename = os.path.join(config.patches_dir,
VTKPRPRTY_PATCH)
self.wxvtkrwi_displayid_segfault_patch_filename = os.path.join(
config.patches_dir,
WXVTKRWI_DISPLAYID_SEGFAULT_PATCH)
config.VTK_LIB = os.path.join(self.inst_dir, 'lib')
# whatever the case may be, we have to register VTK variables
if os.name == 'nt':
# on Win, inst/VTK/bin contains the so files
config.VTK_SODIR = os.path.join(self.inst_dir, 'bin')
# inst/VTK/lib/site-packages the VTK python package
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'site-packages')
else:
# on *ix, inst/VTK/lib contains DLLs
config.VTK_SODIR = os.path.join(
config.VTK_LIB, VTK_BASE_VERSION)
# on *ix, inst/lib/python2.5/site-packages contains the
# VTK python package
            # sys.version_info is (2, 5, 0, 'final', 0)
config.VTK_PYTHON = os.path.join(
config.VTK_LIB, 'python%d.%d/site-packages' % \
sys.version_info[0:2])
# this contains the VTK cmake config (same on *ix and Win)
config.VTK_DIR = os.path.join(config.VTK_LIB, VTK_BASE_VERSION)
def get(self):
if os.path.exists(self.source_dir):
utils.output("VTK already checked out, skipping step.")
else:
utils.goto_archive()
ret = os.system("git clone %s %s" % (GIT_REPO, BASENAME))
if ret != 0:
utils.error("Could not clone VTK repo. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("git checkout %s" % (GIT_TAG,))
if ret != 0:
utils.error("Could not checkout VTK %s. Fix and try again." % (GIT_TAG,))
if not os.path.exists(self.exc_patch_dst):
utils.output("Applying EXC patch")
# we do this copy so we can see if the patch has been done yet or not
shutil.copyfile(self.exc_patch_src, self.exc_patch_dst)
os.chdir(self.source_dir)
# default git-generated patch, so needs -p1
ret = os.system(
"%s -p1 < %s" % (config.PATCH, self.exc_patch_dst))
if ret != 0:
utils.error(
"Could not apply EXC patch. Fix and try again.")
# # VTKPRPRTY PATCH
# utils.output("Applying VTKPRPRTY patch")
# os.chdir(os.path.join(self.source_dir, 'Rendering'))
# ret = os.system(
# "%s -p0 < %s" % (config.PATCH, self.vtkprprty_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply VTKPRPRTY patch. Fix and try again.")
# # WXVTKRWI_DISPLAYID_SEGFAULT patch
# utils.output("Applying VTKWXRWI_DISPLAYID_SEGFAULT patch")
# os.chdir(self.source_dir)
# # default git-generated patch, so needs -p1
# ret = os.system(
# "%s -p1 < %s" % (config.PATCH,
# self.wxvtkrwi_displayid_segfault_patch_filename))
# if ret != 0:
# utils.error(
# "Could not apply WXVTKRWI_DISPLAYID_SEGFAULT patch. Fix and try again.")
def unpack(self):
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("VTK build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = "-DBUILD_SHARED_LIBS=ON " \
"-DBUILD_TESTING=OFF " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DVTK_USE_TK=NO " \
"-DVTK_USE_METAIO=ON " \
"-DVTK_USE_PARALLEL=ON " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " \
"-DVTK_W
|
RAP_PYTHON=ON " % (self.inst_dir,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
        ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error("Could not configure VTK. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir,
'bin/libvtkWidgetsPython.so')
nt_file = os.path.join(self.build_dir, 'bin', config.BUILD_TARGET,
'vtkWidgetsPythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('VTK.sln')
if ret != 0:
utils.error("Error building VTK. Fix and try again.")
def install(self):
posix_file = os.path.join(self.inst_dir, 'bin/vtkpython')
nt_file = os.path.join(self.inst_dir, 'bin', 'vtkpython.exe')
if utils.file_exists(posix_file, nt_file):
utils.output("VTK already installed. Skipping build step.")
else:
# python 2.5.2 setup.py complains that this does not exist
# with VTK PV-3-2-1. This is only on installations with
# EasyInstall / Python Eggs, then the VTK setup.py uses
# EasyInstall and not standard distutils. gah!
# just tested with VTK 5.8.0 and Python 2.7.2
# it indeed installs VTK_PYTHON/VTK-5.8.0-py2.7.egg
# but due to the site.py and easy-install.pth magic in there,
# adding VTK_PYTHON to the PYTHONPATH still works. We can keep
# pip, yay!
if not os.path.exists(config.VTK_PYTHON):
os.makedirs(config.VTK_PYTHON)
os.chdir(self.build_dir)
# we save, set and restore the PP env variable, else
# stupid setuptools complains
save_env = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = config.VTK_PYTHON
ret = util
|
rahulunair/nova
|
nova/tests/unit/virt/powervm/test_mgmt.py
|
Python
|
apache-2.0
| 7,781
| 0
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import retrying
from nova import exception
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp
from nova.virt.powervm import mgmt
LPAR_HTTPRESP_FILE = "lpar.txt"
class TestMgmt(test.TestCase):
def setUp(self):
super(TestMgmt, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
self.assertIsNotNone(
lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)
self.resp = lpar_http.response
@mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
def test_mgmt_uuid(self, mock_get_partition):
mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
adpt = mock.Mock()
# First run should call the partition only once
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
mock_get_partition.assert_called_once_with(adpt)
# But a subsequent call should effectively no-op
mock_get_partition.reset_mock()
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
self.assertEqual(mock_get_partition.call_count, 0)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
mock_glob):
scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
        # Realistically, first glob would return e.g. .../host0/.../host0/...
# but it doesn't matter for test purposes.
mock_glob.side_effect = [[scanpath], [devlink]]
mgmt.discover_vscsi_disk(mapping)
        mock_glob.assert_has_calls(
[mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
mock_realpath.assert_called_with(devlink)
@mock.patch('retrying.retry', autospec=True)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
mock_glob, mock_retry):
"""Zero or more than one disk is found by discover_vscsi_disk."""
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(300000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_passthrough(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return _poll_for_dev
return wrapped
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return raiser
return wrapped
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# No disks found
mock_retry.side_effect = retry_timeout
mock_glob.side_effect = lambda path: []
self.assertRaises(exception.NoDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
# Multiple disks found
mock_retry.side_effect = retry_passthrough
mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
self.assertRaises(exception.UniqueDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
@mock.patch('time.sleep', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
@mock.patch('os.stat', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
mock_sleep):
link = '/dev/link/foo'
realpath = '/dev/sde'
delpath = '/sys/block/sde/device/delete'
mock_realpath.return_value = realpath
# Good path
mock_stat.side_effect = (None, None, OSError())
mgmt.remove_block_dev(link)
mock_realpath.assert_called_with(link)
mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
mock.call(realpath)])
mock_writefile.assert_called_once_with(delpath, 'a', '1')
self.assertEqual(0, mock_sleep.call_count)
# Device param not found
mock_writefile.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (OSError(), None, None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called once; exec was not called
self.assertEqual(1, mock_stat.call_count)
self.assertEqual(0, mock_writefile.call_count)
# Delete special file not found
mock_writefile.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (None, OSError(), None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called twice; exec was not called
self.assertEqual(2, mock_stat.call_count)
self.assertEqual(0, mock_writefile.call_count)
@mock.patch('retrying.retry')
@mock.patch('os.path.realpath')
@mock.patch('os.stat')
@mock.patch('nova.privsep.path.writefile')
def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
mock_realpath, mock_retry):
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(10000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_del):
return raiser
return wrapped
# Deletion was attempted, but device is still there
link = '/dev/link/foo'
delpath = '/sys/block/sde/device/delete'
realpath = '/dev/sde'
mock_realpath.return_value = realpath
mock_stat.side_effect = lambda path: 1
mock_retry.side_effect = retry_timeout
self.assertRaises(
exception.DeviceDeletionException, mgmt.remove_block_dev, link)
mock_realpath.assert_called_once_with(link)
mock_dacw.assert_called_with(delpath, 'a', '1')
|
tomeshnet/node-list
|
ci/scripts/kml/main.py
|
Python
|
gpl-3.0
| 3,457
| 0.001736
|
# This file is formatted with black.
# https://github.com/psf/black
import os
import json
import subprocess
import sys
import simplekml
ALT_MODE = simplekml.AltitudeMode.absolute  # Absolute altitude means relative to sea level
# Current commit
if os.environ.get("TRAVIS"):
COMMIT = os.environ["TRAVIS_COMMIT"]
else:
# For local dev
proc = subprocess.run(
["git", "rev-parse", "HEAD"], capture_output=True, cwd="../../../", text=True
)
if proc.returncode != 0:
print("Git command failed")
sys.exit(1)
COMMIT = proc.stdout.strip()
def get_desc(node):
"""Generate HTML description for a node."""
# Required keys
desc = f"<h1>{node['name']}</h1>"
desc += f"<h2>{node['status']}</h2>"
desc += f"Type: {node['type']}<br>"
desc += f"Altitude: {node['altitude']}<br>"
desc += f"Date Added: {node['dateAdded']}<br>"
desc += f"Group: {node['group']}<br>"
# Optional keys
desc += f"Model: {node.get('model')}<br>"
desc += f"IPv4: {node.get('ipv4')}<br>"
desc += f"IPv6: {node.get('ipv6')}<br>"
desc += f"Mode: {node.get('mode')}<br>"
if node["type"] != "router":
desc += f"Connected Router: {node.get('router')}<br>"
# Antenna specific keys
if node["type"] == "antenna":
desc += f"SSID: {node.get('ssid')}<br>"
desc += "<br>"
desc += f"Antenna Type: {node.get('antennaType')}<br>"
desc += f"Antenna Cone: {node.get('antennaCone')}<br>"
desc += f"Antenna Direction: {node.get('antennaDirection')}<br>"
desc += f"Antenna Distance: {node.get('antennaDistance')}<br>"
desc += f"Antenna Protocol: {node.get('antennaProtocol')}<br>"
desc += "<br>"
# Images
if node.get("images") is not None:
for image in node["images"]:
url = (
"https://raw.githubusercontent.com/tomeshnet/node-list/"
+ COMMIT
+ "/images/"
+ image
)
desc += f'<a href={url}><img alt={image} src={url} width="300"></a><br>'
return "<![CDATA[" + desc + "]]>"
with open("../../../tomeshnet-node-list.json", "r") as f:
nodes = json.load(f)["nodeList"]
kml = simplekml.Kml(name="Toronto Community Network")
active = kml.newfolder(name="Active Nodes", open=0, visibility=1)
proposed = kml.newfolder(name="Proposed Nodes", open=0, visibility=1)
inactive = kml.newfolder(name="Inactive Nodes", open=0, visibility=0)
for node in nodes:
if node["status"] == "active":
folder = active
vis = 1 # Active nodes always visible
# Yellow
icon_url = "http://maps.google.com/mapfiles/kml/pushpin/ylw-pushpin.png"
elif node["status"] == "proposed":
folder = proposed
vis = 1
# Light Blue
icon_url = "http://maps.google.com/mapfiles/kml/pushpin/ltblu-pushpin.png"
else:
# All other nodes are considered inactive
folder = inactive
vis = 0
# Red
icon_url = "http://maps.google.com/mapfiles/kml/pushpin/re
|
d-pushpin.png"
pnt = folder.newpoint(
name=node["name"],
altitudemode=ALT_MODE,
coords=[(node["longitude"], node["latitude"], node["altitude"])],
visibility=vis,
description=get_desc(node),
snippet=simplekml.Snippet(), # Empty snippet
)
pnt.style.iconstyle.icon.href = icon_url
kml.save("../../build/tomeshnet-node-list-kml.kml")
|
QEDK/AnkitBot
|
UAA/UAA.py
|
Python
|
epl-1.0
| 680
| 0.010294
|
#! /usr/bin/python
import sys, localconfig, platform, time
#OS Runtime comments
if platform.system() == "Windows":
sys.path.append(localconfig.winpath)
    print "You are running the AnkitBot UAA Module for Windows. Sponsored by DQ. :)"
else:
sys.path.append(localconfig.linuxpath)
print "You are running the AnkitBot UAA Module for Linux. Sponsored by DQ. :)"
import wikipedia
import globalfunc as globe
override = False
if not globe.startAllowed(override):
print "Fatal - System Access Denied."
sys.exit(1)
print "System Alert - Program is still running."
globe.main()
globe.checkWait()
globe.pageCleanup()
wikipedia.stopme()
|
oscaro/django
|
tests/template_tests/test_nodelist.py
|
Python
|
bsd-3-clause
| 2,315
| 0.00216
|
from unittest import TestCase
from django.template import Context, Template, VariableNode
from django.test import override_settings
class NodelistTest(TestCase):
def test_for(self):
template = Template('{% for i in 1 %}{{ a }}{% endfor %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
template = Template('{% if x %}{{ a }}{% endif %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
template = Template('{% ifequal x y %}{{ a }}{% endifequal %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
template = Template('{% ifchanged x %}{{ a }}{% endifchanged %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
class ErrorIndexTest(TestCase):
"""
Checks whether index of error is calculated correctly in
template debugger in for loops. Refs ticket #5831
"""
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
def test_correct_exception_index(self):
tests = [
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
]
context = Context({
'range': range(5),
'five': 5,
})
for source, expected_error_source_index in tests:
template = Template(source)
try:
template.render(context)
except (RuntimeError, TypeError) as e:
error_source_index = e.django_template_source[1]
self.assertEqual(error_source_index,
expected_error_source_index)
|
Alex-Ian-Hamilton/sunpy
|
sunpy/map/sources/tests/test_cor_source.py
|
Python
|
bsd-2-clause
| 977
| 0.004094
|
"""Test cases for STEREO Map subclasses.
This particular test file pertains to CORMap.
@Author: Pritish C. (VaticanCameos)
"""
import os
import glob
from sunpy.map.sources.stereo import CORMap
from sunpy.map import Map
import sunpy.data.test
path = sunpy.data.test.rootdir
fitspath = glob.glob(os.path.join(path, "cor1_20090615_000500_s4c1A.fts"))
cor = Map(fitspath)
# COR Tests
def test_fitstoEIT():
"""Tests the creation of CORMap using FITS."""
assert isinstance(cor, CORMap)
def test_is_datasource_for():
"""Test the is_datasource_for method of CORMap.
Note that header data to be provided as an argument
|
can be a MapMeta object."""
assert cor.is_datasource_for(cor.data, cor.meta)
def test_measurement():
"""Tests the measurement property of the CORMap object."""
assert cor.measurement == "white-l
|
ight"
def test_observatory():
"""Tests the observatory property of the CORMap object."""
assert cor.observatory == "STEREO A"
|
Sodki/ansible
|
lib/ansible/plugins/filter/core.py
|
Python
|
gpl-3.0
| 17,081
| 0.003396
|
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import crypt
import glob
import hashlib
import itertools
import json
import ntpath
import os.path
import re
import string
import sys
import uuid
from collections import MutableMapping, MutableSequence
from datetime import datetime
from functools import partial
from random import Random, SystemRandom, shuffle
import yaml
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
try:
import passlib.hash
HAS_PASSLIB = True
except:
HAS_PASSLIB = False
from ansible import errors
from ansible.module_utils.six import iteritems, string_types, integer_types
from ansible.module_utils.six.moves import reduce, shlex_quote
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.hashing import md5s, checksum_s
from ansible.utils.unicode import unicode_wrap
from ansible.utils.vars import merge_hash
from ansible.vars.hostvars import HostVars
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
class AnsibleJSONEncoder(json.JSONEncoder):
'''
Simple encoder class to deal with JSON encoding of internal
types like HostVars
'''
def default(self, o):
if isinstance(o, HostVars):
return dict(o)
else:
return super(AnsibleJSONEncoder, self).default(o)
def to_yaml(a, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
return to_text(transformed)
def to_nice_yaml(a, indent=4, *args, **kw):
'''Make verbose, human readable yaml'''
transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
return to_text(transformed)
def to_json(a, *args, **kw):
''' Convert the value to JSON '''
return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
def to_nice_json(a, indent=4, *args, **kw):
'''Make verbose, human readable JSON'''
# python-2.6's json encoder is buggy (can't encode hostvars)
if sys.version_info < (2, 7):
try:
import simplejson
except ImportError:
pass
else:
try:
major = int(simplejson.__version__.split('.')[0])
except:
pass
else:
if major >= 2:
return simplejson.dumps(a, indent=indent, sort_keys=True, *args, **kw)
try:
return json.dumps(a, indent=indent, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
def to_bool(a):
''' return a bool for the arg '''
if a is None or isinstance(a, bool):
return a
if isinstance(a, string_types):
a = a.lower()
if a in ('yes', 'on', '1', 'true', 1):
return True
return False
def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
return datetime.strptime(string, format)
def quote(a):
''' return its argument quoted for shell usage '''
return shlex_quote(a)
def fileglob(pathname):
''' return list of matched regular files for glob '''
return [ g for g in glob.glob(pathname) if os.path.isfile(g) ]
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
''' Perform a `re.sub` returning a string '''
value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
return _re.sub(replacement, value)
def regex_findall(value, regex, multiline=False, ignorecase=False):
''' Perform re.findall and return the list of matches '''
flags = 0
if ignorecase:
flags |= re.I
if multiline:
flags |= re.M
return re.findall(regex, value, flags)
def regex_search(value, regex, *args, **kwargs):
''' Perform re.search and return the list of matches or a backref '''
groups = list()
for arg in args:
if arg.startswith('\\g'):
match = re.match(r'\\g<(\S+)>', arg).group(1)
groups.append(match)
elif arg.startswith('\\'):
match = int(re.match(r'\\(\d+)', arg).group(1))
groups.append(match)
else:
raise errors.AnsibleFilterError('Unknown argument')
flags = 0
if kwargs.get('ignorecase'):
flags |= re.I
if kwargs.get('multiline'):
flags |= re.M
match = re.search(regex, value, flags)
if match:
if not groups:
return match.group()
else:
items = list()
for item in groups:
items.append(match.group(item))
return items
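# Usage sketch (illustrative, not from the upstream docs): the extra
# arguments are backreferences into the match, e.g.
#   {{ 'server1.example.com' | regex_search('(?P<host>\\w+)\\.example\\.com', '\\g<host>') }}
# returns ['server1'], while a call without extra arguments such as
#   regex_search('foo123bar', '\\d+')
# returns the whole match '123'.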
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def regex_escape(string):
'''Escape all regular expressions special characters from STRING.'''
return re.escape(string)
def from_yaml(data):
if isinstance(data, string_types):
return yaml.safe_load(data)
return data
@environmentfilter
def rand(environment, end, start=None, step=None, seed=None):
if seed is None:
r = SystemRandom()
else:
r = Random(seed)
if isinstance(end, integer_types):
if not start:
start = 0
if not step:
step = 1
return r.randrange(start, end, step)
elif hasattr(end, '__iter__'):
if start or step:
raise errors.AnsibleFilterError('start and step can only be used with integer values')
return r.choice(end)
else:
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
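# Usage sketch (assuming this is exposed as the `random` filter, as in
# upstream Ansible): an integer end behaves like randrange, a sequence
# like choice, and `seed` makes the pick reproducible, e.g.
#   {{ 60 | random }}                                        -> int in [0, 60)
#   {{ ['a', 'b', 'c'] | random(seed=inventory_hostname) }}  -> stable per host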
def randomize_list(mylist, seed=None):
try:
mylist = list(mylist)
if seed:
r = Random(seed)
r.shuffle(mylist)
else:
shuffle(mylist)
except:
pass
return mylist
def get_hash(data, hashtype='sha1'):
try: # see if hash is supported
h = hashlib.new(hashtype)
except:
return None
h.update(to_bytes(data, errors='surrogate_then_strict'))
return h.hexdigest()
def get_encrypted_password(password, hashtype='sha512', salt=None):
# TODO: find a way to construct dynamically from system
cryptmethod= {
'md5': '1',
'blowfish': '2a',
'sha256': '5',
'sha512': '6',
}
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
if hashtype in ['md5']:
saltsize = 8
else:
saltsize = 16
saltcharset = string.ascii_letters + string.digits + '/.'
salt = ''.join([r.choice(saltcharset) for _ in range(saltsize)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
encrypted = crypt.crypt(password, saltstring)
else:
if hashtype == 'blowfish':
cls = passlib.hash.bcrypt
else:
cls = getattr(passl
|
thismachinechills/awful.py
|
awful.py
|
Python
|
gpl-3.0
| 1,950
| 0.004103
|
from sa_tools.base.magic import MagicMixin
from sa_tools.inbox import Inbox
from sa_tools.session import SASession
from sa_tools.index import Index
import os
import pickle
import sys
def py_ver() -> str:
return str(sys.version_info.major)
class APSession(object):
def __init__(self, username: str, passwd: str=None, save_session: bool=False, *args, **kwargs):
self.username = username
self.passwd = passwd
self._session_bak = \
'.' + username.replace(' ', '_') + py_ver() + '.bak'
self.session = self._get_session(save_session=save_session)
del passwd
del self.passwd
def _get_session(self, save_session: bool=True) -> SASession:
backup_exists = os.path.exists(self._session_bak)
# session = None
if backup_exists:
session = self._load_session()
else:
session = SASession(self.username, self.passwd)
if save_session:
self._save_session(session)
return session
def _load_session(self) -> None:
with open(self._session_bak, 'rb') as old_session:
print("Loading from backup: " + self._session_bak)
session = pickle.load(old_session)
return session
def _save_session(self, session: SASession) -> None:
with open(self._session_bak, 'wb') as session_file:
pickle.dump(session, session_file)
class AwfulPy(APSession, MagicMixin):
def __init__(self, username, *args, **kwargs):
super().__init__(username, *args, **kwargs)
self.index = Index(self.session)
self.inbox = Inbox(self.session)
self.name = "awful.py"
self.version = "v0.2014.08.24"
def __repr__(self):
info = '[' + self.name + ' ' + self.version + '] '
acct = 'Logged in as ' + self.username
login_time = ' on ' + self.session.login_time
return info + acct + login_time
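# Illustrative usage only (credentials are placeholders; nothing below is
# executed by this module):
#   client = AwfulPy('some_user', passwd='some_pass', save_session=True)
#   print(client)   # name, version and login time via __repr__
#   client.index    # forum index built on the freshly created or unpickled session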
|
orlandi/connectomicsPerspectivesPaper
|
participants_codes/aaagv/directivity.py
|
Python
|
mit
| 2,853
| 0
|
# Authors: Aaron Qiu <zqiu@ulg.ac.be>,
# Antonio Sutera <a.sutera@ulg.ac.be>,
# Arnaud Joly <a.joly@ulg.ac.be>,
# Gilles Louppe <g.louppe@ulg.ac.be>,
# Vincent Francois <v.francois@ulg.ac.be>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
from itertools import chain
import numpy as np
from sklearn.externals.joblib import Parallel, delayed, cpu_count
from utils import scale
def _partition_X(X, n_jobs):
"""Private function used to partition X between jobs."""
n_nodes = X.shape[1]
# Compute the number of jobs
n_jobs = min(cpu_count() if n_jobs == -1 else n_jobs, n_nodes)
# Partition estimators between jobs
n_node_per_job = (n_nodes // n_jobs) * np.ones(n_jobs, dtype=np.int)
n_node_per_job[:n_nodes % n_jobs] += 1
starts = np.cumsum(n_node_per_job)
return n_jobs, [0] + starts.tolist()
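# Worked example (illustrative): with X.shape[1] == 10 nodes and n_jobs == 4,
# n_node_per_job becomes [3, 3, 2, 2], so this returns (4, [0, 3, 6, 8, 10])
# and job i handles nodes starts[i]:starts[i + 1].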
def _parallel_count(X, start, end):
"""Private function used to compute a batch of score within a job."""
count = np.zeros((end - start, X.shape[1]))
for index, jx in enumerate(range(start, end)):
X_jx_bot = X[:-1, jx] + 0.2
X_jx_top = X[:-1, jx] + 0.5
for j in range(X.shape[1]):
if j == jx:
continue
count[index, j] = ((X[1:, j] > X_jx_bot) &
(X[1:, j] < X_jx_top)).sum()
return count
def make_prediction_directivity(X, threshold=0.12, n_jobs=1):
"""Score neuron connectivity using a precedence measure
Parameters
----------
X : numpy array of shape (n_samples, n_nodes)
Fluorescence signals
threshold : float, (default=0.12)
Threshold value for hard thresholding filter:
x_new[i] = x[i] if x[i] >= threshold else 0.
n_jobs : integer, optional (default=1)
The number of jobs to run the algorithm in parallel.
If -1, then the number of jobs is set to the number of cores.
Returns
-------
score : numpy array of shape (n_nodes, n_nodes)
Pairwise neuron connectivity score.
"""
# Perform filtering
X_new = np.zeros((X.shape))
for i in range(1, X.shape[0] - 1):
for j in range(X.shape[1]):
X_new[i, j] = (X[i, j] + 1 * X[i - 1, j] + 0.8 * X[i - 2, j] + 0.4 * X[i - 3, j])
X_new = np.diff(X_new, axis=0)
thresh1 = X_new < threshold * 1
thresh2 = X_new >= threshold * 1
X_new[thresh1] = 0
X_new[thresh2] = pow(X_new[thresh2], 0.9)
# Score directivity
n_jobs, starts = _partition_X(X, n_jobs)
all_counts = Parallel(n_jobs=n_jobs)(
delayed(_parallel_count)(X_new, starts[i], starts[i + 1])
for i in range(n_jobs))
count = np.vstack(list(chain.from_iterable(all_counts)))
return scale(count - np.transpose(count))
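# Usage sketch (illustrative only; the fluorescence matrix here is random
# noise, purely to show the call signature and output shape):
#   import numpy as np
#   X = np.random.rand(1000, 50)                        # 1000 samples, 50 neurons
#   score = make_prediction_directivity(X, threshold=0.12, n_jobs=2)
#   assert score.shape == (50, 50)                      # one score per ordered neuron pair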
|
lewischeng-ms/pox
|
pox/forwarding/l2_ofcommand_learning.py
|
Python
|
gpl-3.0
| 5,023
| 0.014334
|
# Copyright 2011 Kyriakos Zarifis
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is an L2 learning switch derived originally from NOX's pyswitch
example. It is now a demonstration of the ofcommand library for constructing
OpenFlow messages.
"""
from time import time
# TODO: mac_to_str and mac_to_int aren't currently defined in packet_utils...
#from pox.lib.packet.packet_utils import mac_to_str, mac_to_int
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.tcp import tcp
from pox.lib.packet.udp import udp
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.icmp import icmp
from pox.lib.packet.ethernet import ethernet
from pox.core import core
from pox.lib.revent import *
from pox.lib.addresses import EthAddr
log = core.getLogger()
import pox.openflow.ofcommand as ofcommand
class dumb_l2_switch (EventMixin):
def __init__ (self):
log.info("Starting")
self.listenTo(core)
self.st = {}
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
def _handle_PacketIn (self, event):
"""Packet entry method.
Drop LLDP packets (or we get confused) and attempt learning and forwarding
"""
con = event.connection
dpid = event.connection.dpid
inport = event.port
packet = event.parse()
buffer_id = event.ofp.buffer_id
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if not con in self.st:
log.info('registering new switch ' + str(dpid))
self.st[con] = {}
# don't forward lldp packets
if packet.type == ethernet.LLDP_TYPE:
return
# learn MAC on incoming port
self.do_l2_learning(con, inport, packet)
# forward packet
self.forward_l2_packet(con, inport, packet, packet.arr, buffer_id)
def do_l2_learning(self, con, inport, packet):
"""Given a packet, learn the source and peg to a switch/inport
"""
# learn MAC on incoming port
srcaddr = EthAddr(packet.src)
#if ord(srcaddr[0]) & 1:
# return
if self.st[con].has_key(srcaddr.toStr()): # change to raw?
# we had already heard from this switch
dst = self.st[con][srcaddr.toStr()] # raw?
if dst[0] != inport:
# but from a different port
log.info('MAC has moved from '+str(dst)+' to '+str(inport))
else:
return
else:
log.info('learned MAC '+srcaddr.toStr()+' on Switch %s, Port %d'% (con.dpid,inport))
# learn or update timestamp of entry
self.st[con][srcaddr.toStr()] = (inport, time(), packet) # raw?
# Replace any old entry for (switch,mac).
#mac = mac_to_int(packet.src)
def forward_l2_packet(self, con, inport, packet, buf, bufid):
"""If we've learned the destination MAC set up a flow and
send only out of its inport. Else, flood.
"""
dstaddr = EthAddr(packet.dst)
#if not ord(dstaddr[0]) & 1 and # what did this do?
if self.st[con].has_key(dstaddr.toStr()): # raw?
prt = self.st[con][dstaddr.toStr()] # raw?
if prt[0] == inport:
log.warning('**warning** learned port = inport')
ofcommand.floodPacket(con, inport, packet, buf, bufid)
else:
# We know the outport, set up a flow
log.info('installing flow for ' + str(packet))
match = ofcommand.extractMatch(packet)
actions = [ofcommand.Output(prt[0])]
ofcommand.addFlowEntry(con, inport, match, actions, bufid)
# Separate bufid, make addFlowEntry() only ADD the entry
# send/wait for Barrier
# sendBufferedPacket(bufid)
else:
# haven't learned destination MAC. Flood
ofcommand.floodPacket(con, inport, packet, buf, bufid)
'''
add arp cache timeout?
# Timeout for cached MAC entries
CACHE_TIMEOUT = 5
def timer_callback():
"""Responsible for timing out cache entries. Called every 1 second.
"""
global st
curtime = time()
for con in st.keys():
for entry in st[con].keys():
if (curtime - st[con][entry][1]) > CACHE_TIMEOUT:
con.msg('timing out entry '+mac_to_str(entry)+" -> "+str(st[con][entry][0])+' on switch ' + str(con))
st[con].pop(entry)
'''
|
plotly/python-api
|
packages/python/plotly/plotly/validators/indicator/_delta.py
|
Python
|
mit
| 1,527
| 0
|
import _plotly_utils.basevalidators
class DeltaValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="delta", parent_name="indicator", **kwargs):
super(DeltaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Delta"),
data_docs=kwargs.pop(
"data_docs",
"""
decreasing
:class:`plotly.graph_objects.indicator.delta.De
creasing` instance or dict with compatible
properties
font
Set the font used to display the delta
increasing
:class:`plotly.graph_objects.indicator.delta.In
creasing` instance or dict with compatible
properties
position
Sets the position of delta with respect to the
number.
reference
Sets the reference value to compute the delta.
By default, it is set to the current value.
relative
Show relative change
valueformat
Sets the value formatting rule using d3
formatting mini-language which is similar to
those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
""",
),
**kwargs
)
|
muccg/rdrf
|
rdrf/rdrf/migrations/0082_auto_20181106_1100.py
|
Python
|
agpl-3.0
| 461
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-11-06 11:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rdrf', '0081_clinicaldata_active'),
]
operations = [
migrations.AlterField(
model_name='surveyrequest',
name='survey_name',
field=models.CharField(max_length=80),
),
]
|
CenterForOpenScience/modular-file-renderer
|
mfr/extensions/codepygments/render.py
|
Python
|
apache-2.0
| 3,749
| 0.0008
|
import os
import chardet
from humanfriendly import format_size
import pygments
import pygments.lexers
import pygments.lexers.special
import pygments.formatters
from pygments.util import ClassNotFound
from mako.lookup import TemplateLookup
from mfr.core import extension
from mfr.extensions.codepygments import settings
from mfr.extensions.codepygments import exceptions
class CodePygmentsRenderer(extension.BaseRenderer):
DEFAULT_LEXER = pygments.lexers.special.TextLexer
TEMPLATE = TemplateLookup(
directories=[
os.path.join(os.path.dirname(__file__), 'templates')
]).get_template('viewer.mako')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metrics.add('pygments_version', pygments.__version__)
def render(self):
file_size = os.path.getsize(self.file_path)
if file_size > settings.MAX_SIZE:
raise exceptions.FileTooLargeError(
'Text files larger than {} are not rendered. Please download '
'the file to view.'.format(format_size(settings.MAX_SIZE, binary=True)),
file_size=file_size,
max_size=settings.MAX_SIZE,
extension=self.metadata.ext,
)
with open(self.file_path, 'rb') as fp:
body = self._render_html(fp, self.metadata.ext)
return self.TEMPLATE.render(base=self.assets_url, body=body)
@property
def file_required(self):
return True
@property
def cache_result(self):
return True
def _render_html(self, fp, ext, *args, **kwargs):
"""Generate an html representation of the file
:param fp: File pointer
:param ext: File name extension
:return: Content html
"""
formatter = pygments.formatters.HtmlFormatter()
data = fp.read()
content, encoding = None, 'utf-8'
try:
content = data.decode(encoding)
except UnicodeDecodeError:
detected_encoding = chardet.detect(data)
encoding = detected_encoding.get('encoding', None)
if encoding is None:
raise exceptions.FileDecodingError(
message='Unable to detect encoding of source file.',
extension=ext,
category='undetectable_encoding',
code=400,
)
try:
content = data.decode(encoding)
except UnicodeDecodeError as err:
raise exceptions.FileDecodingError(
message='Unable to decode file as {}.'.format(encoding),
extension=ext,
category='undecodable',
original_exception=err,
code=400,
)
if content is None:
raise exceptions.FileDecodingError(
message='File decoded to undefined using encoding "{}"'.format(encoding),
extension=ext,
category='decoded_to_undefined',
code=500,
)
self.metrics.merge({'encoding': encoding, 'default_lexer': False})
try:
# check if there is a lexer available for more obscure file types
if ext in settings.lexer_lib.keys():
lexer = pygments.lexers.get_lexer_by_name(settings.lexer_lib[ext])
else:
lexer = pygments.lexers.guess_lexer_for_filename(ext, content)
except ClassNotFound:
self.metrics.add('default_lexer', True)
lexer = self.DEFAULT_LEXER()
self.metrics.add('lexer', lexer.name)
return pygments.highlight(content, lexer, formatter)
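# Standalone sketch (not part of the upstream renderer): the same
# decode-then-highlight flow as _render_html above, reduced to a plain
# helper. The default filename is an illustrative assumption.
def _highlight_bytes_sketch(data, filename='example.py'):
    """Decode raw bytes with a chardet fallback and return highlighted HTML."""
    encoding = chardet.detect(data).get('encoding') or 'utf-8'
    text = data.decode(encoding)
    try:
        # pick a lexer from the filename pattern, falling back to plain text
        lexer = pygments.lexers.guess_lexer_for_filename(filename, text)
    except ClassNotFound:
        lexer = pygments.lexers.special.TextLexer()
    return pygments.highlight(text, lexer, pygments.formatters.HtmlFormatter())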
|
ttitto/python
|
Basics/SampleExam/MostCommonCharacter/paint_bottles.py
|
Python
|
mit
| 196
| 0.015306
|
import sys
from math import ceil
def main():
AREA = 1.76
w = float(input())
h = float(input())
print(ceil(w * h / AREA))
if __name__ == "__main__":
sys.exit(int(main() or 0))
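# Worked example (illustrative): a wall 4 m wide and 2.5 m high needs
# ceil(4 * 2.5 / 1.76) = ceil(5.68...) = 6 bottles, assuming AREA is the
# coverage of one bottle in square metres.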
|
potassco/clingo
|
examples/clingo/expansion/main.py
|
Python
|
mit
| 3,702
| 0.006483
|
#!/usr/bin/env python
import json
import sys
import argparse
from clingo import Control, Number
class App:
def __init__(self, args):
self.control = Control()
self.args = args
self.horizon = 0
self.objects = 0
self.end = None
def show(self, model):
if not self.args.quiet:
print("Model: {}".format(model))
def ground(self, kind):
count = self.objects + self.horizon + 1
parts = [("expand", [Number(count)])]
if self.args.scratch and count > 1:
self.control = Control()
for source in self.args.file: self.control.load(source)
for i in range(0, self.objects): parts.append(("object", [Number(i + 1), Number(count)]))
for i in range(0, self.horizon): parts.append(("horizon", [Number(i + 1), Number(count)]))
if self.args.scratch or count == 1:
for option in self.args.option:
setattr(self.control.configuration, option[0], option[1])
parts.append(("base", []))
if kind:
self.objects += 1
parts.append(("object", [Number(self.objects), Number(count)]))
else:
self.horizon += 1
parts.append(("horizon", [Number(self.horizon), Number(count)]))
if self.args.verbose:
print("")
print("Objects: {}".format(Number(self.objects)))
print("Horizon: {}".format(Number(self.horizon)))
self.control.ground(parts)
if self.args.verbose:
print("Solving: {}".format(count))
def run(self):
for source in self.args.file:
self.control.load(source)
if self.args.maxobj is None:
self.end = self.control.get_const("n").number
else:
self.end = self.args.maxobj
while self.objects < self.end:
self.ground(True)
while True:
ret = self.control.solve(on_model=self.show)
if self.args.stats:
args = {"s
|
ort_keys": True, "indent": 0, "separators": (',', ': ')}
stats = {}
for x in ["step", "enumerated", "time_cpu", "time_solve", "time_sat", "time_unsat", "time_total"]:
stats[x] = self.control.statistics[x]
for x in ["lp", "ctx", "solvers"]:
for y in self.control.statistics[x]:
stats[y] = self.control.statistics[x][y]
print(json.dumps(stats, **args))
if ret.satisfiable:
break
self.ground(False)
parser = argparse.ArgumentParser(description="Gradually expand logic programs.", epilog="""Example: main.py -x -q -s -v -m 42 -o solve.models 0 encoding.lp instance.lp""")
parser.add_argument("-x", "--scratch", action='store_true', help="start each step from scratch (single-shot solving)")
parser.add_argument("-q", "--quiet", action='store_true', help="do not print models")
parser.add_argument("-s", "--stats", action='store_true', help="print solver statistics")
parser.add_argument("-v", "--verbose", action='store_true', help="print progress information")
parser.add_argument("-m", "--maxobj", type=int, metavar="NUM", default=None, help="maximum number of introduced objects")
parser.add_argument("-o", "--option", nargs=2, metavar=("OPT", "VAL"), action="append", default=[], help="set sover options")
parser.add_argument("file", nargs="*", default=[], help="gringo source files")
args = parser.parse_args()
if args.maxobj is not None and args.maxobj < 1:
parser.error("maximum number of objects must be positive")
App(args).run()
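# Minimal illustration (not part of this example) of the multi-shot idiom
# App.ground() builds on: parameterized program parts are grounded
# repeatedly with new constants against the same Control object.
#   ctl = Control()
#   ctl.add("step", ["t"], "holds(t).")
#   ctl.ground([("step", [Number(1)])])   # first increment
#   ctl.solve(on_model=print)
#   ctl.ground([("step", [Number(2)])])   # extend without regrounding step 1
#   ctl.solve(on_model=print)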
|
inspirehep/inspire-dojson
|
inspire_dojson/cds/__init__.py
|
Python
|
gpl-3.0
| 1,112
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""DoJSON model and rules for CDS to INSPIRE HEP MARC."""
from __future__ import absolute_import, division, print_function
from . import rules # noqa: F401
from .model import cds2hep_marc # noqa: F401
|
tahpee/detest
|
detest/detest/settings.py
|
Python
|
mit
| 2,968
| 0.000337
|
"""
Django settings for detest project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&^ck$n8qsz2e#s+z6%b%(f$r4)2!w4fvz7m9ks@blx=(hq*efu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'detest_ui',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'detest.urls'
WSGI_APPLICATION = 'detest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'detest',
'USER': 'detest',
'PASSWORD': 'detest',
'HOST': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(li
|
neno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
},
'detest': {
'handlers': ['console'],
'level': 'DEBUG',
},
'detest_ui': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
LOGGING = {}
|
hackBCA/missioncontrol
|
application/mod_user/controllers.py
|
Python
|
mit
| 3,822
| 0.026688
|
from application import CONFIG, app
from .models import *
from flask import current_app, session
from flask.ext.login import LoginManager, login_user, logout_user, current_user
from flask.ext.principal import Principal, Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
import bcrypt
import re
import sendgrid
import time
from itsdangerous import URLSafeTimedSerializer
AuthenticationError = Exception("AuthenticationError", "Invalid credentials.")
UserExistsError = Exception("UserExistsError", "Email already exists in database.")
UserDoesNotExistError = Exception("UserDoesNotExistError", "Account with given email does not exist.")
login_manager = LoginManager()
login_manager.init_app(app)
principals = Principal(app)
sg = sendgrid.SendGridClient(CONFIG["SENDGRID_API_KEY"])
ts = URLSafeTimedSerializer(CONFIG["SECRET_KEY"])
@login_manager.user_loader
def load_user(user_id):
user_entries = StaffUserEntry.objects(id = user_id)
if user_entries.count() != 1:
return None
currUser = user_entries[0]
user = User(currUser.id, currUser.email, currUser.firstname, currUser.lastname, currUser.roles)
return user
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
identity.provides.add(RoleNeed(role))
def get_user(email):
entries = StaffUserEntry.objects(email = email)
if entries.count() == 1:
return entries[0]
return None
def verify_user(email, password):
currUser = get_user(email)
if currUser is None:
return None
hashed = currUser.hashed
if bcrypt.hashpw(password.encode("utf-8"), hashed.encode("utf-8")) == hashed.encode("utf-8"):
return load_user(currUser.id)
else:
return None
def login(email):
user = load_user(get_user(email).id)
if user != None:
login_user(user)
identity_changed.send(current_app._get_current_object(), identity = Identity(user.uid))
else:
raise UserDoesNotExistError
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(), identity = AnonymousIdentity())
def tokenize_email(email):
return ts.dumps(email, salt = CONFIG["EMAIL_TOKENIZER_SALT"])
def detokenize_email(token):
return ts.loads(token, salt = CONFIG["EMAIL_TOKENIZER_SALT"], max_age = 86400)
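# Round-trip sketch (illustrative): the token embeds the email plus a
# timestamp, so detokenize_email() both decodes it and enforces the
# 24-hour (86400 s) expiry.
#   token = tokenize_email("user@example.com")
#   assert detokenize_email(token) == "user@example.com"
#   # after 24 hours the same call raises itsdangerous.SignatureExpired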
def send_recovery_email(email):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
token = tokenize_email(email)
message = sendgrid.Mail()
message.add_to(email)
message.set_from("noreply@hackbca.com")
message.set_subject("hackBCA III - Account Recovery")
message.set_html("<p></p>")
message.add_filter("templates", "enable", "1")
message.add_filter("templates", "template_id", CONFIG["SENDGRID_ACCOUNT_RECOVERY_TEMPLATE"])
message.add_substitution("prefix", "staff")
message.add_substitution("token", token)
status, msg = sg.send(message)
def change_name(email, firstname, lastname):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
account.firstname = firstname
account.lastname = lastname
account.save()
login(email) #To update navbar
def change_password(email, password):
account = get_user(email)
if account is None:
raise UserDoesNotExistError
hashed = str(bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()))[2:-1]
account.hashed = hashed
account.save()
def get_user_attr(email, attr):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
return getattr(user, attr)
def set_user_attr(email, attr, value):
user = get_user(email)
if user is None:
raise UserDoesNotExistError
setattr(user, attr, value)
user.save()
|
Emilv2/mandarinrecording
|
registration/apps.py
|
Python
|
agpl-3.0
| 107
| 0
|
from django.apps import AppConfig
class UserregistrationConfig(AppConfig):
name = 'userregistration'
|
aqisnotliquid/minder2
|
app/murmur/views.py
|
Python
|
mit
| 2,492
| 0.014045
|
import utils
from flask import render_template, redirect, request, session, url_for, json, jsonify
from . import murmurbp
from .User import User
# User Views
@murmurbp.route("/users", methods = ['GET'])
def get_all_users():
u = User()
ul = utils.obj_to_dict(u.get_all())
data = [{'UserId': k, 'UserName': v} for k, v in ul.iteritems()]
resp = jsonify(users=data)
return resp, 200
@murmurbp.route("/users/<int:id>", methods = ['GET'])
def get_user(id):
u = User()
data = utils.obj_to_dict(u.get(id))
resp = jsonify(data)
return resp, 200
@murmurbp.route("/users", methods = ['POST'])
def add_user():
u = User()
user = json.loads('{"UserName": "TestUser7"}')
new_user = u.add(user)
data = utils.obj_to_dict(new_user)
resp = jsonify(data)
return resp, 200
@murmurbp.route("/users/<int:id>", methods = ['DELETE'])
def delete_user(id):
u = User()
u.delete(id)
return jsonify(), 201
from .Channel import Channel
# Channel Views
@murmurbp.route("/channels", methods = ['GET'])
def get_all_channels():
c = Channel()
cl = utils.obj_to_dict(c.get_all())
data = [ v for k, v in cl.iteritems()]
resp = jsonify(channels=data)
return resp, 200
@murmurbp.route("/channe
|
ls", methods = ['POST'])
def add_channel():
c = Channel()
name = request.form['channelName']
parent = request.form['parent']
new_channel = c.add_channel(name, parent)
data = utils.obj_to_dict(new_channel)
resp = jsonify(data)
return resp, 200
@murmurbp.route("/channels/<int:id>", methods = ['DELETE'])
def delete_channel(id):
c = Channel()
c.delete(id)
return jsonify(), 201
from .ACLGroup import ACL, Group
# ACL and Group Views
@murmurbp.route("/acls/<int:channel_id>", methods = ['GET'])
def get_all_acls(channel_id):
a = ACL()
data = utils.obj_to_dict(a.get_all(channel_id))
resp = jsonify(acls=data)
return resp, 200
@murmurbp.route("/groups/<int:channel_id>", methods = ['GET'])
def get_all_groups(channel_id):
g = Group()
data = utils.obj_to_dict(g.get_all(channel_id))
resp = jsonify(groups=data)
return resp, 200
@murmurbp.route("/acls/<int:channel_id>", methods = ['POST'])
def add_acl_to_channel(channel_id):
# TODO: load json object
a = ACL()
acl = json.loads('{"applyHere": true,"applySubs": true,"userid": 1,"group": "admin","allow": 1024,"deny": 0}')
data = a.add(channel_id, acl)
resp = jsonify(data)
return resp, 200
|
carvalhomb/tsmells
|
src/viz/gui/TDockable.py
|
Python
|
gpl-2.0
| 1,659
| 0.007836
|
#
# This file is part of TSmells
#
# TSmells is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# TSmells is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with TSmells; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Copyright 2007-2008 Manuel Breugelmans <manuel.breugelmans@student.ua.ac.be>
#
from com.hp.hpl.guess.ui import Dockable
# Jython imports for the Swing/AWT classes used below (JPanel, Dimension, Rectangle)
from javax.swing import JPanel
from java.awt import Dimension, Rectangle
class TDockable(JPanel, Dockable):
#
# Implementation of Dockable interface
#
def mouseEnterEdge(self, edge):
pass
def mouseLeaveNode(self, node):
pass
def mouseLeaveEdge(self, edge):
pass
def getPreferredSize(self):
return Dimension(200,600)
def getDefaultFrameBounds(self):
return Rectangle(50, 50, 300, 600)
def getDirectionPreference(self):
''' prefer vertical orientation '''
return 2 # vertical, see com.hp.hpl.guess.ui.MainUIWindow.java
def opening(self, state):
self.visible = state
def attaching(self, state):
pass
def getTitle(self):
return("")
def getWindow(self):
return self.myParent
def setWindow(self,gjf):
self.myParent = gjf
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/py-wrapt/package.py
|
Python
|
lgpl-2.1
| 1,546
| 0.000647
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyWrapt(PythonPackage):
"""Module for decorators, wrappers and monkey patching."""
homepage = "https://github.com/GrahamDumpleton/wrapt"
url = "https://pypi.io/packages/source/w/wrapt/wrapt-1.10.10.tar.gz"
version('1.10.10', '97365e906afa8b431f266866ec4e2e18')
|
nonZero/OpenCommunity
|
src/communities/legacy_mapping.py
|
Python
|
bsd-3-clause
| 574
| 0.002227
|
#encoding: utf-8
from __future__ import unicode_literals
TITLE_TO_SLUG = {
'איגוד הביטקוין': 'bitcoin-org-il',
'אליאב': 'eliav',
'* בדיקות פרודקשיין *': 'production-test',
'בי״ס עמית': 'amit',
'הבר קיימא': 'barkayma',
'הסדנא לידע ציבורי': 'hasadna',
'הפורום לממשל פתוח': 'open-government',
'התנועה לאיכות השלטון': 'mqg',
'מעיין ברוך': 'maayan-baruch',
'מרצ': 'meretz',
'נטף': 'nataf',
'נען': 'naan',
'קהילה פתוחה': 'open-community',
}
|
P1R/cinves
|
TrabajoFinal/tubo350cm/2-DbvsFreq/tubo2huecos/DbvsFreq-Ampde0.1v-2huequitos.py
|
Python
|
apache-2.0
| 511
| 0.078278
|
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt
Freq=np.array([30,40,45,50,53,55,60,65,70,80,90,95,98,100,110,120])
Db=np.array([70.5,78.6,83.2,88.4,87.5,86.7,85.2,83.9,85.1,88,95.7,100.4,100.4,99.2,94.7,94.9])
plt.xlabel('Frecuencia')
plt.ylabel('Decibel')
plt.title('DecibelvsFreq a 0.1volts')
#for i in range(len(Freq)):
#    plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))
plt.axis([0, 330, 50, 130])
plt.plot(Freq,Db,'bo',Freq,Db,'k')
plt.grid(True)
plt.show()
|
ThisIsSoSteve/Project-Tensorflow-Cars
|
plot_course_data.py
|
Python
|
mit
| 2,616
| 0.006116
|
#import os
import pickle
import glob
#import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
#from data_control_no_images.read import Read
listing = glob.glob('F:/Project_Cars_Data/1lap-fullspeed/Watkins Glen International - Short Circuit' + '/*.pkl')
x = []
y = []
throttle = []
raw_throttle = []
brake = []
raw_brake = []
steering = []
raw_steering = []
xy = []
for filename in tqdm(listing):
with open(filename, 'rb') as file_data:
project_cars_state = pickle.load(file_data)
controller_state = pickle.load(file_data)
#remove none flying lap data
if project_cars_state.mParticipantInfo[0].mCurrentLapDistance == 0.0:
continue
position = project_cars_state.mParticipantInfo[0].mWorldPosition
x.append(round(position[0]))
y.append(round(position[2]))
throttle.append(controller_state['right_trigger']/255)# 0 - 255
brake.append(controller_state['left_trigger']/255) #0 - 255
steering.append(controller_state['thumb_lx']/32767) #-32768 - 32767
#steering.append(project_cars_state.mSteering)
raw_steering.append(project_cars_state.mUnfilteredSteering)
raw_brake.append(project_cars_state.mUnfilteredBrake)
raw_throttle.append(project_cars_state.mUnfilteredThrottle)
xy.append([position[0], position[2]])
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=steering)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller steering')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_steering)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw steering')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=throttle)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller throttle')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_throttle)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw throttle')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=brake)
plt.colorbar()
plt.axis('equal')
plt.title('position and controller brake')
plt.show()
plt.close()
plt.figure(figsize=(10, 10))
plt.scatter(x, y, c=raw_brake)
plt.colorbar()
plt.axis('equal')
plt.title('position and raw brake')
plt.show()
plt.close()
# get_data = Read(True)
# mean, std = get_data.load_mean_and_std('F:/Project_Cars_Data/Full_Speed_Training_none_image')
# print(mean)
# print(std)
# xy = (xy - mean) / std
# print(np.array(xy[:,0]).shape)
# plt.scatter(xy[:,0], xy[:,1])
# plt.axis('equal')
# plt.show()
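# Worked example of the controller normalisation above (illustrative):
# a right_trigger reading of 128 maps to 128 / 255 ~= 0.50 throttle, and a
# thumb_lx reading of -16384 maps to -16384 / 32767 ~= -0.50 steering.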
|
yugangzhang/chxanalys
|
chxanalys/chx_libs.py
|
Python
|
bsd-3-clause
| 9,246
| 0.025741
|
"""
Dec 10, 2015 Developed by Y.G.@CHX
yuzhang@bnl.gov
This module is for the necessary packages for the XPCS analysis
"""
from IPython.core.magics.display import Javascript
from skbeam.core.utils import multi_tau_lags
from skimage.draw import line_aa, line, polygon, ellipse, circle
from modest_image import ModestImage, imshow
from databroker import DataBroker as db, get_images, get_table, get_events, get_fields
from filestore.api import register_handler, deregister_handler
#from filestore.retrieve import _h_registry, _HANDLER_CACHE, HandlerBase
from eiger_io.pims_reader import EigerImages
from chxtools import handlers
from filestore.path_only_handlers import RawHandler
## Import all the required packages for Data Analysis
#* scikit-beam - data analysis tools for X-ray science
# - https://github.com/scikit-beam/scikit-beam
#* xray-vision - plotting helper functions for X-ray science
# - https://github.com/Nikea/xray-vision
import xray_vision
import matplotlib.cm as mcm
import copy
import xray_vision.mpl_plotting as mpl_plot
from xray_vision.mpl_plotting import speckle
from xray_vision.mask.manual_mask import ManualMask
import skbeam.core.roi as roi
import skbeam.core.correlation as corr
import skbeam.core.utils as utils
import numpy as np
from datetime import datetime
import h5py
import pims
from pandas import DataFrame
import os, sys, time
import getpass
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import pickle
from lmfit import Model
from lmfit import minimize, Parameters, Parameter, report_fit
from matplotlib.figure import Figure
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
import collections
import itertools
import random
from PIL import Image
import warnings
from eiger_io.fs_handler2 import EigerHandler2
from eiger_io.fs_handler import LazyEigerHandler
fs = db.event_sources[0].fs
fs.deregister_handler('AD_EIGER')
fs.register_handler('AD_EIGER', LazyEigerHandler)
fs.deregister_handler('AD_EIGER2')
fs.register_handler('AD_EIGER2', EigerHandler2)
mcolors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k','darkgoldenrod','oldlace', 'brown','dodgerblue' ])
markers = itertools.cycle(list(plt.Line2D.filled_markers))
lstyles = itertools.cycle(['-', '--', '-.','.',':'])
colors = itertools.cycle(["blue", "darkolivegreen", "brown", "m", "orange", "hotpink", "darkcyan", "red",
"gray", "green", "black", "cyan", "purple" , "navy"])
colors_copy = itertools.cycle(["blue", "darkolivegreen", "brown", "m", "orange", "hotpink", "darkcyan", "red",
"gray", "green", "black", "cyan", "purple" , "navy"])
markers = itertools.cycle( ["o", "2", "p", "1", "s", "*", "4", "+", "8", "v","3", "D", "H", "^",])
markers_copy = itertools.cycle( ["o", "2", "p", "1", "s", "*", "4", "+", "8", "v","3", "D", "H", "^",])
RUN_GUI = False #if True for gui setup; else for notebook; the main code difference is the Figure() or plt.figure(figsize=(8, 6))
markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H',
'h', '*', 'd',
'$I$','$L$', '$O$','$V$','$E$',
'$c$', '$h$','$x$','$b$','$e$','$a$','$m$','$l$','$i$','$n$', '$e$',
'8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',]
markers = np.array( markers *100 )
markers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H',
'h', '*', 'd',
'8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',]
markers = np.array( markers *100 )
colors = np.array( ['darkorange', 'mediumturquoise', 'seashell', 'mediumaquamarine', 'darkblue',
'yellowgreen', 'mintcream', 'royalblue', 'springgreen', 'slategray',
'yellow', 'slateblue', 'darkslateblue', 'papayawhip', 'bisque', 'firebrick',
'burlywood', 'dodgerblue', 'dimgrey', 'chartreuse', 'deepskyblue', 'honeydew',
'orchid', 'teal', 'steelblue', 'limegreen', 'antiquewhite',
'linen', 'saddlebrown', 'grey', 'khaki', 'hotpink', 'darkslategray',
'forestgreen', 'lightsalmon', 'turquoise', 'navajowhite',
'darkgrey', 'darkkhaki', 'slategrey', 'indigo',
'darkolivegreen', 'aquamarine', 'moccasin', 'beige', 'ivory', 'olivedrab',
'whitesmoke', 'paleturquoise', 'blueviolet', 'tomato', 'aqua', 'palegoldenrod',
'cornsilk', 'navy', 'mediumvioletred', 'palevioletred', 'aliceblue', 'azure',
'orangered', 'lightgrey', 'lightpink', 'orange', 'wheat',
'darkorchid', 'mediumslateblue', 'lightslategray', 'green', 'lawngreen',
'mediumseagreen', 'darksalmon', 'pink', 'oldlace', 'sienna', 'dimgray', 'fuchsia',
'lemonchiffon', 'maroon', 'salmon', 'gainsboro', 'indianred', 'crimson',
'mistyrose', 'lightblue', 'darkgreen', 'lightgreen', 'deeppink',
'palegreen', 'thistle', 'lightcoral', 'lightgray', 'lightskyblue', 'mediumspringgreen',
'mediumblue', 'peru', 'lightgoldenrodyellow', 'darkseagreen', 'mediumorchid',
'coral', 'lightyellow', 'chocolate', 'lavenderblush', 'darkred', 'lightseagreen',
'darkviolet', 'lightcyan', 'cadetblue', 'blanchedalmond', 'midnightblue',
'lightsteelblue', 'darkcyan', 'floralwhite', 'darkgray',
'lavender', 'sandybrown', 'cornflowerblue', 'gray',
'mediumpurple', 'lightslategrey', 'seagreen',
'silver', 'darkmagenta', 'darkslategrey', 'darkgoldenrod', 'rosybrown',
'goldenrod', 'darkturquoise', 'plum',
'purple', 'olive', 'gold','powderblue', 'peachpuff','violet', 'lime', 'greenyellow', 'tan', 'skyblue',
'magenta', 'black', 'brown', 'green', 'cyan', 'red','blue'] *100 )
colors = colors[::-1]
colors_ = itertools.cycle( colors )
#colors_ = itertools.cycle(sorted_colors_ )
markers_ = itertools.cycle( markers )
import matplotlib as mpl
# Custom colormaps
################################################################################
# ROYGBVR but with Cyan-Blue instead of Blue
color_list_cyclic_spectrum = [
[ 1.0, 0.0, 0.0 ],
[ 1.0, 165.0/255.0, 0.0 ],
[ 1.0, 1.0, 0.0 ],
[ 0.0, 1.0, 0.0 ],
[ 0.0, 0.2, 1.0 ],
[ 148.0/255.0, 0.0, 211.0/255.0 ],
[ 1.0, 0.0, 0.0 ]
]
cmap_cyclic_spectrum = mpl.colors.LinearSegmentedColormap.from_list('cmap_cyclic_spectrum', color_list_cyclic_spectrum)
# classic jet, slightly tweaked
# (bears some similarity to mpl.cm.nipy_spectral)
color_list_jet_extended = [
[0, 0, 0],
[0.18, 0, 0.18],
[0, 0, 0.5],
[0, 0, 1],
[ 0. , 0.38888889, 1. ],
[ 0. , 0.83333333, 1. ],
[ 0.3046595 , 1. , 0.66308244],
[ 0.66308244, 1. , 0.3046595 ],
[ 1. , 0.90123457, 0. ],
[ 1. , 0.48971193, 0. ],
[ 1. , 0.0781893 , 0. ],
[1, 0, 0],
[ 0.5 , 0. , 0. ],
]
cmap_jet_extended = mpl.colors.LinearSegmentedColormap.from_list('cmap_jet_extended', color_list_jet_extended)
# Tweaked version of "view.gtk" default color scale
color_list_vge = [
[ 0.0/255.0, 0.0/255.0, 0.0/255.0],
[ 0.0/255.0, 0.0/255.0, 254.0/255.0],
[ 188.0/255.0, 2.0/255.0, 107.0/255.0],
[ 254.0/255.0, 55.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 254.0/255.0]
]
cmap_vge = mpl.colors.LinearSegmentedColormap.from_list('cmap_vge', color_list_vge)
# High-dynamic-range (HDR) version of VGE
color_list_vge_hdr = [
[ 255.0/255.0, 255.0/255.0, 255.0/255.0],
[ 0.0/255.0, 0.0/255.0, 0.0/255.0],
[ 0.0/255.0, 0.0/255.0, 255.0/255.0],
[ 188.0/255.0, 0.0/255.0, 107.0/255.0],
[ 254.0/255.0, 55.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 0.0/255.0],
[ 254.0/255.0, 254.0/255.0, 254.0/255.0]
]
cmap_vge_hdr = mpl.colors.LinearSegmentedColormap.from_list('cmap_vge_hdr', color_list_vge_hdr)
# Simliar to Dectris ALBULA default color-scale
color_list_hdr_albula = [
|
yunhaowang/IDP-APA
|
utilities/py_idpapa_sam2gpd.py
|
Python
|
apache-2.0
| 3,045
| 0.042365
|
#!/usr/bin/env python
import sys,re,time,argparse
def main(args):
# print >>sys.stdout, "Start analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
convert(args.input,args.output)
# print >>sys.stdout, "Finish analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
def extract_exon_length_from_cigar(cigar):
cigar_m = ["0"] + re.findall(r"(\d+)M",cigar)
cigar_d = ["0"] + re.findall(r"(\d+)D",cigar)
cigar_m_s,cigar_d_s = [0,0]
for m in cigar_m:
cigar_m_s += int(m)
for d in cigar_d:
cigar_d_s += int(d)
exon_length = cigar_m_s+cigar_d_s
return exon_length
def extract_soft_clip_from_cigar(cigar):
cigar_5 = ["0"] + re.findall(r"^(\d+)S",cigar)
cigar_3 = ["0"] + re.findall(r"(\d+)S$",cigar)
cigar_5_s,cigar_3_s = [0,0]
for s5 in cigar_5:
cigar_5_s += int(s5)
for s3 in cigar_3:
cigar_3_s += int(s3)
return cigar_5_s,cigar_3_s
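# Worked example (illustrative): for cigar "5S10M2D8M3S",
# extract_exon_length_from_cigar returns 10 + 8 + 2 = 20 (M plus D, the
# reference-consuming operations), and extract_soft_clip_from_cigar
# returns (5, 3) for the 5' and 3' soft-clipped bases.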
def convert(sam_file,gpd_file):
for line in sam_file:
if line[0] != "@":
qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq = line.strip().split("\t")[:10]
tag = "\t".join(line.strip().split("\t")[11:])
if rname != "*" and re.search(r"XS:A:(\S)",tag):
s5,s3 = extract_soft_clip_from_cigar(cigar)
sf = str(s5)+"_"+str(s3)
strand = (re.search(r"XS:A:(\S)",tag)).group(1)
cigar_n_l = 0
exon_length = 0
exon_start = int(pos)-1
exon_end = 0
exon_start_list = []
exon_end_list = []
if "N" in cigar:
for exon in cigar.split("N"):
exon = exon + "N"
exon_start = exon_start + exon_length + cigar_n_l
exon_length = extract_exon_length_from_cigar(exon)
exon_end = exon_start + exon_length
if re.search(r"(\d+)N",exon):
cigar_n_l = int((re.search(r"(\d+)N",exon)).group(1))
exon_start_list.append(str(exon_start))
exon_end_list.append(str(exon_end))
else:
exon_start = exon_start
exon_length = extract_exon_length_from_cigar(cigar)
exon_end = exon_start + exon_length
exon_start_list.append(str(exon_start))
exon_end_list.append(str(exon_end))
exon_start_list.append("")
exon_end_list.append("")
print >>gpd_file, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (qname,qname,rname,strand,str(int(pos)-1),str(exon_end),mapq,sf,str(len(exon_start_list)-1),",".join(exon_start_list),",".join(exon_end_list))
sam_file.close()
gpd_file.close()
def do_inputs():
output_gpd_format = '''
1. read id
2. read id
3. chromosome id
4. strand
5. start site of alignment
6. end site of alignment
7. MAPQ
8. Number of nucleotides that are soft-clipped by the aligner; left_right
9. exon count
10. exon start set
11. exon end set'''
parser = argparse.ArgumentParser(description="Function: convert sam to gpd.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i','--input',type=argparse.FileType('r'),required=True,help="Input: sam file")
parser.add_argument('-o','--output',type=argparse.FileType('w'),required=True,help="Output: gpd file")
args = parser.parse_args()
return args
if __name__=="__main__":
args = do_inputs()
main(args)
|
oscarcbr/cellery
|
cellery/ecp.py
|
Python
|
gpl-3.0
| 11,283
| 0.042808
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ecp.py part of cellery (ceRNAs linking inference)
#
# Copyright 2016 Oscar Bedoya Reina <obedoya@igmm-linux-005>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
Methods to calculate ECP values (Endogenous Competition Potential)
"""
########################################################
#~ Import libraries.
########################################################
from cellery import exceptions
from itertools import product
from multiprocessing import Queue,Process
from numpy import array,empty,float32,float64,nan,zeros
from clcECP import rtrnECP,rtrnECPMskd,rtrnECPDnsty,rtrnECPDnstyMskd
import os
import sqlite3
########################################################
#~ Compute ECP values for all combinations of two arrays of arrays with
# values.
########################################################
def cmpECP(aMrnVlsDtA,aMrnVlsDtB,aANmsA,aANmsB,fldrOutECPPrws, \
aALenA=False,aALenB=False,aMskRef=False,nThrds=10,intrvlSz=700, \
sqlFl=False,pntrCnts=True):
"""
Input: aMrnVlsDtA is an array A of arrays with values for miRNAs.
aMrnVlsDtB is an array B of arrays with values for miRNAs. aANmsA is
the array of variable names in the same position as the numbers in
vrblAPos. aANmsB is the array of variable names in the same order as
vrblBPos. fldrOutECPPrws is a folder to store partial ECP results.
Optionally, aALenA is an array of object lengths in the same order
that aAVlsA. aALenB is an array of object lengths in the same order
that aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays
within array A and B). nThrds is the number of threads to run in
parallel. intrvlSz is the size of the interval to run in multithread.
sqlFl is a sql database to save the ECP values. If pntrCnts is True
aAVlsA and aAVlsB are counts so 0 values shall be considered
(excluded in shared counts).
Output: aECPVlsAVlsB is an array with the ECP values for all
combinations of array A and B.
NOTE: The subarrays in arrays A and B must have the same dimensions
(i.e. all the miRNA arrays must have the same size.).
NOTE: Null values shall be numpy.nan.
NOTE: aECPVlsAVlsB has arrays in A as rows and in B as columns.
NOTE: if aALenA and aALenB ECP density is going to be calculated.
NOTE: if aMskRef miRNA is going to be masked.
"""
def mltECPclc(qInJobs,qOutRslts,mthdECPclc,aMrnVlsDtA,aMrnVlsDtB, \
fldrOutECPPrws,aALenA,aALenB,aMskRef,pntrCnts):
"""
Input: qInJobs is a queue with pairs of intervals. qOutRslts is
the queue to store position in arrayA, position in arrayB, and
ECP value. mthdECPclc is the method to calculate the ECP value.
aMrnVlsDtA is an array A of arrays with values for miRNAs.
aMrnVlsDtB is an array B of arrays with values for miRNAs.
fldrOutECPPrws is a folder to store partial ECP results. aALenA
is an array of object lengths in the same order that aAVlsA.
aALenB is an array of object lengths in the same order that
aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays
within array A and B). If pntrCnts is True aAVlsA and aAVlsB
are counts so 0 values shall be considered (excluded in shared
counts).
Output: qOutRslts is the queue to store position in arrayA,
position in arrayB, and ECP values.
"""
for intrvlA,intrvB in iter(qInJobs.get,'STOP'):
lECPVlsAVlsB = mthdECPclc(aMrnVlsDtA,aMrnVlsDtB, \
fldrOutECPPrws,intrvlA,intrvB,pntrCnts,aMskRef,aALenA, \
aALenB)
qOutRslts.put(lECPVlsAVlsB)
#--------------------------
#~ Check if there is mask for miRNAs
if dir(aMskRef)[0]=='T':
assert len(aMskRef) == len(aALenB[0]) == len(aALenA[0])
if dir(aALenB)[0]=='T':
assert dir(aALenB)[1]=='T'
mthdECPclc = rtrnECPDnstyMskd
else:
assert not aALenA and not aALenB
mthdECPclc = rtrnECPMskd
else:
if dir(aALenB)[0]=='T':
assert dir(aALenB)[1]=='T'
mthdECPclc = rtrnECPDnsty
else:
assert not aALenA and not aALenB
mthdECPclc = rtrnECP
#--------------------------
#~ Create list of intervals for multithreading
lenaMrnVlsDtA = len(aMrnVlsDtA)
lenaMrnVlsDtB = len(aMrnVlsDtB)
intrvlsMrnVlsA = []
for strt in xrange(0,lenaMrnVlsDtA,intrvlSz):
cEnd = strt+intrvlSz
if cEnd<lenaMrnVlsDtA:
end = cEnd
else:
end = lenaMrnVlsDtA
intrvlsMrnVlsA.append([strt,end])
intrvlsMrnVlsB = []
for strt in xrange(0,lenaMrnVlsDtB,intrvlSz):
cEnd = strt+intrvlSz
if cEnd<lenaMrnVlsDtB:
end = cEnd
else:
end = lenaMrnVlsDtB
intrvlsMrnVlsB.append([strt,end])
#--------------------------
#~ Run in parallel.
aECPVlsAVlsB = zeros((lenaMrnVlsDtA,lenaMrnVlsDtB),dtype=float32)
aECPVlsAVlsB.fill(nan)#fill all ECP with nan to start
qInJobs = Queue()
qOutRslts = Queue()
cntVlABPrs=0
for intrvlA,intrvB in product(intrvlsMrnVlsA,intrvlsMrnVlsB):
qInJobs.put((intrvlA,intrvB))
cntVlABPrs += 1
for t in xrange(nThrds):
Process(target = mltECPclc,args=(qInJobs,qOutRslts,mthdECPclc, \
aMrnVlsDtA,aMrnVlsDtB,fldrOutECPPrws,aALenA,aALenB, \
aMskRef,pntrCnts)).start()
lECPVlsAVlsBGlbl = []#store global results
for cnt in range(cntVlABPrs):
if cnt%50==0:
print 'Running calculations on pair %s out of %s'%(cnt, \
cntVlABPrs)
lECPVlsAVlsB = qOutRslts.get()
lECPVlsAVlsBGlbl.extend(lECPVlsAVlsB)
for t in xrange(nThrds):
qInJobs.put('STOP')
#--------------------------
#~ create array: aMrnVlsDtA in rows, aMrnVlsDtB in columns.
for vlsAPos,vlsBPos,ECP in lECPVlsAVlsBGlbl:
aECPVlsAVlsB[vlsAPos,vlsBPos] = ECP
if sqlFl:
mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB)
return aECPVlsAVlsB
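# Usage sketch (illustrative only; rtrnECP comes from the compiled clcECP
# module imported above, and the folder/sqlite paths are placeholders):
#   aA = array([[1., 0., 2.], [0., 1., 1.]], dtype=float32)  # 2 objects x 3 miRNAs
#   aB = array([[1., 1., 0.]], dtype=float32)                # 1 object  x 3 miRNAs
#   ecp = cmpECP(aA, aB, array(['gA1', 'gA2']), array(['gB1']),
#                '/tmp/ecpPrws', nThrds=2, sqlFl='/tmp/ecp.db')
#   ecp.shape == (2, 1)   # rows follow array A, columns follow array B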
########################################################
#~ Make a sqlite3 database for ECP values between genes/lncRNAs of
# interest.
########################################################
def mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB):
"""
Input: lECPVlsAVlsBGlbl is a list of tuples (vrblAPos,vrblBPos,ECP).
vrblAPos is the position of the first variables, vrblBPos is the
position of the second variable, ECP is the ECP value between
vrblAPos and vrblBPos. A sqlite3 database will be created for the
input list. aANmsA is the array of variable names in the same
position as the numbers in vrblAPos. aANmsB is the array of variable
names in the same order as vrblBPos.
Output: A sqlite3 database will be created for the input list in the
file sqlFl.
"""
conn = sqlite3.connect(sqlFl)
c = conn.cursor()
c.execute \
('''CREATE TABLE records (id TEXT, vrblANm TEXT, vrblBNm TEXT, ECP REAL)''')
lCnt = 0
for vrblAPos,vrblBPos,ECP in lECPVlsAVlsBGlbl:
vrblANm,vrblBNm = aANmsA[vrblAPos],aANmsB[vrblBPos]
lCnt+=1
c.execute('insert into records VALUES (?,?,?,?)', (str(lCnt), \
vrblANm,vrblBNm,float64(ECP)))
# create indexes. Decrease complexity of querying
c.execute("CREATE INDEX index_records on records (id);")
conn.commit()
conn.close()
return 0
########################################################
#~ Read a sqlite3 database for correlations between genes/lncRNAs of
# interest.
########################################################
def rtrnSqlFlECP(sqlFl,srtdVrblANms,srtdVrblBNms,rtrnECPSgnd=False):
"""
Input: sqlFl is a sqlite3 database with the fields id, vrblANm,
vrblBNm, and ECP. srtdVrblANms is a sorted lists of names
present in the field vrblANm. srtdVrblBNms is a sorted lists of
names present in the field vrblBNm. Optionally, rtrnECPSgnd can have
values 'negative' or 'positive', in those cases only 'negative' or
'positive' E
|
alexforencich/verilog-ethernet
|
tb/test_ptp_clock_cdc_64.py
|
Python
|
mit
| 5,957
| 0.000671
|
#!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import ptp
module = 'ptp_clock_cdc'
testbench = 'test_%s_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
TS_WIDTH = 64
NS_WIDTH = 4
FNS_WIDTH = 16
INPUT_PERIOD_NS = 0x6
INPUT_PERIOD_FNS = 0x6666
OUTPUT_PERIOD_NS = 0x6
OUTPUT_PERIOD_FNS = 0x6666
USE_SAMPLE_CLOCK = 1
LOG_FIFO_DEPTH = 3
LOG_RATE = 3
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_clk = Signal(bool(0))
input_rst = Signal(bool(0))
output_clk = Signal(bool(0))
output_rst = Signal(bool(0))
sample_clk = Signal(bool(0))
input_ts = Signal(intbv(0)[96:])
# Outputs
output_ts = Signal(intbv(0)[96:])
output_ts_step = Signal(bool(0))
output_pps = Signal(bool(0))
# PTP clock
ptp_clock = ptp.PtpClock(period_ns=INPUT_PERIOD_NS, period_fns=INPUT_PERIOD_FNS)
ptp_logic = ptp_clock.create_logic(
input_clk,
input_rst,
ts_64=input_ts
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_clk=input_clk,
input_rst=input_rst,
output_clk=output_clk,
output_rst=output_rst,
sample_clk=sample_clk,
input_ts=input_ts,
output_ts=output_ts,
output_ts_step=output_ts_step,
output_pps=output_pps
)
@always(delay(3200))
def clkgen():
clk.next = not clk
input_clk.next = not input_clk
output_clk_hp = Signal(int(3200))
@instance
def clkgen_output():
while True:
yield delay(int(output_clk_hp))
output_clk.next = not output_clk
@always(delay(5000))
def clkgen_sample():
sample_clk.next = not sample_clk
@instance
def check():
yield delay(100000)
yield clk.posedge
rst.next = 1
input_rst.next = 1
output_rst.next = 1
yield clk.posedge
yield clk.posedge
yield clk.posedge
input_rst.next = 0
output_rst.next = 0
yield clk.posedge
yield delay(100000)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: Same clock speed")
current_test.next = 1
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 2: Slightly faster")
current_test.next = 2
output_clk_hp.next = 3100
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 3: Slightly slower")
current_test.next = 3
output_clk_hp.next = 3300
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 4: Significantly faster")
current_test.next = 4
output_clk_hp.next = 2000
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 5: Significantly slower")
current_test.next = 5
output_clk_hp.next = 5000
yield clk.posedge
for i in range(30000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
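# Editor's note (sketch, not part of the original testbench): the checks above
# treat the low 16 timestamp bits as fractional nanoseconds (FNS_WIDTH = 16),
# so ts/2**16 is nanoseconds and *1e-9 converts to seconds; the 1e-8 tolerance
# therefore corresponds to 10 ns.
def ts_to_seconds(ts, fns_width=16):
    """Convert a fixed-point ns.fns timestamp value to seconds (assumed format)."""
    return ts / 2**fns_width * 1e-9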
|
ezarowny/url-condenser
|
url_condenser/condensed_urls/migrations/0002_condensedurl_visited_count.py
|
Python
|
mit
| 456
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-19 06:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('condensed_urls', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='condensedurl',
name='visited_count',
field=models.IntegerField(default=0),
),
]
| |
openstack/nomad
|
cyborg/objects/base.py
|
Python
|
apache-2.0
| 6,515
| 0
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cyborg common internal object model"""
import netaddr
from oslo_utils import versionutils
from oslo_versionedobjects import base as object_base
from cyborg import objects
from cyborg.objects import fields as object_fields
class CyborgObjectRegistry(object_base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
# NOTE(jroll): blatantly stolen from nova
# NOTE(danms): This is called when an object is registered,
# and is responsible for maintaining cyborg.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
class CyborgObject(object_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'cyborg_object'
OBJ_PROJECT_NAMESPACE = 'cyborg'
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
}
def as_dict(self):
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k))
@staticmethod
def _from_db_object(obj, db_obj):
"""Converts a database entity to a formal object.
:param obj: An object of the class.
:param db_obj: A DB model of the object
:return: The object of the class with the database entity added
"""
for field in obj.fields:
obj[field] = db_obj[field]
obj.obj_reset_changes()
return obj
@classmethod
def _from_db_object_list(cls, db_objs, context):
"""Converts a list of database entities to a list of formal objects."""
objs = []
for db_obj in db_objs:
objs.append(cls._from_db_object(cls(context), db_obj))
return objs
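# Editor's sketch (hypothetical, not part of the cyborg tree): a minimal
# CyborgObject subclass illustrating the declaration pattern described in the
# docstring above; the class name and the extra field are invented, and the
# field type is reused from cyborg.objects.fields.
@CyborgObjectRegistry.register
class _ExampleRecord(CyborgObject):
    VERSION = '1.0'
    fields = dict(CyborgObject.fields,
                  enabled=object_fields.BooleanField(default=False))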
class CyborgObjectSerializer(object_base.VersionedObjectSerializer):
# Base class to use for object hydration
OBJ_BASE_CLASS = CyborgObject
CyborgObjectDictCompat = object_base.VersionedObjectDictCompat
class CyborgPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
'deleted_at': object_fields.DateTimeField(nullable=True),
'deleted': object_fields.BooleanField(default=False),
}
class ObjectListBase(object_base.ObjectListBase):
@classmethod
def _obj_primitive_key(cls, field):
return 'cyborg_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=object_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == object_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A CyborgObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, CyborgObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
:param:obj1: The first object in the comparison
:param:obj2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['cyborg_object.changes'] + ignore
else:
keys = ['cyborg_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
class DriverObjectBase(CyborgObject):
@staticmethod
def _from_db_object(obj, db_obj):
fields = obj.fields
fields.pop("updated_at")
fields.pop("created_at")
for field in fields:
obj[field] = db_obj[field]
obj.obj_reset_changes()
return obj
|
pmghalvorsen/gramps_branch
|
gramps/gui/editors/edittaglist.py
|
Python
|
gpl-2.0
| 4,483
| 0.003792
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Tag editing module for Gramps.
"""
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..managedwindow import ManagedWindow
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..listmodel import ListModel, TOGGLE
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Entering_and_Editing_Data:_Detailed_-_part_3' % \
URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Tags')
#-------------------------------------------------------------------------
#
# EditTagList
#
#-------------------------------------------------------------------------
class EditTagList(ManagedWindow):
"""
Dialog to allow the user to edit a list of tags.
"""
def __init__(self, tag_list, full_list, uistate, track):
"""
Initiate and display the dialog.
"""
ManagedWindow.__init__(self, uistate, track, self)
self.namemodel = None
top = self._create_dialog()
self.set_window(top, None, _('Tag selection'))
for tag in full_list:
self.namemodel.add([tag[0], tag in tag_list, tag[1]])
self.namemodel.connect_model()
# The dialog is modal. We don't want to have several open dialogs of
# this type, since then the user will lose track of which is which.
self.return_list = None
self.show()
while True:
response = self.window.run()
if response == Gtk.ResponseType.HELP:
display_help(webpage=WIKI_HELP_PAGE,
section=WIKI_HELP_SEC)
elif response == Gtk.ResponseType.DELETE_EVENT:
break
else:
if response == Gtk.ResponseType.OK:
self.return_list = [(row[0], row[2])
for row in self.namemodel.model
if row[1]]
self.close()
break
def _create_dialog(self):
"""
Create a dialog box to select tags.
"""
# pylint: disable-msg=E1101
title = _("%(title)s - Gramps") % {'title': _("Edit Tags")}
top = Gtk.Dialog(title)
top.set_default_size(360, 400)
top.set_modal(True)
top.vbox.set_spacing(5)
columns = [('', -1, 300),
(' ', -1, 25, TOGGLE, True, None),
(_('Tag'), -1, 300)]
view = Gtk.TreeView()
self.namemodel = ListModel(view, columns)
slist = Gtk.ScrolledWindow()
slist.add_with_viewport(view)
slist.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
top.vbox.pack_start(slist, 1, 1, 5)
top.add_button(Gtk.STOCK_HELP, Gtk.ResponseType.HELP)
top.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
top.add_button(Gtk.STOCK_OK, Gtk.ResponseType.OK)
top.show_all()
return top
def build_menu_names(self, obj):
"""
Define the menu entry for the ManagedWindows.
"""
return (_("Tag selection"), None)
|
dalejung/trtools
|
trtools/core/tests/test_topper.py
|
Python
|
mit
| 8,499
| 0.004118
|
from unittest import TestCase
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import trtools.core.topper as topper
import imp
imp.reload(topper)
arr = np.random.randn(10000)
s = pd.Series(arr)
df = tm.makeDataFrame()
class TestTopper(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_topn_largest(self):
# get the n largest
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10]
np.testing.assert_almost_equal(bn_res, pd_res)
# change result to biggest to smallest
bn_res = topper.bn_topn(arr, 10, ascending=True)
assert bn_res[-1] == max(arr) # sanity check
pd_res = s.order(ascending=True)[-10:] # grab from end since we reversed
np.testing.assert_almost_equal(bn_res, pd_res)
def test_topn_big_N(self):
"""
When calling topn where N is greater than the number of non-nan values.
This can happen if you're tracking a Frame of returns where not all series start at the same time.
It's possible that in the beginning or end, or anytime for that matter, you might not have enough
values. This screws up the logic.
"""
# test data
arr = np.random.randn(100)
arr[5:] = np.nan # only the first five values are non-nan
s = pd.Series(arr)
# top
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10].dropna()
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10].dropna() # grab from end since we reversed
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_smallest(self):
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# change ordering
bn_res = topper.bn_topn(arr, -10, ascending=False)
assert bn_res[-1] == min(arr) # sanity check
pd_res = s.order(ascending=False)[-10:] # grab from end since we reversed
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_arg(self):
# get the nlargest
bn_res = topper.bn_topn(arr, 10)
bn_args = topper.bn_topargn(arr, 10)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
bn_args = topper.bn_topargn(arr, -10)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
# get the nsmallest
bn_res = topper.bn_topn(arr, -10, ascending=False)
bn_args = topper.bn_topargn(arr, -10, ascending=False)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
def test_nans(self):
"""
bottleneck.partsort doesn't handle nans. We need to correct for them.
the arg version is trickier since we need to make sure to
translate back into the nan-filled array
"""
nanarr = np.arange(10).astype(float)
nanarr[nanarr % 2 == 0] = np.nan
test = topper.topn(nanarr, 3)
correct = [9,7,5]
tm.assert_almost_equal(test, correct)
test = topper.topn(nanarr, -3)
correct = [1,3,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, 3)
correct = [9,7,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, -3)
correct = [1,3,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, -3, ascending=False)
correct = [5,3,1]
tm.assert_almost_equal(test, correct)
def test_df_topn(self):
# long way of getting the topn
tops = df.apply(lambda s: s.topn(2, ascending=False), axis=1)
correct = pd.DataFrame(tops, index=df.index)
test = topper.topn_df(df, 2, ascending=False)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order()[-1]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
# bottom 2
tops = df.apply(lambda s: s.topn(-2), axis=1)
correct = pd.DataFrame(tops, index=df.index)
test = topper.topn_df(df, -2)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order()[0]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
def test_df_topindexn(self):
# long way of getting the topindexn
top_pos = df.apply(lambda s: s.topargn(2, ascending=False), axis=1)
correct = df.columns[top_pos.values]
correct = pd.DataFrame(correct, index=df.index)
test = topper.topindexn_df(df, 2, ascending=False)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order().index[-1]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
# bottom 2
top_pos = df.apply(lambda s: s.topargn(-2), axis=1)
correct = df.columns[top_pos.values]
correct = pd.DataFrame(correct, index=df.index)
test = topper.topindexn_df(df, -2)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order().index[0]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
def test_df_topargn(self):
# really this is tested via topindexn indirectly
pass
def test_default_ascending(self):
"""
Changed ascending to change based on N
More intuitive, by default you'd expect the greatest or lowest
value would be first, depending on which side you are looking for
"""
# top should default to asc=False
bn_res = topper.bn_topn(arr, 10)
pd_res = s.order(ascending=False)[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
|
# make sure ascending is still respected
bn_res = topper.bn_topn(arr, 10, ascending=True)
pd_res = s.order(ascending=True)[-10:]
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom defaults asc=True
bn_res = topper.bn_topn(arr, -10)
pd_res = s.order()[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# make sure ascending is still respected
bn_res = topper.bn_topn(arr, -10, ascending=False)
pd_res = s.order()[:10][::-1]
tm.assert_almost_equal(bn_res, pd_res.values)
def test_test_ndim(self):
"""
Make sure topn and topargn doesn't accept DataFrame
"""
try:
topper.topn(df, 1)
except:
pass
else:
assert False
try:
topper.topargn(df, 1)
except:
pass
else:
assert False
def test_too_big_n_df(self):
df = pd.DataFrame(np.random.randn(100, 10))
df[df > 0] = np.nan
testdf = topper.topn_df(df, 10)
for x in range(len(df)):
correct = df.iloc[x].order(ascending=False).reset_index(drop=True)
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
testdf = topper.topn_df(df, 2)
for x in range(len(df)):
correct = df.iloc[x].order(ascending=False).reset_index(drop=True)[:2]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
# bottom
testdf = topper.topn_df(df, -2)
for x in range(len(df)):
correct = df.iloc[x].order().reset_index(drop=True)[:2]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
# bottom
testdf = topper.topn_df(df,
|
RTHMaK/RPGOne
|
circleci-demo-python-flask-master/app/auth/views.py
|
Python
|
apache-2.0
| 6,044
| 0
|
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('An email with instructions to confirm your new email '
'address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
|
jjdmol/LOFAR
|
SubSystems/LAPS_CEP/test/startPythonFromMsg.py
|
Python
|
gpl-3.0
| 725
| 0.015172
|
#!/usr/bin/python
import sys
from LAPS.MsgBus.Bus import Bus
# Create queue with a unique name
# insert message
# receive msg
# delete queue
if __name__ == "__main__":
# If invoked directly, parse command line arguments for logger information
|
# and pass the rest to the run() method defined above
# --------------------------------------------------------------------------
try:
unique_queue_name = sys.argv[1]
except:
print "Not enough command line arguments: this test needs a unique queue name"
exit(1)
#msgbus = Bus(broker="lhd002", address=unique_queue_name)
#parset = """
#key=value
#"""
#msgbus.send(parset,"Observation123456")
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/distutils/cmd.py
|
Python
|
mit
| 19,279
| 0.004201
|
"""distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
# This module should be kept compatible with Python 1.5.2.
__revision__ = "$Id: cmd.py,v 1.34 2003/02/20 02:10:08 gvanrossum Exp $"
import sys, os, string, re
from types import *
from distutils.errors import *
from distutils import util, dir_util, file_util, archive_util, dep_util
from distutils import log
class Command:
"""Abstract base class for defining command classes, the "worker bees"
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called "options". The options
are "declared" in 'initialize_options()' and "defined" (given their
final values, aka "finalized") in 'finalize_options()', both of which
must be defined by every command class. The distinction between the
two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed *after* these outside influences have
been processed -- hence 'finalize_options()'. The "body" of the
subroutine, where it does all its work based on the values of its
options, is the 'run()' method, which must also be implemented by every
command class.
"""
# 'sub_commands' formalizes the notion of a "family" of commands,
# eg. "install" as the parent with sub-commands "install_lib",
# "install_headers", etc. The parent of a family of commands
# defines 'sub_commands' as a class attribute; it's a list of
# (command_name : string, predicate : unbound_method | string | None)
# tuples, where 'predicate' is a method of the parent command that
# determines whether the corresponding command is applicable in the
# current situation. (Eg. "install_headers" is only applicable if
# we have any C header files to install.) If 'predicate' is None,
# that command is always applicable.
#
# 'sub_commands' is usually defined at the *end* of a class, because
# predicates can be unbound methods, so they must already have been
# defined. The canonical example is the "install" command.
sub_commands = []
# -- Creation/initialization methods -------------------------------
def __init__ (self, dist):
"""Create and initialize a new Command object. Most importantly,
invokes the 'initialize_options()' method, which is the real
initializer and depends on the actual command being
instantiated.
"""
# late import because of mutual dependence between these classes
from distutils.dist import Distribution
if not isinstance(dist, Distribution):
raise TypeError, "dist must be a Distribution instance"
if self.__class__ is Command:
raise RuntimeError, "Command is an abstract class"
self.distribution = dist
self.initialize_options()
# Per-command versions of the global flags, so that the user can
# customize Distutils' behaviour command-by-command and let some
# commands fallback on the Distribution's behaviour. None means
# "not defined, check self.distribution's copy", while 0 or 1 mean
# false and true (duh). Note that this means figuring out the real
# value of each flag is a touch complicated -- hence "self._dry_run"
# will be handled by __getattr__, below.
# XXX This needs to be fixed.
self._dry_run = None
# verbose is largely ignored, but needs to be set for
# backwards compatibility (I think)?
self.verbose = dist.verbose
# Some commands define a 'self.force' option to ignore file
# timestamps, but methods defined *here* assume that
# 'self.force' exists for all commands. So define it here
# just to be safe.
self.force = None
# The 'help' flag is just used for command-line parsing, so
# none of that complicated bureaucracy is needed.
self.help = 0
# 'finalized' records whether or not 'finalize_options()' has been
# called. 'finalize_options()' itself should not pay attention to
# this flag: it is the business of 'ensure_finalized()', which
# always calls 'finalize_options()', to respect/update it.
self.finalized = 0
# __init__ ()
# XXX A more explicit way to customize dry_run would be better.
def __getattr__ (self, attr):
if attr == 'dry_run':
myval = getattr(self, "_" + attr)
if myval is None:
return getattr(self.distribution, attr)
else:
return myval
else:
raise AttributeError, attr
def ensure_finalized (self):
if not self.finalized:
self.finalize_options()
self.finalized = 1
# Subclasses must define:
# initialize_options()
# provide default values for all options; may be customized by
# setup script, by options from config file(s), or by command-line
# options
# finalize_options()
# decide on the final values for all options; this is called
# after all possible intervention from the outside world
# (command-line, option file, etc.) has been processed
# run()
# run the command: do whatever it is we're here to do,
# controlled by the command's various option values
def initialize_options (self):
"""Set default values for all the options that this command
supports. Note that these defaults may be overridden by other
commands, by the setup script, by config files, or by the
command-line. Thus, this is not the place to code dependencies
between options; generally, 'initialize_options()' implementations
are just a bunch of "self.foo = None" assignments.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def finalize_options (self):
"""Set final values for all the options that this command supports.
This is always called as late as possible, ie. after any option
assignments from the command-line or from other commands have been
done. Thus, this is the place to to code option dependencies: if
'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
long as 'foo' still has the same value it was assigned in
'initialize_options()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def dump_options (self, header=None, indent=""):
from distutils.fancy_getopt import longopt_xlate
if header is None:
header = "command options for '%s':" % self.get_command_name()
print indent + header
indent = indent + " "
for (option, _, _) in self.user_options:
option = string.translate(option, longopt_xlate)
if option[-1] == "=":
option = option[:-1]
value = getattr(self, option)
print indent + "%s = %s" % (option, value)
def run (self):
"""A command's raison d'etre: carry out the action it exists to
perform, controlled by the options initialized in
'initialize_options()', customized by other commands, the setup
script, the command-line, and config files, and finalized in
'finalize_options()'. All terminal output and filesystem
interaction should be done by 'run()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def announce (self, msg, level=1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
|
evancich/apm_motor
|
modules/waf/waflib/ConfigSet.py
|
Python
|
gpl-3.0
| 8,007
| 0.037842
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
ConfigSet: a special dict
The values put in :py:class:`ConfigSet` must be lists
"""
import copy, re, os
from waflib import Logs, Utils
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
class ConfigSet(object):
"""
A dict that honor serialization and parent relationships. The serialization format
is human-readable (python-like) and performed by using eval() and repr().
For high performance prefer pickle. Do not store functions as they are not serializable.
The values can be accessed by attributes or by keys::
from waflib.ConfigSet import ConfigSet
env = ConfigSet()
env.FOO = 'test'
env['FOO'] = 'test'
"""
__slots__ = ('table', 'parent')
def __init__(self, filename=None):
self.table = {}
"""
Internal dict holding the object values
"""
#self.parent = None
if filename:
self.load(filename)
def __contains__(self, key):
"""
Enable the *in* syntax::
if 'foo' in env:
print(env['foo'])
"""
if key in self.table: return True
try: return self.parent.__contains__(key)
except AttributeError: return False # parent may not exist
def keys(self):
"""Dict interface (unknown purpose)"""
keys = set()
cur = self
while cur:
keys.update(cur.table.keys())
cur = getattr(cur, 'parent', None)
keys = list(keys)
keys.sort()
return keys
def __str__(self):
"""Text representation of the ConfigSet (for debugging purposes)"""
return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()])
def __getitem__(self, key):
"""
Dictionary interface: get value from key::
def configure(conf):
conf.env['foo'] = {}
print(env['foo'])
"""
try:
while 1:
x = self.table.get(key, None)
if not x is None:
return x
self = self.parent
except AttributeError:
return []
def __setitem__(self, key, value):
"""
Dictionary interface: get value from key
"""
self.table[key] = value
def __delitem__(self, key):
"""
Dictionary interface: get value from key
"""
self[key] = []
def __getattr__(self, name):
"""
Attribute access provided for convenience. The following forms are equivalent::
def configure(conf):
conf.env.value
conf.env['value']
"""
if name in self.__slots__:
return object.__getattr__(self, name)
else:
return self[name]
def __setattr__(self, name, value):
"""
Attribute access provided for convenience. The following forms are equivalent::
def configure(conf):
conf.env.value = x
env['value'] = x
"""
if name in self.__slots__:
object.__setattr__(self, name, value)
else:
self[name] = value
def __delattr__(self, name):
"""
Attribute access provided for convenience. The following forms are equivalent::
def configure(conf):
del env.value
del env['value']
"""
if name in self.__slots__:
object.__delattr__(self, name)
else:
del self[name]
def derive(self):
"""
Returns a new ConfigSet deriving from self. The copy returned
will be a shallow copy::
from waflib.ConfigSet import ConfigSet
env = ConfigSet()
env.append_value('CFLAGS', ['-O2'])
child = env.derive()
child.CFLAGS.append('test') # warning! this will modify 'env'
child.CFLAGS = ['-O3'] # new list, ok
child.append_value('CFLAGS', ['-O3']) # ok
Use :py:func:`ConfigSet.detach` to detach the child from the parent.
"""
newenv = ConfigSet()
newenv.parent = self
return newenv
def detach(self):
"""
Detach self from its parent (if existing)
Modifying the parent :py:class:`ConfigSet` will not change the current object
Modifying this :py:class:`ConfigSet` will not modify the parent one.
"""
tbl = self.get_merged_dict()
try:
delattr(self, 'parent')
except AttributeError:
pass
else:
keys = tbl.keys()
for x in keys:
tbl[x] = copy.deepcopy(tbl[x])
self.table = tbl
return self
def get_flat(self, key):
"""
Return a value as a string. If the input is a list, the value returned is space-separated.
:param key: key to use
:type key: string
"""
s = self[key]
if isinstance(s, str): return s
return ' '.join(s)
def _get_list_value_for_modification(self, key):
"""
Return a list value for further modification.
The list may be modified inplace and there is no need to do this afterwards::
self.table[var] = value
"""
try:
value = self.table[key]
except KeyError:
try: value = self.parent[key]
except AttributeError: value = []
if isinstance(value, list):
value = value[:]
else:
value = [value]
else:
if not isinstance(value, list):
value = [value]
self.table[key] = value
return value
def append_value(self, var, val):
"""
Appends a value to the specified config key::
def build(bld):
bld.env.append_value('CFLAGS', ['-O2'])
The value must be a list or a tuple
"""
if isinstance(val, str): # if there were string everywhere we could optimize this
val = [val]
current_value = self._get_list_value_for_modification(var)
current_value.extend(val)
def prepend_value(self, var, val):
"""
Prepends a value to the specified item::
def configure(conf):
conf.env.prepend_value('CFLAGS', ['-O2'])
The value must be a list or a tuple
"""
if isinstance(val, str):
val = [val]
self.table[var] = val + self._get_list_value_for_modification(var)
def append_unique(self, var, val):
"""
Append a value to the specified item only if it's not already present::
def build(bld):
bld.env.append_unique('CFLAGS', ['-O2', '-g'])
The value must be a list or a tuple
"""
if isinstance(val, str):
val = [val]
current_value = self._get_list_value_for_modification(var)
for x in val:
if x not in current_value:
current_value.append(x)
def get_merged_dict(self):
"""
Compute the merged dictionary from the fusion of self and all its parent
:rtype: a ConfigSet object
"""
table_list = []
env = self
while 1:
table_list.insert(0, env.table)
try: env = env.parent
except AttributeError: break
merged_table = {}
for table in table_list:
merged_table.update(table)
return merged_table
def store(self, filename):
"""
Write the :py:class:`ConfigSet` data into a file. See :py:meth:`ConfigSet.load` for reading such files.
:param filename: file to use
:type filename: string
"""
try:
os.makedirs(os.path.split(filename)[0])
except OSError:
pass
buf = []
merged_table = self.get_merged_dict()
keys = list(merged_table.keys())
keys.sort()
try:
fun = ascii
except NameError:
fun = repr
for k in keys:
if k != 'undo_stack':
buf.append('%s = %s\n' % (k, fun(merged_table[k])))
Utils.writef(filename, ''.join(buf))
def load(self, filename):
"""
Retrieve the :py:class:`ConfigSet` data from a file. See :py:meth:`ConfigSet.store` for writing such files
:param filename: file to use
:type filename: string
"""
tbl = self.table
code = Utils.readf(filename, m='rU')
for m in re_imp.finditer(code):
g = m.group
tbl[g(2)] = eval(g(3))
Logs.debug('env: %s' % str(self.table))
def update(self, d):
"""
Dictionary interface: replace values from another dict
:param d: object to use the value from
:type d: dict-like object
"""
for k, v in d.items():
self[k] = v
def stash(self):
"""
Store the object state, to provide a kind of transaction support::
env = ConfigSet()
env.stash()
try:
env.append_value('CFLAGS', '-O3')
call_some_method(env)
finally:
env.revert()
The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store`
"""
orig = self.table
tbl = self.table = self.table.copy()
for x in tbl.keys():
tbl[x] = copy.deepcopy(tbl[x])
self.undo_stack = self.undo_stack + [orig]
def revert(self):
"""
Reverts the object to a previous state. See :py:meth:`ConfigSet.stash`
"""
self.table = self.undo_stack.pop(-1)
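# Editor's sketch (not part of waflib): a typical store/load round trip using
# only the methods defined above; the cache path is just an example.
def _example_roundtrip(path='/tmp/example_cache.py'):
    env = ConfigSet()
    env.append_value('CFLAGS', ['-O2', '-g'])
    env.store(path)            # written as human-readable "KEY = repr(value)" lines
    loaded = ConfigSet(path)   # __init__ calls load() when a filename is given
    return loaded.get_flat('CFLAGS')   # -> '-O2 -g'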
|
TeamEOS/external_chromium_org
|
tools/perf/benchmarks/scheduler.py
|
Python
|
bsd-3-clause
| 1,116
| 0.006272
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import smoothness
import page_sets
@test.Disabled('linux') # crbug.com/368767
class SchedulerToughSchedulingCases(test.Test):
"""Measures rendering statistics while interacting with pages that have
challenging scheduling properties.
https://docs.google.com/a/chromium.org/document/d/
17yhE5Po9By0sCdM1yZT3LiUECaUr_94rQt9j-4tOQIM/view"""
test = smoothness.Smoothness
page_set = page_sets.ToughSchedulingCasesPageSet
# Pepper plugin is not supported on android.
@test.Disabled('android', 'win') # crbug.com/384733
class SchedulerToughPepperCases(test.Test):
"""Measures rendering statistics while interacting with pages that have
pepper plugins"""
test = smoothness.Smoothness
page_set = page_sets.ToughPepperCasesPageSet
def CustomizeBrowserOptions(self, options):
# This is needed for testing pepper plugin.
options.AppendExtraBrowserArgs('--enable-pepper-testing')
|
UltracoldAtomsLab/labhardware
|
projects/gwmultilog/quickplot.py
|
Python
|
mit
| 1,511
| 0.023825
|
import numpy as np
import pylab as pl
import sys
sys.path.append('../../lablib')
import ourgui
def smoothList(list,strippedXs=False,degree=10):
if strippedXs==True: return Xs[0:-(len(list)-(len(list)-degree+1))]
smoothed=[0]*(len(list)-degree+1)
for i in range(len(smoothed)):
smoothed[i]=sum(list[i:i+degree])/float(degree)
return smoothed
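# Editor's note: smoothList is a plain moving average over a window of `degree`
# samples, e.g. smoothList([1, 2, 3, 4, 5], degree=2) -> [1.5, 2.5, 3.5, 4.5].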
def smoothListGaussian(list,strippedXs=False,degree=5):
window=degree*2-1
weight=np.array([1.0]*window)
weightGauss=[]
for i in range(window):
i=i-degree+1
frac=i/float(window)
gauss=1/(np.exp((4*(frac))**2))
weightGauss.append(gauss)
weight=np.array(weightGauss)*weight
smoothed=[0.0]*(len(list)-window)
for i in range(len(smoothed)):
smoothed[i]=sum(np.array(list[i:i+window])*weight)/sum(weight)
return smoothed
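# Editor's note: smoothListGaussian is a weighted moving average; the weights
# follow exp(-(4*i/window)**2) for offsets i centred on the window, i.e. a
# truncated Gaussian kernel spanning window = 2*degree - 1 samples.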
filename = ourgui.openFile()
dtypes = {'names': ['date', 'value', 'unit'],
'formats': ['f8', 'f4', 'S1']}
data = np.loadtxt(filename,
delimiter=",",
dtype=dtypes,
)
date = data['date']
date -= date[0]
scale = 60
date /= scale
pl.plot(date, data['value'], '.')
degree = 200
sdataG = smoothListGaussian(data['value'], degree=degree)
sdateG = date[degree:(-degree+1)]
sdata = smoothList(data['value'], degree=degree)
sdate = date[degree/2:-degree/2+1]
pl.plot(sdate, sdata, 'g-')
pl.plot(sdateG, sdataG, 'r-')
pl.xlabel("time (min)")
pl.ylabel("thermistor resistance")
pl.show()
|
ioram7/keystone-federado-pgid2013
|
build/passlib/passlib/handlers/bcrypt.py
|
Python
|
apache-2.0
| 13,180
| 0.003794
|
"""passlib.bcrypt -- implementation of OpenBSD's BCrypt algorithm.
TODO:
* support 2x and altered-2a hashes?
http://www.openwall.com/lists/oss-security/2011/06/27/9
* deal with lack of PY3-compatibile c-ext implementation
"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement, absolute_import
# core
import os
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
try:
from bcrypt import hashpw as pybcrypt_hashpw
except ImportError: # pragma: no cover
pybcrypt_hashpw = None
try:
from bcryptor.engine import Engine as bcryptor_engine
except ImportError: # pragma: no cover
bcryptor_engine = None
# pkg
from passlib.exc import PasslibHashWarning
from passlib.utils import bcrypt64, safe_crypt, repeat_string, \
classproperty, rng, getrandstr, test_crypt
from passlib.utils.compat import bytes, b, u, uascii_to_str, unicode, str_to_uascii
import passlib.utils.handlers as uh
# local
__all__ = [
"bcrypt",
]
#=============================================================================
# support funcs & constants
#=============================================================================
_builtin_bcrypt = None
def _load_builtin():
global _builtin_bcrypt
if _builtin_bcrypt is None:
from passlib.utils._blowfish import raw_bcrypt as _builtin_bcrypt
IDENT_2 = u("$2$")
IDENT_2A = u("$2a$")
IDENT_2X = u("$2x$")
IDENT_2Y = u("$2y$")
_BNULL = b('\x00')
#=============================================================================
# handler
#=============================================================================
class bcrypt(uh.HasManyIdents, uh.HasRounds, uh.HasSalt, uh.HasManyBackends, uh.GenericHandler):
"""This class implements the BCrypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 22 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 12, must be between 4 and 31, inclusive.
This value is logarithmic, the actual number of iterations used will be :samp:`2**{rounds}`
-- increasing the rounds by +1 will double the amount of time taken.
:type ident: str
:param ident:
Specifies which version of the BCrypt algorithm will be used when creating a new hash.
Typically this option is not needed, as the default (``"2a"``) is usually the correct choice.
If specified, it must be one of the following:
* ``"2"`` - the first revision of BCrypt, which suffers from a minor security flaw and is generally not used anymore.
* ``"2a"`` - latest revision of the official BCrypt algorithm, and the current default.
* ``"2y"`` - format specific to the *crypt_blowfish* BCrypt implementation,
identical to ``"2a"`` in all but name.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
This class now supports ``"2y"`` hashes, and recognizes
(but does not support) the broken ``"2x"`` hashes.
(see the :ref:`crypt_blowfish bug <crypt-blowfish-bug>`
for details).
.. versionchanged:: 1.6
Added a pure-python backend.
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "bcrypt"
setting_kwds = ("salt", "rounds", "ident")
checksum_size = 31
checksum_chars = bcrypt64.charmap
#--HasManyIdents--
default_ident = u("$2a$")
ident_values = (u("$2$"), IDENT_2A, IDENT_2X, IDENT_2Y)
ident_aliases = {u("2"): u("$2$"), u("2a"): IDENT_2A, u("2y"): IDENT_2Y}
#--HasSalt--
min_salt_size = max_salt_size = 22
salt_chars = bcrypt64.charmap
# NOTE: 22nd salt char must be in bcrypt64._padinfo2[1], not full charmap
#--HasRounds--
default_rounds = 12 # current passlib default
min_rounds = 4 # bcrypt spec specified minimum
max_rounds = 31 # 32-bit integer limit (since real_rounds=1<<rounds)
rounds_cost = "log2"
#===================================================================
# formatting
#===================================================================
@classmethod
def from_string(cls, hash):
ident, tail = cls._parse_ident(hash)
if ident == IDENT_2X:
raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
"currently supported")
rounds_str, data = tail.split(u("$"))
rounds = int(rounds_str)
if rounds_str != u('%02d') % (rounds,):
raise uh.exc.MalformedHashError(cls, "malformed cost field")
salt, chk = data[:22], data[22:]
return cls(
rounds=rounds,
salt=salt,
checksum=chk or None,
ident=ident,
)
def to_string(self):
hash = u("%s%02d$%s%s") % (self.ident, self.rounds, self.salt,
self.checksum or u(''))
return uascii_to_str(hash)
def _get_config(self, ident=None):
"internal helper to prepare config string for backends"
if ident is None:
ident = self.ident
if ident == IDENT_2Y:
ident = IDENT_2A
else:
assert ident != IDENT_2X
config = u("%s%02d$%s") % (ident, self.rounds, self.salt)
return uascii_to_str(config)
#===================================================================
# specialized salt generation - fixes passlib issue 25
#===================================================================
@classmethod
def _bind_needs_update(cls, **settings):
return cls._needs_update
@classmethod
def _needs_update(cls, hash, secret):
if isinstance(hash, bytes):
hash = hash.decode("ascii")
# check for incorrect padding bits (passlib issue 25)
if hash.startswith(IDENT_2A) and hash[28] not in bcrypt64._padinfo2[1]:
return True
# TODO: try to detect incorrect $2x$ hashes using *secret*
return False
@classmethod
def normhash(cls, hash):
"helper to normalize hash, correcting any bcrypt padding bits"
if cls.identify(hash):
return cls.from_string(hash).to_string()
else:
return hash
def _generate_salt(self, salt_size):
# override to correct generate salt bits
salt = super(bcrypt, self)._generate_salt(salt_size)
return bcrypt64.repair_unused(salt)
def _norm_salt(self, salt, **kwds):
salt = super(bcrypt, self)._norm_salt(salt, **kwds)
assert salt is not None, "HasSalt didn't generate new salt!"
changed, salt = bcrypt64.check_repair_unused(salt)
if changed:
# FIXME: if salt was provided by user, this message won't be
# correct. not sure if we want to throw error, or use different warning.
warn(
"encountered a bcrypt salt with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; see Passlib 1.5.3 changelog
|
openregister/food-premises-demo
|
fsa_approved_premises/frontend/views.py
|
Python
|
mit
| 4,293
| 0.002795
|
import collections
import jinja2
import requests
from flask import (
current_app,
Blueprint,
render_template,
request,
jsonify,
abort
)
frontend = Blueprint('frontend', __name__, template_folder='templates')
headers = {"Content-type": "application/json"}
@jinja2.contextfilter
@frontend.app_template_filter()
def format_link(context, value):
items = value.split(':')
register = current_app.config['POAO_SECTION_REGISTER']
return "<a href='%s/products-of-animal-origin-section/%s'>%s</a> %s" % (register, items[0],items[0],items[1])
@frontend.route('/')
def index():
premises_url = current_app.config['PREMISES_REGISTER']
url = "%s/search?_representation=json" % premises_url
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
abort(resp.status_code)
return render_template('index.html', data=resp.json())
@frontend.route('/search')
def search():
query = request.args.get('query', '')
page = request.args.get('page', 0)
premises_url = current_app.config['PREMISES_REGISTER']
url = "%s/search?_query=%s&_page=%s&_representation=json" % (premises_url, query, page)
|
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
abort(resp.status_code)
current_app.logger.info(resp.json())
return jsonify(resp.json())
@frontend.route('/premises/<int:id>')
def premises(id):
premises_register = current_app.config['PREMISES_REGISTER']
|
poao_premises_register = current_app.config['POAO_PREMISES_REGISTER']
address_register = current_app.config['ADDRESS_REGISTER']
food_category_register = current_app.config['FOOD_ESTABLISHMENT_CATEGORY_REGISTER']
try:
premises_url = '%s/premises/%d.json' % (premises_register, id)
resp = requests.get(premises_url, headers=headers)
resp.raise_for_status()
premises = resp.json()
poao_premises_url = '%s/premises/%d.json' % (poao_premises_register, id)
resp = requests.get(poao_premises_url, headers=headers)
resp.raise_for_status()
poao_premises = resp.json()
category_details = _get_category_details(poao_premises)
address_url = '%s/address/%d.json' % (address_register, id)
resp = requests.get(address_url, headers=headers)
resp.raise_for_status()
address = resp.json()
except requests.exceptions.HTTPError as e:
current_app.logger.info(e)
abort(resp.status_code)
return render_template('premises.html',
poao_premises_register=poao_premises_register,
premises=premises, poao_premises=poao_premises,
address=address,
category_details=category_details,
food_category_register=food_category_register)
Category = collections.namedtuple('Category', 'category_key, section_name, activity_name')
# This sort of stuff is a mess.
def _get_category_details(premises):
category_details = []
try:
for category in premises['entry']['food-establishment-categories']:
section_key, activity_key = category.split(':')
section_url = "%s/products-of-animal-origin-section/%s.json" % (current_app.config['POAO_SECTION_REGISTER'], section_key)
activity_url = "%s/products-of-animal-origin-activity/%s.json" % (current_app.config['POAO_ACTIVITY_REGISTER'], activity_key)
section_resp = requests.get(section_url, headers=headers)
activity_resp = requests.get(activity_url, headers=headers)
section_resp.raise_for_status()
activity_resp.raise_for_status()
section = section_resp.json()['entry']
activity = activity_resp.json()['entry']
category = Category(category_key=category,
section_name=section['name'],
activity_name=activity['name'])
category_details.append(category)
current_app.logger.info(category_details)
except requests.exceptions.HTTPError as e:
current_app.logger.info(e)
current_app.logger.info('Not much we can do at this point but return empty category_details')
return category_details
|
kylebegovich/ProjectEuler
|
Python/Progress/Problem719.py
|
Python
|
gpl-3.0
| 1,757
| 0.007968
|
from math import log10, floor
# N = 12
N = 101
# N = 1000001
def n_squares(n):
return [i**2 for i in range(2, n)]
# print(n_squares(11))
# print(n_squares(100))
##### This block from stackoverflow:
# https://stackoverflow.com/questions/37023774/all-ways-to-partition-a-string
import itertools
memo = {}
def multiSlice(s,cutpoints):
k = len(cutpoints)
if k == 0:
return [s]
else:
multislices = [s[:cutpoints[0]]]
multislices.extend(s[cutpoints[i]:cutpoints[i+1]] for i in range(k-1))
multislices.append(s[cutpoints[k-1]:])
return multislices
|
def allPartitions(s):
# if s in memo:
# return memo[s]
n = len(s)
cuts = list(range(1,n))
for k in range(1, n):
for cutpoints in itertools.combinations(cuts,k):
yield multiSlice(s,cutpoints)
##### End block
# print(list(allPartitions([int(i) for i in str(1234)])))
def list_sum(num_list):
outer_sum = 0
for sub_list in num_list:
inner_sum = 0
power = 1
for digit in sub_list[::-1]:
inner_sum += power * digit
power *= 10
outer_sum += inner_sum
return outer_sum
# print(list_sum([[1, 2], [3, 4]]))
# print(list_sum([[1, 2, 3, 4]]))
# print(list_sum([[1], [2], [3], [4]]))
def is_s_num(num):
sqrt = num**0.5
for part in allPartitions([int(i) for i in str(num)]):
if sqrt == list_sum(part):
return True
return False
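# Worked example (editor's note): 8281 = 91**2 and one digit partition is
# 82|8|1 with 82 + 8 + 1 = 91, so is_s_num(8281) returns True and 8281 is
# included in the sum T(N).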
# print(81, is_s_num(81))
# print(64, is_s_num(64))
# print(8281, is_s_num(8281))
# print(9801, is_s_num(9801))
def T(N):
squares = n_squares(N)
sum = 0
for n in squares:
if is_s_num(n):
print(n, "is true")
sum += n
return sum
print(T(N))
|