| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ActiveState/code | recipes/Python/436834_Yet_another_Design_Contract_module/recipe-436834.py | 1 | 16029 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
###############################################################################
#
# Yet another invariant/pre-/postcondition design-by-contract support module.
#
# Written by Dmitry Dvoinikov <dmitry@targeted.org>
# Distributed under MIT license.
#
# The latest version, complete with self-tests can be downloaded from:
# http://www.targeted.org/python/recipes/ipdbc.py
#
# Sample usage:
#
# import ipdbc
#
# class Balloon(ContractBase): # demonstrates class invariant
# def invariant(self):
# return 0 <= self.weight < 1000 # returns True/False
# def __init__(self):
# self.weight = 0
# def fails(self): # upon return this throws PostInvariantViolationError
# self.weight = 1000
#
# class GuidedBalloon(Balloon): # demonstrates pre/post condition
# def pre_drop(self, _weight): # pre_ receives exact copy of arguments
# return self.weight >= _weight # returns True/False
# def drop(self, _weight):
# self.weight -= _weight;
# return self.weight # the result of the call is passed
# def post_drop(self, result, _weight): # as a second parameter to post_
# return result >= 0 # followed again by copy of arguments
#
# Note: GuidedBalloon().fails() still fails, since Balloon's invariant is
# inherited.
# Note: All the dbc infused methods are inherited in the mro-correct way.
# Note: Neither classmethods nor staticmethods are decorated, only "regular"
# instance-bound methods.
#
# (c) 2005, 2006 Dmitry Dvoinikov <dmitry@targeted.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__all__ = ["ContractBase", "ContractViolationError", "InvariantViolationError",
"PreInvariantViolationError", "PostInvariantViolationError",
"PreConditionViolationError", "PostConditionViolationError",
"PreconditionViolationError", "PostconditionViolationError" ]
CONTRACT_CHECKS_ENABLED = True # allows turning contract checks off when needed
###############################################################################
class ContractViolationError(AssertionError): pass
class InvariantViolationError(ContractViolationError): pass
class PreInvariantViolationError(InvariantViolationError): pass
class PostInvariantViolationError(InvariantViolationError): pass
class PreConditionViolationError(ContractViolationError): pass
PreconditionViolationError = PreConditionViolationError # pep 316 calls it such
class PostConditionViolationError(ContractViolationError): pass
PostconditionViolationError = PostConditionViolationError # pep 316 calls it such
###############################################################################
from types import FunctionType
from sys import hexversion
have_python_24 = hexversion >= 0x2040000
################################################################################
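# Small functional helpers used by merged_mro() below: this any() (which
# shadows the builtin) and none()/pick_first() take a sequence plus an optional
# predicate (defaulting to truthiness); empty() simply tests for zero length.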
def any(s, f = lambda e: bool(e)):
for e in s:
if f(e):
return True
else:
return False
################################################################################
def none(s, f = lambda e: bool(e)):
return not any(s, f)
################################################################################
def empty(s):
return len(s) == 0
################################################################################
def pick_first(s, f = lambda e: bool(e)):
for e in s:
if f(e):
return e
else:
return None
################################################################################
if not have_python_24:
def reversed(s):
r = list(s)
r.reverse()
return r
################################################################################
def merged_mro(*classes):
"""
Returns list of all classes' bases merged and mro-correctly ordered,
implemented as per http://www.python.org/2.3/mro.html
"""
if any(classes, lambda c: not isinstance(c, type)):
raise TypeError("merged_mro expects all it's parameters to be classes, got %s" %
pick_first(classes, lambda c: not isinstance(c, type)))
def merge(lists):
result = []
lists = [ (list_[0], list_[1:]) for list_ in lists ]
while not empty(lists):
good_head, tail = pick_first(lists, lambda ht1: none(lists, lambda ht2: ht1[0] in ht2[1])) or (None, None)
if good_head is None:
raise TypeError("Cannot create a consistent method resolution "
"order (MRO) for bases %s" %
", ".join([ cls.__name__ for cls in classes ]))
result += [ good_head ]
i = 0
while i < len(lists):
head, tail = lists[i]
if head == good_head:
if empty(tail):
del(lists[i])
else:
lists[i] = ( tail[0], tail[1:] )
i += 1
else:
i += 1
return result
merged = [ cls.mro() for cls in classes ] + [ list(classes) ]
return merge(merged)
###############################################################################
class ContractFactory(type):
def _wrap(_method, preinvariant, precondition, postcondition, postinvariant,
_classname, _methodname):
def preinvariant_check(result):
if not result:
raise PreInvariantViolationError(
"Class invariant does not hold before a call to %s.%s"
% (_classname, _methodname))
def precondition_check(result):
if not result:
raise PreConditionViolationError(
"Precondition failed before a call to %s.%s"
% (_classname, _methodname))
def postcondition_check(result):
if not result:
raise PostConditionViolationError(
"Postcondition failed after a call to %s.%s"
% (_classname, _methodname))
def postinvariant_check(result):
if not result:
raise PostInvariantViolationError(
"Class invariant does not hold after a call to %s.%s"
% (_classname, _methodname))
if preinvariant is not None and precondition is not None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is not None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is not None and precondition is not None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is not None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
return result
elif preinvariant is not None and precondition is None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is not None and precondition is None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is not None and precondition is None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
preinvariant_check(preinvariant(self))
result = _method(self, *args, **kwargs)
return result
elif preinvariant is None and precondition is not None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is not None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is None and precondition is not None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is not None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
precondition_check(precondition(self, *args, **kwargs))
result = _method(self, *args, **kwargs)
return result
elif preinvariant is None and precondition is None \
and postcondition is not None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is None \
and postcondition is not None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
postcondition_check(postcondition(self, result, *args, **kwargs))
return result
elif preinvariant is None and precondition is None \
and postcondition is None and postinvariant is not None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
postinvariant_check(postinvariant(self))
return result
elif preinvariant is None and precondition is None \
and postcondition is None and postinvariant is None:
def dbc_wrapper(self, *args, **kwargs):
result = _method(self, *args, **kwargs)
return result
if have_python_24:
dbc_wrapper.__name__ = _methodname
return dbc_wrapper
_wrap = staticmethod(_wrap)
def __new__(_class, _name, _bases, _dict):
# because the mro for the class being created is not yet available
# we'll have to build it by hand using our own mro implementation
mro = merged_mro(*_bases) # the lack of _class itself in mro is compensated ...
dict_with_bases = {}
for base in reversed(mro):
if hasattr(base, "__dict__"):
dict_with_bases.update(base.__dict__)
        dict_with_bases.update(_dict) # ... here by explicitly adding its own methods last
try:
invariant = dict_with_bases["invariant"]
except KeyError:
invariant = None
for name, target in dict_with_bases.iteritems():
if isinstance(target, FunctionType) and name != "__del__" and name != "invariant" \
and not name.startswith("pre_") and not name.startswith("post_"):
try:
pre = dict_with_bases["pre_%s" % name]
except KeyError:
pre = None
try:
post = dict_with_bases["post_%s" % name]
except KeyError:
post = None
# note that __del__ is not checked at all
_dict[name] = ContractFactory._wrap(target,
name != "__init__" and invariant or None,
pre or None, post or None, invariant or None,
_name, name)
return super(ContractFactory, _class).__new__(_class, _name, _bases, _dict)
class ContractBase(object):
if CONTRACT_CHECKS_ENABLED:
__metaclass__ = ContractFactory
###############################################################################
| mit | -315,259,735,851,058,900 | 43.401662 | 118 | 0.558924 | false | 4.526687 | false | false | false |
| RobLoach/lutris | lutris/util/display.py | 1 | 3005 |
import subprocess
from lutris.util.log import logger
def get_vidmodes():
xrandr_output = subprocess.Popen(["xrandr"],
stdout=subprocess.PIPE).communicate()[0]
    return xrandr_output.decode().split("\n")
def get_outputs():
"""Return list of tuples containing output name and geometry."""
outputs = []
vid_modes = get_vidmodes()
if not vid_modes:
logger.error("xrandr didn't return anything")
return []
for line in vid_modes:
parts = line.split()
if len(parts) < 2:
continue
if parts[1] == 'connected':
if len(parts) == 2:
continue
geom = parts[2] if parts[2] != 'primary' else parts[3]
if geom.startswith('('): # Screen turned off, no geometry
continue
outputs.append((parts[0], geom))
return outputs
def get_output_names():
return [output[0] for output in get_outputs()]
def turn_off_except(display):
for output in get_outputs():
if output[0] != display:
subprocess.Popen(["xrandr", "--output", output[0], "--off"])
def get_resolutions():
"""Return the list of supported screen resolutions."""
resolution_list = []
for line in get_vidmodes():
if line.startswith(" "):
resolution_list.append(line.split()[0])
return resolution_list
def get_current_resolution(monitor=0):
"""Return the current resolution for the desktop."""
resolution = list()
for line in get_vidmodes():
if line.startswith(" ") and "*" in line:
resolution.append(line.split()[0])
if monitor == 'all':
return resolution
else:
return resolution[monitor]
def change_resolution(resolution):
"""Change display resolution.
Takes a string for single monitors or a list of displays as returned
by get_outputs().
"""
if not resolution:
logger.warning("No resolution provided")
return
if isinstance(resolution, str):
logger.debug("Switching resolution to %s", resolution)
if resolution not in get_resolutions():
logger.warning("Resolution %s doesn't exist." % resolution)
else:
subprocess.Popen(["xrandr", "-s", resolution])
else:
for display in resolution:
display_name = display[0]
logger.debug("Switching to %s on %s", display[1], display[0])
display_geom = display[1].split('+')
display_resolution = display_geom[0]
position = (display_geom[1], display_geom[2])
subprocess.Popen([
"xrandr",
"--output", display_name,
"--mode", display_resolution,
"--pos", "{}x{}".format(position[0], position[1])
]).communicate()
def restore_gamma():
"""Restores gamma to a normal level."""
subprocess.Popen(["xgamma", "-gamma", "1.0"])
| gpl-3.0 | 8,808,335,972,586,504,000 | 29.663265 | 77 | 0.575707 | false | 4.214586 | false | false | false |
| ctsit/redi-dropper-client | app/redidropper/database/crud_mixin.py | 1 | 1438 |
"""
Goal: simplify the code when interacting with entities
Usage when declaring a model:
import db
class MyEntity(db.Model, CRUDMixin):
id = db.Column('myID', db.Integer, primary_key=True)
data = db.Column('myData', db.String(255))
MyTableEntity.create(data="abc")
my = MyTableEntity(data="abc")
my.save(commit=False)
found = MyTableEntity.get_by_id(1) is not None
"""
from redidropper.main import db
class CRUDMixin(object):
""" Helper class flask-sqlalchemy entities """
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, id):
if any(
(isinstance(id, basestring) and id.isdigit(),
isinstance(id, (int, float))),
):
return cls.query.get(int(id))
return None
@classmethod
def create(cls, **kwargs):
""" Helper for session.add() + session.commit() """
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return self.save() if commit else self
def save(self, commit=True):
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
db.session.delete(self)
return commit and db.session.commit()
| bsd-3-clause | -703,312,452,427,858,200 | 25.145455 | 59 | 0.616134 | false | 3.640506 | false | false | false |
| mesosphere/mesos-hydra | mrun.py | 1 | 8006 |
#!/usr/bin/env python
import mesos
import mesos_pb2
import os
import logging
import re
import sys
import time
import math
import threading
import socket
import time
import tempfile
from optparse import OptionParser
from subprocess import *
def printOutput(p):
for line in p.stdout:
print line,
def startMPIExec(procs, slaves, program):
os.symlink(os.getcwd() + '/export', work_dir + "/export")
os.chdir(work_dir)
hosts = ",".join(slaves)
cmd = ["./export/bin/mpiexec.hydra", "-genv", "LD_LIBRARY_PATH", work_dir + "/libs", "-launcher", "manual", "-n", str(procs), "-hosts", str(hosts)]
cmd.extend(program)
p = Popen(cmd, stdout=PIPE)
proxy_args = []
while True:
line = p.stdout.readline()
if line == 'HYDRA_LAUNCH_END\n':
break
proxy_args.append(line)
# Print rest MPI output.
t = threading.Thread(target=printOutput, args=([p]))
t.start()
return proxy_args
def finalizeSlaves(callbacks):
time.sleep(1)
logging.info("Finalize slaves")
hosts = []
for slave in callbacks:
hosts.append(slave[0])
proxy_args = startMPIExec(total_procs, hosts, mpi_program)
proxy_id = 0
for slave in callbacks:
chost = slave[0]
cport = int(slave[1])
proxy_arg = proxy_args[proxy_id]
proxy_id += 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((chost, cport))
request = work_dir + ";" + proxy_arg
s.send(request)
s.close()
# TODO(nnielsen): Add retry logic; slave might not be listening yet.
logging.info("Done finalizing slaves")
class HydraScheduler(mesos.Scheduler):
def __init__(self, options):
self.proxiesLaunched = 0
self.proxiesRunning = 0
self.proxiesFinished = 0
self.options = options
self.startedExec = False
self.slaves = set()
self.callbacks = []
self.finalizeTriggered = False
def registered(self, driver, fid, masterInfo):
logging.info("Registered with framework ID %s" % fid.value)
def resourceOffers(self, driver, offers):
for offer in offers:
if self.proxiesLaunched == total_nodes:
driver.declineOffer(offer.id)
continue
cpus = 0
mem = 0
tasks = []
if offer.hostname in self.slaves:
logging.info("Declining offer: offer from slave already scheduled")
for resource in offer.resources:
if resource.name == "cpus":
cpus = resource.scalar.value
elif resource.name == "mem":
mem = resource.scalar.value
elif resource.name == "ports":
port = resource.ranges.range[0].begin
if cpus < cores_per_node or mem < mem_per_node:
logging.info("Declining offer due to too few resources")
driver.declineOffer(offer.id)
else:
tid = self.proxiesLaunched
self.proxiesLaunched += 1
logging.info("Launching proxy on offer %s from %s" % (offer.id, offer.hostname))
task = mesos_pb2.TaskInfo()
task.task_id.value = str(tid)
task.slave_id.value = offer.slave_id.value
task.name = "task %d " % tid
cpus = task.resources.add()
cpus.name = "cpus"
cpus.type = mesos_pb2.Value.SCALAR
cpus.scalar.value = cores_per_node
mem = task.resources.add()
mem.name = "mem"
mem.type = mesos_pb2.Value.SCALAR
mem.scalar.value = mem_per_node
ports = task.resources.add()
ports.name = "ports"
ports.type = mesos_pb2.Value.RANGES
r = ports.ranges.range.add()
r.begin = port
r.end = port
lib = task.command.environment.variables.add()
lib.name = "LD_LIBRARY_PATH"
lib.value = work_dir + "/libs"
hydra_uri = task.command.uris.add()
hydra_uri.value = "hdfs://" + name_node + "/hydra/hydra.tgz"
executable_uri = task.command.uris.add()
executable_uri.value = "hdfs://" + name_node + "/hydra/" + mpi_program[0]
task.command.value = "python hydra-proxy.py %d" % port
tasks.append(task)
logging.info("Replying to offer: launching proxy %d on host %s" % (tid, offer.hostname))
logging.info("Call-back at %s:%d" % (offer.hostname, port))
self.callbacks.append([offer.hostname, port])
self.slaves.add(offer.hostname)
driver.launchTasks(offer.id, tasks)
def statusUpdate(self, driver, update):
if (update.state == mesos_pb2.TASK_FAILED or
update.state == mesos_pb2.TASK_KILLED or
update.state == mesos_pb2.TASK_LOST):
logging.error("A task finished unexpectedly: " + update.message)
driver.stop()
if (update.state == mesos_pb2.TASK_RUNNING):
self.proxiesRunning += 1
# Trigger real launch when threshold is met.
if self.proxiesRunning >= total_nodes and not self.finalizeTriggered:
self.finalizeTriggered = True
threading.Thread(target = finalizeSlaves, args = ([self.callbacks])).start()
if (update.state == mesos_pb2.TASK_FINISHED):
self.proxiesFinished += 1
if self.proxiesFinished == total_nodes:
logging.info("All processes done, exiting")
driver.stop()
def offerRescinded(self, driver, offer_id):
logging.info("Offer %s rescinded" % offer_id)
if __name__ == "__main__":
parser = OptionParser(usage="Usage: %prog [options] mesos_master mpi_program")
parser.disable_interspersed_args()
parser.add_option("-N", "--nodes",
help="number of nodes to run processes (default 1)",
dest="nodes", type="int", default=1)
parser.add_option("-n", "--num",
help="total number of MPI processes (default 1)",
dest="procs", type="int", default=1)
parser.add_option("-c", "--cpus-per-task",
help="number of cores per MPI process (default 1)",
dest="cores", type="int", default=1)
parser.add_option("-m","--mem",
help="number of MB of memory per MPI process (default 1GB)",
dest="mem", type="int", default=1024)
parser.add_option("--proxy",
help="url to proxy binary", dest="proxy", type="string")
parser.add_option("--name",
help="framework name", dest="name", type="string")
parser.add_option("--hdfs",
help="HDFS Name node", dest="name_node", type="string")
parser.add_option("-p","--path",
help="path to look for MPICH2 binaries (mpiexec)",
dest="path", type="string", default="")
parser.add_option("-v", action="store_true", dest="verbose")
# Add options to configure cpus and mem.
(options,args) = parser.parse_args()
if len(args) < 2:
print >> sys.stderr, "At least two parameters required."
print >> sys.stderr, "Use --help to show usage."
exit(2)
if options.verbose == True:
logging.basicConfig(level=logging.INFO)
total_procs = options.procs
total_nodes = options.nodes
cores = options.cores
    # float division so the per-node count rounds up (Python 2 "/" truncates ints)
    procs_per_node = int(math.ceil(float(total_procs) / total_nodes))
cores_per_node = procs_per_node * cores
mem_per_node = options.mem
mpi_program = args[1:]
name_node = options.name_node
if name_node == None:
name_node = os.environ.get("HDFS_NAME_NODE")
if name_node == None:
print >> sys.stderr, "HDFS name node not found."
exit(2)
logging.info("Connecting to Mesos master %s" % args[0])
logging.info("Total processes %d" % total_procs)
logging.info("Total nodes %d" % total_nodes)
logging.info("Procs per node %d" % procs_per_node)
logging.info("Cores per node %d" % cores_per_node)
scheduler = HydraScheduler(options)
framework = mesos_pb2.FrameworkInfo()
framework.user = ""
if options.name is not None:
framework.name = options.name
else:
framework.name = "MPICH2 Hydra : %s" % mpi_program[0]
work_dir = tempfile.mkdtemp()
driver = mesos.MesosSchedulerDriver(
scheduler,
framework,
args[0])
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
| apache-2.0 | 2,096,012,464,750,074,400 | 30.031008 | 149 | 0.624032 | false | 3.517575 | false | false | false |
| walshjon/openmc | openmc/material.py | 1 | 35739 |
from collections import OrderedDict
from copy import deepcopy
from numbers import Real, Integral
import warnings
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.data
import openmc.checkvalue as cv
from openmc.clean_xml import clean_xml_indentation
from .mixin import IDManagerMixin
# Units for density supported by OpenMC
DENSITY_UNITS = ['g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum',
'macro']
class Material(IDManagerMixin):
"""A material composed of a collection of nuclides/elements.
To create a material, one should create an instance of this class, add
nuclides or elements with :meth:`Material.add_nuclide` or
`Material.add_element`, respectively, and set the total material density
with `Material.set_density()`. The material can then be assigned to a cell
using the :attr:`Cell.fill` attribute.
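    A minimal illustrative sketch of that workflow (the nuclide names and
    fractions below are made-up values, not defaults of this class)::

        fuel = openmc.Material(name='UO2 fuel')
        fuel.add_nuclide('U235', 0.03)
        fuel.add_nuclide('U238', 0.97)
        fuel.add_element('O', 2.0)
        fuel.set_density('g/cm3', 10.5)
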
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
temperature : float, optional
Temperature of the material in Kelvin. If not specified, the material
inherits the default temperature applied to the model.
Attributes
----------
id : int
Unique identifier for the material
temperature : float
Temperature of the material in Kelvin.
density : float
Density of the material (units defined separately)
density_units : str
Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/m3',
'atom/b-cm', 'atom/cm3', 'sum', or 'macro'. The 'macro' unit only
applies in the case of a multi-group calculation.
depletable : bool
Indicate whether the material is depletable.
nuclides : list of tuple
List in which each item is a 3-tuple consisting of a nuclide string, the
percent density, and the percent type ('ao' or 'wo').
isotropic : list of str
Nuclides for which elastic scattering should be treated as though it
were isotropic in the laboratory system.
average_molar_mass : float
The average molar mass of nuclides in the material in units of grams per
mol. For example, UO2 with 3 nuclides will have an average molar mass
of 270 / 3 = 90 g / mol.
volume : float
Volume of the material in cm^3. This can either be set manually or
calculated in a stochastic volume calculation and added via the
:meth:`Material.add_volume_information` method.
paths : list of str
The paths traversed through the CSG tree to reach each material
instance. This property is initialized by calling the
:meth:`Geometry.determine_paths` method.
num_instances : int
The number of instances of this material throughout the geometry.
fissionable_mass : float
Mass of fissionable nuclides in the material in [g]. Requires that the
:attr:`volume` attribute is set.
"""
next_id = 1
used_ids = set()
def __init__(self, material_id=None, name='', temperature=None):
# Initialize class attributes
self.id = material_id
self.name = name
self.temperature = temperature
self._density = None
self._density_units = 'sum'
self._depletable = False
self._paths = None
self._num_instances = None
self._volume = None
self._atoms = {}
self._isotropic = []
# A list of tuples (nuclide, percent, percent type)
self._nuclides = []
# The single instance of Macroscopic data present in this material
# (only one is allowed, hence this is different than _nuclides, etc)
self._macroscopic = None
# If specified, a list of table names
self._sab = []
# If true, the material will be initialized as distributed
self._convert_to_distrib_comps = False
# If specified, this file will be used instead of composition values
self._distrib_otf_file = None
def __repr__(self):
string = 'Material\n'
string += '{: <16}=\t{}\n'.format('\tID', self._id)
string += '{: <16}=\t{}\n'.format('\tName', self._name)
string += '{: <16}=\t{}\n'.format('\tTemperature', self._temperature)
string += '{: <16}=\t{}'.format('\tDensity', self._density)
string += ' [{}]\n'.format(self._density_units)
string += '{: <16}\n'.format('\tS(a,b) Tables')
for sab in self._sab:
string += '{: <16}=\t{}\n'.format('\tS(a,b)', sab)
string += '{: <16}\n'.format('\tNuclides')
for nuclide, percent, percent_type in self._nuclides:
string += '{: <16}'.format('\t{}'.format(nuclide))
string += '=\t{: <12} [{}]\n'.format(percent, percent_type)
if self._macroscopic is not None:
string += '{: <16}\n'.format('\tMacroscopic Data')
string += '{: <16}'.format('\t{}'.format(self._macroscopic))
return string
@property
def name(self):
return self._name
@property
def temperature(self):
return self._temperature
@property
def density(self):
return self._density
@property
def density_units(self):
return self._density_units
@property
def depletable(self):
return self._depletable
@property
def paths(self):
if self._paths is None:
raise ValueError('Material instance paths have not been determined. '
'Call the Geometry.determine_paths() method.')
return self._paths
@property
def num_instances(self):
if self._num_instances is None:
raise ValueError(
'Number of material instances have not been determined. Call '
'the Geometry.determine_paths() method.')
return self._num_instances
@property
def nuclides(self):
return self._nuclides
@property
def isotropic(self):
return self._isotropic
@property
def convert_to_distrib_comps(self):
return self._convert_to_distrib_comps
@property
def distrib_otf_file(self):
return self._distrib_otf_file
@property
def average_molar_mass(self):
# Get a list of all the nuclides, with elements expanded
nuclide_densities = self.get_nuclide_densities()
# Using the sum of specified atomic or weight amounts as a basis, sum
# the mass and moles of the material
mass = 0.
moles = 0.
for nuc, vals in nuclide_densities.items():
if vals[2] == 'ao':
mass += vals[1] * openmc.data.atomic_mass(nuc)
moles += vals[1]
else:
moles += vals[1] / openmc.data.atomic_mass(nuc)
mass += vals[1]
# Compute and return the molar mass
return mass / moles
@property
def volume(self):
return self._volume
@name.setter
def name(self, name):
if name is not None:
cv.check_type('name for Material ID="{}"'.format(self._id),
name, str)
self._name = name
else:
self._name = ''
@temperature.setter
def temperature(self, temperature):
cv.check_type('Temperature for Material ID="{}"'.format(self._id),
temperature, (Real, type(None)))
self._temperature = temperature
@depletable.setter
def depletable(self, depletable):
cv.check_type('Depletable flag for Material ID="{}"'.format(self.id),
depletable, bool)
self._depletable = depletable
@volume.setter
def volume(self, volume):
if volume is not None:
cv.check_type('material volume', volume, Real)
self._volume = volume
@isotropic.setter
def isotropic(self, isotropic):
cv.check_iterable_type('Isotropic scattering nuclides', isotropic,
str)
self._isotropic = list(isotropic)
@property
def fissionable_mass(self):
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
Z = openmc.data.zam(nuc)[0]
if Z >= 90:
density += 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
return density*self.volume
@classmethod
def from_hdf5(cls, group):
"""Create material from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
Returns
-------
openmc.Material
Material instance
"""
mat_id = int(group.name.split('/')[-1].lstrip('material '))
name = group['name'].value.decode() if 'name' in group else ''
density = group['atom_density'].value
nuc_densities = group['nuclide_densities'][...]
nuclides = group['nuclides'].value
# Create the Material
material = cls(mat_id, name)
material.depletable = bool(group.attrs['depletable'])
# Read the names of the S(a,b) tables for this Material and add them
if 'sab_names' in group:
sab_tables = group['sab_names'].value
for sab_table in sab_tables:
name = sab_table.decode()
material.add_s_alpha_beta(name)
# Set the Material's density to atom/b-cm as used by OpenMC
material.set_density(density=density, units='atom/b-cm')
# Add all nuclides to the Material
for fullname, density in zip(nuclides, nuc_densities):
name = fullname.decode().strip()
material.add_nuclide(name, percent=density, percent_type='ao')
return material
def add_volume_information(self, volume_calc):
"""Add volume information to a material.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'material':
if self.id in volume_calc.volumes:
self._volume = volume_calc.volumes[self.id].n
self._atoms = volume_calc.atoms[self.id]
else:
raise ValueError('No volume information found for this material.')
else:
raise ValueError('No volume information found for this material.')
def set_density(self, units, density=None):
"""Set the density of the material
Parameters
----------
units : {'g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum', 'macro'}
Physical units of density.
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
cv.check_value('density units', units, DENSITY_UNITS)
self._density_units = units
if units == 'sum':
if density is not None:
msg = 'Density "{}" for Material ID="{}" is ignored ' \
'because the unit is "sum"'.format(density, self.id)
warnings.warn(msg)
else:
if density is None:
msg = 'Unable to set the density for Material ID="{}" ' \
'because a density value must be given when not using ' \
'"sum" unit'.format(self.id)
raise ValueError(msg)
cv.check_type('the density for Material ID="{}"'.format(self.id),
density, Real)
self._density = density
@distrib_otf_file.setter
def distrib_otf_file(self, filename):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
if not isinstance(filename, str) and filename is not None:
msg = 'Unable to add OTF material file to Material ID="{}" with a ' \
'non-string name "{}"'.format(self._id, filename)
raise ValueError(msg)
self._distrib_otf_file = filename
@convert_to_distrib_comps.setter
def convert_to_distrib_comps(self):
# TODO: remove this when distributed materials are merged
warnings.warn('This feature is not yet implemented in a release '
'version of openmc')
self._convert_to_distrib_comps = True
def add_nuclide(self, nuclide, percent, percent_type='ao'):
"""Add a nuclide to the material
Parameters
----------
nuclide : str
Nuclide to add, e.g., 'Mo95'
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}
'ao' for atom percent and 'wo' for weight percent
"""
cv.check_type('nuclide', nuclide, str)
cv.check_type('percent', percent, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo'})
if self._macroscopic is not None:
msg = 'Unable to add a Nuclide to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
# If nuclide name doesn't look valid, give a warning
try:
Z, _, _ = openmc.data.zam(nuclide)
except ValueError as e:
warnings.warn(str(e))
else:
# For actinides, have the material be depletable by default
if Z >= 89:
self.depletable = True
self._nuclides.append((nuclide, percent, percent_type))
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : str
Nuclide to remove
"""
cv.check_type('nuclide', nuclide, str)
# If the Material contains the Nuclide, delete it
for nuc in self._nuclides:
if nuclide == nuc[0]:
self._nuclides.remove(nuc)
break
def add_macroscopic(self, macroscopic):
"""Add a macroscopic to the material. This will also set the
density of the material to 1.0, unless it has been otherwise set,
as a default for Macroscopic cross sections.
Parameters
----------
macroscopic : str
Macroscopic to add
"""
# Ensure no nuclides, elements, or sab are added since these would be
# incompatible with macroscopics
if self._nuclides or self._sab:
msg = 'Unable to add a Macroscopic data set to Material ID="{}" ' \
'with a macroscopic value "{}" as an incompatible data ' \
'member (i.e., nuclide or S(a,b) table) ' \
'has already been added'.format(self._id, macroscopic)
raise ValueError(msg)
if not isinstance(macroscopic, str):
msg = 'Unable to add a Macroscopic to Material ID="{}" with a ' \
'non-string value "{}"'.format(self._id, macroscopic)
raise ValueError(msg)
if self._macroscopic is None:
self._macroscopic = macroscopic
else:
msg = 'Unable to add a Macroscopic to Material ID="{}". ' \
'Only one Macroscopic allowed per ' \
'Material.'.format(self._id)
raise ValueError(msg)
# Generally speaking, the density for a macroscopic object will
        # be 1.0. Therefore, let's set density to 1.0 so that the user
        # doesn't need to set it unless it's needed.
# Of course, if the user has already set a value of density,
# then we will not override it.
if self._density is None:
self.set_density('macro', 1.0)
def remove_macroscopic(self, macroscopic):
"""Remove a macroscopic from the material
Parameters
----------
macroscopic : str
Macroscopic to remove
"""
if not isinstance(macroscopic, str):
msg = 'Unable to remove a Macroscopic "{}" in Material ID="{}" ' \
'since it is not a string'.format(self._id, macroscopic)
raise ValueError(msg)
# If the Material contains the Macroscopic, delete it
if macroscopic == self._macroscopic:
self._macroscopic = None
def add_element(self, element, percent, percent_type='ao', enrichment=None):
"""Add a natural element to the material
Parameters
----------
element : str
Element to add, e.g., 'Zr'
percent : float
Atom or weight percent
percent_type : {'ao', 'wo'}, optional
'ao' for atom percent and 'wo' for weight percent. Defaults to atom
percent.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
"""
cv.check_type('nuclide', element, str)
cv.check_type('percent', percent, Real)
cv.check_value('percent type', percent_type, {'ao', 'wo'})
if self._macroscopic is not None:
msg = 'Unable to add an Element to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
if enrichment is not None:
if not isinstance(enrichment, Real):
msg = 'Unable to add an Element to Material ID="{}" with a ' \
'non-floating point enrichment value "{}"'\
.format(self._id, enrichment)
raise ValueError(msg)
elif element != 'U':
msg = 'Unable to use enrichment for element {} which is not ' \
'uranium for Material ID="{}"'.format(element, self._id)
raise ValueError(msg)
# Check that the enrichment is in the valid range
cv.check_less_than('enrichment', enrichment, 100./1.008)
cv.check_greater_than('enrichment', enrichment, 0., equality=True)
if enrichment > 5.0:
msg = 'A uranium enrichment of {} was given for Material ID='\
'"{}". OpenMC assumes the U234/U235 mass ratio is '\
'constant at 0.008, which is only valid at low ' \
'enrichments. Consider setting the isotopic ' \
'composition manually for enrichments over 5%.'.\
format(enrichment, self._id)
warnings.warn(msg)
# Make sure element name is just that
if not element.isalpha():
raise ValueError("Element name should be given by the "
"element's symbol, e.g., 'Zr'")
        # Add naturally occurring isotopes
element = openmc.Element(element)
for nuclide in element.expand(percent, percent_type, enrichment):
self.add_nuclide(*nuclide)
def add_s_alpha_beta(self, name, fraction=1.0):
r"""Add an :math:`S(\alpha,\beta)` table to the material
Parameters
----------
name : str
Name of the :math:`S(\alpha,\beta)` table
fraction : float
The fraction of relevant nuclei that are affected by the
:math:`S(\alpha,\beta)` table. For example, if the material is a
block of carbon that is 60% graphite and 40% amorphous then add a
graphite :math:`S(\alpha,\beta)` table with fraction=0.6.
"""
if self._macroscopic is not None:
msg = 'Unable to add an S(a,b) table to Material ID="{}" as a ' \
'macroscopic data-set has already been added'.format(self._id)
raise ValueError(msg)
if not isinstance(name, str):
msg = 'Unable to add an S(a,b) table to Material ID="{}" with a ' \
'non-string table name "{}"'.format(self._id, name)
raise ValueError(msg)
cv.check_type('S(a,b) fraction', fraction, Real)
cv.check_greater_than('S(a,b) fraction', fraction, 0.0, True)
cv.check_less_than('S(a,b) fraction', fraction, 1.0, True)
new_name = openmc.data.get_thermal_name(name)
if new_name != name:
msg = 'OpenMC S(a,b) tables follow the GND naming convention. ' \
'Table "{}" is being renamed as "{}".'.format(name, new_name)
warnings.warn(msg)
self._sab.append((new_name, fraction))
def make_isotropic_in_lab(self):
self.isotropic = [x[0] for x in self._nuclides]
def get_nuclides(self):
"""Returns all nuclides in the material
Returns
-------
nuclides : list of str
List of nuclide names
"""
return [x[0] for x in self._nuclides]
def get_nuclide_densities(self):
"""Returns all nuclides in the material and their densities
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are 3-tuples of
(nuclide, density percent, density percent type)
"""
nuclides = OrderedDict()
for nuclide, density, density_type in self._nuclides:
nuclides[nuclide] = (nuclide, density, density_type)
return nuclides
def get_nuclide_atom_densities(self):
"""Returns all nuclides in the material and their atomic densities in
units of atom/b-cm
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are tuples of
(nuclide, density in atom/b-cm)
"""
        # Expand elements into nuclides
nuclides = self.get_nuclide_densities()
sum_density = False
if self.density_units == 'sum':
sum_density = True
density = 0.
elif self.density_units == 'macro':
density = self.density
elif self.density_units == 'g/cc' or self.density_units == 'g/cm3':
density = -self.density
elif self.density_units == 'kg/m3':
density = -0.001 * self.density
elif self.density_units == 'atom/b-cm':
density = self.density
elif self.density_units == 'atom/cm3' or self.density_units == 'atom/cc':
density = 1.E-24 * self.density
# For ease of processing split out nuc, nuc_density,
        # and nuc_density_type into separate arrays
nucs = []
nuc_densities = []
nuc_density_types = []
for nuclide in nuclides.items():
nuc, nuc_density, nuc_density_type = nuclide[1]
nucs.append(nuc)
nuc_densities.append(nuc_density)
nuc_density_types.append(nuc_density_type)
nucs = np.array(nucs)
nuc_densities = np.array(nuc_densities)
nuc_density_types = np.array(nuc_density_types)
if sum_density:
density = np.sum(nuc_densities)
percent_in_atom = np.all(nuc_density_types == 'ao')
density_in_atom = density > 0.
sum_percent = 0.
# Convert the weight amounts to atomic amounts
if not percent_in_atom:
for n, nuc in enumerate(nucs):
nuc_densities[n] *= self.average_molar_mass / \
openmc.data.atomic_mass(nuc)
            # Now that we have the atomic amounts, let's finish calculating densities
sum_percent = np.sum(nuc_densities)
nuc_densities = nuc_densities / sum_percent
# Convert the mass density to an atom density
if not density_in_atom:
density = -density / self.average_molar_mass * 1.E-24 \
* openmc.data.AVOGADRO
nuc_densities = density * nuc_densities
nuclides = OrderedDict()
for n, nuc in enumerate(nucs):
nuclides[nuc] = (nuc, nuc_densities[n])
return nuclides
def get_mass_density(self, nuclide=None):
"""Return mass density of one or all nuclides
Parameters
----------
        nuclide : str, optional
Nuclide for which density is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Density of the nuclide/material in [g/cm^3]
"""
mass_density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
density_i = 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
if nuclide is None or nuclide == nuc:
mass_density += density_i
return mass_density
def get_mass(self, nuclide=None):
"""Return mass of one or all nuclides.
Note that this method requires that the :attr:`Material.volume` has
already been set.
Parameters
----------
        nuclide : str, optional
Nuclide for which mass is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Mass of the nuclide/material in [g]
"""
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
return self.volume*self.get_mass_density(nuclide)
def clone(self, memo=None):
"""Create a copy of this material with a new unique ID.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Material
The clone of this material
"""
if memo is None:
memo = {}
        # If no memoized clone exists, instantiate one
if self not in memo:
# Temporarily remove paths -- this is done so that when the clone is
# made, it doesn't create a copy of the paths (which are specific to
# an instance)
paths = self._paths
self._paths = None
clone = deepcopy(self)
clone.id = None
clone._num_instances = None
# Restore paths on original instance
self._paths = paths
# Memoize the clone
memo[self] = clone
return memo[self]
def _get_nuclide_xml(self, nuclide, distrib=False):
xml_element = ET.Element("nuclide")
xml_element.set("name", nuclide[0])
if not distrib:
if nuclide[2] == 'ao':
xml_element.set("ao", str(nuclide[1]))
else:
xml_element.set("wo", str(nuclide[1]))
return xml_element
def _get_macroscopic_xml(self, macroscopic):
xml_element = ET.Element("macroscopic")
xml_element.set("name", macroscopic)
return xml_element
def _get_nuclides_xml(self, nuclides, distrib=False):
xml_elements = []
for nuclide in nuclides:
xml_elements.append(self._get_nuclide_xml(nuclide, distrib))
return xml_elements
def to_xml_element(self, cross_sections=None):
"""Return XML representation of the material
Parameters
----------
cross_sections : str
Path to an XML cross sections listing file
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing material data
"""
# Create Material XML element
element = ET.Element("material")
element.set("id", str(self._id))
if len(self._name) > 0:
element.set("name", str(self._name))
if self._depletable:
element.set("depletable", "true")
# Create temperature XML subelement
if self.temperature is not None:
subelement = ET.SubElement(element, "temperature")
subelement.text = str(self.temperature)
# Create density XML subelement
if self._density is not None or self._density_units == 'sum':
subelement = ET.SubElement(element, "density")
if self._density_units != 'sum':
subelement.set("value", str(self._density))
subelement.set("units", self._density_units)
else:
raise ValueError('Density has not been set for material {}!'
.format(self.id))
if not self._convert_to_distrib_comps:
if self._macroscopic is None:
# Create nuclide XML subelements
subelements = self._get_nuclides_xml(self._nuclides)
for subelement in subelements:
element.append(subelement)
else:
# Create macroscopic XML subelements
subelement = self._get_macroscopic_xml(self._macroscopic)
element.append(subelement)
else:
subelement = ET.SubElement(element, "compositions")
comps = []
allnucs = self._nuclides
dist_per_type = allnucs[0][2]
for nuc in allnucs:
if nuc[2] != dist_per_type:
msg = 'All nuclides and elements in a distributed ' \
'material must have the same type, either ao or wo'
raise ValueError(msg)
comps.append(nuc[1])
if self._distrib_otf_file is None:
# Create values and units subelements
subsubelement = ET.SubElement(subelement, "values")
subsubelement.text = ' '.join([str(c) for c in comps])
subsubelement = ET.SubElement(subelement, "units")
subsubelement.text = dist_per_type
else:
# Specify the materials file
subsubelement = ET.SubElement(subelement, "otf_file_path")
subsubelement.text = self._distrib_otf_file
if self._macroscopic is None:
# Create nuclide XML subelements
subelements = self._get_nuclides_xml(self._nuclides,
distrib=True)
for subelement_nuc in subelements:
subelement.append(subelement_nuc)
else:
# Create macroscopic XML subelements
subsubelement = self._get_macroscopic_xml(self._macroscopic)
subelement.append(subsubelement)
if self._sab:
for sab in self._sab:
subelement = ET.SubElement(element, "sab")
subelement.set("name", sab[0])
if sab[1] != 1.0:
subelement.set("fraction", str(sab[1]))
if self._isotropic:
subelement = ET.SubElement(element, "isotropic")
subelement.text = ' '.join(self._isotropic)
return element
class Materials(cv.CheckedList):
"""Collection of Materials used for an OpenMC simulation.
This class corresponds directly to the materials.xml input file. It can be
thought of as a normal Python list where each member is a
:class:`Material`. It behaves like a list as the following example
demonstrates:
>>> fuel = openmc.Material()
>>> clad = openmc.Material()
>>> water = openmc.Material()
>>> m = openmc.Materials([fuel])
>>> m.append(water)
>>> m += [clad]
Parameters
----------
materials : Iterable of openmc.Material
Materials to add to the collection
cross_sections : str
Indicates the path to an XML cross section listing file (usually named
cross_sections.xml). If it is not set, the
:envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for
continuous-energy calculations and
:envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group
calculations to find the path to the HDF5 cross section file.
multipole_library : str
Indicates the path to a directory containing a windowed multipole
cross section library. If it is not set, the
:envvar:`OPENMC_MULTIPOLE_LIBRARY` environment variable will be used. A
multipole library is optional.
"""
def __init__(self, materials=None):
super().__init__(Material, 'materials collection')
self._cross_sections = None
self._multipole_library = None
if materials is not None:
self += materials
@property
def cross_sections(self):
return self._cross_sections
@property
def multipole_library(self):
return self._multipole_library
@cross_sections.setter
def cross_sections(self, cross_sections):
cv.check_type('cross sections', cross_sections, str)
self._cross_sections = cross_sections
@multipole_library.setter
def multipole_library(self, multipole_library):
cv.check_type('cross sections', multipole_library, str)
self._multipole_library = multipole_library
def append(self, material):
"""Append material to collection
Parameters
----------
material : openmc.Material
Material to append
"""
super().append(material)
def insert(self, index, material):
"""Insert material before index
Parameters
----------
index : int
Index in list
material : openmc.Material
Material to insert
"""
super().insert(index, material)
def make_isotropic_in_lab(self):
for material in self:
material.make_isotropic_in_lab()
def _create_material_subelements(self, root_element):
for material in sorted(self, key=lambda x: x.id):
root_element.append(material.to_xml_element(self.cross_sections))
def _create_cross_sections_subelement(self, root_element):
if self._cross_sections is not None:
element = ET.SubElement(root_element, "cross_sections")
element.text = str(self._cross_sections)
def _create_multipole_library_subelement(self, root_element):
if self._multipole_library is not None:
element = ET.SubElement(root_element, "multipole_library")
element.text = str(self._multipole_library)
def export_to_xml(self, path='materials.xml'):
"""Export material collection to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'materials.xml'.
"""
root_element = ET.Element("materials")
self._create_cross_sections_subelement(root_element)
self._create_multipole_library_subelement(root_element)
self._create_material_subelements(root_element)
# Clean the indentation in the file to be user-readable
clean_xml_indentation(root_element)
# Write the XML Tree to the materials.xml file
tree = ET.ElementTree(root_element)
tree.write(path, xml_declaration=True, encoding='utf-8')
| mit | -6,152,514,071,550,136,000 | 33.867317 | 83 | 0.574303 | false | 4.134066 | false | false | false |
| Inspq/ansible | lib/ansible/plugins/connection/netconf.py | 1 | 4517 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import socket
import json
import signal
import logging
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.module_utils.six.moves import StringIO
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml
except ImportError:
raise AnsibleError("ncclient is not installed")
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
logging.getLogger('ncclient').setLevel(logging.INFO)
# Module-level logger used by Connection.log() below.
logger = logging.getLogger(__name__)
class Connection(ConnectionBase):
''' NetConf connections '''
transport = 'netconf'
has_pipelining = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._network_os = self._play_context.network_os or 'default'
display.display('network_os is set to %s' % self._network_os, log_only=True)
self._manager = None
self._connected = False
def log(self, msg):
msg = 'h=%s u=%s %s' % (self._play_context.remote_addr, self._play_context.remote_user, msg)
logger.debug(msg)
def _connect(self):
super(Connection, self)._connect()
        display.display('ssh connection done, starting ncclient', log_only=True)
allow_agent = True
if self._play_context.password is not None:
allow_agent = False
key_filename = None
if self._play_context.private_key_file:
key_filename = os.path.expanduser(self._play_context.private_key_file)
if not self._network_os:
            raise AnsibleConnectionFailure('network_os must be set for netconf connections')
try:
self._manager = manager.connect(
host=self._play_context.remote_addr,
port=self._play_context.port or 830,
username=self._play_context.remote_user,
password=self._play_context.password,
key_filename=str(key_filename),
hostkey_verify=C.HOST_KEY_CHECKING,
look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
allow_agent=allow_agent,
timeout=self._play_context.timeout,
device_params={'name': self._network_os}
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(str(exc))
if not self._manager.connected:
return (1, '', 'not connected')
display.display('ncclient manager object created successfully', log_only=True)
self._connected = True
return (0, self._manager.session_id, '')
def close(self):
if self._manager:
self._manager.close_session()
self._connected = False
super(Connection, self).close()
@ensure_connect
def exec_command(self, request):
"""Sends the request to the node and returns the reply
"""
if request == 'open_session()':
return (0, 'ok', '')
req = to_ele(request)
if req is None:
return (1, '', 'unable to parse request')
try:
reply = self._manager.rpc(req)
except RPCError as exc:
return (1, '', to_xml(exc.xml))
return (0, reply.data_xml, '')
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
| gpl-3.0 | -3,208,970,745,957,846,000 | 31.496403 | 100 | 0.640469 | false | 4.080397 | false | false | false |
| nathanaevitas/odoo | openerp/release.py | 1 | 2634 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
RELEASE_LEVELS = [ALPHA, BETA, RELEASE_CANDIDATE, FINAL] = ['alpha', 'beta', 'candidate', 'final']
RELEASE_LEVELS_DISPLAY = {ALPHA: ALPHA,
BETA: BETA,
RELEASE_CANDIDATE: 'rc',
FINAL: ''}
# version_info format: (MAJOR, MINOR, MICRO, RELEASE_LEVEL, SERIAL)
# inspired by Python's own sys.version_info, in order to be
# properly comparable using normal operators, for example:
# (6,1,0,'beta',0) < (6,1,0,'candidate',1) < (6,1,0,'candidate',2)
# (6,1,0,'candidate',2) < (6,1,0,'final',0) < (6,1,2,'final',0)
version_info = (8, 0, 0, FINAL, 0)
version = '.'.join(map(str, version_info[:2])) + RELEASE_LEVELS_DISPLAY[version_info[3]] + str(version_info[4] or '')
series = serie = major_version = '.'.join(map(str, version_info[:2]))
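# Added note (follows from the expressions above): with version_info set to
# (8, 0, 0, FINAL, 0), version evaluates to '8.0' and series/serie/major_version
# to '8.0'; a '-20151104' suffix is appended to version near the end of this file.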
product_name = 'Odoo'
description = 'Odoo Server'
long_desc = '''Odoo is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and XML-RPC interfaces.
'''
classifiers = """Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU Affero General Public License v3
Programming Language :: Python
"""
url = 'https://www.odoo.com'
author = 'OpenERP S.A.'
author_email = 'info@odoo.com'
license = 'AGPL-3'
nt_service_name = "odoo-server-" + series
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
version += '-20151104'
# hash = 'a69205d'
|
agpl-3.0
| -2,245,725,345,032,258,300
| 44.413793
| 117
| 0.643888
| false
| 3.545087
| false
| false
| false
|
kovidgoyal/kitty
|
kittens/tui/operations.py
|
1
|
11292
|
#!/usr/bin/env python3
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from contextlib import contextmanager
from functools import wraps
from typing import (
IO, Any, Callable, Dict, Generator, Optional, Tuple, TypeVar, Union
)
from kitty.rgb import Color, color_as_sharp, to_color
from kitty.typing import GraphicsCommandType, HandlerType, ScreenSize
from .operations_stub import CMD
GraphicsCommandType, ScreenSize # needed for stub generation
S7C1T = '\033 F'
SAVE_CURSOR = '\0337'
RESTORE_CURSOR = '\0338'
SAVE_PRIVATE_MODE_VALUES = '\033[?s'
RESTORE_PRIVATE_MODE_VALUES = '\033[?r'
SAVE_COLORS = '\033[#P'
RESTORE_COLORS = '\033[#Q'
MODES = dict(
LNM=(20, ''),
IRM=(4, ''),
DECKM=(1, '?'),
DECSCNM=(5, '?'),
DECOM=(6, '?'),
DECAWM=(7, '?'),
DECARM=(8, '?'),
DECTCEM=(25, '?'),
MOUSE_BUTTON_TRACKING=(1000, '?'),
MOUSE_MOTION_TRACKING=(1002, '?'),
MOUSE_MOVE_TRACKING=(1003, '?'),
FOCUS_TRACKING=(1004, '?'),
MOUSE_UTF8_MODE=(1005, '?'),
MOUSE_SGR_MODE=(1006, '?'),
MOUSE_URXVT_MODE=(1015, '?'),
ALTERNATE_SCREEN=(1049, '?'),
BRACKETED_PASTE=(2004, '?'),
)
F = TypeVar('F')
all_cmds: Dict[str, Callable] = {}
def cmd(f: F) -> F:
all_cmds[f.__name__] = f # type: ignore
return f
@cmd
def set_mode(which: str, private: bool = True) -> str:
num, private_ = MODES[which]
return '\033[{}{}h'.format(private_, num)
@cmd
def reset_mode(which: str) -> str:
num, private = MODES[which]
return '\033[{}{}l'.format(private, num)
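# Added illustration (not part of the upstream module): a small self-check of the
# escape sequences produced by set_mode()/reset_mode(), derived from the MODES table.
def _demo_modes() -> None:
    assert set_mode('DECTCEM') == '\033[?25h'    # show cursor (private mode 25)
    assert reset_mode('DECTCEM') == '\033[?25l'  # hide cursor
    assert set_mode('IRM') == '\033[4h'          # non-private modes carry no '?'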
@cmd
def clear_screen() -> str:
return '\033[H\033[2J'
@cmd
def clear_to_end_of_screen() -> str:
return '\033[J'
@cmd
def clear_to_eol() -> str:
return '\033[K'
@cmd
def reset_terminal() -> str:
return '\033]\033\\\033c'
@cmd
def bell() -> str:
return '\a'
@cmd
def beep() -> str:
return '\a'
@cmd
def set_window_title(value: str) -> str:
return '\033]2;' + value.replace('\033', '').replace('\x9c', '') + '\033\\'
@cmd
def set_line_wrapping(yes_or_no: bool) -> str:
return set_mode('DECAWM') if yes_or_no else reset_mode('DECAWM')
@cmd
def set_cursor_visible(yes_or_no: bool) -> str:
return set_mode('DECTCEM') if yes_or_no else reset_mode('DECTCEM')
@cmd
def set_cursor_position(x: int, y: int) -> str: # (0, 0) is top left
return '\033[{};{}H'.format(y + 1, x + 1)
@cmd
def move_cursor_by(amt: int, direction: str) -> str:
suffix = {'up': 'A', 'down': 'B', 'right': 'C', 'left': 'D'}[direction]
return f'\033[{amt}{suffix}'
@cmd
def set_cursor_shape(shape: str = 'block', blink: bool = True) -> str:
val = {'block': 1, 'underline': 3, 'bar': 5}.get(shape, 1)
if not blink:
val += 1
return '\033[{} q'.format(val)
@cmd
def set_scrolling_region(screen_size: Optional['ScreenSize'] = None, top: Optional[int] = None, bottom: Optional[int] = None) -> str:
if screen_size is None:
return '\033[r'
if top is None:
top = 0
if bottom is None:
bottom = screen_size.rows - 1
if bottom < 0:
bottom = screen_size.rows - 1 + bottom
else:
bottom += 1
return '\033[{};{}r'.format(top + 1, bottom + 1)
@cmd
def scroll_screen(amt: int = 1) -> str:
return '\033[' + str(abs(amt)) + ('T' if amt < 0 else 'S')
STANDARD_COLORS = {name: i for i, name in enumerate(
'black red green yellow blue magenta cyan gray'.split())}
STANDARD_COLORS['white'] = STANDARD_COLORS['gray']
UNDERLINE_STYLES = {name: i + 1 for i, name in enumerate(
'straight double curly'.split())}
ColorSpec = Union[int, str, Tuple[int, int, int]]
def color_code(color: ColorSpec, intense: bool = False, base: int = 30) -> str:
if isinstance(color, str):
e = str((base + 60 if intense else base) + STANDARD_COLORS[color])
elif isinstance(color, int):
e = '{}:5:{}'.format(base + 8, max(0, min(color, 255)))
else:
e = '{}:2:{}:{}:{}'.format(base + 8, *color)
return e
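# Added illustration (not part of the upstream module): the three accepted colour
# specifications and the SGR fragments color_code() maps them to.
def _demo_color_code() -> None:
    assert color_code('red') == '31'                   # named colour
    assert color_code('red', intense=True) == '91'     # bright variant
    assert color_code(208) == '38:5:208'               # 256-colour palette index
    assert color_code((255, 0, 0)) == '38:2:255:0:0'   # direct RGB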
@cmd
def sgr(*parts: str) -> str:
return '\033[{}m'.format(';'.join(parts))
@cmd
def colored(
text: str,
color: ColorSpec,
intense: bool = False,
reset_to: Optional[ColorSpec] = None,
reset_to_intense: bool = False
) -> str:
e = color_code(color, intense)
return '\033[{}m{}\033[{}m'.format(e, text, 39 if reset_to is None else color_code(reset_to, reset_to_intense))
@cmd
def faint(text: str) -> str:
return colored(text, 'black', True)
@cmd
def styled(
text: str,
fg: Optional[ColorSpec] = None,
bg: Optional[ColorSpec] = None,
fg_intense: bool = False,
bg_intense: bool = False,
italic: Optional[bool] = None,
bold: Optional[bool] = None,
underline: Optional[str] = None,
underline_color: Optional[ColorSpec] = None,
reverse: Optional[bool] = None
) -> str:
start, end = [], []
if fg is not None:
start.append(color_code(fg, fg_intense))
end.append('39')
if bg is not None:
start.append(color_code(bg, bg_intense, 40))
end.append('49')
if underline_color is not None:
if isinstance(underline_color, str):
underline_color = STANDARD_COLORS[underline_color]
start.append(color_code(underline_color, base=50))
end.append('59')
if underline is not None:
start.append('4:{}'.format(UNDERLINE_STYLES[underline]))
end.append('4:0')
if italic is not None:
s, e = (start, end) if italic else (end, start)
s.append('3')
e.append('23')
if bold is not None:
s, e = (start, end) if bold else (end, start)
s.append('1')
e.append('22')
if reverse is not None:
s, e = (start, end) if reverse else (end, start)
s.append('7')
e.append('27')
if not start:
return text
return '\033[{}m{}\033[{}m'.format(';'.join(start), text, ';'.join(end))
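# Added illustration (not part of the upstream module): styled() wraps the text in
# the opening SGR parameters collected above and the matching reset parameters.
def _demo_styled() -> None:
    assert styled('hi', fg='red', bold=True) == '\033[31;1mhi\033[39;22m'
    assert styled('plain') == 'plain'   # no attributes requested -> text unchanged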
def serialize_gr_command(cmd: Dict[str, Union[int, str]], payload: Optional[bytes] = None) -> bytes:
from .images import GraphicsCommand
gc = GraphicsCommand()
for k, v in cmd.items():
setattr(gc, k, v)
return gc.serialize(payload or b'')
@cmd
def gr_command(cmd: Union[Dict, 'GraphicsCommandType'], payload: Optional[bytes] = None) -> str:
if isinstance(cmd, dict):
raw = serialize_gr_command(cmd, payload)
else:
raw = cmd.serialize(payload or b'')
return raw.decode('ascii')
@cmd
def clear_images_on_screen(delete_data: bool = False) -> str:
from .images import GraphicsCommand
gc = GraphicsCommand()
gc.a = 'd'
gc.d = 'A' if delete_data else 'a'
return gc.serialize().decode('ascii')
def init_state(alternate_screen: bool = True) -> str:
ans = (
S7C1T + SAVE_CURSOR + SAVE_PRIVATE_MODE_VALUES + reset_mode('LNM') +
reset_mode('IRM') + reset_mode('DECKM') + reset_mode('DECSCNM') +
set_mode('DECARM') + set_mode('DECAWM') +
set_mode('DECTCEM') + reset_mode('MOUSE_BUTTON_TRACKING') +
reset_mode('MOUSE_MOTION_TRACKING') + reset_mode('MOUSE_MOVE_TRACKING') +
reset_mode('FOCUS_TRACKING') + reset_mode('MOUSE_UTF8_MODE') +
        reset_mode('MOUSE_SGR_MODE') + reset_mode('MOUSE_URXVT_MODE') +
set_mode('BRACKETED_PASTE') + SAVE_COLORS +
'\033[*x' # reset DECSACE to default region select
)
if alternate_screen:
ans += set_mode('ALTERNATE_SCREEN') + reset_mode('DECOM')
ans += clear_screen()
ans += '\033[>31u' # extended keyboard mode
return ans
def reset_state(normal_screen: bool = True) -> str:
ans = ''
ans += '\033[<u' # restore keyboard mode
if normal_screen:
ans += reset_mode('ALTERNATE_SCREEN')
ans += RESTORE_PRIVATE_MODE_VALUES
ans += RESTORE_CURSOR
ans += RESTORE_COLORS
return ans
@contextmanager
def cursor(write: Callable[[str], None]) -> Generator[None, None, None]:
write(SAVE_CURSOR)
yield
write(RESTORE_CURSOR)
@contextmanager
def alternate_screen(f: Optional[IO[str]] = None) -> Generator[None, None, None]:
f = f or sys.stdout
print(set_mode('ALTERNATE_SCREEN'), end='', file=f)
yield
print(reset_mode('ALTERNATE_SCREEN'), end='', file=f)
@contextmanager
def raw_mode(fd: Optional[int] = None) -> Generator[None, None, None]:
import tty
import termios
if fd is None:
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
yield
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
@cmd
def set_default_colors(
fg: Optional[Union[Color, str]] = None,
bg: Optional[Union[Color, str]] = None,
cursor: Optional[Union[Color, str]] = None,
select_bg: Optional[Union[Color, str]] = None,
select_fg: Optional[Union[Color, str]] = None
) -> str:
ans = ''
def item(which: Optional[Union[Color, str]], num: int) -> None:
nonlocal ans
if which is None:
ans += '\x1b]1{}\x1b\\'.format(num)
else:
if isinstance(which, Color):
q = color_as_sharp(which)
else:
x = to_color(which)
assert x is not None
q = color_as_sharp(x)
ans += '\x1b]{};{}\x1b\\'.format(num, q)
item(fg, 10)
item(bg, 11)
item(cursor, 12)
item(select_bg, 17)
item(select_fg, 19)
return ans
@cmd
def write_to_clipboard(data: Union[str, bytes], use_primary: bool = False) -> str:
if isinstance(data, str):
data = data.encode('utf-8')
from base64 import standard_b64encode
fmt = 'p' if use_primary else 'c'
def esc(chunk: str) -> str:
return '\x1b]52;{};{}\x07'.format(fmt, chunk)
ans = esc('!') # clear clipboard buffer
for chunk in (data[i:i+512] for i in range(0, len(data), 512)):
s = standard_b64encode(chunk).decode('ascii')
ans += esc(s)
return ans
@cmd
def request_from_clipboard(use_primary: bool = False) -> str:
return '\x1b]52;{};?\x07'.format('p' if use_primary else 'c')
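# Added illustration (not part of the upstream module): the OSC 52 sequences emitted
# by the two clipboard helpers above; 'aGk=' is the base64 encoding of 'hi'.
def _demo_clipboard() -> None:
    assert write_to_clipboard('hi') == '\x1b]52;c;!\x07\x1b]52;c;aGk=\x07'
    assert request_from_clipboard(use_primary=True) == '\x1b]52;p;?\x07'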
# Boilerplate to make operations available via Handler.cmd {{{
def writer(handler: HandlerType, func: Callable) -> Callable:
@wraps(func)
def f(*a: Any, **kw: Any) -> None:
handler.write(func(*a, **kw))
return f
def commander(handler: HandlerType) -> CMD:
ans = CMD()
for name, func in all_cmds.items():
setattr(ans, name, writer(handler, func))
return ans
def func_sig(func: Callable) -> Generator[str, None, None]:
import inspect
import re
s = inspect.signature(func)
for val in s.parameters.values():
yield re.sub(r'ForwardRef\([\'"](\w+?)[\'"]\)', r'\1', str(val).replace('NoneType', 'None'))
def as_type_stub() -> str:
ans = [
'from typing import * # noqa',
'from kitty.typing import GraphicsCommandType, ScreenSize',
'from kitty.rgb import Color',
'import kitty.rgb',
]
methods = []
for name, func in all_cmds.items():
args = ', '.join(func_sig(func))
if args:
args = ', ' + args
methods.append(' def {}(self{}) -> str: pass'.format(name, args))
ans += ['', '', 'class CMD:'] + methods
return '\n'.join(ans) + '\n\n\n'
# }}}
|
gpl-3.0
| -8,834,894,098,076,934,000
| 26.144231
| 133
| 0.590418
| false
| 3.067645
| false
| false
| false
|
DYWCn/mxonline
|
MXOnline/apps/origanization/migrations/0001_initial.py
|
1
|
3648
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-24 08:13
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CityDict',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u673a\u6784\u540d\u79f0')),
('desc', models.CharField(max_length=100, verbose_name='\u57ce\u5e02\u63cf\u8ff0')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u52a0\u5165\u65f6\u95f4')),
],
options={
'verbose_name': '\u673a\u6784',
'verbose_name_plural': '\u673a\u6784',
},
),
migrations.CreateModel(
name='CourseOrg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u673a\u6784\u540d\u79f0')),
('desc', models.TextField(verbose_name='\u673a\u6784\u4ecb\u7ecd')),
('click_num', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u91cf')),
('fav_num', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u4eba\u6570')),
('cover_image', models.ImageField(upload_to='org/cover_img/%Y/%m', verbose_name='\u5c01\u9762')),
('address', models.CharField(max_length=100, verbose_name='\u673a\u6784\u5730\u5740')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u52a0\u5165\u65f6\u95f4')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='origanization.CityDict', verbose_name='\u6240\u5728\u57ce\u5e02')),
],
options={
'verbose_name': '\u673a\u6784',
'verbose_name_plural': '\u673a\u6784',
},
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u8bb2\u5e08\u59d3\u540d')),
('work_years', models.IntegerField(default=0, verbose_name='\u5de5\u4f5c\u65f6\u95f4')),
('work_company', models.CharField(max_length=20, verbose_name='\u6240\u5c5e\u673a\u6784')),
('characters', models.CharField(max_length=50, verbose_name='\u6559\u5b66\u7279\u70b9')),
('click_num', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u91cf')),
('fav_num', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u4eba\u6570')),
('cover_image', models.ImageField(upload_to='org/cover_img/%Y/%m', verbose_name='\u5c01\u9762')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u52a0\u5165\u65f6\u95f4')),
('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='origanization.CourseOrg', verbose_name='\u673a\u6784')),
],
options={
'verbose_name': '\u8bb2\u5e08\u4fe1\u606f',
'verbose_name_plural': '\u8bb2\u5e08\u4fe1\u606f',
},
),
]
|
mit
| -4,469,214,834,793,879,000
| 52.647059
| 159
| 0.587171
| false
| 3.313351
| false
| false
| false
|
priyaganti/rockstor-core
|
src/rockstor/storageadmin/urls/share.py
|
1
|
2128
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls import patterns, url
from storageadmin.views import (ShareListView, ShareDetailView, ShareACLView,
SnapshotView, ShareCommandView,)
from django.conf import settings
share_regex = settings.SHARE_REGEX
snap_regex = share_regex
snap_command = 'clone'
share_command = 'rollback|clone'
urlpatterns = patterns(
'',
url(r'^$', ShareListView.as_view(), name='share-view'),
url(r'^/(?P<sname>%s)$' % share_regex, ShareDetailView.as_view(),
name='share-view'),
url(r'^/(?P<sname>%s)/(?P<command>force)$'
% share_regex, ShareDetailView.as_view(),),
# Individual snapshots don't have detailed representation in the web-ui. So
    # there is no need for SnapshotDetailView.
url(r'^/(?P<sname>%s)/snapshots$' % share_regex,
SnapshotView.as_view(), name='snapshot-view'),
url(r'^/(?P<sname>%s)/snapshots/(?P<snap_name>%s)$' % (share_regex,
snap_regex),
SnapshotView.as_view(), name='snapshot-view'),
url(r'^/(?P<sname>%s)/snapshots/(?P<snap_name>%s)/(?P<command>%s)$' %
(share_regex, snap_regex, snap_command), SnapshotView.as_view()),
url(r'^/(?P<sname>%s)/acl$' % share_regex, ShareACLView.as_view(),
name='acl-view'),
url(r'^/(?P<sname>%s)/(?P<command>%s)$' % (share_regex, share_command),
ShareCommandView.as_view()),
)
|
gpl-3.0
| -8,414,302,478,397,085,000
| 39.150943
| 79
| 0.659774
| false
| 3.552588
| false
| false
| false
|
globocom/database-as-a-service
|
dbaas/maintenance/migrations/0015_auto__add_maintenanceparameters.py
|
1
|
5853
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MaintenanceParameters'
db.create_table(u'maintenance_maintenanceparameters', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')
(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')
(auto_now=True, blank=True)),
('parameter_name', self.gf(
'django.db.models.fields.CharField')(max_length=100)),
('function_name', self.gf(
'django.db.models.fields.CharField')(max_length=100)),
('maintenance', self.gf('django.db.models.fields.related.ForeignKey')(
related_name=u'maintenance_params', to=orm['maintenance.Maintenance'])),
))
db.send_create_signal(u'maintenance', ['MaintenanceParameters'])
def backwards(self, orm):
# Deleting model 'MaintenanceParameters'
db.delete_table(u'maintenance_maintenanceparameters')
models = {
u'maintenance.hostmaintenance': {
'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}),
'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenance': {
'Meta': {'object_name': 'Maintenance'},
'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostsid': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '10000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_script': ('django.db.models.fields.TextField', [], {}),
'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenanceparameters': {
'Meta': {'object_name': 'MaintenanceParameters'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance_params'", 'to': u"orm['maintenance.Maintenance']"}),
'parameter_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['maintenance']
|
bsd-3-clause
| -3,582,005,703,774,428,700
| 68.678571
| 191
| 0.572185
| false
| 3.711477
| false
| false
| false
|
PermutaTriangle/Permuta
|
permuta/perm_sets/permset.py
|
1
|
7411
|
import multiprocessing
from itertools import islice
from typing import ClassVar, Dict, Iterable, List, NamedTuple, Optional, Union
from ..patterns import MeshPatt, Perm
from ..permutils import is_finite, is_insertion_encodable, is_polynomial
from .basis import Basis, MeshBasis
class AvBase(NamedTuple):
"""A base class for Av to define instance variables without having to use
__init__ in Av.
"""
basis: Union[Basis, MeshBasis]
cache: List[Dict[Perm, Optional[List[int]]]]
class Av(AvBase):
"""A permutation class defined by its minimal basis."""
_FORBIDDEN_BASIS = Basis(Perm())
_VALUE_ERROR_MSG = "Basis should be non-empty without the empty perm!"
_BASIS_ONLY_MSG = "Only supported for Basis!"
_CLASS_CACHE: ClassVar[Dict[Union[Basis, MeshBasis], "Av"]] = {}
_CACHE_LOCK = multiprocessing.Lock()
def __new__(
cls,
basis: Union[
Basis,
MeshBasis,
Iterable[Perm],
Iterable[Union[Perm, MeshPatt]],
],
) -> "Av":
if not isinstance(basis, (Basis, MeshBasis)):
return Av.from_iterable(basis)
if len(basis) == 0 or basis == Av._FORBIDDEN_BASIS:
raise ValueError(Av._VALUE_ERROR_MSG)
instance = Av._CLASS_CACHE.get(basis)
if instance is None:
new_instance: "Av" = AvBase.__new__(cls, basis, [{Perm(): [0]}])
Av._CLASS_CACHE[basis] = new_instance
return new_instance
return instance
@classmethod
def clear_cache(cls) -> None:
"""Clear the instance cache."""
cls._CLASS_CACHE = {}
@classmethod
def from_string(cls, basis) -> "Av":
"""Create a permutation class from a string. Basis can be either zero or one
        based and separated by anything. MeshBasis is not supported.
"""
return cls(Basis.from_string(basis))
@classmethod
def from_iterable(
cls, basis: Union[Iterable[Perm], Iterable[Union[Perm, MeshPatt]]]
) -> "Av":
"""
Create a permutation class from a basis defined by an iterable of patterns.
"""
if MeshBasis.is_mesh_basis(basis):
return cls(MeshBasis(*basis))
return cls(Basis(*basis))
def is_finite(self) -> bool:
"""Check if the perm class is finite."""
if isinstance(self.basis, MeshBasis):
raise NotImplementedError(Av._BASIS_ONLY_MSG)
return is_finite(self.basis)
def is_polynomial(self) -> bool:
"""Check if the perm class has polynomial growth."""
if isinstance(self.basis, MeshBasis):
raise NotImplementedError(Av._BASIS_ONLY_MSG)
return is_polynomial(self.basis)
def is_insertion_encodable(self) -> bool:
"""Check if the perm class is insertion encodable."""
if isinstance(self.basis, MeshBasis):
raise NotImplementedError(Av._BASIS_ONLY_MSG)
return is_insertion_encodable(self.basis)
def first(self, count: int) -> Iterable[Perm]:
"""Generate the first `count` permutation in this permutation class given
that it has that many, if not all are generated.
"""
yield from islice(self._all(), count)
def of_length(self, length: int) -> Iterable[Perm]:
"""
Generate all perms of a given length that belong to this permutation class.
"""
return iter(self._get_level(length))
def up_to_length(self, length: int) -> Iterable[Perm]:
"""Generate all perms up to and including a given length that
belong to this permutation class.
"""
for n in range(length + 1):
yield from self.of_length(n)
def count(self, length: int) -> int:
"""Return the nubmber of permutations of a given length."""
return len(self._get_level(length))
def enumeration(self, length: int) -> List[int]:
"""Return the enumeration of this permutation class up and including a given
length."""
return [self.count(i) for i in range(length + 1)]
def __contains__(self, other: object):
if isinstance(other, Perm):
return other in self._get_level(len(other))
return False
def is_subclass(self, other: "Av"):
"""Check if a sublcass of another permutation class."""
return all(p1 not in self for p1 in other.basis)
def _ensure_level(self, level_number: int) -> None:
start = max(0, len(self.cache) - 2)
if isinstance(self.basis, Basis):
self._ensure_level_classical_pattern_basis(level_number)
else:
self._ensure_level_mesh_pattern_basis(level_number)
for i in range(start, level_number - 1):
self.cache[i] = {perm: None for perm in self.cache[i]}
def _ensure_level_classical_pattern_basis(self, level_number: int) -> None:
# We build new elements from existing ones
lengths = {len(b) for b in self.basis}
max_size = max(lengths)
for nplusone in range(len(self.cache), level_number + 1):
n = nplusone - 1
new_level: Dict[Perm, Optional[List[int]]] = dict()
last_level = self.cache[-1]
check_length = nplusone in lengths
smaller_elems = {b for b in self.basis if len(b) == nplusone}
def valid_insertions(perm):
# pylint: disable=cell-var-from-loop
res = None
for i in range(max(0, n - max_size), n):
val = perm[i]
subperm = perm.remove(i)
spots = self.cache[n - 1][subperm]
acceptable = [k for k in spots if k <= val]
acceptable.extend(k + 1 for k in spots if k >= val)
if res is None:
res = frozenset(acceptable)
res = res.intersection(acceptable)
if not res:
break
return res if res is not None else range(nplusone)
for perm, lis in last_level.items():
for value in valid_insertions(perm):
new_perm = perm.insert(index=nplusone, new_element=value)
if not check_length or new_perm not in smaller_elems:
new_level[new_perm] = []
assert lis is not None
lis.append(value)
self.cache.append(new_level)
def _ensure_level_mesh_pattern_basis(self, level_number: int) -> None:
self.cache.extend(
{p: None for p in Perm.of_length(i) if p.avoids(*self.basis)}
for i in range(len(self.cache), level_number + 1)
)
def _get_level(self, level_number: int) -> Dict[Perm, Optional[List[int]]]:
with Av._CACHE_LOCK:
self._ensure_level(level_number)
return self.cache[level_number]
def _all(self) -> Iterable[Perm]:
length = 0
while True:
gen = (p for p in self.of_length(length))
first: Optional[Perm] = next(gen, None)
if first is None:
break
yield first
yield from gen
length += 1
def __str__(self) -> str:
return f"Av({','.join(str(p) for p in self.basis)})"
def __repr__(self) -> str:
return f"Av({repr(self.basis)})"
|
bsd-3-clause
| 2,888,055,935,576,423,400
| 36.619289
| 84
| 0.575361
| false
| 3.917019
| false
| false
| false
|
jensck/fluidity
|
fluidity/incubator/next_actions_view.py
|
1
|
4059
|
import gtk
import pango
class NextActionsView(gtk.VBox):
"""Simple class for display of Next Actions"""
def __init__(self):
super(NextActionsView, self).__init__()
self._liststore = gtk.ListStore(bool, str, int, str)
self._treeview = gtk.TreeView()
self._treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self._treeview.set_model(self._liststore)
self._actions = []
# Although this module requires a gtk.ListStore with a fixed format
# (bool, str, int, str), the code for supplying that ListStore
# is in the NextActionsModel module.
# we're assuming that a checkbox in a list of tasks, along with the
# strikethrough text for completed actions, will be enough to let the
# user know what the column is, instead of trying to fit the longer label
done_renderer = gtk.CellRendererToggle()
done_renderer.set_property("activatable", True)
done_renderer.connect("toggled", self.on_done_toggled)
done_column = gtk.TreeViewColumn(None, done_renderer, active=0)
summary_cell = gtk.CellRendererText()
summary_column = gtk.TreeViewColumn(None, summary_cell, text=1)
summary_column.set_cell_data_func(summary_cell, _format_func, data=None)
context_cell = gtk.CellRendererText()
context_column = gtk.TreeViewColumn(None, context_cell, text=3)
context_column.props.sizing = gtk.TREE_VIEW_COLUMN_AUTOSIZE
for col in done_column, context_column, summary_column:
self._treeview.append_column(col)
self._treeview.props.headers_visible = False
self._treeview.props.rules_hint = True
self.pack_start(self._treeview, True, True, 0)
def get_selected_model_objects(self):
"""Return the selected *objects*, ala Kiwi's ObjectList."""
model, selected_rows = self._treeview.get_selection().get_selected_rows()
# have to do this goofy i[0] because .get_selected_rows() returns
# *tuples* (representing "paths" in GTK-speak) with the first member
# as the index of the row
return [self._actions[i[0]] for i in selected_rows]
def set_actions(self, actions):
self.clear()
self._actions.extend(actions)
for action in actions:
self._liststore.append(_convert_na_to_iterable(action))
def clear(self):
self._actions = [] # Gross. Why don't Python lists have a .clear()?
self._liststore.clear()
def on_done_toggled(self, cell_renderer, path, data = None):
action = self._actions[int(path[0])]
action.complete = not action.complete
# self._liststore.row_changed(path, self._liststore.get_iter(path))
# cell_renderer.set_active(action.complete)
self._liststore[path][0] = action.complete
def _convert_na_to_iterable(na):
item = list()
item.append(na.complete)
item.append(na.summary)
item.append(na.priority)
item.append(na.context)
return item
def _format_func(column, cell, model, my_iter):
"""Format gtk.TreeView cell according to priority and completion status
of a gee_tee_dee.NextAction.
"""
# Using this font makes the UltraHeavy, Normal, and UltraLight text
# weights clearly distinguishable from one another.
# cell.set_property("font", "Sans 12")
if (model.get_value(my_iter, 0) == True):
# First check completion status of task (column 0 of the model)
# and set "strikethrough" for display of completed tasks.
cell.set_property("strikethrough", True)
else:
cell.set_property("strikethrough", False)
if model.get_value(my_iter, 2) == 1:
# Now check priority of task and set text weight accordingly.
cell.set_property("weight", pango.WEIGHT_HEAVY)
elif model.get_value(my_iter, 2) == 3:
cell.set_property("weight", pango.WEIGHT_ULTRALIGHT)
else:
cell.set_property("weight", pango.WEIGHT_NORMAL)
|
gpl-3.0
| -4,824,018,486,579,330,000
| 40.845361
| 81
| 0.647204
| false
| 3.765306
| false
| false
| false
|
goldengod/dep
|
plib.py
|
1
|
20556
|
#some import
import numpy
import itertools
import random
import math
import os.path
import urllib2
import sys
#global variables
n = None
N = None
Dk = None
Du = None
Dc = None
e = None
r = None
x = None
xa = None
ASW = None
EXC = None
INS = None
pASW = None
pEXC = None
pINS = None
#help function
def help():
print("""
VARIABLES
---------
n permutation size
N total number of permutations
Dk diameter with Kendall's tau distance
Du diameter with Ulam distance
Dc diameter with Cayley distance
e identity permutation
r identity reversed permutation
x a random permutation
xa antipodal permutation of x with respect to exchanges
ASW set of adjacent swap generators (in tuple normal form)
EXC set of exchange generators (in tuple normal form)
INS set of insertion generators (in tuple normal form)
pASW ASW in permutation form
pEXC EXC in permutation form
pINS INS in permutation form
FUNCTIONS
---------
reset(n) re-initialize environment with a new permutation size
fact(n) factorial of n
inv(x) inverse of x
rev(x) reverse of x
compl(x) complement of x
dot(x,y) composition x*y
prand() a random permutation
isPerm(x) check if the list x is a permutation
asw(i) adjacent swap (i,i+1) [callable also with a tuple arg]
exc(i,j) exchange (i,j) [callable also with a tuple arg]
ins(i,j) insertion (i,j) [callable also with a tuple arg]
asw_nf(i) tuple normal form for adjacent swap (i,j) with j=i+1
exc_nf(t) tuple normal form for exchange (t[0],t[1]) with t[1]>t[0]
ins_nf(t) tuple normal form for insertion (t[0],t[1]) with t[1]>t[0] only for case t[1]=t[0]+1
aswToTuple(x) convert an adjacent swap from permutation form to tuple form
excToTuple(x) convert an exchange from permutation form to tuple form
insToTuple(x) convert an insertion from permutation form to tuple form
swap(x,i,j) swap items at position i and j in x (inplace)
insert(x,i,j) shift item at position i to position j in x (inplace)
dk(x,y) Kendall's tau distance between x and y
du(x,y) Ulam distance between x and y
dc(x,y) Cayley distance between x and y
ninver(x)             number of inversions in x
inver(x) set of inversions in x
ainver(x) list of adjacent inversions in x
lis(x) standard lis of x
llis(x) length of a lis of x
alis(x) all the lis of x
urlis(x)              uniform random lis of x
ind(l,x) indexes of x where values in list l appear
cycles(x) cycle decomposition of x
ncycles(x) number of cycles of the decomposition of x
cycleToPerm(c) build a permutation corresponding to cycle c
prandUnredLis() random permutation whose lis is "unreducible"
hasUnredLis(x) check if the lis of x can be reduced or not
lisldsRedCases(x)     print and return lis/lds reduction cases after applying all insertion generators (if an additional False is passed it doesn't print)
printLisLdsRedCases(d)print the result of "lisldsRedCases"
bothLisLdsIncrease() get a random permutation x + ins generator g such that x*g increases both lis and lds lengths
prand1c()             random permutation with only one cycle
stirling1u(n,k) [n,k] unsigned stirling number of the 1st kind
npermWithCycles(k) number of permutations with k cycles
lds(x) standard lds of x
llds(x) length of a lds of x
alds(x) all the lds of x
urlds(x) uniform random lds of x
mahonian(n,k) [n,k] mahonian number
npermWithInvers(k) number of permutations with k inversions
seqA047874(n,k) [n,k] number of the sequence A047874 (it works only till n=60 and requires the file at https://oeis.org/A047874/b047874.txt or internet)
npermWithLisLength(k) number of permutations with a lis of length k (it works only till n=60 and requires the file at https://oeis.org/A047874/b047874.txt or internet)
applySeq(x,s) return x*s[0]*s[1]*...
composeSeq(s) return s[0]*s[1]*...
mapAswSeq(s) from a sequence of ASW tuples return a sequence of permutations
mapExcSeq(s) from a sequence of EXC tuples return a sequence of permutations
mapInsSeq(s) from a sequence of INS tuples return a sequence of permutations
randbs(x) return a sequence of ASW tuples that sorts x
randDecAsw(x) return a ASW decomposition of x
randss(x) return a sequence of EXC tuples that sorts x
randmergess(x) return a sequence of EXC tuples that UNsorts x
randDecExc(x) return a EXC decomposition of x
randis(x,randlis) return a sequence of INS tuples that sorts x (UNIFORM STEP NOT IMPLEMENTED) (the randlis function as parameter is optional)
randDecIns(x,randlis) return a INS decomposition of x (see randis)
checkAllInsDiamRev() return true if for all permutations x the Ulam distance between x and rev(x) equals the Ulam diameter
ssort(x) return the sequence of EXC using classical selection sort
expInertia(nexp,q) write how many inertia anomalies with q adj.swaps are over nexp random experiments
all_asw_decomp(x) return all the decompositions (using ASW) of x
checkAverage(x,y) check if the a*x+(1-a)*y has equiprobability
perm2str(x) return string representation of the permutation x
""")
#test function
#def test():
# pass
#default permutation size
DEFAULT_n = 10
#reset/init functions (with global variables declaration)
def reset(size=DEFAULT_n):
#global variables
global n,N,Dk,Du,Dc,e,r,ASW,EXC,INS,pASW,pEXC,pINS
#permutation size
n = size
#total number of permutations
N = fact(n)
#diameters
Dk = n*(n-1)/2
Du = n-1
Dc = n-1
#useful permutations
e = range(n)
r = e[::-1]
x = prand()
xa = applySeq(x,mapExcSeq(randmergess(x)))
#generators sets
ASW = set()
for i in range(n-1):
ASW.add((i,i+1))
pASW = sorted(map(lambda p : asw(p), ASW))
EXC = set()
for i in range(n):
for j in range(i+1,n):
EXC.add((i,j))
pEXC = sorted(map(lambda p : exc(p), EXC))
INS = set()
for i in range(n):
for j in filter(lambda j : j!=i and j!=i-1,range(n)):
INS.add((i,j))
pINS = sorted(map(lambda p : ins(p), INS))
#copy variables to the main module scope
import __main__
__main__.n = n
__main__.N = N
__main__.Dk = Dk
__main__.Du = Du
__main__.Dc = Dc
__main__.e = e
__main__.r = r
__main__.x = x
__main__.xa = xa
__main__.ASW = ASW
__main__.pASW = pASW
__main__.EXC = EXC
__main__.pEXC = pEXC
__main__.INS = INS
__main__.pINS = pINS
init = reset
#some basic functions
def fact(n):
return math.factorial(n)
def inv(x):
z = [None]*n
for i in range(n):
z[x[i]] = i
return z
def rev(x):
return x[::-1]
def dot(x,y):
return [x[v] for v in y]
def prand():
return numpy.random.permutation(n).tolist()
def isPerm(x):
return sorted(x)==e
def compl(x):
return [n-1-v for v in x]
#generators to permutation functions
def asw(g1,g2=-1):
if type(g1) is tuple:
return exc(g1[0],g1[0]+1)
else:
return exc(g1,g1+1)
def exc(g1,g2=-1):
if type(g1) is tuple:
i,j = g1
else:
i,j = g1,g2
z = e[:]
z[i],z[j] = z[j],z[i]
return z
def ins(g1,g2=-1):
if type(g1) is tuple:
i,j = g1
else:
i,j = g1,g2
if i<j:
return range(i) + range(i+1,j+1) + [i] + range(j+1,n)
else:
return range(j) + [i] + range(j,i) + range(i+1,n)
def asw_nf(t):
if type(t) is not tuple:
return (t,t+1)
return exc_nf(t)
def exc_nf(t):
return tuple(sorted(t))
def ins_nf(t):
return tuple(sorted(t)) if t[0]==t[1]+1 else t
def aswToTuple(x):
t = excToTuple(x)
if t[1]!=t[0]+1:
print("It is not an adjacent swap!!!")
return (t[0],t[0]+1)
def excToTuple(x):
diff = [i==x[i] for i in range(n)]
if diff.count(False)!=2:
print("It is not an exchange!!!")
return tuple([i for i,v in enumerate(diff) if not v])
def insToTuple(x):
diff = [i==x[i] for i in range(n)]
if diff.count(False)<2:
print("It is not an insertion!!!")
first,last = diff.index(False),n-1-diff[::-1].index(False)
if any(diff[first:last]):
print("It is not an insertion!!!")
if x[first]==first+1: #i<j
if x[first:last-1]!=range(first+1,last):
print("It is not an insertion!!!")
return (first,last)
else: #i>j
if x[first+1:last]!=range(first,last-1) or x[first]!=last:
print("It is not an insertion!!!")
return (last,first)
#swap and insert inplace
def swap(x,i,j=-1):
if j==-1:
j = i+1
x[i],x[j] = x[j],x[i]
def insert(x,i,j):
t = x[i]
del x[i]
x.insert(j,t)
#distances
def dk(x,y):
return ninver(dot(inv(y),x))
def du(x,y):
return n-llis(dot(inv(y),x))
def dc(x,y):
return n-ncycles(dot(inv(y),x))
#inversion function
def ninver(x):
return len(inver(x))
def inver(x):
return set([(i,j) for i,j in itertools.combinations(range(n),2) if x[i]>x[j]])
def ainver(x):
return [(i,i+1) for i in range(n-1) if x[i]>x[i+1]]
#lis functions
def lis(x):
#see http://rosettacode.org/wiki/Longest_increasing_subsequence#Python
X = x[:]
N = len(X)
P = [0 for i in range(N)]
M = [0 for i in range(N+1)]
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo+hi)//2
if (X[M[mid]] < X[i]):
lo = mid+1
else:
hi = mid-1
newL = lo
P[i] = M[newL-1]
M[newL] = i
if (newL > L):
L = newL
S = []
k = M[L]
for i in range(L-1, -1, -1):
S.append(X[k])
k = P[k]
return S[::-1]
def llis(x):
return len(lis(x))
def alis(x):
#see http://stackoverflow.com/questions/9554266/finding-all-possible-longest-increasing-subsequence?rq=1
count = [1]*n
def longestIncreaseSubsequence(seq):
n = len(seq)
for i in range(1,n):
maxi = 0
for j in range(i-1,-1,-1):
if seq[j]<seq[i]:
maxi = max(maxi,count[j])
count[i] = maxi + 1
maxi = 0
for i in range(len(count)):
if count[i]>maxi:
maxi = count[i]
return maxi
def allLIS(a,k,count,arr,maxi,result):
if k==maxi:
lista = []
for i in range(maxi,0,-1):
lista.append(arr[a[i]])
result.append(lista)
else:
k = k+1
candidates = [None]*len(arr)
ncandidate = 0
for i in range(a[k-1],-1,-1):
if count[i]==maxi-k+1 and (arr[i]<arr[a[k-1]] or count[i]==maxi):
candidates[ncandidate] = i
ncandidate = ncandidate + 1
for i in range(0,ncandidate):
a[k] = candidates[i]
allLIS(a,k,count,arr,maxi,result)
maxi = longestIncreaseSubsequence(x)
a = [None]*(maxi+1)
a[0] = len(x)-1
result = []
allLIS(a,0,count,x,maxi,result)
return result
def urlis(x):
return random.choice(alis(x))
def ind(l,x):
return [x.index(v) for v in l]
#cycles functions
def cycles(perm):
#see https://gist.github.com/begriffs/2211881
remain = set(perm)
result = []
while len(remain) > 0:
n = remain.pop()
cycle = [n]
while True:
n = perm[n]
if n not in remain:
break
remain.remove(n)
cycle.append(n)
result.append(cycle)
return result
def ncycles(x):
return len(cycles(x))
def cycleToPerm(c):
z = range(n)
for k in range(len(c)-1):
i = c[k]
j = c[k+1]
z[i] = j
z[c[-1]] = c[0]
return z
#lis reduction functions
def prandUnredLis():
while True:
x = prand()
lx = llis(x)
flag = True
for pins in pINS:
y = dot(x,pins)
ly = llis(y)
if ly<lx:
flag = False
if flag:
return x
def hasUnredLis(x):
lx = llis(x)
for pins in pINS:
y = dot(x,pins)
if llis(y)<lx:
return False
return True
def lisldsRedCases(x,verbose=True):
r = { "<<":0, "<=":0, "<>":0, "=<":0, "==":0, "=>":0, "><":0, ">=":0, ">>":0, "other":0 }
l1x,l2x = llis(x),llds(x)
for g in pINS:
y = dot(x,g)
l1y,l2y = llis(y),llds(y)
if l1y==l1x-1 and l2y==l2x-1:
r["<<"] += 1
elif l1y==l1x-1 and l2y==l2x:
r["<="] += 1
elif l1y==l1x-1 and l2y==l2x+1:
r["<>"] += 1
elif l1y==l1x and l2y==l2x-1:
r["=<"] += 1
elif l1y==l1x and l2y==l2x:
r["=="] += 1
elif l1y==l1x and l2y==l2x+1:
r["=>"] += 1
elif l1y==l1x+1 and l2y==l2x-1:
r["><"] += 1
elif l1y==l1x+1 and l2y==l2x:
r[">="] += 1
elif l1y==l1x+1 and l2y==l2x+1:
r[">>"] += 1
else:
r["other"] += 1
if verbose:
printLisLdsRedCases(r)
return r
def printLisLdsRedCases(d):
print("ID")
print("<< : "+(str(d["<<"] if "<<" in d else 0)))
print("<= : "+(str(d["<="] if "<=" in d else 0)))
print("<> : "+(str(d["<>"] if "<>" in d else 0)))
print("=< : "+(str(d["=<"] if "=<" in d else 0)))
print("== : "+(str(d["=="] if "==" in d else 0)))
print("=> : "+(str(d["=>"] if "=>" in d else 0)))
print(">< : "+(str(d["><"] if "><" in d else 0)))
print(">= : "+(str(d[">="] if ">=" in d else 0)))
print(">> : "+(str(d[">>"] if ">>" in d else 0)))
print("other : "+(str(d["other"] if "other" in d else 0)))
def bothLisLdsIncrease():
while True:
x = prand()
l1x,l2x = llis(x),llds(x)
for g in pINS:
y = dot(x,g)
l1y,l2y = llis(y),llds(y)
if l1y>l1x and l2y>l2x:
return [x,g]
#random permutation with only 1 cycle
def prand1c():
x = [None]*n
c = range(1,n)
i = 0
while c:
j = random.choice(c)
c.remove(j)
x[i] = j
i = j
x[i] = 0
return x
#cycle distribution functions
def stirling1u(n,k):
#stirling number of the 1st kind unsigned
if n==0 and k==0:
return 1
if n==0 or k==0:
return 0
return (n-1)*stirling1u(n-1,k) + stirling1u(n-1,k-1)
def npermWithCycles(k):
return stirling1u(n,k)
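#added check (not in the original module): the unsigned Stirling numbers of the
#first kind count permutations by number of cycles, e.g. 11 of the 24 permutations
#of four elements decompose into exactly two cycles.
def demo_stirling():
    assert stirling1u(4, 2) == 11
    assert sum(stirling1u(4, k) for k in range(1, 5)) == fact(4)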
#lds functions
def lds(x):
return compl(lis(compl(x)))
def llds(x):
return len(lds(x))
def alds(x):
return [compl(l) for l in alis(compl(x))]
def urlds(x):
return compl(urlis(compl(x)))
#inversion distribution functions
def mahonian(n,k):
#see http://stackoverflow.com/questions/19372991/number-of-n-element-permutations-with-exactly-k-inversions
def mahonian_row(n):
i = 1
result = [1]
while i < n:
prev = result[:]
result = [0] * int(1 + ((i + 1) * 0.5) * (i))
m = [1] * (i + 1)
for j in range(len(m)):
for k in range(len(prev)):
result[k+j] += m[j] * prev[k]
i = i + 1
return result
return mahonian_row(n)[k]
def npermWithInvers(k):
return mahonian(n,k)
#lis length distribution function
def seqA047874(n,k):
#see https://oeis.org/A047874 and https://oeis.org/A047874/b047874.txt
if n>60:
print("Impossible to compute this value for n greater than 60")
lineno = n*(n-1)/2 + k
fn = "b047874.txt"
if os.path.exists(fn):
with open(fn,"r") as f:
for line in f:
if int(line.split()[0])==lineno:
return int(line.split()[1])
else:
print "Trying to read the file from web https://oeis.org/A047874/b047874.txt"
un = "https://oeis.org/A047874/b047874.txt"
txt = urllib2.urlopen(un).read().split("\n")
for line in txt:
if int(line.split()[0])==lineno:
return int(line.split()[1])
return -1
def npermWithLisLength(k):
return seqA047874(n,k)
#randomized sorting algorithms
def applySeq(x,s):
z = x[:]
for y in s:
z = dot(z,y)
return z
def composeSeq(s):
return applySeq(e,s)
def mapAswSeq(s):
return map(lambda p : asw(p), s)
def mapExcSeq(s):
return map(lambda p : exc(p), s)
def mapInsSeq(s):
return map(lambda p : ins(p), s)
def randbs(x):
y = x[:]
s = []
ai = ainver(y)
while len(ai)>0:
sw = random.choice(ai)
swap(y,sw[0],sw[1])
ai = ainver(y)
s.append(sw)
return s
def randDecAsw(x):
return randbs(inv(x))
def randss(x):
y = x[:]
s = []
cyc = cycles(y)
while len(cyc)<n:
cyc = filter(lambda c : len(c)>1,cyc)
q = list(numpy.cumsum([len(c)*(len(c)-1)/2 for c in cyc]))
tot = q[-1]
r = random.randint(0,tot-1)
for i in range(len(cyc)):
if r<q[i]:
c = i
c = cyc[c]
i = random.choice(c)
c.remove(i)
j = random.choice(c)
s.append(exc_nf((i,j)))
swap(y,i,j)
cyc = cycles(y)
return s
def randmergess(x):
y = x[:]
s = []
cyc = cycles(y)
while len(cyc)>1:
w = list(numpy.cumsum([len(cyc[k])*(n-len(cyc[k])) for k in range(len(cyc))]))
r = random.randint(0,w[-1]-1)
for c1 in range(len(cyc)):
if r<w[c1]:
break
i = random.choice(cyc[c1])
del cyc[c1]
w = list(numpy.cumsum(map(lambda c : len(c),cyc)))
r = random.randint(0,w[-1]-1)
for c2 in range(len(cyc)):
if r<w[c2]:
break
j = random.choice(cyc[c2])
s.append(exc_nf((i,j)))
swap(y,i,j)
cyc = cycles(y)
return s
def randDecExc(x):
return randss(inv(x))
def randis(x,randlis=urlis):
y = x[:]
s = []
lis = randlis(y)
while len(lis)<n:
u = [i for i in range(n) if i not in lis]
i = random.choice(ind(u,y))
ival = y[i]
for b in range(len(lis)):
if lis[b]>ival:
break
else:
b = len(lis)
if b==0:
a,b = 0,y.index(lis[0])
elif b==len(lis):
a,b = y.index(lis[-1]),n-1
else:
a,b = y.index(lis[b-1]),y.index(lis[b])
if a==b:
j = a
elif i<a:
j = random.randint(a,b-1)
elif i>b:
j = random.randint(a+1,b)
else:
j = None
print("Problem with randis")
s.append(ins_nf((i,j)))
lis.append(ival)
lis = sorted(lis)
insert(y,i,j)
if lis not in alis(y):
print("BIG PROBLEM")
return s
def decInsSeq(x,randlis=urlis):
return randis(inv(x),randlis)
def checkAllInsDiamRev():
#return true if for all permutations x the Ulam distance between x and rev(x) equals the Ulam diameter
#return false otherwise
for p in itertools.permutations(e):
x = list(p)
r = rev(x)
if du(x,r)!=Du:
return False
return True
def ssort(x):
y = x[:]
s = []
for j in range(0,n-1):
imin = j
for i in range(j+1,n):
if y[i]<y[imin]:
imin = i
if imin!=j:
t = y[j]
y[j] = y[imin]
y[imin] = t
s.append(exc_nf((j,imin)))
return s
def expInertia(nexp=1000,q=1):
anomalies = 0
for i in xrange(nexp):
x = prand()
dx = randDecAsw(x)
#y = dot(x,asw(dx[0]))
y = [x[k] for k in xrange(n)] #
for j in xrange(q): #
if j>=len(dx): #
break #
y = dot(y,asw(dx[j])) #
wx = ninver(x)
wy = ninver(y)
#if wy!=wx+1:
if wy!=wx+q: #
anomalies += 1
print "Anomalies: " + str(anomalies) + " / " + str(nexp)
def all_asw_decomp_aux(a,L,sc,ld):
n=len(a)
if L==[]:
ld.append(sc)
else:
for i in L:
L1=[j for j in L if j!=i]
sc1=[i]+sc
swap(a,i,i+1) #scambia(a,i,i+1)
if i<n-2 and i+1 not in L1 and a[i+1]>a[i+2]:
L1.append(i+1)
if i>0 and i-1 not in L1 and a[i-1]>a[i]:
L1.append(i-1)
all_asw_decomp_aux(a,L1,sc1,ld)
swap(a,i,i+1) #scambia(a,i,i+1)
def all_asw_decomp(a):
ld=[]
n=len(a)
L=[i for i in range(n-1) if a[i]>a[i+1]]
all_asw_decomp_aux(a,L,[],ld)
return ld
def perm2str(x):
s = ""
for i in range(len(x)):
s += str(x[i])
if i<len(x)-1:
s += ","
return s
def checkAverage(q):
for j in range(q):
x = prand()
y = prand()
if dk(x,y)<=1:
continue
#x+a*(y-x) and y+(1-a)*(x-y)
#z=y-x=x^-1*y and w=x-y=y^-1*x=inv(y-x)
z = dot(inv(x),y)
w = inv(z)
adz = all_asw_decomp(z)
l = len(adz[0])
k = random.randint(1,l-1) #in [1,l-1]
#k generators from (y-x) and l-k generators from (x-y)
dict1 = {}
dict2 = {}
for d in adz:
zz = x
for i in range(k):
zz = dot(zz,asw(d[i]))
sz = perm2str(zz)
if sz in dict1:
dict1[sz] += 1
else:
dict1[sz] = 1
ww = y
drev = rev(d)
for i in range(l-k):
ww = dot(ww,asw(drev[i]))
sw = perm2str(ww)
if sw in dict2:
dict2[sw] += 1
else:
dict2[sw] = 1
if dict1!=dict2:
return False;
return True
#init the environment and print usage
init()
#test()
help()
|
gpl-2.0
| -3,328,842,759,987,136,500
| 22.886199
| 158
| 0.570393
| false
| 2.430074
| false
| false
| false
|
RobinQuetin/CAIRIS-web
|
cairis/cairis/WeaknessAnalysisPanel.py
|
1
|
1665
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from BasePanel import BasePanel
import Asset
from WeaknessAnalysisNotebook import WeaknessAnalysisNotebook
class WeaknessAnalysisPanel(BasePanel):
def __init__(self,parent,cvName,envName):
BasePanel.__init__(self,parent,armid.WEAKNESSANALYSIS_ID)
self.theAssetId = None
mainSizer = wx.BoxSizer(wx.VERTICAL)
nbBox = wx.StaticBox(self,-1)
nbSizer = wx.StaticBoxSizer(nbBox,wx.VERTICAL)
mainSizer.Add(nbSizer,1,wx.EXPAND)
nbSizer.Add(WeaknessAnalysisNotebook(self,cvName,envName),1,wx.EXPAND)
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
applyButton = wx.Button(self,armid.WEAKNESSANALYSIS_BUTTONCOMMIT_ID,"Apply")
buttonSizer.Add(applyButton)
closeButton = wx.Button(self,wx.ID_CANCEL,"Cancel")
buttonSizer.Add(closeButton)
mainSizer.Add(buttonSizer,0,wx.CENTER)
self.SetSizer(mainSizer)
|
apache-2.0
| -8,500,019,050,988,761,000
| 38.642857
| 80
| 0.76036
| false
| 3.627451
| false
| false
| false
|
gizwits/gservice_sdk_py
|
gservice/api/client.py
|
1
|
1504
|
#coding:utf-8
'''
module::APIClient
~~~~~~~~~~~~~~~~~
request handler
'''
import requests
import json
from ..calls.g_login import login as login_call
class APIClient(object):
def __init__(self):
self.session = requests.Session()
self.token = None
self.uid = None
self.expire_at = None
self.headers = {"Content-Type": "application/json"}
def set_token(self, token):
'''Set token manually to avoid having to login repeatedly'''
self.token = token
self.headers["X-Gizwits-User-token"] = self.token
def login(self, acc, pwd):
'''login to gservice
'''
r = self.send_request(login_call(acc, pwd))
self.set_token(r['token'])
self.uid = r['uid']
self.expire_at = r['expire_at']
def send_request(self, request, timeout=30):
'''
:param request: A prepared Request object for the request.
        :type request: Request
:param timeout: Timeout duration in seconds.
:type timeout: int
:returns: dict
'''
request.headers = self.headers
# Include the session headers in the request
request.headers.update(self.session.headers)
if request.data == []:
            # leave the body empty.
# request.data = json.dumps({})
pass
else:
request.data = json.dumps(request.data)
        r = self.session.send(request.prepare(), timeout=timeout)
        return r.json()
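# Added usage sketch (illustrative only; needs valid Gizwits credentials and network access):
def demo_login(account, password):
    client = APIClient()
    client.login(account, password)   # obtains and stores the user token
    return client.token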
|
mit
| 4,340,423,305,111,544,300
| 26.345455
| 68
| 0.577128
| false
| 4.010667
| false
| false
| false
|
hugoShaka/photo-mailer
|
mailerv2.py
|
1
|
5032
|
# -*- coding: utf-8 -*-
import getpass
import sys, os
import socket
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from iptcinfo import IPTCInfo
import logging
logging.basicConfig(filename='error.log')
class photo:
"""A photo can be sent, it also contains the recipient's email address"""
def __init__(self,fileloc):
self.location=fileloc
try:
self.info=IPTCInfo(self.location)
self.addr=str(self.info.data['object name'])
self.IPTCred=True
except Exception:
print(fileloc+" No valid IPTC tags found.")
self.IPTCred=False
def generateEmail(self,mailaccount,message):
if self.addr=='':
self.email=None
print("Warning no valid email address for : "+self.location)
else:
self.email=mail(self.addr,self.location,mailaccount,message)
self.sent=False
def send(self):
if self.sent:
print('Warning :Re-sending the email')
self.email.send()
self.sent=True
class folder:
"""Contains the path to the photos. Can be scanned to get the photos and can be used to mass edit/send the mails/the photos"""
def __init__(self,pathtofolder):
self.path=pathtofolder
self.files=[]
def scan(self):
howManyImport=0
for root, dirs, files in os.walk(self.path):
for file in files:
if file.endswith(".jpg"):
importedPhoto=photo(os.path.join(root, file))
if importedPhoto.IPTCred:
self.files.append(importedPhoto)
howManyImport+=1
print(str(howManyImport) + " files were sucessfully imported")
def generateEmails(self,mailaccount,message):
for pic in self.files:
pic.generateEmail(mailaccount,message)
class mailaccount:
"""Compte mail et parametres de connexion au serveur
"""
def __init__(self):
self.sender="Club Photo"
self.port="587"
self.smtp="smtp.rez-gif.supelec.fr"
self.login="None"
self.connected=False
def credentials(self):
self.login=raw_input("Login : ")
        self.pwd=getpass.getpass("Password: \n")
def showMailSettings(self):
print("\n--------------------\n MailServer Settings \n--------------------\n")
print("sender : "+self.sender+"\nsmtp server : "+self.smtp+"\nport : "+self.port+"\nlogin : ")
if not self.connected:
print("Status : not connected")
else:
print("Status : connected as : "+self.login)
def editMailSettings(self):
self.sender=raw_input("sender ?")
self.smtp=raw_input("server ?")
self.port=raw_input("port?")
def log(self):
try:
self.mailserver=smtplib.SMTP(self.smtp,self.port)
self.mailserver.ehlo()
self.mailserver.starttls()
self.mailserver.ehlo()
self.mailserver.login(self.login,self.pwd)
self.connected=True
except (socket.error) as err:
print("Socket error:.({0}): {1}".format(e.errno, e.strerror))
self.connected=False
def unlog(self):
self.mailserver.quit()
self.connected=False
class mail:
"""Objet mail qui possède les methodes pour etre envoye, recupere ses parametres d'un objet mailaccount"""
def __init__(self,reciever,photo,mailaccount,message):
if reciever==None:
print("\n /!\ Email not created due to invalid email address")
else:
            self.account=mailaccount
            self.msg=MIMEMultipart()
self.msg['From'] = mailaccount.sender
self.msg['To'] = reciever
self.msg['Subject'] = message.subject
self.msg.attach(MIMEText(message.generate()))
pj=open(photo, 'rb')
self.msg.attach(MIMEApplication(pj.read(),Content_Disposition='attachement;filename="%s"' % os.path.basename(photo),Name=os.path.basename(photo)))
pj.close()
print("Mail to : "+reciever+" successfully generated")
    def send(self):
        """Send the mail object through the account it was generated with"""
        if not self.account.connected:
            self.account.log()
        self.account.mailserver.sendmail(self.account.sender, self.msg['To'], self.msg.as_string())
class message:
"""A class to manage the e-mail text"""
def __init__(self,text='No text'):
self.text=text
self.sign=''
self.subject='Your photo'
def generate(self):
return self.text+'\n-- \n'+self.sign
def main():
print("Not yet")
mailacc=mailaccount()
mailacc.showMailSettings()
if (raw_input("\nEdit mail server settings ? (y/N)")=='y'):
mailacc.editMailSettings()
print("Please enter your credentials")
mailacc.credentials()
print("Testing the settings")
mailacc.log()
if mailacc.connected:
print("\nSuccessfully logged :) \n")
    else:
        print("Exiting")
        sys.exit(1)
pathto=raw_input("Choose your folder")
    currentFolder=folder(pathto)
currentFolder.scan()
currentMessage=message()
currentMessage.text=raw_input("enter the email's body text")
currentFolder.generateEmails(mailacc,currentMessage)
main()
|
apache-2.0
| -2,709,665,014,651,973,000
| 30.248447
| 152
| 0.667661
| false
| 3.496178
| false
| false
| false
|
CWBudde/Ink2SmartCanvas
|
Ink2SmartCanvas/svg/Text.py
|
1
|
1318
|
from ink2canvas.svg.AbstractShape import AbstractShape
class Text(AbstractShape):
def textHelper(self, tspan):
val = ""
if tspan.text:
val += tspan.text
for ts in tspan:
val += self.textHelper(ts)
if tspan.tail:
val += tspan.tail
return val
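    # Recursive text extraction: for nested markup like
    # <tspan>Hello <tspan>big</tspan> world</tspan> this returns "Hello big world"
    # (element text, then each child's text, then that child's tail).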
def setTextStyle(self, style):
keys = ("font-style", "font-weight", "font-size", "font-family")
text = []
for key in keys:
if key in style:
text.append(style[key])
self.canvasContext.setFont(" ".join(text))
def getData(self):
x = self.attr("x")
y = self.attr("y")
return x, y
def draw(self, isClip=False):
x, y = self.getData()
style = self.getStyle()
if self.hasTransform():
transMatrix = self.getTransform()
self.canvasContext.transform(*transMatrix) # unpacks argument list
self.setStyle(style)
self.setTextStyle(style)
for tspan in self.node:
text = self.textHelper(tspan)
_x = float(tspan.get("x"))
_y = float(tspan.get("y"))
self.canvasContext.fillText(text, _x, _y)
self.gradientHelper.setGradientFill()
self.gradientHelper.setGradientStroke()
|
gpl-2.0
| -2,483,348,612,490,102,300
| 29.651163
| 78
| 0.552352
| false
| 3.842566
| false
| false
| false
|
rrah/PyLiteCo
|
__main__.py
|
1
|
1670
|
"""Main launcher for pyliteco.
Author: Robert Walker <rrah99@gmail.com>
Copyright (C) 2015 Robert Walker
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
import pyliteco.watchdog
import pyliteco.version
import sys
if __name__ == '__main__':
formatter_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(formatter_string)
logging.basicConfig(filename = 'pyliteco.log', format = formatter_string)
root = logging.getLogger()
root.setLevel(logging.NOTSET)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
root.addHandler(ch)
logger = logging.getLogger(__name__)
logger.info('Starting up as app. v{}'.format(pyliteco.version.VERSION))
import argparse
parser = argparse.ArgumentParser("Start the pyliteco program")
parser.add_argument('-c', dest = 'config_file_entered', default = None, metavar = 'Config file')
thread = pyliteco.watchdog.Watchdog_Thread(**vars(parser.parse_args()))
thread.start()
|
gpl-2.0
| -2,271,374,031,831,028,500
| 35.326087
| 100
| 0.711976
| false
| 4.1133
| false
| false
| false
|
KayJohnston/jackies-map
|
edi.py
|
1
|
4447
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import shlex
import logging
import time
import cmd
import argparse
import traceback
if __name__ == '__main__':
print("Loading environment...")
import env
log = logging.getLogger("edi")
# Now env is loaded, import the apps
import ship
import edts
import close_to
import coords
import distance
import find
import galmath
import fuel_usage
class EDI(cmd.Cmd):
def __init__(self):
# super (EDI, self).__init__()
cmd.Cmd.__init__(self)
self.prompt = "EDI> "
self.state = {}
def run_application(self, ns, args):
try:
args = shlex.split(args)
app = ns.Application(args, True, self.state)
app.run()
except KeyboardInterrupt:
log.debug("Interrupt detected")
pass
except SystemExit:
pass
except Exception as e:
log.error("Error in application: {}".format(e))
log.debug(traceback.format_exc())
pass
return True
def run_help(self, ns):
try:
ns.Application(['-h'], True, self.state).run()
except SystemExit:
pass
return True
#
# Begin commands
#
def help_edts(self):
return self.run_help(edts)
def do_edts(self, args):
return self.run_application(edts, args)
def help_distance(self):
return self.run_help(distance)
def do_distance(self, args):
return self.run_application(distance, args)
def help_raikogram(self):
return self.help_distance()
def do_raikogram(self, args):
return self.do_distance(args)
def help_close_to(self):
return self.run_help(close_to)
def do_close_to(self, args):
return self.run_application(close_to, args)
def help_coords(self):
return self.run_help(coords)
def do_coords(self, args):
return self.run_application(coords, args)
def help_find(self):
return self.run_help(find)
def do_find(self, args):
return self.run_application(find, args)
def help_galmath(self):
return self.run_help(galmath)
def do_galmath(self, args):
return self.run_application(galmath, args)
def help_fuel_usage(self):
return self.run_help(fuel_usage)
def do_fuel_usage(self, args):
return self.run_application(fuel_usage, args)
def help_set_verbosity(self):
print("usage: set_verbosity N")
print("")
print("Set log level (0-3)")
return True
def do_set_verbosity(self, args):
env.set_verbosity(int(args))
return True
def help_set_ship(self):
print("usage: set_ship -m N -t N -f NC [-c N]")
print("")
print("Set the current ship to be used in other commands")
return True
def do_set_ship(self, args):
ap = argparse.ArgumentParser(fromfile_prefix_chars="@", prog = "set_ship")
ap.add_argument("-f", "--fsd", type=str, required=True, help="The ship's frame shift drive in the form 'A6 or '6A'")
ap.add_argument("-m", "--mass", type=float, required=True, help="The ship's unladen mass excluding fuel")
ap.add_argument("-t", "--tank", type=float, required=True, help="The ship's fuel tank size")
ap.add_argument("-c", "--cargo", type=int, default=0, help="The ship's cargo capacity")
try:
argobj = ap.parse_args(shlex.split(args))
except SystemExit:
return True
s = ship.Ship(argobj.fsd, argobj.mass, argobj.tank, argobj.cargo)
self.state['ship'] = s
print("")
print("Ship [FSD: {0}, mass: {1:.1f}T, fuel: {2:.0f}T]: jump range {3:.2f}Ly ({4:.2f}Ly)".format(s.fsd.drive, s.mass, s.tank_size, s.range(), s.max_range()))
print("")
return True
def help_quit(self):
print("Exit this shell by typing \"exit\", \"quit\" or Control-D.")
return True
def do_quit(self, args):
return False
def help_exit(self):
return self.help_quit()
def do_exit(self, args):
return False
#
# End commands
#
def do_EOF(self, args):
print()
return False
def precmd(self, line):
self.start_time = time.time()
return line
def postcmd(self, retval, line):
if retval is False:
return True
log.debug("Command complete, time taken: {0:.4f}s".format(time.time() - self.start_time))
# Prevent EOF showing up in the list of commands
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
cmds = [c for c in cmds if c != "EOF"]
cmd.Cmd.print_topics(self, header, cmds, cmdlen, maxcol)
if __name__ == '__main__':
env.start()
EDI().cmdloop()
env.stop()
|
bsd-3-clause
| -2,765,339,657,584,099,000
| 22.908602
| 161
| 0.641106
| false
| 3.272259
| false
| false
| false
|
cokelaer/spectrum
|
test/test_correlog.py
|
1
|
1904
|
from spectrum import CORRELOGRAMPSD, CORRELATION, cshift, pcorrelogram, marple_data
from spectrum import data_two_freqs
from pylab import log10, plot, savefig, linspace
from numpy.testing import assert_array_almost_equal, assert_almost_equal
def test_correlog():
psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
assert_almost_equal(psd[0], 0.138216970)
assert_almost_equal(psd[1000-1], 7.900110787)
assert_almost_equal(psd[2000-1], 0.110103858)
assert_almost_equal(psd[3000-1], 0.222184134)
assert_almost_equal(psd[4000-1], -0.036255277)
assert_almost_equal(psd[4096-1], 0.1391839711)
return psd
def test_correlog_auto_cross():
"""Same as test_correlog but x and y provided"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16)
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16)
assert_array_almost_equal(psd1, psd2)
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='CORRELATION')
assert_array_almost_equal(psd1, psd2)
def test_correlog_correlation_method():
"""test correlogramPSD playing with method argument"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='xcorr')
assert_array_almost_equal(psd1, psd2)
def test_pcorrelogram_class():
p = pcorrelogram(marple_data, lag=16)
p()
print(p)
p = pcorrelogram(data_two_freqs(), lag=16)
p.plot()
print(p)
def test_CORRELOGRAMPSD_others():
p = CORRELOGRAMPSD(marple_data, marple_data, lag=16, NFFT=None)
def create_figure():
psd = test_correlog()
f = linspace(-0.5, 0.5, len(psd))
    psd = cshift(psd, len(psd)//2)
plot(f, 10*log10(psd/max(psd)))
savefig('psd_corr.png')
if __name__ == "__main__":
create_figure()
|
bsd-3-clause
| -7,416,000,210,789,189,000
| 33.618182
| 93
| 0.697479
| false
| 2.708393
| true
| false
| false
|
pklaus/PyOscilloskop
|
gui/rigolUi.py
|
1
|
3717
|
#!/usr/bin/python
# -*- encoding: UTF8 -*-
# pyOscilloskop
#
# Copyright (19.2.2011) Sascha Brinkmann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import os
from pyoscilloskop import rigolScope
from pyoscilloskop import RigolError
class RigolUI(object):
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file("oscilloskopControl.glade")
self.builder.connect_signals(self)
self.win = self.builder.get_object('window1')
def showOscilloskopInformations(self):
scope = self.scope
builder = self.builder
builder.get_object("labelConnectedToDevice").set_text(scope.getName() + " (" + scope.getDevice() + ")")
builder.get_object("checkChannel1Showchannel").set_active(scope.getChannel1().isChannelActive())
builder.get_object("textChannel1Voltage").set_text(str(scope.getChannel1().getVoltageScale()) + " V/DIV")
builder.get_object("textChannel1Offset").set_text(str(scope.getChannel1().getVoltageOffset()) + " V")
builder.get_object("checkChannel2Showchannel").set_active(scope.getChannel2().isChannelActive())
builder.get_object("textChannel2Voltage").set_text(str(scope.getChannel2().getVoltageScale()) + " V/DIV")
builder.get_object("textChannel2Offset").set_text(str(scope.getChannel2().getVoltageOffset()) + " V")
builder.get_object("textTimeAxisScale").set_text(str(scope.getTimeScale()) + "S/DIV")
builder.get_object("textTimeAxisOffset").set_text(str(scope.getTimescaleOffset()) + " S")
scope.reactivateControlButtons()
def run(self):
try:
self.scope = rigolScope.RigolScope('/dev/ttyUSB0')
## To get more debug output, do:
self.scope.debugLevel = 5
self.win.set_title("Oscilloskope remote control")
self.figureCounter = 1
self.showOscilloskopInformations()
except RigolError as e:
self.info_msg("You have to turn on your scope and connect it to the computer.\n\n%s" % e, gtk.MESSAGE_ERROR)
self.quit()
try:
gtk.main()
except KeyboardInterrupt:
pass
def quit(self):
gtk.main_quit()
def on_window1_delete_event(self, *args):
self.quit()
def info_msg(self, msg, messageType=gtk.MESSAGE_INFO):
dlg = gtk.MessageDialog(parent=self.win, type=messageType, buttons=gtk.BUTTONS_OK, message_format=msg)
dlg.run()
dlg.destroy()
def on_buttonShow_clicked(self, *args):
self.plotFigure()
def plotFigure(self):
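        # Builds a flag string and shells out to the companion CLI script; a resulting
        # call might look like "rigolCli.py -p -r -2" (plot, restart acquisition, and
        # skip channel 2 -- assuming rigolCli.py interprets the flags this way).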
print("Plot figure")
parameter = " -p"
if(self.builder.get_object("checkRestartAfterAquring").get_active()):
parameter += " -r"
if(not(self.builder.get_object("checkChannel1Showchannel").get_active())):
parameter += " -1"
if(not(self.builder.get_object("checkChannel2Showchannel").get_active())):
parameter += " -2"
os.system("rigolCli.py " + parameter)
if __name__ == '__main__':
rigolUiApp = RigolUI()
rigolUiApp.run()
|
gpl-3.0
| -7,523,303,664,822,341,000
| 36.545455
| 120
| 0.658596
| false
| 3.567179
| false
| false
| false
|
yvesalexandre/bandicoot
|
bandicoot/tests/test_parsers.py
|
1
|
5328
|
# The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test the import of CSV files.
"""
import bandicoot as bc
from bandicoot.core import Record, Position
from datetime import datetime as dt
import unittest
import os
class TestParsers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestParsers._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
os.chdir(abspath)
TestParsers._dir_changed = True
def test_read_orange(self):
user = bc.io.read_orange("u_test", "samples", describe=False)
self.assertEqual(len(user.records), 500)
def test_read_csv(self):
user = bc.read_csv("u_test2", "samples", describe=False)
self.assertEqual(len(user.records), 500)
def test_read_csv_with_recharges(self):
user = bc.read_csv("A", "samples/manual", describe=False,
recharges_path="samples/manual/recharges")
self.assertEqual(len(user.recharges), 5)
def test_read_csv_antenna_id_no_places(self):
user = bc.read_csv("u_test_antennas", "samples", describe=False)
self.assertEqual(user.records[1],
Record(interaction='call',
direction='in',
correspondent_id='770000001',
datetime=dt(2013, 12, 16, 5, 39, 30),
call_duration=0,
position=Position('13084', None)))
result = {'allweek': {'allday': None}}
radius = bc.spatial.radius_of_gyration(user, groupby=None)
self.assertEqual(radius, result)
def test_read_csv_antenna_id(self):
user = bc.read_csv("u_test_antennas", "samples",
antennas_path="samples/towers.csv", describe=False)
self.assertEqual(user.records[1],
Record(interaction='call',
direction='in',
correspondent_id='770000001',
datetime=dt(2013, 12, 16, 5, 39, 30),
call_duration=0,
position=Position('13084', None)))
radius = bc.spatial.radius_of_gyration(user, groupby=None)
self.assertGreater(radius['allweek']['allday'], 0)
def test_read_csv_no_position(self):
user = bc.read_csv("u_test_no_position", "samples", describe=False)
self.assertEqual(user.records[1],
Record(interaction='call',
direction='in',
correspondent_id='770000001',
datetime=dt(2013, 12, 16, 5, 39, 30),
call_duration=0,
position=Position()))
def test_read_csv_attributes(self):
user = bc.read_csv("u_test2", "samples",
attributes_path="samples/attributes", describe=False)
self.assertEqual(user.attributes, {
'gender': 'male',
'age': '42',
'is_subscriber': 'True',
'individual_id': '7atr8f53fg41'
})
def test_read_duration_format(self):
raw = {
'antenna_id': '11201|11243',
'call_duration': '873',
'correspondent_id': 'A',
'datetime': '2014-06-01 01:00:00',
'direction': 'out',
'interaction': 'call'
}
rv = bc.io._parse_record(raw, duration_format='seconds').call_duration
self.assertEqual(rv, 873)
raw['call_duration'] = '00:14:33'
rv = bc.io._parse_record(raw, duration_format='%H:%M:%S').call_duration
self.assertEqual(rv, 873)
raw['call_duration'] = '1433'
rv = bc.io._parse_record(raw, duration_format='%M%S').call_duration
self.assertEqual(rv, 873)
raw['call_duration'] = ''
rv = bc.io._parse_record(raw, duration_format='seconds').call_duration
self.assertEqual(rv, None)
|
mit
| 3,988,811,326,115,238,400
| 39.06015
| 80
| 0.579955
| false
| 4.08589
| true
| false
| false
|
flavour/ifrc_qa
|
modules/s3db/supply.py
|
1
|
104079
|
# -*- coding: utf-8 -*-
""" Sahana Eden Supply Model
@copyright: 2009-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SupplyModel",
"S3SupplyDistributionModel",
"supply_item_rheader",
"supply_item_controller",
"supply_item_entity_controller",
"supply_catalog_rheader",
"supply_item_entity_category",
"supply_item_entity_country",
"supply_item_entity_organisation",
"supply_item_entity_contacts",
"supply_item_entity_status",
"supply_ItemRepresent",
#"supply_ItemCategoryRepresent",
"supply_get_shipping_code",
)
import re
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3dal import Row
from s3layouts import S3PopupLink
# @ToDo: Put the most common patterns at the top to optimise
um_patterns = [r"\sper\s?(.*)$",                          # CHOCOLATE, per 100g
               #r"\((.*)\)$",                             # OUTWARD REGISTER for shipping (50 sheets)
               r"([0-9]+\s?(gramm?e?s?|L|g|kg))$",        # Navarin de mouton 285 grammes
               r",\s(kit|pair|btl|bottle|tab|vial)\.?$",  # STAMP, IFRC, Englishlue, btl.
               r"\s(bottle)\.?$",                         # MINERAL WATER, 1.5L bottle
               r",\s((bag|box|kit) of .*)\.?$",           # (bag, diplomatic) LEAD SEAL, bag of 100
               ]
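# Illustrative match (assumption: the patterns are applied with re.search to split the
# unit of measure off an item name), e.g.:
#   re.search(um_patterns[1], "Navarin de mouton 285 grammes").group(1) == "285 grammes"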
# =============================================================================
class S3SupplyModel(S3Model):
"""
Generic Supply functionality such as catalogs and items that is used
across multiple modules.
@ToDo: Break this class up where possible
- is this just supply_item_alt?
"""
names = ("supply_brand",
"supply_catalog",
"supply_item_category",
"supply_item_category_id",
"supply_item",
"supply_item_entity",
"supply_catalog_item",
"supply_item_pack",
"supply_item_alt",
"supply_item_id",
"supply_item_entity_id",
"supply_item_pack_id",
"supply_kit_item",
"supply_item_represent",
"supply_item_category_represent",
"supply_item_add",
"supply_item_pack_quantity",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
s3 = current.response.s3
settings = current.deployment_settings
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
float_represent = IS_FLOAT_AMOUNT.represent
NONE = current.messages["NONE"]
format = auth.permission.format
if format == "html":
i18n = {"in_inv": T("in Stock"),
"no_packs": T("No Packs for Item"),
}
s3.js_global.append('''i18n.in_inv="%s"''' % i18n["in_inv"])
s3.js_global.append('''i18n.no_packs="%s"''' % i18n["no_packs"])
# =====================================================================
# Brand
#
tablename = "supply_brand"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_BRAND = T("Create Brand")
crud_strings[tablename] = Storage(
label_create = ADD_BRAND,
title_display = T("Brand Details"),
title_list = T("Brands"),
title_update = T("Edit Brand"),
label_list_button = T("List Brands"),
label_delete_button = T("Delete Brand"),
msg_record_created = T("Brand added"),
msg_record_modified = T("Brand updated"),
msg_record_deleted = T("Brand deleted"),
msg_list_empty = T("No Brands currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename)
brand_id = S3ReusableField("brand_id", "reference %s" % tablename,
label = T("Brand"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "supply_brand.id",
represent,
sort=True)
),
sortby = "name",
comment = S3PopupLink(c = "supply",
f = "brand",
label = ADD_BRAND,
title = T("Brand"),
tooltip = T("The list of Brands are maintained by the Administrators."),
),
)
# =====================================================================
# Catalog (of Items)
#
tablename = "supply_catalog"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
self.org_organisation_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_CATALOG = T("Create Catalog")
crud_strings[tablename] = Storage(
label_create = ADD_CATALOG,
title_display = T("Catalog Details"),
title_list = T("Catalogs"),
title_update = T("Edit Catalog"),
label_list_button = T("List Catalogs"),
label_delete_button = T("Delete Catalog"),
msg_record_created = T("Catalog added"),
msg_record_modified = T("Catalog updated"),
msg_record_deleted = T("Catalog deleted"),
msg_list_empty = T("No Catalogs currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename)
catalog_id = S3ReusableField("catalog_id", "reference %s" % tablename,
default = 1,
label = T("Catalog"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "supply_catalog.id",
represent,
sort=True,
# Restrict to catalogs the user can update
updateable=True,
)),
sortby = "name",
comment=S3PopupLink(c = "supply",
f = "catalog",
label = ADD_CATALOG,
title = T("Catalog"),
tooltip = T("The list of Catalogs are maintained by the Administrators."),
),
)
# Components
add_components(tablename,
# Categories
supply_item_category = "catalog_id",
# Catalog Items
supply_catalog_item = "catalog_id",
)
# =====================================================================
# Item Category
#
asset = settings.has_module("asset")
telephone = settings.get_asset_telephones()
vehicle = settings.has_module("vehicle")
item_category_represent = supply_ItemCategoryRepresent()
item_category_represent_nocodes = \
supply_ItemCategoryRepresent(use_code=False)
if format == "xls":
parent_represent = item_category_represent_nocodes
else:
parent_represent = item_category_represent
tablename = "supply_item_category"
define_table(tablename,
catalog_id(),
#Field("level", "integer"),
Field("parent_item_category_id",
"reference supply_item_category",
label = T("Parent"),
ondelete = "RESTRICT",
represent = parent_represent,
),
Field("code", length=16,
label = T("Code"),
#required = True,
),
Field("name", length=128,
label = T("Name"),
),
Field("can_be_asset", "boolean",
default = True,
label = T("Items in Category can be Assets"),
represent = s3_yes_no_represent,
readable = asset,
writable = asset,
),
Field("is_telephone", "boolean",
default = False,
label = T("Items in Category are Telephones"),
represent = s3_yes_no_represent,
readable = telephone,
writable = telephone,
),
Field("is_vehicle", "boolean",
default = False,
label = T("Items in Category are Vehicles"),
represent = s3_yes_no_represent,
readable = vehicle,
writable = vehicle,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ITEM_CATEGORY = T("Create Item Category")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM_CATEGORY,
title_display = T("Item Category Details"),
title_list = T("Item Categories"),
title_update = T("Edit Item Category"),
label_list_button = T("List Item Categories"),
label_delete_button = T("Delete Item Category"),
msg_record_created = T("Item Category added"),
msg_record_modified = T("Item Category updated"),
msg_record_deleted = T("Item Category deleted"),
msg_list_empty = T("No Item Categories currently registered"))
# Reusable Field
item_category_requires = IS_EMPTY_OR(
IS_ONE_OF(db, "supply_item_category.id",
item_category_represent_nocodes,
sort=True)
)
item_category_comment = S3PopupLink(c = "supply",
f = "item_category",
label = ADD_ITEM_CATEGORY,
title = T("Item Category"),
tooltip = ADD_ITEM_CATEGORY,
)
# @todo: make lazy_table
table = db[tablename]
table.parent_item_category_id.requires = item_category_requires
item_category_id = S3ReusableField("item_category_id", "reference %s" % tablename,
comment = item_category_comment,
label = T("Category"),
ondelete = "RESTRICT",
represent = item_category_represent,
requires = item_category_requires,
sortby = "name",
)
item_category_script = '''
$.filterOptionsS3({
'trigger':'catalog_id',
'target':'item_category_id',
'lookupPrefix':'supply',
'lookupResource':'item_category',
})'''
# Components
add_components(tablename,
# Child categories
supply_item_category = "parent_item_category_id",
)
configure(tablename,
deduplicate = self.supply_item_category_duplicate,
onvalidation = self.supply_item_category_onvalidate,
)
# =====================================================================
# Item
#
# These are Template items
# Instances of these become Inventory Items & Request items
#
tablename = "supply_item"
define_table(tablename,
catalog_id(),
# Needed to auto-create a catalog_item
item_category_id(
script = item_category_script
),
Field("name", length=128, notnull=True,
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
Field("code", length=16,
label = T("Code"),
represent = lambda v: v or NONE,
),
Field("um", length=128, notnull=True,
default = "piece",
label = T("Unit of Measure"),
requires = IS_NOT_EMPTY(),
),
brand_id(),
Field("kit", "boolean",
default = False,
label = T("Kit?"),
represent = lambda opt: \
(opt and [T("Yes")] or [NONE])[0],
),
Field("model", length=128,
label = T("Model/Type"),
represent = lambda v: v or NONE,
),
Field("year", "integer",
label = T("Year of Manufacture"),
represent = lambda v: v or NONE,
),
Field("weight", "double",
label = T("Weight (kg)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("length", "double",
label = T("Length (m)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("width", "double",
label = T("Width (m)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("height", "double",
label = T("Height (m)"),
represent = lambda v: \
float_represent(v, precision=2),
),
Field("volume", "double",
label = T("Volume (m3)"),
represent = lambda v: \
float_represent(v, precision=2),
),
# These comments do *not* pull through to an Inventory's Items or a Request's Items
s3_comments(),
*s3_meta_fields())
# Categories in Progress
#table.item_category_id_0.label = T("Category")
#table.item_category_id_1.readable = table.item_category_id_1.writable = False
#table.item_category_id_2.readable = table.item_category_id_2.writable = False
# CRUD strings
ADD_ITEM = T("Create Item")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM,
title_display = T("Item Details"),
title_list = T("Items"),
title_update = T("Edit Item"),
label_list_button = T("List Items"),
label_delete_button = T("Delete Item"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"),
msg_match = T("Matching Items"),
msg_no_match = T("No Matching Items")
)
supply_item_represent = supply_ItemRepresent(show_link=True)
# Reusable Field
supply_item_id = S3ReusableField("item_id",
"reference %s" % tablename, # 'item_id' for backwards-compatibility
label = T("Item"),
ondelete = "RESTRICT",
represent = supply_item_represent,
requires = IS_ONE_OF(db, "supply_item.id",
supply_item_represent,
sort=True),
sortby = "name",
widget = S3AutocompleteWidget("supply", "item"),
comment=S3PopupLink(c = "supply",
f = "item",
label = ADD_ITEM,
title = T("Item"),
tooltip = T("Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog."),
),
)
# ---------------------------------------------------------------------
filter_widgets = [
S3TextFilter(["code",
"name",
"model",
#"item_category_id$name",
"comments",
],
label = T("Search"),
comment = T("Search for an item by its code, name, model and/or comment."),
#_class = "filter-search",
),
S3OptionsFilter("brand_id",
# @ToDo: Introspect need for header based on # records
#header = True,
#label = T("Brand"),
represent = "%(name)s",
widget = "multiselect",
),
S3OptionsFilter("year",
comment = T("Search for an item by Year of Manufacture."),
# @ToDo: Introspect need for header based on # records
#header = True,
label = T("Year"),
widget = "multiselect",
),
]
report_options = Storage(defaults=Storage(rows="name",
cols="item_category_id",
fact="count(brand_id)",
),
)
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
]
configure(tablename,
deduplicate = self.supply_item_duplicate,
filter_widgets = filter_widgets,
onaccept = self.supply_item_onaccept,
orderby = "supply_item.name",
report_options = report_options,
summary = summary,
)
# Components
add_components(tablename,
# Catalog Items
supply_catalog_item = "item_id",
# Packs
supply_item_pack = "item_id",
# Inventory Items
inv_inv_item = "item_id",
# Order Items
inv_track_item = "item_id",
# Procurement Plan Items
proc_plan_item = "item_id",
# Request Items
req_req_item = "item_id",
# Supply Kit Items
supply_kit_item = "parent_item_id",
# Supply Kit Items (with link table)
#supply_item = {"name": "kit_item",
# "link": "supply_kit_item",
# "joinby": "parent_item_id",
# "key": "item_id"
# "actuate": "hide",
# },
)
# Optional components
if settings.get_supply_use_alt_name():
add_components(tablename,
# Alternative Items
supply_item_alt="item_id",
)
# =====================================================================
# Catalog Item
#
# This resource is used to link Items with Catalogs (n-to-n)
# Item Categories are also Catalog specific
#
tablename = "supply_catalog_item"
define_table(tablename,
catalog_id(),
item_category_id(
script = item_category_script
),
supply_item_id(script=None), # No Item Pack Filter
s3_comments(), # These comments do *not* pull through to an Inventory's Items or a Request's Items
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Catalog Item"),
title_display = T("Item Catalog Details"),
title_list = T("Catalog Items"),
title_update = T("Edit Catalog Item"),
title_upload = T("Import Catalog Items"),
label_list_button = T("List Catalog Items"),
label_delete_button = T("Delete Catalog Item"),
msg_record_created = T("Catalog Item added"),
msg_record_modified = T("Catalog Item updated"),
msg_record_deleted = T("Catalog Item deleted"),
msg_list_empty = T("No Catalog Items currently registered"),
msg_match = T("Matching Catalog Items"),
msg_no_match = T("No Matching Catalog Items")
)
# Filter Widgets
filter_widgets = [
S3TextFilter([#These lines are causing issues...very slow - perhaps broken
#"comments",
#"item_category_id$code",
#"item_category_id$name",
#"item_id$brand_id$name",
#"item_category_id$parent_item_category_id$code"
#"item_category_id$parent_item_category_id$name"
"item_id$code",
"item_id$name",
"item_id$model",
"item_id$comments"
],
label = T("Search"),
comment = T("Search for an item by its code, name, model and/or comment."),
),
S3OptionsFilter("catalog_id",
label = T("Catalog"),
comment = T("Search for an item by catalog."),
#represent ="%(name)s",
cols = 3,
hidden = True,
),
S3OptionsFilter("item_category_id",
label = T("Category"),
comment = T("Search for an item by category."),
represent = item_category_represent_nocodes,
cols = 3,
hidden = True,
),
S3OptionsFilter("item_id$brand_id",
label = T("Brand"),
comment = T("Search for an item by brand."),
#represent ="%(name)s",
cols = 3,
hidden = True,
),
]
configure(tablename,
deduplicate = self.supply_catalog_item_duplicate,
filter_widgets = filter_widgets,
)
# =====================================================================
# Item Pack
#
# Items can be distributed in different containers
#
tablename = "supply_item_pack"
define_table(tablename,
supply_item_id(empty=False),
Field("name", length=128,
notnull=True, # Ideally this would reference another table for normalising Pack names
default = T("piece"),
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
Field("quantity", "double", notnull=True,
default = 1,
label = T("Quantity"),
represent = lambda v: \
float_represent(v, precision=2),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ITEM_PACK = T("Create Item Pack")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM_PACK,
title_display = T("Item Pack Details"),
title_list = T("Item Packs"),
title_update = T("Edit Item Pack"),
label_list_button = T("List Item Packs"),
label_delete_button = T("Delete Item Pack"),
msg_record_created = T("Item Pack added"),
msg_record_modified = T("Item Pack updated"),
msg_record_deleted = T("Item Pack deleted"),
msg_list_empty = T("No Item Packs currently registered"))
# ---------------------------------------------------------------------
# Reusable Field
item_pack_represent = supply_ItemPackRepresent(lookup="supply_item_pack")
item_pack_id = S3ReusableField("item_pack_id", "reference %s" % tablename,
label = T("Pack"),
ondelete = "RESTRICT",
represent = item_pack_represent,
# Do not display any packs initially
# will be populated by filterOptionsS3
requires = IS_ONE_OF_EMPTY_SELECT(db,
"supply_item_pack.id",
item_pack_represent,
sort=True,
# @ToDo: Enforce "Required" for imports
# @ToDo: Populate based on item_id in controller instead of IS_ONE_OF_EMPTY_SELECT
# filterby = "item_id",
# filter_opts = (....),
),
script = '''
$.filterOptionsS3({
'trigger':'item_id',
'target':'item_pack_id',
'lookupPrefix':'supply',
'lookupResource':'item_pack',
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''',
sortby = "name",
#comment=S3PopupLink(c = "supply",
# f = "item_pack",
# label = ADD_ITEM_PACK,
# title = T("Item Packs"),
# tooltip = T("The way in which an item is normally distributed"),
# ),
)
configure(tablename,
deduplicate = self.supply_item_pack_duplicate,
)
# Components
add_components(tablename,
# Inventory Items
inv_inv_item = "item_pack_id",
)
# =====================================================================
# Supply Kit Item Table
#
# For defining what items are in a kit
tablename = "supply_kit_item"
define_table(tablename,
supply_item_id("parent_item_id",
label = T("Parent Item"),
comment = None,
),
supply_item_id("item_id",
label = T("Kit Item"),
),
Field("quantity", "double",
label = T("Quantity"),
represent = lambda v: \
float_represent(v, precision=2),
),
item_pack_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Item to Kit"),
title_display = T("Kit Item Details"),
title_list = T("Kit Items"),
title_update = T("Edit Kit Item"),
label_list_button = T("List Kit Items"),
label_delete_button = T("Remove Item from Kit"),
msg_record_created = T("Item added to Kit"),
msg_record_modified = T("Kit Item updated"),
msg_record_deleted = T("Item removed from Kit"),
msg_list_empty = T("No Items currently in this Kit"))
# =====================================================================
# Alternative Items
#
# If the desired item isn't found, then these are designated as
# suitable alternatives
#
tablename = "supply_item_alt"
define_table(tablename,
supply_item_id(notnull=True),
Field("quantity", "double", notnull=True,
default = 1,
label = T("Quantity"),
represent = lambda v: \
float_represent(v, precision=2),
comment = DIV(_class = "tooltip",
_title = "%s|%s" %
(T("Quantity"),
T("The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item")
)
),
),
supply_item_id("alt_item_id", notnull=True),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Alternative Item"),
title_display = T("Alternative Item Details"),
title_list = T("Alternative Items"),
title_update = T("Edit Alternative Item"),
label_list_button = T("List Alternative Items"),
label_delete_button = T("Delete Alternative Item"),
msg_record_created = T("Alternative Item added"),
msg_record_modified = T("Alternative Item updated"),
msg_record_deleted = T("Alternative Item deleted"),
msg_list_empty = T("No Alternative Items currently registered"))
# =====================================================================
# Item Super-Entity
#
# This super entity provides a common way to provide a foreign key to supply_item
# - it allows searching/reporting across Item types easily.
#
item_types = Storage(asset_asset = T("Asset"),
asset_item = T("Asset Item"),
inv_inv_item = T("Warehouse Stock"),
inv_track_item = T("Order Item"),
proc_plan_item = T("Planned Procurement Item"),
)
tablename = "supply_item_entity"
self.super_entity(tablename, "item_entity_id", item_types,
# @ToDo: Make Items Trackable?
#super_link("track_id", "sit_trackable"),
#location_id(),
supply_item_id(),
item_pack_id(),
Field("quantity", "double", notnull=True,
default = 1.0,
label = T("Quantity"),
),
*s3_ownerstamp())
# Reusable Field
item_id = super_link("item_entity_id", "supply_item_entity",
#writable = True,
#readable = True,
#label = T("Status"),
#represent = item_represent,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3ItemAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Item"),
# current.messages.AUTOCOMPLETE_HELP))
)
# Filter Widgets
filter_widgets = [
S3TextFilter(name = "item_entity_search_text",
label = T("Search"),
comment = T("Search for an item by text."),
field = ["item_id$name",
#"item_id$item_category_id$name",
#"site_id$name"
]
),
S3OptionsFilter("item_id$item_category_id",
label = T("Code Share"),
comment = T("If none are selected, then all are searched."),
#represent = "%(name)s",
cols = 2,
),
#S3OptionsFilter("country",
# label = current.messages.COUNTRY,
# comment = T("If none are selected, then all are searched."),
# #represent = "%(name)s",
# cols = 2,
# ),
]
# Configuration
configure(tablename,
filter_widgets = filter_widgets,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(supply_item_id = supply_item_id,
supply_item_entity_id = item_id,
supply_item_category_id = item_category_id,
supply_item_pack_id = item_pack_id,
supply_item_represent = supply_item_represent,
supply_item_category_represent = item_category_represent,
supply_item_pack_quantity = SupplyItemPackQuantity,
supply_item_add = self.supply_item_add,
supply_item_pack_represent = item_pack_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Return safe defaults for names in case the model is disabled """
supply_item_id = S3ReusableField("item_id", "integer",
writable=False,
readable=False)
supply_item_category_id = S3ReusableField("item_category_id", "integer",
writable=False,
readable=False)
item_id = S3ReusableField("item_entity_id", "integer",
writable=False,
readable=False)()
item_pack_id = S3ReusableField("item_pack_id", "integer",
writable=False,
readable=False)
return dict(supply_item_id = supply_item_id,
supply_item_category_id = supply_item_category_id,
supply_item_entity_id = item_id,
supply_item_pack_id = item_pack_id,
supply_item_pack_quantity = lambda tablename: lambda row: 0,
)
# -------------------------------------------------------------------------
@staticmethod
def supply_item_category_onvalidate(form):
"""
            Checks that either a Code or a Name is entered
        """
        # Require at least one of Code/Name
if not (form.vars.code or form.vars.name):
errors = form.errors
errors.code = errors.name = current.T("An Item Category must have a Code OR a Name.")
# -------------------------------------------------------------------------
@staticmethod
def supply_item_add(quantity_1, pack_quantity_1,
quantity_2, pack_quantity_2):
"""
Adds item quantities together, accounting for different pack
quantities.
            The returned quantity is expressed in units of pack_quantity_1
Used by controllers/inv.py & modules/s3db/inv.py
"""
if pack_quantity_1 == pack_quantity_2:
# Faster calculation
quantity = quantity_1 + quantity_2
else:
quantity = ((quantity_1 * pack_quantity_1) +
(quantity_2 * pack_quantity_2)) / pack_quantity_1
return quantity
# -------------------------------------------------------------------------
@staticmethod
def item_represent(id):
"""
Represent an item entity in option fields or list views
- unused, we use VirtualField instead
@ToDo: Migrate to S3Represent
"""
if not id:
return current.messages["NONE"]
db = current.db
if isinstance(id, Row) and "instance_type" in id:
# Do not repeat the lookup if already done by IS_ONE_OF
item = id
instance_type = item.instance_type
else:
item_table = db.supply_item_entity
item = db(item_table._id == id).select(item_table.instance_type,
limitby=(0, 1)).first()
try:
instance_type = item.instance_type
except:
return current.messages.UNKNOWN_OPT
T = current.T
if instance_type == "inv_inv_item":
item_str = T("In Stock")
elif instance_type == "inv_track_item":
s3db = current.s3db
itable = s3db[instance_type]
rtable = s3db.inv_recv
query = (itable.item_entity_id == id) & \
(rtable.id == itable.recv_id)
eta = db(query).select(rtable.eta,
limitby=(0, 1)).first().eta
item_str = T("Due %(date)s") % dict(date=eta)
else:
return current.messages.UNKNOWN_OPT
return item_str
# -------------------------------------------------------------------------
@staticmethod
def supply_item_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
code = data.get("code")
if code:
# Same Code => definitely duplicate
table = item.table
query = (table.deleted != True) & \
(table.code.lower() == code.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
else:
name = data.get("name")
if not name:
# No way to match
return
um = data.get("um")
if not um:
# Try to extract UM from Name
name, um = item_um_from_name(name)
table = item.table
query = (table.deleted != True) & \
(table.name.lower() == name.lower())
if um:
query &= (table.um.lower() == um.lower())
catalog_id = data.get("catalog_id")
if catalog_id:
query &= (table.catalog_id == catalog_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_item_category_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
table = item.table
query = (table.deleted != True)
name = data.get("name")
if name:
query &= (table.name.lower() == name.lower())
code = data.get("code")
if code:
query &= (table.code.lower() == code.lower())
catalog_id = data.get("catalog_id")
if catalog_id:
query &= (table.catalog_id == catalog_id)
        parent_item_category_id = data.get("parent_item_category_id")
        if parent_item_category_id:
            query &= (table.parent_item_category_id == parent_item_category_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_catalog_item_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
table = item.table
query = (table.deleted != True)
item_id = data.get("item_id")
if item_id:
query &= (table.item_id == item_id)
catalog_id = data.get("catalog_id")
if catalog_id:
query &= (table.catalog_id == catalog_id)
item_category_id = data.get("item_category_id")
if item_category_id:
query &= (table.item_category_id == item_category_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_item_pack_duplicate(item):
"""
Callback function used to look for duplicates during
the import process
@param item: the S3ImportItem to check
"""
data = item.data
table = item.table
query = (table.deleted != True)
name = data.get("name")
if name:
query &= (table.name.lower() == name.lower())
item_id = data.get("item_id")
if item_id:
query &= (table.item_id == item_id)
quantity = data.get("quantity")
if quantity:
query &= (table.quantity == quantity)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def supply_item_onaccept(form):
"""
Create a catalog_item for this item
Update the UM (Unit of Measure) in the supply_item_pack table
"""
db = current.db
vars = form.vars
item_id = vars.id
catalog_id = vars.catalog_id
catalog_item_id = None
citable = db.supply_catalog_item
query = (citable.item_id == item_id) & \
(citable.deleted == False )
        rows = db(query).select(citable.id)
if not len(rows):
# Create supply_catalog_item
catalog_item_id = \
citable.insert(catalog_id = catalog_id,
item_category_id = vars.item_category_id,
item_id = item_id
)
# Update if the catalog/category has changed - if there is only supply_catalog_item
elif len(rows) == 1:
catalog_item_id = rows.first().id
catalog_item_id = \
db(citable.id == catalog_item_id
).update(catalog_id = catalog_id,
item_category_id = vars.item_category_id,
item_id = item_id
)
#current.auth.s3_set_record_owner(citable, catalog_item_id, force_update=True)
# Update UM
um = vars.um or db.supply_item.um.default
table = db.supply_item_pack
# Try to update the existing record
query = (table.item_id == item_id) & \
(table.quantity == 1) & \
(table.deleted == False)
if db(query).update(name = um) == 0:
# Create a new item packet
table.insert(item_id = item_id,
name = um,
quantity = 1)
if vars.kit:
# Go to that tab afterwards
url = URL(args=["[id]", "kit_item"])
current.s3db.configure("supply_item",
create_next=url,
update_next=url,
)
# =============================================================================
class S3SupplyDistributionModel(S3Model):
"""
Supply Distribution Model
- depends on Stats module
A Distribution is an Item (which could be a Kit) distributed to a single Location
- usually as part of an Activity
"""
names = ("supply_distribution_item",
"supply_distribution",
)
def model(self):
settings = current.deployment_settings
if not settings.has_module("stats"):
# Distribution Model needs Stats module enabling
return {}
T = current.T
db = current.db
s3 = current.response.s3
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Distribution Item: supply items which can be distributed
#
tablename = "supply_distribution_item"
define_table(tablename,
super_link("parameter_id", "stats_parameter"),
self.supply_item_entity_id,
self.supply_item_id(ondelete = "RESTRICT",
required = True,
),
# @ToDo: Hide this field & populate onaccept from the item_id represent
Field("name", length=128, unique=True,
#label = T("Distribution Item Name"),
label = T("Label"),
requires = IS_NOT_IN_DB(db,
"supply_distribution_item.name",
),
),
*s3_meta_fields())
# CRUD Strings
ADD_ITEM = T("Add Distribution Item")
crud_strings[tablename] = Storage(
label_create = ADD_ITEM,
title_display = T("Distribution Item"),
title_list = T("Distribution Items"),
title_update = T("Edit Distribution Item"),
label_list_button = T("List Distribution Items"),
msg_record_created = T("Distribution Item Added"),
msg_record_modified = T("Distribution Item Updated"),
msg_record_deleted = T("Distribution Item Deleted"),
msg_list_empty = T("No Distribution Items Found")
)
# Resource Configuration
configure(tablename,
onaccept = self.supply_distribution_item_onaccept,
super_entity = ("stats_parameter", "supply_item_entity"),
)
# ---------------------------------------------------------------------
# Distribution: actual distribution of a supply item
#
tablename = "supply_distribution"
define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# Component (each Distribution can link to a single Project)
#self.project_project_id(),
# Component (each Distribution can link to a single Activity)
self.project_activity_id(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Item"),
instance_types = ("supply_distribution_item",),
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
empty = False,
comment = S3PopupLink(c = "supply",
f = "distribution_item",
vars = {"prefix": "supply",
"child": "parameter_id"},
title=ADD_ITEM,
),
),
self.gis_location_id(),
Field("value", "integer",
label = T("Quantity"),
requires = IS_INT_IN_RANGE(0, None),
represent = lambda v: \
IS_INT_AMOUNT.represent(v),
),
s3_date("date",
#empty = False,
label = T("Start Date"),
),
s3_date("end_date",
#empty = False,
label = T("End Date"),
start_field = "supply_distribution_date",
default_interval = 12,
),
#self.stats_source_id(),
Field.Method("year", self.supply_distribution_year),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_DIST = T("Add Distribution")
crud_strings[tablename] = Storage(
label_create = ADD_DIST,
title_display = T("Distribution Details"),
title_list = T("Distributions"),
title_update = T("Edit Distribution"),
title_report = T("Distribution Report"),
label_list_button = T("List Distributions"),
msg_record_created = T("Distribution Added"),
msg_record_modified = T("Distribution Updated"),
msg_record_deleted = T("Distribution Deleted"),
msg_list_empty = T("No Distributions Found")
)
# Reusable Field
#represent = S3Represent(lookup=tablename,
# field_sep = " ",
# fields=["value", "parameter_id"])
# Resource Configuration
# ---------------------------------------------------------------------
def year_options():
"""
returns a dict of the options for the year virtual field
used by the search widget
orderby needed for postgres
"""
table = db.supply_distribution
query = (table.deleted == False)
min_field = table.date.min()
date_min = db(query).select(min_field,
orderby=min_field,
limitby=(0, 1)
).first()
start_year = date_min and date_min[min_field].year
max_field = table.date.max()
date_max = db(query).select(max_field,
orderby=max_field,
limitby=(0, 1)
).first()
last_start_year = date_max and date_max[max_field].year
max_field = table.end_date.max()
date_max = db(query).select(max_field,
orderby=max_field,
limitby=(0, 1)
).first()
last_end_year = date_max and date_max[max_field].year
end_year = max(last_start_year, last_end_year)
            if not start_year or not end_year:
                # Return whichever bound is known (empty dict if neither)
                if start_year:
                    return {start_year: start_year}
                if end_year:
                    return {end_year: end_year}
                return {}
years = {}
for year in xrange(start_year, end_year + 1):
years[year] = year
return years
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
# Normally only used in Report
filter_widgets = [
#S3TextFilter([#"item_id$name",
# if settings.get_project_projects():
# "activity_id$project_id$name",
# "activity_id$project_id$code",
# "location_id",
# "comments"
# ],
# label = T("Search Distributions"),
# ),
S3LocationFilter("location_id",
levels=levels,
widget="multiselect"
),
S3OptionsFilter("activity_id$activity_organisation.organisation_id",
widget="multiselect"
),
S3OptionsFilter("parameter_id",
label = T("Item"),
widget="multiselect"
),
# @ToDo: Range Slider using start_date & end_date
#S3DateFilter("date",
# )
# @ToDo: OptionsFilter working with Lazy VF
#S3OptionsFilter("year",
# label=T("Year"),
# options = year_options,
# widget="multiselect",
# hidden=True,
# ),
]
list_fields = ["activity_id$activity_organisation.organisation_id",
(T("Item"), "parameter_id"),
"value",
(T("Year"), "year"),
]
report_fields = ["activity_id$activity_organisation.organisation_id",
(T("Item"), "parameter_id"),
"parameter_id",
(T("Year"), "year"),
]
if settings.get_project_sectors():
report_fields.append("activity_id$sector_activity.sector_id")
filter_widgets.insert(0,
S3OptionsFilter("activity_id$sector_activity.sector_id",
# Doesn't allow translation
#represent="%(name)s",
widget="multiselect",
#hidden=True,
))
if settings.get_project_hazards():
report_fields.append("activity_id$project_id$hazard.name")
if settings.get_project_projects():
list_fields.insert(0, "activity_id$project_id")
report_fields.append("activity_id$project_id")
filter_widgets.append(
S3OptionsFilter("activity_id$project_id",
widget="multiselect"
),
#S3OptionsFilter("activity_id$project_id$organisation_id",
# label = T("Lead Organization"),
# widget="multiselect"
# ),
#S3OptionsFilter("activity_id$project_id$partner.organisation_id",
# label = T("Partners"),
# widget="multiselect"),
#S3OptionsFilter("activity_id$project_id$donor.organisation_id",
# label = T("Donors"),
# location_level="L1",
# widget="multiselect")
)
if settings.get_project_themes():
report_fields.append("activity_id$project_id$theme.name")
filter_widgets.append(
S3OptionsFilter("activity_id$project_id$theme_project.theme_id",
# Doesn't allow translation
#represent="%(name)s",
widget="multiselect",
#hidden=True,
))
for level in levels:
lfield = "location_id$%s" % level
list_fields.append(lfield)
report_fields.append(lfield)
if "L0" in levels:
default_row = "location_id$L0"
elif "L1" in levels:
default_row = "location_id$L1"
else:
default_row = "activity_id$activity_organisation.organisation_id"
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = [(T("Number of Items"), "sum(value)"),
],
defaults = Storage(rows = default_row,
cols = "parameter_id",
fact = "sum(value)",
totals = True,
),
# Needed for Virtual Field
extra_fields = ["date",
"end_date",
]
)
configure(tablename,
context = {"location": "location_id",
"organisation": "activity_id$organisation_activity.organisation_id",
},
deduplicate = S3Duplicate(primary = ("activity_id",
"location_id",
"parameter_id",
),
),
filter_widgets = filter_widgets,
onaccept = self.supply_distribution_onaccept,
report_options = report_options,
super_entity = "stats_data",
)
# Pass names back to global scope (s3.*)
return {}
# ---------------------------------------------------------------------
@staticmethod
def supply_distribution_item_onaccept(form):
"""
Update supply_distribution_item name from supply_item_id
"""
db = current.db
dtable = db.supply_distribution_item
ltable = db.supply_item
record_id = form.vars.id
query = (dtable.id == record_id) & \
(ltable.id == dtable.item_id)
item = db(query).select(dtable.name,
ltable.name,
limitby=(0, 1)).first()
if item and not item[dtable.name]:
db(dtable.id == record_id).update(name = item[ltable.name])
return
# ---------------------------------------------------------------------
@staticmethod
def supply_distribution_onaccept(form):
"""
Set supply_distribution location, start_date and end_date
from activity
This is for when the data is created after the project_activity
- CSV imports into project_activity
- Inline forms in project_activity
"""
db = current.db
dtable = db.supply_distribution
record_id = form.vars.id
# Get the full record
record = db(dtable.id == record_id).select(dtable.activity_id,
dtable.location_id,
dtable.date,
dtable.end_date,
limitby=(0, 1)
).first()
try:
location_id = record.location_id
start_date = record.date
end_date = record.end_date
except:
# Exit Gracefully
current.log.warning("Cannot find Distribution: %s" % record_id)
return
activity_id = record.activity_id
if not activity_id:
# Nothing we can do
return
# Read Activity
atable = db.project_activity
activity = db(atable.id == activity_id).select(atable.location_id,
atable.date,
atable.end_date,
limitby=(0, 1)
).first()
try:
a_location_id = activity.location_id
a_start_date = activity.date
a_end_date = activity.end_date
except:
# Exit Gracefully
current.log.warning("Cannot find Activity: %s" % activity_id)
return
data = {}
if a_location_id and a_location_id != location_id:
data["location_id"] = a_location_id
if a_start_date and a_start_date != start_date:
data["date"] = a_start_date
if a_end_date and a_end_date != end_date:
data["end_date"] = a_end_date
if data:
# Update Distribution details
db(dtable.id == record_id).update(**data)
# ---------------------------------------------------------------------
@staticmethod
def supply_distribution_year(row):
""" Virtual field for the supply_distribution table """
if hasattr(row, "supply_distribution"):
row = row.supply_distribution
try:
date = row.date
except AttributeError:
date = None
try:
end_date = row.end_date
except AttributeError:
end_date = None
if not date and not end_date:
return []
elif not end_date:
return [date.year]
elif not date:
return [end_date.year]
else:
return list(xrange(date.year, end_date.year + 1))
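        # Hedged illustration (not executed): for a row with date = 2014-03-01
        # and end_date = 2016-07-15 this virtual field returns [2014, 2015, 2016];
        # with only one of the two dates set it returns a single-element list,
        # and [] when both are missing.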
# =============================================================================
class supply_ItemRepresent(S3Represent):
""" Representation of Supply Items """
def __init__(self,
translate=False,
show_link=False,
show_um=False,
multiple=False):
self.show_um = show_um
# Need a custom lookup to join with Brand
self.lookup_rows = self.custom_lookup_rows
fields = ["supply_item.id",
"supply_item.name",
"supply_item.model",
"supply_brand.name",
]
if show_um:
fields.append("supply_item.um")
super(supply_ItemRepresent,
self).__init__(lookup="supply_item",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for item rows, does a
left join with the brand. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the supply_item IDs
"""
db = current.db
itable = current.s3db.supply_item
btable = db.supply_brand
left = btable.on(btable.id == itable.brand_id)
qty = len(values)
if qty == 1:
query = (itable.id == values[0])
limitby = (0, 1)
else:
query = (itable.id.belongs(values))
limitby = (0, qty)
rows = db(query).select(*self.fields,
left=left,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the supply_item Row
"""
name = row["supply_item.name"]
model = row["supply_item.model"]
brand = row["supply_brand.name"]
fields = []
if name:
fields.append(name)
if model:
fields.append(model)
if brand:
fields.append(brand)
name = " - ".join(fields)
if self.show_um:
um = row["supply_item.um"]
if um:
name = "%s (%s)" % (name, um)
return s3_str(name)
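        # Hedged illustration (not executed), using made-up field values:
        # name="Tent", model="T-45", brand="Acme" would be rendered as
        # "Tent - T-45 - Acme", or "Tent - T-45 - Acme (Piece)" when the
        # representer was created with show_um=True and um="Piece".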
# =============================================================================
class supply_ItemPackRepresent(S3Represent):
""" Representation of Supply Item Packs """
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for item_pack rows, does a left join with
the item.
@param key: the primary key of the lookup table
@param values: the supply_item_pack IDs
@param fields: the fields to lookup (unused in this class,
retained for API compatibility)
"""
db = current.db
table = self.table
itable = db.supply_item
qty = len(values)
if qty == 1:
query = (key == values[0])
else:
query = (key.belongs(values))
left = itable.on(table.item_id == itable.id)
rows = db(query).select(table.id,
table.name,
table.quantity,
itable.um,
left=left,
limitby=(0, qty),
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the Row (usually joined supply_item_pack/supply_item)
@todo: implement translate option
"""
try:
item = row.supply_item
pack = row.supply_item_pack
except AttributeError:
# Missing join (external query?)
item = {"um": "Piece"}
pack = row
name = pack.get("name")
if not name:
return current.messages.UNKNOWN_OPT
quantity = pack.get("quantity")
if quantity == 1 or quantity is None:
return name
else:
# Include pack description (quantity x units of measurement)
return "%s (%s x %s)" % (name, quantity, item.get("um"))
# =============================================================================
class supply_ItemCategoryRepresent(S3Represent):
""" Representation of Supply Item Categories """
def __init__(self,
translate=False,
show_link=False,
use_code=True,
multiple=False):
self.use_code = use_code
# Need a custom lookup to join with Parent/Catalog
self.lookup_rows = self.custom_lookup_rows
fields = ["supply_item_category.id",
"supply_item_category.name",
                  # Always included since it is used as a fallback when there is no name
"supply_item_category.code",
"supply_catalog.name",
"supply_parent_item_category.name",
"supply_grandparent_item_category.name",
"supply_grandparent_item_category.parent_item_category_id",
]
super(supply_ItemCategoryRepresent,
self).__init__(lookup="supply_item_category",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for item category rows, does a
left join with the parent category. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the supply_item_category IDs
"""
db = current.db
table = current.s3db.supply_item_category
ctable = db.supply_catalog
ptable = db.supply_item_category.with_alias("supply_parent_item_category")
gtable = db.supply_item_category.with_alias("supply_grandparent_item_category")
left = [ctable.on(ctable.id == table.catalog_id),
ptable.on(ptable.id == table.parent_item_category_id),
gtable.on(gtable.id == ptable.parent_item_category_id),
]
qty = len(values)
if qty == 1:
query = (table.id == values[0])
limitby = (0, 1)
else:
query = (table.id.belongs(values))
limitby = (0, qty)
rows = db(query).select(*self.fields,
left=left,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the supply_item_category Row
"""
use_code = self.use_code
name = row["supply_item_category.name"]
code = row["supply_item_category.code"]
catalog = row["supply_catalog.name"]
parent = row["supply_parent_item_category.name"]
if use_code:
name = code
elif not name:
name = code
if parent:
if use_code:
# Compact format
sep = "-"
else:
sep = " - "
name = "%s%s%s" % (name, sep, parent)
grandparent = row["supply_grandparent_item_category.name"]
if grandparent:
name = "%s%s%s" % (name, sep, grandparent)
# Check for Great-grandparent
# Trade-off "all in 1 row" vs "too many joins"
greatgrandparent = row["supply_grandparent_item_category.parent_item_category_id"]
if greatgrandparent:
# Assume no more than 6 levels of interest
db = current.db
table = current.s3db.supply_item_category
ptable = db.supply_item_category.with_alias("supply_parent_item_category")
gtable = db.supply_item_category.with_alias("supply_grandparent_item_category")
left = [ptable.on(ptable.id == table.parent_item_category_id),
gtable.on(gtable.id == ptable.parent_item_category_id),
]
query = (table.id == greatgrandparent)
fields = [table.name,
table.code,
ptable.name,
ptable.code,
gtable.name,
gtable.code,
]
row = db(query).select(*fields,
left=left,
limitby=(0, 1)).first()
if row:
if use_code:
greatgrandparent = row["supply_item_category.code"]
greatgreatgrandparent = row["supply_parent_item_category.code"]
else:
greatgrandparent = row["supply_item_category.name"] or row["supply_item_category.code"]
greatgreatgrandparent = row["supply_parent_item_category.name"] or row["supply_parent_item_category.code"]
name = "%s%s%s" % (name, sep, greatgrandparent)
if greatgreatgrandparent:
name = "%s%s%s" % (name, sep, greatgreatgrandparent)
if use_code:
greatgreatgreatgrandparent = row["supply_grandparent_item_category.code"]
else:
greatgreatgreatgrandparent = row["supply_grandparent_item_category.name"] or row["supply_grandparent_item_category.code"]
if greatgreatgreatgrandparent:
name = "%s%s%s" % (name, sep, greatgreatgreatgrandparent)
if catalog:
name = "%s > %s" % (catalog, name)
return s3_str(name)
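        # Hedged illustration (not executed), with invented category data:
        # code="MED", parent code="SUP", catalog "Default" and use_code=True
        # would render as "Default > MED-SUP"; with use_code=False the full
        # category names are used and joined with " - " instead of "-".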
# =============================================================================
def item_um_from_name(name):
"""
Retrieve the Unit of Measure from a name
"""
for um_pattern in um_patterns:
m = re.search(um_pattern, name)
if m:
um = m.group(1).strip()
            # Remove the um from the name
            name = re.sub(um_pattern, "", name)
            # Remove trailing comma & whitespace
            name = re.sub("(,)$", "", name).strip()
return (name, um)
return (name, None)
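# Hedged illustration (not executed): assuming um_patterns contains a
# pattern such as r",\s*(\d+\s?(?:kg|g|l|ml))\s*$" (hypothetical), then
# item_um_from_name("Rice, 50 kg") would return ("Rice", "50 kg"),
# while a name with no recognisable unit returns (name, None).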
# =============================================================================
def supply_catalog_rheader(r):
""" Resource Header for Catalogs """
if r.representation == "html":
catalog = r.record
if catalog:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Categories"), "item_category"),
(T("Items"), "catalog_item"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
catalog.name,
),
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(catalog.organisation_id),
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
def supply_item_rheader(r):
""" Resource Header for Items """
if r.representation == "html":
item = r.record
if item:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Packs"), "item_pack"),
(T("Alternative Items"), "item_alt"),
(T("In Inventories"), "inv_item"),
(T("Requested"), "req_item"),
(T("In Catalogs"), "catalog_item"),
]
            if item.kit:
tabs.append((T("Kit Items"), "kit_item"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR( TH("%s: " % table.name.label),
item.name,
),
TR( TH("%s: " % table.brand_id.label),
table.brand_id.represent(item.brand_id),
),
TR( TH("%s: " % table.model.label),
item.model or current.messages["NONE"],
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
class SupplyItemPackQuantity(object):
""" Virtual Field for pack_quantity """
def __init__(self, tablename):
self.tablename = tablename
def __call__(self, row):
default = 0
tablename = self.tablename
if hasattr(row, tablename):
row = object.__getattribute__(row, tablename)
try:
item_pack_id = row.item_pack_id
except AttributeError:
return default
if item_pack_id:
return item_pack_id.quantity
else:
return default
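    # Hedged usage sketch (not executed): this class is typically attached
    # as a virtual field, e.g.
    #   table.pack_quantity = Field.Method("pack_quantity",
    #                                      SupplyItemPackQuantity(tablename))
    # so that the method yields the quantity of the referenced item pack,
    # or 0 when no pack is set on the row.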
# =============================================================================
def supply_item_entity_category(row):
""" Virtual field: category """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
try:
item_id = row.item_id
except AttributeError:
return None
table = current.s3db.supply_item
query = (table.id == item_id)
record = current.db(query).select(table.item_category_id,
limitby=(0, 1)).first()
if record:
return table.item_category_id.represent(record.item_category_id)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def supply_item_entity_country(row):
""" Virtual field: country """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
itable = s3db[instance_type]
ltable = s3db.gis_location
if instance_type == "inv_inv_item":
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(stable.site_id == itable.site_id) & \
(ltable.id == stable.location_id)
record = current.db(query).select(ltable.L0,
limitby=(0, 1)).first()
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.recv_id) & \
(stable.site_id == rtable.site_id) & \
(ltable.id == stable.location_id)
record = current.db(query).select(ltable.L0,
limitby=(0, 1)).first()
elif instance_type == "proc_plan_item":
ptable = s3db.proc_plan
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(ptable.id == itable.plan_id) & \
(stable.site_id == ptable.site_id) & \
(ltable.id == stable.location_id)
record = current.db(query).select(ltable.L0,
limitby=(0, 1)).first()
else:
# @ToDo: Assets and req_items
record = None
if record:
return record.L0 or current.T("Unknown")
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def supply_item_entity_organisation(row):
""" Virtual field: organisation """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
organisation_represent = s3db.org_OrganisationRepresent(acronym=False)
itable = s3db[instance_type]
if instance_type == "inv_inv_item":
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(stable.site_id == itable.site_id)
record = current.db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
elif instance_type == "proc_plan_item":
rtable = s3db.proc_plan
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.plan_id) & \
(stable.site_id == rtable.site_id)
record = current.db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
stable = s3db.org_site
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.recv_id) & \
(stable.site_id == rtable.site_id)
record = current.db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
else:
# @ToDo: Assets and req_items
record = None
if record:
return organisation_represent(record.organisation_id)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
def supply_item_entity_contacts(row):
""" Virtual field: contacts (site_id) """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
db = current.db
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
itable = s3db[instance_type]
if instance_type == "inv_inv_item":
query = (itable[ekey] == entity_id)
record = db(query).select(itable.site_id,
limitby=(0, 1)).first()
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.recv_id)
record = db(query).select(rtable.site_id,
limitby=(0, 1)).first()
elif instance_type == "proc_plan_item":
ptable = s3db.proc_plan
query = (itable[ekey] == entity_id) & \
(ptable.id == itable.plan_id)
record = db(query).select(ptable.site_id,
limitby=(0, 1)).first()
else:
# @ToDo: Assets and req_items
record = None
default = current.messages["NONE"]
if not record:
return default
otable = s3db.org_office
query = (otable.site_id == record.site_id)
office = db(query).select(otable.id,
otable.comments,
limitby=(0, 1)).first()
if office:
if current.request.extension in ("xls", "pdf"):
if office.comments:
return office.comments
else:
return default
elif office.comments:
comments = s3_comments_represent(office.comments,
show_link=False)
else:
comments = default
return A(comments,
_href = URL(f="office", args = [office.id]))
else:
return default
# -------------------------------------------------------------------------
def supply_item_entity_status(row):
""" Virtual field: status """
if hasattr(row, "supply_item_entity"):
row = row.supply_item_entity
else:
return None
db = current.db
s3db = current.s3db
etable = s3db.supply_item_entity
ekey = etable._id.name
try:
instance_type = row.instance_type
except AttributeError:
return None
try:
entity_id = row[ekey]
except AttributeError:
return None
itable = s3db[instance_type]
status = None
if instance_type == "inv_inv_item":
query = (itable[ekey] == entity_id)
record = current.db(query).select(itable.expiry_date,
limitby=(0, 1)).first()
if record:
T = current.T
if record.expiry_date:
status = T("Stock Expires %(date)s") % \
dict(date=record.expiry_date)
else:
status = T("In Stock")
elif instance_type == "proc_plan_item":
rtable = s3db.proc_plan
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.plan_id)
record = current.db(query).select(rtable.eta,
limitby=(0, 1)).first()
if record:
T = current.T
if record.eta:
status = T("Planned %(date)s") % dict(date=record.eta)
else:
status = T("Planned Procurement")
elif instance_type == "inv_track_item":
rtable = s3db.inv_recv
query = (itable[ekey] == entity_id) & \
(rtable.id == itable.send_inv_item_id)
record = current.db(query).select(rtable.eta,
limitby=(0, 1)).first()
if record:
T = current.T
if record.eta:
status = T("Order Due %(date)s") % dict(date=record.eta)
else:
status = T("On Order")
else:
# @ToDo: Assets and req_items
return current.messages["NONE"]
return status or current.messages["NONE"]
# =============================================================================
def supply_item_controller():
""" RESTful CRUD controller """
s3 = current.response.s3
s3db = current.s3db
def prep(r):
if r.component:
if r.component_name == "inv_item":
# Inventory Items need proper accountability so are edited through inv_adj
s3db.configure("inv_inv_item",
listadd=False,
deletable=False)
# Filter to just item packs for this Item
inv_item_pack_requires = IS_ONE_OF(current.db,
"supply_item_pack.id",
s3db.supply_item_pack_represent,
sort=True,
filterby = "item_id",
filter_opts = (r.record.id,),
)
s3db.inv_inv_item.item_pack_id.requires = inv_item_pack_requires
elif r.component_name == "req_item":
# This is a report not a workflow
s3db.configure("req_req_item",
listadd=False,
deletable=False)
# Needs better workflow as no way to add the Kit Items
# else:
# caller = current.request.get_vars.get("caller", None)
# if caller == "inv_kit_item_id":
# field = r.table.kit
# field.default = True
# field.readable = field.writable = False
elif r.representation == "xls":
# Use full Category names in XLS output
s3db.supply_item.item_category_id.represent = \
supply_ItemCategoryRepresent(use_code=False)
return True
s3.prep = prep
return current.rest_controller("supply", "item",
rheader = supply_item_rheader,
)
# =============================================================================
def supply_item_entity_controller():
"""
RESTful CRUD controller
- consolidated report of inv_item, recv_item & proc_plan_item
@ToDo: Migrate JS to Static as part of migrating this to an
S3Search Widget
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
tablename = "supply_item_entity"
table = s3db[tablename]
# CRUD strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Item"),
title_display = T("Item Details"),
title_list = T("Items"),
title_update = T("Edit Item"),
label_list_button = T("List Items"),
label_delete_button = T("Delete Item"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"))
table.category = Field.Method("category",
supply_item_entity_category)
table.country = Field.Method("country",
supply_item_entity_country)
table.organisation = Field.Method("organisation",
supply_item_entity_organisation)
table.contacts = Field.Method("contacts",
supply_item_entity_contacts)
table.status = Field.Method("status",
supply_item_entity_status)
# Allow VirtualFields to be sortable/searchable
s3.no_sspag = True
s3db.configure(tablename,
deletable = False,
insertable = False,
# @ToDo: Allow VirtualFields to be used to Group Reports
#report_groupby = "category",
list_fields = [(T("Category"), "category"),
"item_id",
"quantity",
(T("Unit of Measure"), "item_pack_id"),
(T("Status"), "status"),
(current.messages.COUNTRY, "country"),
(T("Organization"), "organisation"),
#(T("Office"), "site"),
(T("Contacts"), "contacts"),
],
extra_fields = ["instance_type"],
)
def postp(r, output):
if r.interactive and not r.record:
# Provide some manual Filters above the list
rheader = DIV()
# Filter by Category
table = s3db.supply_item_category
etable = s3db.supply_item_entity
itable = s3db.supply_item
query = (etable.deleted == False) & \
(etable.item_id == itable.id) & \
(itable.item_category_id == table.id)
categories = db(query).select(table.id,
table.name,
distinct=True)
select = SELECT(_multiple="multiple", _id="category_dropdown")
for category in categories:
select.append(OPTION(category.name, _name=category.id))
rheader.append(DIV(B("%s:" % T("Filter by Category")),
BR(),
select,
_class="rfilter"))
# Filter by Status
select = SELECT(_multiple="multiple", _id="status_dropdown")
if settings.has_module("inv"):
select.append(OPTION(T("In Stock")))
select.append(OPTION(T("On Order")))
if settings.has_module("proc"):
select.append(OPTION(T("Planned Procurement")))
rheader.append(DIV(B("%s:" % T("Filter by Status")),
BR(),
select,
_class="rfilter"))
output["rheader"] = rheader
# Find Offices with Items
# @ToDo: Other Site types (how to do this as a big Join?)
table = s3db.org_office
otable = s3db.org_organisation
ltable = s3db.gis_location
fields = [ltable.L0,
#table.name,
otable.name]
query = (table.deleted == False) & \
(table.organisation_id == otable.id) & \
(ltable.id == table.location_id)
isites = []
rsites = []
psites = []
# @ToDo: Assets & Req_Items
# @ToDo: Try to do this as a Join?
if settings.has_module("inv"):
inv_itable = s3db.inv_inv_item
iquery = query & (inv_itable.site_id == table.site_id)
isites = db(iquery).select(distinct=True, *fields)
inv_ttable = s3db.inv_track_item
inv_rtable = s3db.inv_recv
rquery = query & (inv_ttable.send_inv_item_id == inv_rtable.id) & \
(inv_rtable.site_id == table.site_id)
rsites = db(rquery).select(distinct=True, *fields)
if settings.has_module("proc"):
proc_ptable = s3db.proc_plan
proc_itable = s3db.proc_plan_item
pquery = query & (proc_itable.plan_id == proc_ptable.id) & \
(proc_ptable.site_id == table.site_id)
psites = db(pquery).select(distinct=True, *fields)
sites = []
for site in isites:
if site not in sites:
sites.append(site)
for site in rsites:
if site not in sites:
sites.append(site)
for site in psites:
if site not in sites:
sites.append(site)
# Filter by Country
select = SELECT(_multiple="multiple", _id="country_dropdown")
countries = []
for site in sites:
country = site.org_office.L0
if country not in countries:
select.append(OPTION(country or T("Unknown")))
countries.append(country)
rheader.append(DIV(B("%s:" % T("Filter by Country")),
BR(),
select,
_class="rfilter"))
# Filter by Organisation
select = SELECT(_multiple="multiple", _id="organisation_dropdown")
orgs = []
for site in sites:
org = site.org_organisation.name
if org not in orgs:
select.append(OPTION(org or T("Unknown")))
orgs.append(org)
rheader.append(DIV(B("%s:" % T("Filter by Organization")),
BR(),
select,
_class="rfilter"))
# http://datatables.net/api#fnFilter
# Columns:
# 1 = Category
# 5 = Status (@ToDo: Assets & Req Items)
# 6 = Country
# 7 = Organisation
# Clear column filter before applying new one
#
# @ToDo: Hide options which are no longer relevant because
# of the other filters applied
#
s3.jquery_ready.append('''
function filterColumns(){
var oTable=$('#list').dataTable()
var values=''
$('#category_dropdown option:selected').each(function(){
values+=$(this).text()+'|'
})
var regex=(values==''?'':'^'+values.slice(0, -1)+'$')
oTable.fnFilter('',1,false)
oTable.fnFilter(regex,1,true,false)
values=''
$('#status_dropdown option:selected').each(function(){
if($(this).text()=="''' + T("On Order") + '''"){
values+=$(this).text()+'|'+"''' + T("Order") + '''.*"+'|'
}else if($(this).text()=="''' + T("Planned Procurement") + '''"){
values+="''' + T("Planned") + '''.*"+'|'
}else{
values+=$(this).text()+'|'+"''' + T("Stock") + '''.*"+'|'
}
})
var regex=(values==''?'':'^'+values.slice(0,-1)+'$')
oTable.fnFilter('',5,false)
oTable.fnFilter(regex,5,true,false)
values=''
$('#country_dropdown option:selected').each(function(){
values+=$(this).text()+'|'
})
var regex=(values==''?'':'^'+values.slice(0,-1)+'$')
oTable.fnFilter('',6,false)
oTable.fnFilter(regex,6,true,false)
values=''
$('#organisation_dropdown option:selected').each(function(){
values+=$(this).text()+'|'
})
var regex=(values==''? '':'^'+values.slice(0,-1)+'$')
oTable.fnFilter('',7,false)
oTable.fnFilter(regex,7,true,false)
}
$('#category_dropdown').change(function(){
filterColumns()
var values=[]
$('#category_dropdown option:selected').each(function(){
values.push($(this).attr('name'))
})
if(values.length){
$('#list_formats a').attr('href',function(){
var href=this.href.split('?')[0]+'?item_entity.item_id$item_category_id='+values[0]
for(i=1;i<=(values.length-1);i++){
href=href+','+values[i]
}
return href
})
}else{
$('#list_formats a').attr('href',function(){
return this.href.split('?')[0]
})
}
})
$('#status_dropdown').change(function(){
filterColumns()
})
$('#country_dropdown').change(function(){
filterColumns()
})
$('#organisation_dropdown').change(function(){
filterColumns()
})''')
return output
s3.postp = postp
output = current.rest_controller("supply", "item_entity",
hide_filter = True,
)
return output
# -------------------------------------------------------------------------
def supply_get_shipping_code(type, site_id, field):
db = current.db
if site_id:
table = current.s3db.org_site
site = db(table.site_id == site_id).select(table.code,
limitby=(0, 1)
).first()
if site:
scode = site.code
else:
scode = "###"
code = "%s-%s-" % (type, scode)
else:
code = "%s-###-" % (type)
number = 0
if field:
query = (field.like("%s%%" % code))
ref_row = db(query).select(field,
limitby=(0, 1),
orderby=~field).first()
if ref_row:
ref = ref_row(field)
number = int(ref[-6:])
return "%s%06d" % (code, number + 1)
# END =========================================================================
|
mit
| 6,612,580,975,636,968,000
| 38.39402
| 161
| 0.433469
| false
| 4.859417
| false
| false
| false
|
trenton3983/Artificial_Intelligence_for_Humans
|
vol3/vol3-python-examples/examples/example_mnist_conv.py
|
1
|
2549
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import lasagne
from lib.aifh.mnist import *
import theano
import theano.tensor as T
import time
import types
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import sigmoid
from lasagne.nonlinearities import softmax
from lasagne.nonlinearities import rectify
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from lasagne.layers import Conv2DLayer
layers0 = [('input', InputLayer),
('conv0', Conv2DLayer),
('dense0', DenseLayer),
('output', DenseLayer)]
net0 = NeuralNet(layers=layers0,
input_shape=(None, 1, 28, 28),
conv0_num_filters=32,
conv0_filter_size=(5, 5),
conv0_nonlinearity=lasagne.nonlinearities.rectify,
dense0_num_units=1000,
dense0_nonlinearity = rectify,
output_num_units=10,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=0.1,
update_momentum=0.9,
regression=False,
on_epoch_finished=[
EarlyStopping(patience=5)
],
verbose=1,
max_epochs=100)
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(True)
def my_split(self, X, y, eval_size):
return X_train,X_val,y_train,y_val
net0.train_test_split = types.MethodType(my_split, net0)
net0.fit(X_train, y_train)
y_predict = net0.predict(X_val)
count = 0
wrong = 0
for element in zip(X_val,y_val,y_predict):
if element[1] != element[2]:
wrong = wrong + 1
count = count + 1
print("Incorrect {}/{} ({}%)".format(wrong,count,(wrong/count)*100))
|
apache-2.0
| -293,695,254,517,834,240
| 28.988235
| 76
| 0.708513
| false
| 3.439946
| false
| false
| false
|
demisto/content
|
Packs/Cylance_Protect/Integrations/Cylance_Protect_v2/Cylance_Protect_v2.py
|
1
|
50344
|
from CommonServerPython import *
import jwt
import uuid
import requests
import json
import re
import zipfile
from StringIO import StringIO
from datetime import datetime, timedelta
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CONSTANTS
TOKEN_TIMEOUT = 300 # 5 minutes
URI_AUTH = 'auth/v2/token'
URI_DEVICES = 'devices/v2'
URI_POLICIES = 'policies/v2'
URI_ZONES = 'zones/v2'
URI_THREATS = 'threats/v2'
URI_LISTS = 'globallists/v2'
SCOPE_DEVICE_LIST = 'device:list'
SCOPE_DEVICE_READ = 'device:read'
SCOPE_DEVICE_UPDATE = 'device:update'
SCOPE_DEVICE_THREAT_LIST = 'device:threatlist'
SCOPE_POLICY_LIST = 'policy:list'
SCOPE_POLICY_READ = 'policy:read'
SCOPE_ZONE_CREATE = 'zone:create'
SCOPE_ZONE_LIST = 'zone:list'
SCOPE_ZONE_READ = 'zone:read'
SCOPE_ZONE_UPDATE = 'zone:update'
SCOPE_THREAT_READ = 'threat:read'
SCOPE_THREAT_DEVICE_LIST = 'threat:devicelist'
SCOPE_THREAT_UPDATE = 'threat:update'
SCOPE_GLOBAL_LIST = 'globallist:list'
SCOPE_THREAT_LIST = 'threat:list'
SCOPE_GLOBAL_LIST_CREATE = 'globallist:create'
SCOPE_GLOBAL_LIST_DELETE = 'globallist:delete'
# PREREQUISITES
def load_server_url():
""" Cleans and loads the server url from the configuration """
url = demisto.params()['server']
    # Strip any trailing slashes
    url = re.sub(r'/+$', '', url)
return url
# GLOBALS
APP_ID = demisto.params()['app_id']
APP_SECRET = demisto.params()['app_secret']
TID = demisto.params()['tid']
SERVER_URL = load_server_url()
FILE_THRESHOLD = demisto.params()['file_threshold']
USE_SSL = not demisto.params().get('unsecure', False)
# HELPERS
def generate_jwt_times():
"""
Generates the epoch time window in which the token will be valid
Returns the current timestamp and the timeout timestamp (in that order)
"""
now = datetime.utcnow()
timeout_datetime = now + timedelta(seconds=TOKEN_TIMEOUT)
epoch_time = int((now - datetime(1970, 1, 1)).total_seconds())
epoch_timeout = int((timeout_datetime - datetime(1970, 1, 1)).total_seconds())
return epoch_time, epoch_timeout
def api_call(uri, method='post', headers={}, body={}, params={}, accept_404=False):
"""
Makes an API call to the server URL with the supplied uri, method, headers, body and params
"""
url = '%s/%s' % (SERVER_URL, uri)
res = requests.request(method, url, headers=headers, data=json.dumps(body), params=params, verify=USE_SSL)
if res.status_code < 200 or res.status_code >= 300:
if res.status_code == 409 and str(res.content).find('already an entry for this threat') != -1:
raise Warning(res.content)
if not res.status_code == 404 and not accept_404:
return_error(
'Got status code ' + str(res.status_code) + ' with body ' + res.content + ' with headers ' + str(
res.headers))
return json.loads(res.text) if res.text else res.ok
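# Hedged usage sketch (not executed): the *_request helpers in this module
# call api_call roughly like
#   res = api_call(uri=URI_DEVICES, method='get',
#                  headers={'Content-Type': 'application/json',
#                           'Authorization': 'Bearer ' + access_token})
# and receive either the parsed JSON body or a boolean success flag when
# the response has no body.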
def get_authentication_token(scope=None):
"""
Generates a JWT authorization token with an optional scope and queries the API for an access token
Returns the received API access token
"""
# Generate token ID
token_id = str(uuid.uuid4())
# Generate current time & token timeout
epoch_time, epoch_timeout = generate_jwt_times()
# Token claims
claims = {
'exp': epoch_timeout,
'iat': epoch_time,
'iss': 'http://cylance.com',
'sub': APP_ID,
'tid': TID,
'jti': token_id
}
if scope:
claims['scp'] = scope
# Encode the token
encoded = jwt.encode(claims, APP_SECRET, algorithm='HS256')
payload = {'auth_token': encoded}
headers = {'Content-Type': 'application/json; charset=utf-8'}
res = api_call(method='post', uri=URI_AUTH, body=payload, headers=headers)
return res['access_token']
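# Hedged usage sketch (not executed): callers request a token scoped to the
# operation they need, e.g.
#   token = get_authentication_token(scope=SCOPE_DEVICE_LIST)
#   headers = {'Content-Type': 'application/json',
#              'Authorization': 'Bearer ' + token}
# which mirrors how the *_request helpers below build their headers.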
def threat_to_incident(threat):
incident = {
'name': 'Cylance Protect v2 threat ' + threat['name'],
'occurred': threat['last_found'] + 'Z',
'rawJSON': json.dumps(threat)
}
host_name = None
devices = get_threat_devices_request(threat['sha256'], None, None)['page_items']
for device in devices:
if device['date_found'] == threat['last_found']:
host_name = device['name']
labels = [{'type': 'Classification', 'value': threat['classification']}, {'type': 'MD5', 'value': threat['md5']},
{'type': 'SHA256', 'value': threat['sha256']}, {'type': 'ThreatLastFound', 'value': threat['last_found']},
{'type': 'HostName', 'value': host_name}]
incident['labels'] = labels
return incident
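# Hedged illustration (not executed), with invented threat data: a threat
# {'name': 'bad.exe', 'sha256': 'abc...', 'md5': 'def...',
#  'classification': 'Malware', 'last_found': '2020-01-01T00:00:00'}
# becomes an incident named "Cylance Protect v2 threat bad.exe" with
# occurred "2020-01-01T00:00:00Z", the raw threat in rawJSON and one label
# per listed attribute (plus HostName when a matching device is found).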
def normalize_score(score):
"""
Translates API raw float (-1 to 1) score to UI score (-100 to 100)
"""
return score * 100
def translate_score(score, threshold):
if score > 0:
dbot_score = 1
elif threshold <= score:
dbot_score = 2
else:
dbot_score = 3
return dbot_score
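# Hedged illustration (not executed): on the -100..100 scale produced by
# normalize_score and with a threshold of e.g. -59, a score of 30 maps to
# 1 (good), -40 maps to 2 (suspicious) and -80 maps to 3 (bad), matching
# the DBotScore values set by the commands below.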
# FUNCTIONS
def test():
access_token = get_authentication_token()
if not access_token:
raise Exception('Unable to get access token')
demisto.results('ok')
def get_devices():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
result = get_devices_request(page, page_size)
devices = result['page_items']
hr = []
devices_context = []
endpoint_context = []
for device in devices:
current_device_context = {
'AgentVersion': device['agent_version'],
'DateFirstRegistered': device['date_first_registered'],
'ID': device['id'],
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['name'],
'State': device['state']
}
if device['policy']:
policy = {}
if device['policy']['id']:
policy['ID'] = device['policy']['id']
if device['policy']['name']:
policy['Name'] = device['policy']['name']
if policy:
current_device_context['Policy'] = policy
devices_context.append(current_device_context)
endpoint_context.append({
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['name']
})
current_device = dict(device)
current_device['ip_addresses'] = ', '.join(current_device['ip_addresses'])
current_device['mac_addresses'] = ', '.join(current_device['mac_addresses'])
current_device['policy'] = current_device['policy']['name']
hr.append(current_device)
ec = {
'CylanceProtect.Device(val.ID && val.ID === obj.ID)': devices_context,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context
}
entry = {
'Type': entryTypes['note'],
'Contents': devices,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Cylance Protect Devices', hr, headerTransform=underscoreToCamelCase,
removeNull=True),
'EntryContext': ec
}
demisto.results(entry)
def get_devices_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_DEVICE_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_DEVICES, method='get', headers=headers, params=params)
return res
def get_device():
device_id = demisto.args()['id']
device = get_device_request(device_id)
hr = []
if device:
device_context = {
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['host_name'],
'OSVersion': device['os_version'],
'UpdateAvailable': device['update_available'],
'BackgroundDetection': device['background_detection'],
'DateFirstRegistered': device['date_first_registered'],
'DateLastModified': device['date_last_modified'],
'DateOffline': device['date_offline'],
'IsSafe': device['is_safe'],
'LastLoggedInUser': device['last_logged_in_user'],
'State': device['state'],
'ID': device['id'],
'Name': device['name']
}
if device['update_type']:
device_context['UpdateType'] = device['update_type']
if device['policy']:
policy = {}
if device['policy']['id']:
policy['ID'] = device['policy']['id']
if device['policy']['name']:
policy['Name'] = device['policy']['name']
if policy:
device_context['Policy'] = policy
endpoint_context = {
'IPAddress': device['ip_addresses'],
'MACAdress': device['mac_addresses'],
'Hostname': device['host_name'],
'OSVersion': device['os_version']
}
ec = {
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context,
'CylanceProtect.Device(val.ID && val.ID === obj.ID)': device_context
}
current_device = dict(device)
current_device['ip_addresses'] = ', '.join(current_device['ip_addresses'])
current_device['mac_addresses'] = ', '.join(current_device['mac_addresses'])
current_device['policy'] = current_device['policy']['name']
hr.append(current_device)
else:
ec = {}
title = 'Cylance Protect Device ' + device_id
entry = {
'Type': entryTypes['note'],
'Contents': device,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, hr, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
}
demisto.results(entry)
def get_device_request(device_id):
access_token = get_authentication_token(scope=SCOPE_DEVICE_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='get', headers=headers)
return res
def update_device():
device_id = demisto.args()['id']
name = demisto.args()['name'] if 'name' in demisto.args() else None
policy_id = demisto.args()['policyId'] if 'policyId' in demisto.args() else None
add_zones = demisto.args()['addZones'] if 'addZones' in demisto.args() else None
remove_zones = demisto.args()['removeZones'] if 'removeZones' in demisto.args() else None
update_device_request(device_id, name, policy_id, add_zones, remove_zones)
hr = {}
if name:
hr['Name'] = name
if policy_id:
hr['PolicyID'] = policy_id
if add_zones:
hr['AddedZones'] = add_zones
if remove_zones:
hr['RemovedZones'] = remove_zones
device = hr.copy()
device['id'] = device_id
title = 'Device ' + device_id + ' was updated successfully.'
entry = {
'Type': entryTypes['note'],
'Contents': device,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, [hr])
}
demisto.results(entry)
def update_device_request(device_id, name=None, policy_id=None, add_zones=None, remove_zones=None):
access_token = get_authentication_token(scope=SCOPE_DEVICE_UPDATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {}
if name:
body['name'] = name
if policy_id:
body['policy_id'] = policy_id
if add_zones:
body['add_zone_ids'] = [add_zones]
if remove_zones:
body['remove_zone_ids'] = [remove_zones]
# Do we have anything to update?
if not body:
raise Exception('No changes detected')
uri = '%s/%s' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='put', headers=headers, body=body)
return res
def get_device_threats():
device_id = demisto.args()['id']
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
device_threats = get_device_threats_request(device_id, page, page_size)['page_items']
dbot_score_array = []
for threat in device_threats:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
dbot_score_array.append({
'Indicator': threat.get('sha256'),
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
})
if device_threats:
threats_context = createContext(data=device_threats, keyTransform=underscoreToCamelCase)
threats_context = add_capitalized_hash_to_context(threats_context)
ec = {
'File': threats_context,
'DBotScore': dbot_score_array
}
title = 'Cylance Protect Device Threat ' + device_id
demisto.results({
'Type': entryTypes['note'],
'Contents': device_threats,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, device_threats, headerTransform=underscoreToCamelCase),
'EntryContext': ec
})
else:
demisto.results('No threats found.')
def get_device_threats_request(device_id, page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_DEVICE_THREAT_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
uri = '%s/%s/threats' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='get', headers=headers, params=params)
return res
def get_policies():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
policies = get_policies_request(page, page_size)['page_items']
context_policies = createContext(data=policies, keyTransform=underscoreToCamelCase)
ec = {
'CylanceProtect.Policies(val.id && val.id === obj.id)': context_policies
}
title = 'Cylance Protect Policies'
entry = {
'Type': entryTypes['note'],
'Contents': policies,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, policies, headerTransform=underscoreToCamelCase),
'EntryContext': ec
}
demisto.results(entry)
def get_policies_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_POLICY_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_POLICIES, method='get', headers=headers, params=params)
return res
def create_zone():
name = demisto.args()['name']
policy_id = demisto.args()['policy_id']
criticality = demisto.args()['criticality']
zone = create_zone_request(name, policy_id, criticality)
title = 'Zone ' + name + ' was created successfully.'
demisto.results({
'Type': entryTypes['note'],
'Contents': zone,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, [zone], headerTransform=underscoreToCamelCase)
})
def create_zone_request(name, policy_id, criticality):
access_token = get_authentication_token(scope=SCOPE_ZONE_CREATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'name': name,
'policy_id': policy_id,
'criticality': criticality
}
res = api_call(uri=URI_ZONES, method='post', headers=headers, body=body)
return res
def get_zones():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
zones = get_zones_request(page, page_size)['page_items']
context_zones = createContext(data=zones, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'CylanceProtect.Zones(val.Id && val.Id === obj.Id)': context_zones
}
title = 'Cylance Protect Zones'
demisto.results({
'Type': entryTypes['note'],
'Contents': zones,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, zones, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
def get_zones_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_ZONE_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_ZONES, method='get', headers=headers, params=params)
return res
def get_zone():
zone_id = demisto.args()['id']
zone = get_zone_request(zone_id)
context_zone = createContext(data=zone, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'CylanceProtect.Zones(val.Id && val.Id === obj.Id)': context_zone
}
title = 'Cylance Protect Zone ' + zone_id
demisto.results({
'Type': entryTypes['note'],
'Contents': zone,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, zone, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
def get_zone_request(zone_id):
access_token = get_authentication_token(scope=SCOPE_ZONE_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_ZONES, zone_id)
res = api_call(uri=uri, method='get', headers=headers)
return res
def update_zone():
zone_id = demisto.args()['id']
# Get current zone and fill in requires missing arguments
current_zone = get_zone_request(zone_id)
# Details to update
name = demisto.args()['name'] if 'name' in demisto.args() else current_zone['name']
policy_id = demisto.args()['policy_id'] if 'policy_id' in demisto.args() else current_zone['policy_id']
criticality = demisto.args()['criticality'] if 'criticality' in demisto.args() else current_zone['criticality']
zone = update_zone_request(zone_id, name, policy_id, criticality)
hr = {}
if name:
hr['Name'] = name
if policy_id:
hr['PolicyID'] = policy_id
if criticality:
hr['Criticality'] = criticality
title = 'Zone was updated successfully.'
demisto.results({
'Type': entryTypes['note'],
'Contents': zone,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, [hr])
})
def update_zone_request(zone_id, name, policy_id, criticality):
access_token = get_authentication_token(scope=SCOPE_ZONE_UPDATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {}
if name:
body['name'] = name
if policy_id:
body['policy_id'] = policy_id
if criticality:
body['criticality'] = criticality
# Do we have anything to update?
if not body:
raise Exception('No changes detected')
uri = '%s/%s' % (URI_ZONES, zone_id)
res = api_call(uri=uri, method='put', headers=headers, body=body)
return res
def get_threat():
sha256 = demisto.args().get('sha256')
threat = get_threat_request(sha256)
if threat:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
context_threat = createContext(data=threat, keyTransform=underscoreToCamelCase, removeNull=True)
context_threat = add_capitalized_hash_to_context(context_threat)
ec = {
'File': context_threat,
'DBotScore': {
'Indicator': sha256,
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
}
}
title = 'Cylance Protect Threat ' + sha256
demisto.results({
'Type': entryTypes['note'],
'Contents': threat,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, threat, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
else:
demisto.results('Threat was not found.')
def get_threat_request(sha256):
access_token = get_authentication_token(scope=SCOPE_THREAT_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_THREATS, sha256)
res = api_call(uri=uri, method='get', headers=headers, body={}, params={}, accept_404=False)
return res
def get_threats():
page = demisto.args().get('pageNumber')
page_size = demisto.args().get('pageSize')
threats = get_threats_request(page, page_size)['page_items']
dbot_score_array = []
for threat in threats:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
dbot_score_array.append({
'Indicator': threat.get('sha256'),
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
})
context_threat = createContext(data=threats, keyTransform=underscoreToCamelCase, removeNull=True)
context_threat = add_capitalized_hash_to_context(context_threat)
ec = {
'File': context_threat,
'DBotScore': dbot_score_array
}
title = 'Cylance Protect Threats'
demisto.results({
'Type': entryTypes['note'],
'Contents': threats,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, threats, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
def get_threats_request(page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_THREAT_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
    if page:
        params['page'] = page
    if page_size:
        params['page_size'] = page_size
res = api_call(uri=URI_THREATS, method='get', headers=headers, params=params)
return res
def get_threat_devices():
threat_hash = demisto.args()['sha256']
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
threats = get_threat_devices_request(threat_hash, page, page_size)['page_items']
if threats:
threats_context = threats[:]
for threat in threats:
threat['ip_addresses'] = ', '.join(threat['ip_addresses'])
threat['mac_addresses'] = ', '.join(threat['mac_addresses'])
file_paths = []
endpoint_context = []
devices_context = []
for threat in threats_context:
endpoint_context.append({
'Hostname': threat['name'],
'IPAddress': threat['ip_addresses'],
'MACAddress': threat['mac_addresses']
})
current_device = {
'Hostname': threat['name'],
'IPAddress': threat['ip_addresses'],
'MACAddress': threat['mac_addresses'],
'AgentVersion': threat['agent_version'],
'DateFound': threat['date_found'],
'FilePath': threat['file_path'],
'ID': threat['id'],
'State': threat['state'],
'FileStatus': threat['file_status']
}
if threat['policy_id']:
current_device['PolicyID'] = threat['policy_id']
devices_context.append(current_device)
file_path = threat.pop('file_path')
file_paths.append({
'FilePath': file_path
})
file_context = {
'SHA256': threat_hash,
'Path': file_paths
}
ec = {
'File': file_context,
'Endpoint(val.Hostname && val.Hostname === obj.Hostname)': endpoint_context,
'CylanceProtect.Threat(val.SHA256 && val.SHA256 === obj.SHA256)': {
'SHA256': threat_hash,
'Devices': devices_context
}
}
title = 'Cylance Protect Threat ' + threat_hash + ' Devices'
demisto.results({
'Type': entryTypes['note'],
'Contents': threats,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, threats, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
else:
demisto.results('No devices found on given threat.')
def get_threat_devices_request(threat_hash, page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_THREAT_DEVICE_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
uri = '%s/%s/devices' % (URI_THREATS, threat_hash)
res = api_call(uri=uri, method='get', headers=headers, params=params)
return res
def get_list():
page = demisto.args()['pageNumber'] if 'pageNumber' in demisto.args() else None
page_size = demisto.args()['pageSize'] if 'pageSize' in demisto.args() else None
lst = get_list_request(demisto.args()['listTypeId'], page, page_size)['page_items']
dbot_score_array = []
for threat in lst:
dbot_score = 0
score = threat.get('cylance_score', None)
if score:
threat['cylance_score'] = normalize_score(threat['cylance_score'])
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(threat['cylance_score'], int(threshold))
dbot_score_array.append({
'Indicator': threat['sha256'],
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
})
if lst:
context_list = createContext(data=lst, keyTransform=underscoreToCamelCase, removeNull=True)
        context_list = add_capitalized_hash_to_context(context_list)
ec = {
'File': context_list,
'DBotScore': dbot_score_array
}
title = 'Cylance Protect Global List'
demisto.results({
'Type': entryTypes['note'],
'Contents': lst,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, lst, headerTransform=underscoreToCamelCase, removeNull=True),
'EntryContext': ec
})
else:
demisto.results('No list of this type was found.')
def get_list_request(list_type_id, page=None, page_size=None):
access_token = get_authentication_token(scope=SCOPE_GLOBAL_LIST)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
params = {}
if list_type_id == 'GlobalQuarantine':
params['listTypeId'] = 0
else: # List Type ID is GlobalSafe
params['listTypeId'] = 1
if page:
params['page'] = page
if page_size:
params['page_size'] = page_size
res = api_call(uri=URI_LISTS, method='get', headers=headers, params=params)
return res
def get_list_entry_by_hash(sha256=None, list_type_id=None):
if not sha256:
sha256 = demisto.args()['sha256']
if not list_type_id:
list_type_id = demisto.args()['listTypeId']
total_pages = 0
current_page = 0
found_hash = None
while not found_hash and total_pages >= current_page:
if not current_page:
current_page = 1
lst = get_list_request(list_type_id, current_page, 200)
if not total_pages:
total_pages = lst['total_pages']
for i in lst['page_items']:
if i['sha256'] == sha256:
found_hash = i
break
current_page += 1
if demisto.command() == 'cylance-protect-get-list-entry':
if found_hash:
context_list = createContext(data=found_hash, keyTransform=underscoreToCamelCase, removeNull=True)
ec = {
'CylanceListSearch': context_list
}
title = 'Cylance Protect Global List Entry'
demisto.results({
'Type': entryTypes['note'],
'Contents': found_hash,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, found_hash, headerTransform=underscoreToCamelCase,
removeNull=True),
'EntryContext': ec
})
else:
demisto.results("Hash not found")
else:
return found_hash
def get_indicators_report():
url = 'https://protect.cylance.com/Reports/ThreatDataReportV1/indicators/' + demisto.args()['token']
res = requests.request('GET', url, verify=USE_SSL)
filename = 'Indicators_Report.csv'
demisto.results(fileResult(filename, res.content))
def update_device_threats():
device_id = demisto.args()['device_id']
threat_id = demisto.args()['threat_id']
event = demisto.args()['event']
update_device_threats_request(device_id, threat_id, event)
demisto.results('Device threat was updated successfully.')
def update_device_threats_request(device_id, threat_id, event):
access_token = get_authentication_token(scope=SCOPE_THREAT_UPDATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'threat_id': threat_id,
'event': event
}
uri = '%s/%s/threats' % (URI_DEVICES, device_id)
res = api_call(uri=uri, method='post', headers=headers, body=body)
return res
def download_threat():
contents = {}
context = {}
dbot_score = 0
sha256 = demisto.args()['sha256']
threat_url = download_threat_request(sha256)
threat_file = requests.get(threat_url, allow_redirects=True, verify=USE_SSL)
if threat_file.status_code == 200:
if demisto.args()['unzip'] == "yes":
file_archive = StringIO(threat_file.content)
zip_file = zipfile.ZipFile(file_archive)
file_data = zip_file.read(sha256.upper(), pwd='infected')
demisto.results(fileResult(sha256, file_data))
else:
            demisto.results(fileResult(sha256 + '.zip', threat_file.content))
else:
return_error('Could not fetch the file')
threat = get_threat_request(sha256)
if threat:
# add data about the threat if found
if threat.get('cylance_score'):
score = normalize_score(threat.get('cylance_score'))
threshold = demisto.args().get('threshold', FILE_THRESHOLD)
dbot_score = translate_score(score, int(threshold))
contents = {
'Download URL': threat_url,
'File Name': threat.get('name'),
'File Size': threat.get('file_size'),
'Detected By': threat.get('detected_by'),
'GlobalQuarantine': threat.get('global_quarantined'),
'Safelisted': threat.get('safelisted'),
'Timestamp': threat.get('cert_timestamp'),
}
context[outputPaths['file']] = {
'DownloadURL': threat_url,
'SHA256': threat.get('sha256'),
'Name': threat.get('name'),
'Size': threat.get('file_size'),
'Safelisted': threat.get('safelisted'),
'Timestamp': threat.get('cert_timestamp'),
'MD5': threat.get('md5')
}
if dbot_score == 3:
context[outputPaths['file']]['Malicious'] = {
'Vendor': 'Cylance Protect',
'Description': 'Score determined by get threat command'
}
context[outputPaths['dbotscore']] = {
'Indicator': threat.get('sha256'),
'Type': 'file',
'Vendor': 'Cylance Protect',
'Score': dbot_score
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Cylance Protect - Downloading threat attached to the following hash: '
+ sha256, contents),
'EntryContext': context
})
def download_threat_request(hash):
access_token = get_authentication_token(scope=SCOPE_THREAT_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s/%s' % (URI_THREATS, "download", hash)
res = api_call(uri=uri, method='get', headers=headers)
if not res['url']:
return_error('No url was found')
return res['url']
def add_hash_to_list():
context = {}
sha256 = demisto.args().get('sha256')
list_type = demisto.args().get('listType')
reason = demisto.args().get('reason')
category = demisto.args().get('category')
if list_type == "GlobalSafe" and not category:
return_error('Category argument is required for list type of Global Safe')
add_hash = add_hash_to_list_request(sha256, list_type, reason, category)
if not add_hash:
return_error('Could not add hash to list')
contents = {
'Threat File SHA256': sha256,
'List Type': list_type,
'Category': category,
'Reason': reason
}
context[outputPaths['file']] = {
'SHA256': sha256,
'Cylance': {
'ListType': list_type,
'Category': category
}
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The requested threat has been successfully added to ' + list_type + ' hashlist.', contents),
'EntryContext': context
})
def add_hash_to_list_request(sha256, list_type, reason, category=None):
access_token = get_authentication_token(scope=SCOPE_GLOBAL_LIST_CREATE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'sha256': sha256,
'list_type': list_type,
'reason': reason
}
if category:
body['category'] = category.replace(" ", "")
res = api_call(uri=URI_LISTS, method='post', headers=headers, body=body)
return res
def delete_hash_from_lists():
sha256 = demisto.args().get('sha256')
list_type = demisto.args().get('listType')
context = {}
delete_hash_from_lists_request(sha256, list_type)
contents = {
'Threat File SHA256': sha256,
'Threat List Type': list_type
}
context[outputPaths['file']] = {
'SHA256': sha256,
'Cylance': {
'ListType': list_type
}
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The requested threat has been successfully removed from ' + list_type + ' hashlist.', contents),
'EntryContext': context
})
def delete_hash_from_lists_request(sha256, list_type):
access_token = get_authentication_token(scope=SCOPE_GLOBAL_LIST_DELETE)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'sha256': sha256,
'list_type': list_type
}
res = api_call(uri=URI_LISTS, method='delete', headers=headers, body=body)
return res
def delete_devices():
device_ids = demisto.args().get('deviceIds')
device_ids_list = argToList(device_ids)
contents = []
context_list = []
for device_id in device_ids_list:
device = get_device_request(device_id)
if not device:
continue
device_name = device.get('name')
context_list.append({
'Id': device_id,
'Name': device_name,
'Deleted': True
})
contents.append({
'Device Removed': device_id,
'Device Name': device_name,
'Deletion status': True
})
batch_size = demisto.args().get("batch_size", 20)
try:
batch_size = int(batch_size)
except ValueError:
return_error("Error: Batch Size specified must represent an int.")
for i in range(0, len(device_ids_list), batch_size):
current_deleted_devices_batch = device_ids_list[i:i + batch_size]
delete_devices_request(current_deleted_devices_batch)
context = {
'Cylance.Device(val.Id && val.Id == obj.Id)': context_list
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': contents,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(
'The requested devices have been successfully removed from your organization list.', contents),
'EntryContext': context
})
def delete_devices_request(device_ids):
access_token = get_authentication_token()
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
body = {
'device_ids': device_ids
}
res = api_call(uri=URI_DEVICES, method='delete', headers=headers, body=body)
if not res or not res.get('request_id'):
return_error('Delete response does not contain request id')
return res
def get_policy_details():
policy_id = demisto.args()['policyID']
contents = {} # type: Dict
context = {} # type: Dict
title = 'Could not find policy details for that ID'
filetype_actions_threat_contents = [] # type: list
filetype_actions_suspicious_contents = [] # type: list
safelist_contents = [] # type: list
title_filetype_actions_threat = 'Cylance Policy Details - FileType Actions Threat Files'
title_filetype_actions_suspicious = 'Cylance Policy Details - FileType Actions Suspicious Files'
title_safelist = 'Cylance Policy Details - File Exclusions - SafeList'
title_memory_exclusion = 'Cylance Policy Details - Memory Violation Actions \n' +\
'This table provides detailed information about the memory violation settings. \n' +\
'Memory protections Exclusion List :'
title_memory_violation = 'Memory Violation Settings: '
title_additional_settings = 'Cylance Policy Details - Policy Settings. \n' +\
'Various policy settings are contained within this section.'
policy_details = get_policy_details_request(policy_id)
    memory_violations_content = []
    additional_settings_content = []
if policy_details:
title = 'Cylance Policy Details for: ' + policy_id
date_time = ''
        # The timestamp in the response comes back as a malformed string; convert it to an actual timestamp.
timestamp = policy_details.get('policy_utctimestamp')
if timestamp:
reg = re.search(r"\d{13}", timestamp)
if reg:
ts = float(reg.group())
date_time = datetime.fromtimestamp(ts / 1000).strftime('%Y-%m-%dT%H:%M:%S.%f+00:00')
context = {
'Cylance.Policy(val.ID && val.ID == obj.ID)': {
'ID': policy_details.get('policy_id'),
'Name': policy_details.get('policy_name'),
'Timestamp': date_time
}
}
contents = {
'Policy Name': policy_details.get('policy_name'),
'Policy Created At': date_time
}
suspicious_files = policy_details.get('filetype_actions').get('suspicious_files')
if suspicious_files:
suspicious_files_list = []
for file in suspicious_files:
suspicious_files_list.append({
'Actions': file.get('actions'),
'File Type': file.get('file_type')
})
threat_files = policy_details.get('filetype_actions').get('threat_files')
if threat_files:
threat_files_list = []
for file in threat_files:
threat_files_list.append({
'Actions': file.get('actions'),
'File Type': file.get('file_type')
})
filetype_actions_suspicious_contents = suspicious_files_list
filetype_actions_threat_contents = threat_files_list
safelist = policy_details.get('file_exclusions')
if safelist:
file_exclusions_list = []
for file_exclusion in safelist:
file_exclusions_list.append({
'Research Class ID': file_exclusion.get('research_class_id'),
'Infinity': file_exclusion.get('infinity'),
'File Type': file_exclusion.get('file_type'),
'AV Industry': file_exclusion.get('av_industry'),
'Cloud Score': file_exclusion.get('cloud_score'),
'File Hash': file_exclusion.get('file_hash'),
'Research Subclass ID': file_exclusion.get('research_subclass_id'),
'Reason': file_exclusion.get('reason'),
'File Name': file_exclusion.get('file_name'),
'Category Id': file_exclusion.get('category_id'),
'MD5': file_exclusion.get('md5')
})
safelist_contents = file_exclusions_list
memory_violations = policy_details.get('memoryviolation_actions').get('memory_violations')
for memory_violation in memory_violations:
memory_violations_content.append({
'Action': memory_violation.get('action'),
'Violation Type': memory_violation.get('violation_type')
})
additional_settings = policy_details.get('policy')
additional_settings_content = []
for additional_setting in additional_settings:
additional_settings_content.append({
'Name': additional_setting.get('name'),
'Value': additional_setting.get('value')
})
demisto.results({
'Type': entryTypes['note'],
'Contents': contents,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, contents)
+ tableToMarkdown(title_filetype_actions_suspicious, filetype_actions_suspicious_contents)
+ tableToMarkdown(title_filetype_actions_threat, filetype_actions_threat_contents)
+ tableToMarkdown(title_safelist, safelist_contents)
+ tableToMarkdown(title_memory_exclusion, policy_details.get('memory_exclusion_list'))
+ tableToMarkdown(title_memory_violation, memory_violations_content)
        + tableToMarkdown(title_additional_settings, additional_settings_content),
'EntryContext': context
})
def get_policy_details_request(policy_id):
access_token = get_authentication_token(scope=SCOPE_POLICY_READ)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + access_token
}
uri = '%s/%s' % (URI_POLICIES, policy_id)
res = api_call(uri=uri, method='get', headers=headers)
return res
def fetch_incidents():
now = datetime.utcnow()
last_run = demisto.getLastRun().get('time')
if last_run is None:
now = now - timedelta(days=3)
last_run = now
else:
last_run = datetime.strptime(last_run, '%Y-%m-%dT%H:%M:%S') # Converts string to datetime object
current_run = last_run
threats = get_threats_request().get('page_items', [])
incidents = []
for threat in threats:
last_found = datetime.strptime(threat['last_found'], '%Y-%m-%dT%H:%M:%S')
if last_found > last_run:
incident = threat_to_incident(threat)
incidents.append(incident)
if last_found > current_run:
current_run = last_found
demisto.incidents(incidents)
demisto.setLastRun({'time': current_run.isoformat().split('.')[0]})
def add_capitalized_hash_to_context(threats_context):
"""Add capitalized hash keys to the context such as SHA256 and MD5,
the keys are redundant since they are used for avoiding BC issues.
Args:
threats_context(list): list of dicts of context outputs for the threats of interest, each containing
the key 'Sha256' (and possibly (Md5)).
Returns:
threats_context(list): list of dicts of context outputs for the threats of interest, each containing
the key and value 'Sha256' (and possibly Md5) as well as the key and value 'SHA256' (and possible MD5).
"""
if not isinstance(threats_context, list):
threats_context = [threats_context]
for context_item in threats_context:
if context_item.get('Sha256'):
context_item['SHA256'] = context_item.get('Sha256')
if context_item.get('Md5'):
context_item['MD5'] = context_item.get('Md5')
return threats_context
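# Illustrative example of the helper above (the hash values are placeholders, not real hashes):
#   add_capitalized_hash_to_context([{'Sha256': 'aa11...', 'Md5': 'bb22...'}])
#   -> [{'Sha256': 'aa11...', 'Md5': 'bb22...', 'SHA256': 'aa11...', 'MD5': 'bb22...'}]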
# EXECUTION
LOG('command is %s' % (demisto.command(),))
try:
handle_proxy()
if demisto.command() == 'test-module':
test()
if demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'cylance-protect-get-devices':
get_devices()
elif demisto.command() == 'cylance-protect-get-device':
get_device()
elif demisto.command() == 'cylance-protect-update-device':
update_device()
elif demisto.command() == 'cylance-protect-get-device-threats':
get_device_threats()
elif demisto.command() == 'cylance-protect-get-policies':
get_policies()
elif demisto.command() == 'cylance-protect-create-zone':
create_zone()
elif demisto.command() == 'cylance-protect-get-zones':
get_zones()
elif demisto.command() == 'cylance-protect-get-zone':
get_zone()
elif demisto.command() == 'cylance-protect-update-zone':
update_zone()
elif demisto.command() == 'cylance-protect-get-threat':
get_threat()
elif demisto.command() == 'cylance-protect-get-threats':
get_threats()
elif demisto.command() == 'cylance-protect-get-threat-devices':
get_threat_devices()
elif demisto.command() == 'cylance-protect-get-indicators-report':
get_indicators_report()
elif demisto.command() == 'cylance-protect-update-device-threats':
update_device_threats()
elif demisto.command() == 'cylance-protect-get-list':
get_list()
elif demisto.command() == 'cylance-protect-get-list-entry':
get_list_entry_by_hash()
# new commands
elif demisto.command() == 'cylance-protect-download-threat':
download_threat()
elif demisto.command() == 'cylance-protect-add-hash-to-list':
add_hash_to_list()
elif demisto.command() == 'cylance-protect-delete-hash-from-lists':
delete_hash_from_lists()
elif demisto.command() == 'cylance-protect-delete-devices':
delete_devices()
elif demisto.command() == 'cylance-protect-get-policy-details':
get_policy_details()
except Warning as w:
demisto.results({
'Type': 11,
'Contents': str(w),
'ContentsFormat': formats['text']
})
except Exception as e:
demisto.error('#### error in Cylance Protect v2: ' + str(e))
if demisto.command() == 'fetch-incidents':
LOG.print_log()
raise
else:
return_error(str(e))
|
mit
| 6,929,150,291,079,900,000
| 33.364505
| 120
| 0.598184
| false
| 3.73361
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v6/resources/types/customer_client_link.py
|
1
|
2276
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import manager_link_status
__protobuf__ = proto.module(
package="google.ads.googleads.v6.resources",
marshal="google.ads.googleads.v6",
manifest={"CustomerClientLink",},
)
class CustomerClientLink(proto.Message):
r"""Represents customer client link relationship.
Attributes:
resource_name (str):
Immutable. Name of the resource. CustomerClientLink resource
names have the form:
``customers/{customer_id}/customerClientLinks/{client_customer_id}~{manager_link_id}``
client_customer (str):
Immutable. The client customer linked to this
customer.
manager_link_id (int):
            Output only. Uniquely identifies a
            customer client link. Read only.
status (google.ads.googleads.v6.enums.types.ManagerLinkStatusEnum.ManagerLinkStatus):
This is the status of the link between client
and manager.
hidden (bool):
The visibility of the link. Users can choose
whether or not to see hidden links in the Google
Ads UI. Default value is false
"""
resource_name = proto.Field(proto.STRING, number=1)
client_customer = proto.Field(proto.STRING, number=7, optional=True)
manager_link_id = proto.Field(proto.INT64, number=8, optional=True)
status = proto.Field(
proto.ENUM,
number=5,
enum=manager_link_status.ManagerLinkStatusEnum.ManagerLinkStatus,
)
hidden = proto.Field(proto.BOOL, number=9, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
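# Hedged usage sketch (added for illustration; the resource name below is a
# made-up example, and keyword construction assumes the usual proto-plus
# message initialization rather than anything specific to this file):
if __name__ == "__main__":
    example_link = CustomerClientLink(
        resource_name="customers/1234567890/customerClientLinks/9876543210~111",
        hidden=False,
    )
    print(example_link)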
|
apache-2.0
| -4,313,098,730,890,692,600
| 34.015385
| 98
| 0.681459
| false
| 4.108303
| false
| false
| false
|
geomf/omf-fork
|
omf/solvers/gridlabd/__init__.py
|
1
|
9449
|
# Portions Copyright (C) 2015 Intel Corporation
''' Code for running Gridlab and getting results into pythonic data structures. '''
import sys
import os
import subprocess
import platform
import re
import datetime
import shutil
import traceback
import math
import time
import tempfile
import json
from os.path import join as pJoin
from copy import deepcopy
# Locational variables so we don't have to rely on OMF being in the system
# path.
_myDir = os.path.dirname(os.path.abspath(__file__))
_omfDir = os.path.dirname(os.path.dirname(_myDir))
sys.path.append(_omfDir)
# OMF imports.
from omf import feeder
import logging
def _addGldToPath():
''' Figure out what platform we're on and choose a suitable Gridlab binary.
Returns full path to binary as result. '''
# Do we have a version of GridlabD available?
if 0 == subprocess.call(["gridlabd"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
# There's a system-level install of Gridlab, so use it:
return "gridlabd"
else:
# No system-level version of Gridlab available, so add ours to the
# path.
enviro = os.environ
if sys.platform == 'win32' or sys.platform == 'cygwin':
if platform.machine().endswith('64'):
binary = _myDir + "\\win64\\gridlabd.exe"
enviro['GRIDLABD'] = _myDir + "\\win64"
enviro['GLPATH'] = _myDir + "\\win64\\"
else:
binary = _myDir + "\\win32\\gridlabd.exe"
enviro['GRIDLABD'] = _myDir + "\\win32"
enviro['GLPATH'] = _myDir + "\\win32\\"
return binary
elif sys.platform == 'darwin':
# Implement me, maybe.
pass
elif sys.platform == 'linux2':
binary = _myDir + "/linx64/gridlabd.bin"
enviro['GRIDLABD'] = _myDir + "/linx64"
enviro['GLPATH'] = _myDir + "/linx64"
# Uncomment the following line if we ever get all the linux libraries bundled. Hard!
# enviro['LD_LIBRARY_PATH'] = enviro['LD_LIBRARY_PATH'] + ':' + solverRoot + "/linx64"
return binary
else:
# Platform not supported, so just return the standard binary and
# pray it works:
return "gridlabd"
logger = logging.getLogger(__name__)
def runInFilesystem(feederTree, attachments=[], keepFiles=False, workDir=None, glmName=None):
''' Execute gridlab in the local filesystem. Return a nice dictionary of results. '''
logger.info(
'Running GridLab-D for %d feeders (working dir=%s)', len(feederTree), workDir)
try:
binaryName = "gridlabd"
# Create a running directory and fill it, unless we've specified where
# we're running.
if not workDir:
workDir = tempfile.mkdtemp()
print "gridlabD runInFilesystem with no specified workDir. Working in", workDir
# Need to zero out lat/lon data on copy because it frequently breaks
# Gridlab.
localTree = deepcopy(feederTree)
for key in localTree.keys():
try:
del localTree[key]["latitude"]
del localTree[key]["longitude"]
except:
pass # No lat lons.
# Write attachments and glm.
for attach in attachments:
with open(pJoin(workDir, attach), 'w') as attachFile:
attachFile.write(attachments[attach])
glmString = feeder.sortedWrite(localTree)
if not glmName:
glmName = "main." + \
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ".glm"
with open(pJoin(workDir, glmName), 'w') as glmFile:
glmFile.write(glmString)
logger.debug('Wrote GLM file: %s', glmName)
# RUN GRIDLABD IN FILESYSTEM (EXPENSIVE!)
with open(pJoin(workDir, 'stdout.txt'), 'w') as stdout, open(pJoin(workDir, 'stderr.txt'), 'w') as stderr, open(pJoin(workDir, 'PID.txt'), 'w') as pidFile:
            # MAYBEFIX: turn stderr WARNINGS back on once we figure out how
            # to suppress the 500MB of lines gridlabd wants to write...
logger.info(
'Running <%s -w %s> in <%s>', binaryName, glmName, workDir)
proc = subprocess.Popen(
[binaryName, '-w', glmName], cwd=workDir, stdout=stdout, stderr=stderr)
pidFile.write(str(proc.pid))
logger.info('Launched gridlabd with pid=%d', proc.pid)
returnCode = proc.wait()
logger.info('gridlabd finished with exit code=%d', returnCode)
# Build raw JSON output.
rawOut = anaDataTree(workDir, lambda x: True)
with open(pJoin(workDir, 'stderr.txt'), 'r') as stderrFile:
rawOut['stderr'] = stderrFile.read().strip()
with open(pJoin(workDir, 'stdout.txt'), 'r') as stdoutFile:
rawOut['stdout'] = stdoutFile.read().strip()
logger.info('GridlabD STDOUT:\n%s', rawOut['stdout'])
logger.info('GridlabD STDERR:\n%s', rawOut['stderr'])
# Delete the folder and return.
if not keepFiles and not workDir:
            # NOTE: if we've specified a working directory, don't just blow it
# away.
for attempt in range(5):
try:
shutil.rmtree(workDir)
break
except OSError:
                    # HACK: if we don't sleep for a couple of seconds, windows intermittently fails to delete things and an exception is thrown.
                    # Probably because dropbox is monkeying around in these folders
                    # on my dev machine. Disabled for now since it works when
                    # dropbox is off.
time.sleep(2)
return rawOut
except:
with open(pJoin(workDir, "stderr.txt"), "a+") as stderrFile:
traceback.print_exc(file=stderrFile)
return {}
def _strClean(x):
''' Helper function that translates csv values to reasonable floats (or header values to strings). '''
if x == 'OPEN':
return 1.0
elif x == 'CLOSED':
return 0.0
# Look for strings of the type '+32.0+68.32d':
elif x == '-1.#IND':
return 0.0
if x.endswith('d'):
matches = re.findall(
'^([+-]?\d+\.?\d*e?[+-]?\d+)[+-](\d+\.?\d*e?[+-]?\d*)d$', x)
if len(matches) == 0:
return 0.0
else:
floatConv = map(float, matches[0])
squares = map(lambda x: x**2, floatConv)
return math.sqrt(sum(squares))
elif re.findall('^([+-]?\d+\.?\d*e?[+-]?\d*)$', x) != []:
matches = re.findall('([+-]?\d+\.?\d*e?[+-]?\d*)', x)
if len(matches) == 0:
return 0.0
else:
try:
return float(matches[0])
except:
return 0.0 # Hack for crazy WTF occasional Gridlab output.
else:
return x
def csvToArray(fileName):
''' Take a Gridlab-export csv filename, return a list of timeseries vectors.'''
with open(fileName) as openfile:
data = openfile.read()
lines = data.splitlines()
array = map(lambda x: x.split(','), lines)
cleanArray = [map(_strClean, x) for x in array]
# Magic number 8 is the number of header rows in each GridlabD csv.
arrayNoHeaders = cleanArray[8:]
# Drop the timestamp column:
return arrayNoHeaders
def _seriesTranspose(theArray):
''' Transpose every matrix that's a value in a dictionary. Yikes. '''
return {i[0]: list(i)[1:] for i in zip(*theArray)}
def anaDataTree(studyPath, fileNameTest):
''' Take a study and put all its data into a nested object {fileName:{metricName:[...]}} '''
data = {}
csvFiles = os.listdir(studyPath)
for cName in csvFiles:
if fileNameTest(cName) and cName.endswith('.csv'):
arr = csvToArray(studyPath + '/' + cName)
data[cName] = _seriesTranspose(arr)
return data
def _tests():
print "Full path to Gridlab executable we're using:", _addGldToPath()
print "Testing string cleaning."
strTestCases = [("+954.877", 954.877),
("+2.18351e+006", 2183510.0),
("+7244.99+1.20333e-005d", 7244.99),
# ("+7244.99+120d", 7245.98372204), # Fails due to float rounding but should pass.
("+3.76184", 3.76184),
("1", 1.0),
("-32.4", -32.4),
("+7200+0d", 7200.0),
("+175020+003133", 0.0)]
for (string, result) in strTestCases:
assert _strClean(
string) == result, "A _strClean operation failed on: " + string
# Get a test feeder and test climate.
print "Testing GridlabD solver."
with open(pJoin(_omfDir, "data", "Feeder", "public", "Simple Market System.json"), "r") as feederFile:
feederJson = json.load(feederFile)
with open(pJoin(_omfDir, "data", "Climate", "AL-HUNTSVILLE.tmy2"), "r") as climateFile:
tmyStr = climateFile.read()
# Add climate in.
feederJson["attachments"]["climate.tmy2"] = tmyStr
testStudy = runInFilesystem(feederJson["tree"], feederJson["attachments"])
assert testStudy != {}, "Gridlab run failed and we got blank output."
print "GridlabD standard error:", testStudy['stderr']
print "GridlabD standard output:", testStudy['stdout']
if __name__ == '__main__':
_tests()
|
gpl-2.0
| -5,696,401,509,147,885,000
| 39.904762
| 163
| 0.580167
| false
| 3.659566
| true
| false
| false
|
TardigradeX/Space-Race-RX
|
backend/Commands_util.py
|
1
|
1776
|
from Commands import Commands, Targets, Defaults, Payloads
dd = Defaults.DELIMETER
dt = Defaults.TARGET_DELIMETER
dn = Defaults.NONE
""" COMMANDS TO BE SENT BY ANY """
def createLogin(targetType, roomid = dn ):
playerId = dn
payload = dn
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.LOGIN, target, payload])
return(msg)
def createLoginResponse(targetType, roomid, playerId):
target = dt.join([targetType, roomid, playerId])
payload = Payloads.SIGNUP;
msg = dd.join([Commands.LOGIN, target, payload])
return(msg)
def createPlayerJoined(roomid, playerId):
targetType = Targets.MASTER
payload = Payloads.JOINED
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.LOGIN, target, payload])
return(msg)
def createAnswer(targetType, roomid, playerId, payload):
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.ANSWER, target, payload])
return(msg)
def createMessage(source, targetType, roomid, playerId, payload):
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.MESSAGE, target, payload])
return(msg)
def createLogout(roomid, playerId):
    targetType = Targets.MASTER
    payload = dn
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.LOGOUT, target, payload])
return(msg)
""" COMMANDS CREATED BY CONTROLLER ONLY """
def createGameCommand(command, targetType, roomid = Defaults.NONE, playerId = Defaults.NONE):
target = dt.join([targetType, roomid, playerId])
payload = dn
msg = dd.join([command, target, payload])
return(msg)
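# Hedged usage sketch (added for illustration; the room and player ids below are
# placeholders, and the exact wire format depends on the delimiter constants
# defined in Commands.Defaults):
#
#   login_msg = createLogin(Targets.MASTER, roomid='room-1')
#   joined_msg = createPlayerJoined('room-1', 'player-7')
#   answer_msg = createAnswer(Targets.MASTER, 'room-1', 'player-7', 'my-answer')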
|
mit
| -8,001,001,289,209,257,000
| 30.714286
| 93
| 0.693131
| false
| 3.455253
| false
| false
| false
|
sheeshmohsin/mozioproj
|
mozio/settings.py
|
1
|
3500
|
"""
Django settings for mozio project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')@hh8efot31g^d)b$p(wy4d37gih!9c2q+*efe4v#jj1f#gza$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.gis',
'tastypie',
'tastypie_swagger',
'units',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mozio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mozio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geology',
'USER': 'geouser',
'PASSWORD': 'geopassword',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Caching
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -3,582,490,734,909,463,600
| 24.547445
| 91
| 0.671429
| false
| 3.468781
| false
| false
| false
|
ianmiell/shutit-distro
|
python3/python3.py
|
1
|
1207
|
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class python3(ShutItModule):
def build(self, shutit):
shutit.send('mkdir /tmp/build/python')
shutit.send('cd /tmp/build/python')
shutit.send('wget -qO- https://www.python.org/ftp/python/3.4.2/Python-3.4.2.tar.xz | xz -d | tar -xf -')
shutit.send('cd Python-*')
shutit.send('./configure --prefix=/usr --enable-shared --with-system-expat --with-system-ffi --enable-unicode=ucs4 --without-ensurepip')
shutit.send('make')
shutit.send('make install',check_exit=False) # why? seems ok
shutit.send('chmod -v 755 /usr/lib/libpython3.4m.so')
shutit.send('chmod -v 755 /usr/lib/libpython3.so')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/python')
return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return python3(
'shutit.tk.sd.python3.python3', 158844782.002553,
description='',
maintainer='',
depends=['shutit.tk.sd.libffi.libffi','shutit.tk.sd.sqlite.sqlite','shutit.tk.sd.make_certs.make_certs']
)
|
gpl-2.0
| -146,777,834,580,436,640
| 27.738095
| 138
| 0.691798
| false
| 2.493802
| false
| false
| false
|
att-comdev/drydock
|
drydock_provisioner/drivers/node/maasdriver/models/boot_resource.py
|
1
|
3592
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for MaaS API boot_resource type."""
import drydock_provisioner.error as errors
import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
class BootResource(model_base.ResourceBase):
resource_url = 'boot-resources/{resource_id}/'
fields = [
'resource_id',
'name',
'type',
'subarches',
'architecture',
]
json_fields = [
'name',
'type',
'subarches',
'architecture',
]
def __init__(self, api_client, **kwargs):
super().__init__(api_client, **kwargs)
def get_image_name(self):
"""Return the name that would be specified in a deployment.
Return None if this is not an ubuntu image, otherwise
the distro series name
"""
(os, release) = self.name.split('/')
# Only supply image names for ubuntu-based images
if os == 'ubuntu':
return release
else:
# Non-ubuntu images such as the uefi bootloader
# should never be selectable
return None
def get_kernel_name(self):
"""Return the kernel name that would be specified in a deployment."""
(_, kernel) = self.architecture.split('/')
return kernel
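# Illustrative example for the two helpers above (the resource values are assumed,
# not taken from a live MAAS API): a boot resource with name='ubuntu/xenial' and
# architecture='amd64/ga-16.04' would yield get_image_name() == 'xenial' and
# get_kernel_name() == 'ga-16.04'.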
class BootResources(model_base.ResourceCollectionBase):
collection_url = 'boot-resources/'
collection_resource = BootResource
def __init__(self, api_client, **kwargs):
super().__init__(api_client)
def is_importing(self):
"""Check if boot resources are importing."""
url = self.interpolate_url()
self.logger.debug("Checking if boot resources are importing.")
resp = self.api_client.get(url, op='is_importing')
if resp.status_code == 200:
resp_json = resp.json()
self.logger.debug("Boot resource importing status: %s" % resp_json)
return resp_json
else:
msg = "Error checking import status of boot resources: %s - %s" % (
resp.status_code, resp.text)
self.logger.error(msg)
raise errors.DriverError(msg)
def get_available_images(self):
"""Get list of available deployable images."""
image_options = list()
for k, v in self.resources.items():
if v.get_image_name() not in image_options:
image_options.append(v.get_image_name())
return image_options
def get_available_kernels(self, image_name):
"""Get kernels available for image_name
Return list of kernel names available for
``image_name``.
:param image_name: str image_name (e.g. 'xenial')
"""
kernel_options = list()
for k, v in self.resources.items():
if (v.get_image_name() == image_name
and v.get_kernel_name() not in kernel_options):
kernel_options.append(v.get_kernel_name())
return kernel_options
|
apache-2.0
| 7,456,596,019,089,050,000
| 31.954128
| 79
| 0.615256
| false
| 4.157407
| false
| false
| false
|
xfire/pydzen
|
plugins/mem.py
|
1
|
2041
|
#
# Copyright (C) 2008 Rico Schiekel (fire at downgra dot de)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# vim:syntax=python:sw=4:ts=4:expandtab
import os
import re
import logging
from pydzen import utils
logger = logging.getLogger('plugin.mem')
RE_MEM = re.compile('^Mem:\s*(?P<total>\d+)\s+(?P<used>\d+)\s+(?P<free>\d+)\s+(?P<shared>\d+)\s+(?P<buffers>\d+)\s+(?P<cached>\d+).*$')
RE_SWAP = re.compile('^Swap:\s*(?P<total>\d+)\s+(?P<used>\d+)\s+(?P<free>\d+).*$')
def bar(used, total):
return utils.gdbar('%d %d' % (used, total), l = '%d%% ' % (100. / total * used))
@utils.cache(2)
def update():
try:
out = utils.execute('free', m = True)
lines = out.split('\n')
_mem = RE_MEM.match(lines[1]).groupdict()
_swap = RE_SWAP.match(lines[3]).groupdict()
if _mem and _swap:
mem_total = float(_mem['total'])
swap_total = float(_swap['total'])
mem_used = float(_mem['used']) - float(_mem['buffers']) - float(_mem['cached'])
swap_used = float(_swap['used'])
mem = bar(mem_used, mem_total)
swap = bar(swap_used, swap_total)
return ['Mem: %s' % mem,
'Mem: %s (%d/%d Mb) Swap: %s (%d/%d Mb)' % (mem, mem_used, mem_total, swap, swap_used, swap_total)]
except StandardError, e:
logger.warn(e)
return None
|
gpl-2.0
| 772,684,951,767,942,500
| 34.189655
| 135
| 0.617834
| false
| 3.2656
| false
| false
| false
|
Micronaet/micronaet-mx
|
sale_address/address.py
|
1
|
2562
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
    ''' Add extra addresses for delivery and invoicing
'''
_inherit = 'sale.order'
    # Override onchange to reset the delivery and invoice addresses
def onchange_partner_id(self, cr, uid, ids, part, context=None):
res = super(SaleOrder, self).onchange_partner_id(
cr, uid, ids, part, context=context)
res['value']['address_id'] = False # reset address
res['value']['invoice_id'] = False # reset address
return res
_columns = {
'address_id': fields.many2one('res.partner', 'Delivery address'),
'invoice_id': fields.many2one('res.partner', 'Invoice address'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -4,763,169,489,420,876,000
| 35.6
| 78
| 0.633099
| false
| 4.206897
| false
| false
| false
|
uranix/ttpy
|
tt/ksl/ksl.py
|
1
|
6075
|
""" Dynamical TT-approximation """
import numpy as np
import dyn_tt
import tt
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000):
""" Dynamical tensor-train approximation based on projector splitting
This function performs one step of dynamical tensor-train approximation
for the equation
.. math ::
\\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
"""
ry = y0.r.copy()
    if scheme == 'symm':
tp = 2
else:
tp = 1
# Check for dtype
y = tt.vector()
if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any():
dyn_tt.dyn_tt.ztt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core + 0j,
y0.core + 0j,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space)
y.core = dyn_tt.dyn_tt.zresult_core.copy()
else:
A.tt.core = np.real(A.tt.core)
y0.core = np.real(y0.core)
dyn_tt.dyn_tt.tt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core,
y0.core,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space
)
y.core = dyn_tt.dyn_tt.dresult_core.copy()
dyn_tt.dyn_tt.deallocate_result()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.get_ps()
return y
def diag_ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000):
""" Dynamical tensor-train approximation based on projector splitting
    This function performs one step of dynamical tensor-train approximation with a diagonal matrix, i.e. it solves the equation
.. math ::
\\frac{dy}{dt} = V y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
"""
ry = y0.r.copy()
    if scheme == 'symm':
tp = 2
else:
tp = 1
# Check for dtype
y = tt.vector()
if np.iscomplex(A.core).any() or np.iscomplex(y0.core).any():
dyn_tt.dyn_diag_tt.ztt_diag_ksl(
y0.d,
A.n,
A.r,
A.core + 0j,
y0.core + 0j,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space)
y.core = dyn_tt.dyn_diag_tt.zresult_core.copy()
else:
A.core = np.real(A.core)
y0.core = np.real(y0.core)
dyn_tt.dyn_diag_tt.dtt_diag_ksl(
y0.d,
A.n,
A.r,
A.core,
y0.core,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space)
y.core = dyn_tt.dyn_diag_tt.dresult_core.copy()
dyn_tt.dyn_diag_tt.deallocate_result()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.get_ps()
return y
|
mit
| 5,739,514,568,776,435,000
| 27.255814
| 129
| 0.523457
| false
| 3.142783
| false
| false
| false
|
openSUSE/polkit-default-privs
|
tools/remove_duplicate_entries.py
|
1
|
2188
|
#!/usr/bin/python3
# vim: ts=4 et sw=4 sts=4 :
import argparse
from pkcommon import *
class DuplicateEntryRemover:
def __init__(self):
self.m_parser = argparse.ArgumentParser(
description = "Removes superfluous duplicate entries from polkit profiles or warns about conflicting ones."
)
def run(self):
self.m_args = self.m_parser.parse_args()
for profile in PROFILES:
self.m_lines_to_drop = set()
self.m_actions_seen = {}
path = getProfilePath(profile)
for entry in parseProfile(path):
self.checkDuplicate(entry)
if self.m_lines_to_drop:
self.rewriteProfile(path, self.m_lines_to_drop)
else:
print("{}: no entries removed".format(path.name.ljust(35)))
def checkDuplicate(self, entry):
seen = self.m_actions_seen.get(entry.action, None)
if not seen:
self.m_actions_seen[entry.action] = entry
else:
if entry.settings == seen.settings:
self.m_lines_to_drop.add(entry.linenr)
print("{}:{}: removing redundant entry with same settings as in line {}".format(
entry.path.name.ljust(35),
str(entry.linenr).rjust(3),
seen.linenr
))
else:
printerr("{}:{}: {}: conflicting duplicate entry ({}), previously seen in line {} ({})".format(
seen.path.name.ljust(35),
str(entry.linenr).rjust(3),
seen.action,
':'.join(entry.settings),
seen.linenr,
':'.join(seen.settings)
))
def rewriteProfile(self, path, lines_to_drop):
lines = []
with open(path) as fd:
for linenr, line in enumerate(fd.readlines(), start = 1):
if linenr not in lines_to_drop:
lines.append(line)
with open(path, 'w') as fd:
fd.write(''.join(lines))
if __name__ == '__main__':
main = DuplicateEntryRemover()
main.run()
|
gpl-2.0
| -189,961,837,190,509,020
| 27.789474
| 123
| 0.516453
| false
| 4.167619
| false
| false
| false
|
gbanegas/HappyClient
|
happy/tests/models/__init__.py
|
1
|
1611
|
# -*- coding: utf-8 -*-
"""Unit test suite for the models of the application."""
from nose.tools import eq_
from happy.model import DBSession
from happy.tests import load_app
from happy.tests import setup_db, teardown_db
__all__ = ['ModelTest']
def setup():
"""Setup test fixture for all model tests."""
load_app()
setup_db()
def teardown():
"""Tear down test fixture for all model tests."""
teardown_db()
class ModelTest(object):
"""Base unit test case for the models."""
klass = None
attrs = {}
def setUp(self):
"""Setup test fixture for each model test method."""
try:
new_attrs = {}
new_attrs.update(self.attrs)
new_attrs.update(self.do_get_dependencies())
self.obj = self.klass(**new_attrs)
DBSession.add(self.obj)
DBSession.flush()
return self.obj
except:
DBSession.rollback()
raise
def tearDown(self):
"""Tear down test fixture for each model test method."""
DBSession.rollback()
def do_get_dependencies(self):
"""Get model test dependencies.
Use this method to pull in other objects that need to be created
        for this object to be built properly.
"""
return {}
def test_create_obj(self):
"""Model objects can be created"""
pass
def test_query_obj(self):
"""Model objects can be queried"""
obj = DBSession.query(self.klass).one()
for key, value in self.attrs.items():
eq_(getattr(obj, key), value)
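# Hedged usage sketch (illustrative only; User, Group and their columns are
# hypothetical models, not part of this package):
#
#   class TestUser(ModelTest):
#       klass = User
#       attrs = {'user_name': u'ignucius'}
#
#       def do_get_dependencies(self):
#           return {'group': Group(group_name=u'admins')}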
|
apache-2.0
| 1,707,177,911,946,079,000
| 24.171875
| 72
| 0.58473
| false
| 4.152062
| true
| false
| false
|
eepgwde/pyeg0
|
pandas0/ch09/portfolio0.py
|
1
|
2641
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas.io.data as web
from collections import defaultdict
names = ['AAPL', 'GOOG', 'MSFT', 'DELL', 'GS', 'MS', 'BAC', 'C']
def get_px(stock, start, end):
return web.get_data_yahoo(stock, start, end)['Adj Close']
#
px = pd.DataFrame({n: get_px(n, '1/1/2009', '6/1/2012') for n in names})
px = px.asfreq('B').fillna(method='pad')
rets = px.pct_change()
((1 + rets).cumprod() - 1).plot()
# For the portfolio construction, we’ll compute momentum over a
# certain lookback, then rank in descending order and standardize:
def calc_mom(price, lookback, lag):
mom_ret = price.shift(lag).pct_change(lookback)
ranks = mom_ret.rank(axis=1, ascending=False)
demeaned = ranks - ranks.mean(axis=1)
return demeaned / demeaned.std(axis=1)
# With this transform function in hand, we can set up a strategy
# backtesting function that computes a portfolio for a particular
# lookback and holding period (days between trading), returning the
# overall Sharpe ratio
compound = lambda x : (1 + x).prod() - 1
daily_sr = lambda x: x.mean() / x.std()
def strat_sr(prices, lb, hold):
# Compute portfolio weights
freq = '%dB' % hold
port = calc_mom(prices, lb, lag=1)
daily_rets = prices.pct_change()
# Compute portfolio returns
port = port.shift(1).resample(freq, how='first')
returns = daily_rets.resample(freq, how=compound)
port_rets = (port * returns).sum(axis=1)
return daily_sr(port_rets) * np.sqrt(252 / hold)
strat_sr(px, 70, 30)
# From there, you can evaluate the strat_sr function over a grid of
# parameters, storing them as you go in a defaultdict and finally
# putting the results in a DataFrame:
lookbacks = range(20, 90, 5)
holdings = range(20, 90, 5)
dd = defaultdict(dict)
for lb in lookbacks:
for hold in holdings:
dd[lb][hold] = strat_sr(px, lb, hold)
ddf = pd.DataFrame(dd)
ddf.index.name = 'Holding Period'
ddf.columns.name = 'Lookback Period'
# To visualize the results and get an idea of what’s going on, here is
# a function that uses matplotlib to produce a heatmap with some
# adornments:
def heatmap(df, cmap=plt.cm.gray_r):
fig = plt.figure()
ax = fig.add_subplot(111)
axim = ax.imshow(df.values, cmap=cmap, interpolation='nearest')
ax.set_xlabel(df.columns.name)
ax.set_xticks(np.arange(len(df.columns)))
ax.set_xticklabels(list(df.columns))
ax.set_ylabel(df.index.name)
ax.set_yticks(np.arange(len(df.index)))
ax.set_yticklabels(list(df.index))
plt.colorbar(axim)
# Calling this function on the backtest results, we get Figure 11-3:
heatmap(ddf)
|
gpl-3.0
| -4,000,118,988,380,880,000
| 30.392857
| 72
| 0.687903
| false
| 2.989796
| false
| false
| false
|
himaaaatti/qtile
|
libqtile/widget/generic_poll_text.py
|
1
|
2336
|
import json
import six
from six.moves.urllib.request import urlopen, Request
from libqtile.widget import base
from libqtile.log_utils import logger
class GenPollText(base.ThreadedPollText):
"""
    A generic text widget that polls using a poll function to get the text.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('func', None, 'Poll Function'),
]
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(GenPollText.defaults)
def poll(self):
if not self.func:
return "You need a poll function"
return self.func()
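# Hedged usage sketch for GenPollText (added for illustration; the lambda is an
# arbitrary example of a poll function, as it would appear in a qtile config):
#
#   widget.GenPollText(func=lambda: "hello from a poll function")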
class GenPollUrl(base.ThreadedPollText):
"""
    A generic text widget that polls a URL and parses the response using a
    parse function.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('url', None, 'Url'),
('data', None, 'Post Data'),
('parse', None, 'Parse Function'),
('json', True, 'Is Json?'),
('user_agent', 'Qtile', 'Set the user agent'),
('headers', {}, 'Extra Headers')
]
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(GenPollUrl.defaults)
def fetch(self, url, data=None, headers={}, is_json=True):
req = Request(url, data, headers)
res = urlopen(req)
if six.PY3:
charset = res.headers.get_content_charset()
else:
charset = res.headers.getparam('charset')
body = res.read()
if charset:
body = body.decode(charset)
if is_json:
body = json.loads(body)
return body
def poll(self):
if not self.parse or not self.url:
return "Invalid config"
data = self.data
headers = {"User-agent": self.user_agent}
if self.json:
headers['Content-Type'] = 'application/json'
if data and not isinstance(data, str):
data = json.dumps(data).encode()
headers.update(self.headers)
body = self.fetch(self.url, data, headers, self.json)
try:
text = self.parse(body)
except Exception:
logger.exception('got exception polling widget')
text = "Can't parse"
return text
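# Hedged usage sketch for GenPollUrl (added for illustration; the endpoint and the
# parse lambda are assumptions, not anything the widget requires; with json=True
# the body passed to parse is the decoded JSON object):
#
#   widget.GenPollUrl(
#       url='https://api.example.com/status',
#       json=True,
#       parse=lambda body: body.get('status', 'n/a'),
#   )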
|
mit
| -4,587,477,473,095,674,400
| 26.809524
| 77
| 0.577483
| false
| 4.020654
| false
| false
| false
|
didicout/python_util
|
time_util.py
|
1
|
4169
|
# coding=utf8
"""
time util.
"""
__author__ = 'didicout <i@julin.me>'
import time
import datetime
def date_str_2_stamp(date_str, millisecond=False):
if millisecond:
return int(time.mktime(time.strptime(date_str, '%Y-%m-%d'))) * 1000
else:
return int(time.mktime(time.strptime(date_str, '%Y-%m-%d')))
def time_str_2_stamp(time_str, millisecond=False):
if millisecond:
return int(time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S'))) * 1000
else:
return int(time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S')))
def datetime_var_2_stamp(datetime_var, millisecond=False):
if millisecond:
return int(time.mktime(datetime_var.timetuple())) * 1000
else:
return int(time.mktime(datetime_var.timetuple()))
def time_var_2_stamp(time_var, millisecond=False):
time_var = standardize_time(time_var)
if millisecond:
return int(time_var) * 1000
else:
return int(time_var)
def stamp_2_date_str(stamp, millisecond=False):
if millisecond:
stamp /= 1000
return time.strftime('%Y-%m-%d', time.localtime(stamp))
def stamp_2_datetime_str(stamp, millisecond=False):
if millisecond:
stamp /= 1000
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(stamp))
def time_str_2_date_str(time_str):
return stamp_2_date_str(time_str_2_stamp(time_str))
def datetime_2_date_str(datetime_var):
return stamp_2_date_str(datetime_var_2_stamp(datetime_var))
def time_2_date_str(time_var):
return stamp_2_date_str(time_var_2_stamp(time_var))
def time_2_datetime_str(time_var):
return stamp_2_datetime_str(time_var_2_stamp(time_var))
def time_minus_by_str(time_str1, time_str2):
return int(time.mktime(standardize_time(time_str1)) - time.mktime(standardize_time(time_str2)))
def date_range(date_str1, date_str2, step=1):
ret = []
step_seconds = 3600 * 24 * step
for i in range(date_str_2_stamp(date_str1), date_str_2_stamp(date_str2) + 1, step_seconds):
ret.append(stamp_2_date_str(i))
return ret
def get_monday_str(date_str):
datetime_var = datetime.datetime.strptime(date_str, '%Y-%m-%d')
monday = datetime_var - datetime.timedelta(days=datetime_var.weekday())
return datetime_2_date_str(monday)
def get_month_first_day_str(date_str):
datetime_var = datetime.datetime.strptime(date_str, '%Y-%m-%d')
first_day = datetime_var - datetime.timedelta(days=datetime_var.day - 1)
return datetime_2_date_str(first_day)
def get_today_str():
return datetime.date.today().strftime('%Y-%m-%d')
def get_yesterday_str():
return (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
def get_yesterday_str_by_date_str(date_str):
stamp = date_str_2_stamp(date_str) - 24 * 3600
return stamp_2_date_str(stamp)
def get_tomorrow_str():
return (datetime.date.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
def day_minus_by_date_str(date_str1, date_str2):
tmp = date_str1.split('-')
a = datetime.datetime(int(tmp[0]), int(tmp[1]), int(tmp[2]))
tmp = date_str2.split('-')
b = datetime.datetime(int(tmp[0]), int(tmp[1]), int(tmp[2]))
return (a-b).days
def get_stamp_of_week(stamp, millisecond=False):
"""
    get the stamp of Monday morning of the week,
    i.e. the timestamp of 00:00:00 on the Monday of the week that contains stamp
"""
date_str = stamp_2_date_str(stamp, millisecond)
monday_str = get_monday_str(date_str)
return date_str_2_stamp(monday_str, millisecond)
def get_stamp_of_month(stamp, millisecond=False):
"""
    get the stamp of the morning of the first day of the month,
    i.e. the timestamp of 00:00:00 on the 1st of the month that contains stamp
"""
date_str = stamp_2_date_str(stamp, millisecond)
first_day = get_month_first_day_str(date_str)
return date_str_2_stamp(first_day, millisecond)
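# Illustrative example for the two helpers above (dates only; the numeric stamps
# depend on the local time zone): 2014-05-01 falls in the week whose Monday is
# 2014-04-28, so get_stamp_of_week(date_str_2_stamp('2014-05-01')) equals
# date_str_2_stamp('2014-04-28'), and get_stamp_of_month of the same stamp equals
# date_str_2_stamp('2014-05-01').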
def standardize_time(time_var):
"""
avoid error when time has a time zone.
"""
return time.strptime(datetime.datetime.strftime(time_var, '%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
print get_yesterday_str_by_date_str('2014-05-01')
|
mit
| -4,047,810,320,677,002,000
| 26.797297
| 104
| 0.651106
| false
| 2.880252
| false
| false
| false
|
getsmap/smap4
|
WS/email/sendEmail.py
|
1
|
1254
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import smtplib for the actual sending function
import sys, os
import smtplib
import email
# Import the email modules we'll need
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
def sendEmail(fromEmail="", password=None, toEmails=[], smtp="smtp.gmail.com",\
port=25, msg=""):
server = smtplib.SMTP(smtp, port)
server.ehlo()
if password!=None:
server.starttls()
server.login(fromEmail, password)
server.sendmail(fromEmail, toEmails, msg.as_string())
server.close()
if __name__=='__main__':
pass
"""fromEmail = "noreply@malmo.se" #"johanlahti@gmail.com"
password = None
smtp = "mail2.malmo.se"
port = 25
toEmails = ["johan.lahti@malmo.se"]
subject = "Testar ÅÄÖ åäö"
content = "ÅÄÖ åäö Nu testar jag skicka en länk...\n\n/Johan"
msg = MIMEText(content, "plain", "utf-8")
msg['Subject'] = subject
msg['From'] = fromEmail
msg['To'] = ";".join(toEmails)
sendEmail(fromEmail, password, \
toEmails=toEmails, msg=msg, \
smtp=smtp, port=port)"""
|
apache-2.0
| 6,888,675,901,893,147,000
| 26.577778
| 79
| 0.63336
| false
| 3.223377
| false
| false
| false
|
qbilius/autoart
|
dots/dots.py
|
1
|
1438
|
import numpy as np
from PIL import Image
import scipy.ndimage
import matplotlib.pyplot as plt
def gabor(
theta=0,
gamma=1,
sigma=2,
lam=5.6,
k=10
):
# Mutch and Lowe, 2006
theta -= np.pi/2
x,y = np.meshgrid(np.arange(-k,k),np.arange(-k,k))
X = x*np.cos(theta) - y*np.sin(theta)
Y = x*np.sin(theta) + y*np.cos(theta)
g = np.exp( - (X**2 + (gamma*Y)**2) / (2*sigma**2) ) * np.cos( 2*np.pi*X/lam )
g -= np.mean(g) # mean 0
g /= np.sum(g**2) # energy 1
g[np.abs(g)<.001] = 0
return g
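# Illustrative check (not part of the original script): for the default k=10 the
# kernel is a 20x20 array, mean-subtracted and energy-normalised before the
# final small-value clipping.
#   g = gabor(theta=np.pi / 4)
#   g.shape   # -> (20, 20), from the meshgrid over arange(-k, k)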
def get_edges(stim, oris, sf=1):
gabor_max = stim
edge_map = np.zeros((len(oris),)+gabor_max.shape)
sf=1
for oi, ori in enumerate(oris):
gab = gabor(theta=ori, sigma=2*sf,lam=5.6*sf,k=10*sf)
edges = scipy.ndimage.correlate(gabor_max,gab)
edge_map[oi] = edges
gabor_max = np.max(edge_map, axis=0)
gabor_argmax = np.argmax(edge_map, axis=0)
return gabor_max, gabor_argmax
im = Image.open('dots_input.png').convert('L')
stim = np.asarray(im)*1.
stim = stim[50:125,50:125]
oris = np.pi/8*np.arange(8)
gabor_max, gabor_argmax = get_edges(stim, oris, sf=1)
hist, bin_edges = np.histogram(gabor_max.ravel(),bins=1)
threshold = bin_edges[-2]
inds = gabor_max>threshold
gabor_max[np.logical_not(inds)] = 0
plt.imshow(gabor_max)
plt.axis('off')
#plt.show()
plt.savefig('dots.jpg', dpi=300, format='jpg',
bbox_inches='tight', pad_inches=0)
|
mit
| -7,737,220,422,988,155,000
| 25.62963
| 82
| 0.609875
| false
| 2.384743
| false
| false
| false
|
keishi/chromium
|
tools/isolate/run_test_cases_smoke_test.py
|
1
|
2333
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, 'data', 'gtest_fake'))
import gtest_fake
class TraceTestCases(unittest.TestCase):
def setUp(self):
# Make sure there's no environment variable that could do side effects.
os.environ.pop('GTEST_SHARD_INDEX', '')
os.environ.pop('GTEST_TOTAL_SHARDS', '')
def test_simple(self):
target = os.path.join(ROOT_DIR, 'data', 'gtest_fake', 'gtest_fake.py')
cmd = [
sys.executable,
os.path.join(ROOT_DIR, 'run_test_cases.py'),
'--no-dump',
target,
]
logging.debug(' '.join(cmd))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# pylint is confused.
out, err = proc.communicate() or ('', '')
self.assertEquals(0, proc.returncode)
if sys.platform == 'win32':
out = out.replace('\r\n', '\n')
lines = out.splitlines()
expected_out_re = [
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
re.escape('Note: Google Test filter = Baz.Fail'),
r'',
] + [
re.escape(l) for l in gtest_fake.get_test_output('Baz.Fail').splitlines()
] + [
'',
] + [
re.escape(l) for l in gtest_fake.get_footer(1).splitlines()
] + [
'',
re.escape('Success: 3 75.00%'),
re.escape('Flaky: 0 0.00%'),
re.escape('Fail: 1 25.00%'),
r'\d+\.\ds Done running 4 tests with 6 executions. \d+\.\d test/s',
]
for index in range(len(expected_out_re)):
line = lines.pop(0)
self.assertTrue(
re.match('^%s$' % expected_out_re[index], line),
(index, expected_out_re[index], repr(line)))
self.assertEquals([], lines)
self.assertEquals('', err)
if __name__ == '__main__':
VERBOSE = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
unittest.main()
|
bsd-3-clause
| 5,622,570,705,021,971,000
| 29.697368
| 79
| 0.571367
| false
| 3.139973
| true
| false
| false
|
leethargo/geonet
|
geonet/degeneracy.py
|
1
|
2109
|
'''
Detecting degeneracy and merging zero-length edges.
'''
from geonet.network import SteinerTree, merge_pos
from geonet.geometry import distance
from geonet.constants import abstol
def degenerate_edges(tree, steiner_pos, abstol=abstol):
'''list of edges with (numerically) zero length'''
assert isinstance(tree, SteinerTree)
pos = merge_pos(tree, steiner_pos)
return [(u,v) for (u,v) in tree.get_arcs()
if (tree.is_steiner(u) or tree.is_steiner(v))
and distance(pos[u], pos[v]) <= abstol]
def is_degenerate(tree, steiner_pos, abstol=abstol):
return degenerate_edges(tree, steiner_pos, abstol) != []
def merged(tree, steiner_pos, abstol=abstol):
'''build new tree that merges all degenerate edges.
when merging an edge, the lexicographically smaller node will
survive. returns a tree and a matching dict of steiner node
positions.
'''
degedges = degenerate_edges(tree, steiner_pos, abstol)
# key: removed node, value: remaining node (taking over)
turn_into = {}
for u, v in degedges:
if tree.is_terminal(u) and tree.is_terminal(v):
# don't merge terminals
continue
elif tree.is_terminal(u):
# keep terminals
pass
elif tree.is_terminal(v):
# keep terminals
u, v = v, u
elif v < u:
# keep lexicographically smaller node
u, v = v, u
turn_into[v] = u
# merge nodes into transitive end-point
for v, u in turn_into.iteritems():
while u in turn_into:
u = turn_into[u]
turn_into[v] = u
# build new tree data
new_nodes = [u for u in tree.get_nodes() if u not in turn_into]
new_edges = []
for u, v in tree.get_arcs():
uu, vv = turn_into.get(u, u), turn_into.get(v, v)
if uu != vv: # remove self-loops
new_edges.append((uu, vv))
new_tree = SteinerTree(new_nodes, new_edges, tree.get_terminal_positions())
new_pos = {s:steiner_pos[s] for s in steiner_pos if s in new_nodes}
return new_tree, new_pos
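# Illustrative sketch (not from this module); the node names, coordinates and
# SteinerTree construction below are hypothetical:
#   tree = SteinerTree(['t1', 't2', 's1', 's2'],
#                      [('t1', 's1'), ('s1', 's2'), ('s2', 't2')],
#                      {'t1': (0.0, 0.0), 't2': (2.0, 0.0)})
#   steiner_pos = {'s1': (1.0, 0.0), 's2': (1.0, 0.0)}   # s1 and s2 coincide
#   new_tree, new_pos = merged(tree, steiner_pos)
#   # ('s1', 's2') is degenerate, so s2 is merged into the lexicographically
#   # smaller s1 and drops out of new_pos.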
|
mit
| 4,646,769,807,229,963,000
| 30.954545
| 79
| 0.614983
| false
| 3.234663
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/data/_errordata.py
|
1
|
2335
|
import datetime
import os.path
import subprocess
import time
import xml.etree.ElementTree
import dsz
import ops
XALAN = os.path.join(ops.RESDIR, 'ExternalLibraries', 'java-j2se_1.6-sun', 'xalan.jar')
STYLESHEET = os.path.join(ops.DATA, 'DszErrorExtractor.xsl')
class DszCommandError(list, ):
def __init__(self, timestamp, cmdid):
self.timestamp = timestamp
self.__cmdid = cmdid
list.__init__(self)
def __str__(self):
msg = ('Error running command %d: %s\n' % (self.__cmdid, dsz.cmd.data.Get('commandmetadata::fullcommand', dsz.TYPE_STRING, cmdId=self.__cmdid)[0]))
if len(self):
for i in self:
msg += (' - %s' % i)
else:
msg += ' - No additional information available. Try viewing the logs.'
return msg
class DszCommandErrorData(object, ):
def __init__(self, type, text, timestamp):
self.type = type
self.text = text
self.timestamp = timestamp
def __str__(self):
return ('%s: %s' % (self.type, self.text))
def getLastError():
return getErrorFromCommandId(cmdid=dsz.cmd.LastId())
def getErrorFromCommandId(cmdid):
if (cmdid < 1):
return []
dataDir = os.path.join(ops.LOGDIR, 'Data')
files = []
for file in os.listdir(dataDir):
fullpath = os.path.join(dataDir, file)
if (not os.path.isfile(fullpath)):
continue
try:
if (int(file.split('-', 1)[0]) == cmdid):
files.append(fullpath)
except ValueError:
pass
errorSets = []
for file in files:
errorSets.append(_parseXML(file, cmdid))
return errorSets
def _parseXML(fullpath, cmdid):
xsltoutput = subprocess.Popen(['javaw', '-jar', XALAN, '-in', fullpath, '-xsl', STYLESHEET], stdout=subprocess.PIPE).communicate()[0]
tree = xml.etree.ElementTree.fromstring(xsltoutput)
if (not tree.get('timestamp')):
        return DszCommandError(timestamp='', cmdid=cmdid)
timestamp = datetime.datetime(*time.strptime(tree.get('timestamp'), '%Y-%m-%dT%H:%M:%S')[0:6])
errors = DszCommandError(timestamp=timestamp, cmdid=cmdid)
for error in tree:
errors.append(DszCommandErrorData(type=error.get('type'), text=unicode(error.text, 'utf_8'), timestamp=timestamp))
return errors
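# Illustrative usage sketch (not part of the original module):
#   for error_set in getLastError():     # one DszCommandError per data file
#       print str(error_set)             # command line plus per-error details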
|
unlicense
| -5,100,924,808,575,776,000
| 32.855072
| 155
| 0.6197
| false
| 3.464392
| false
| false
| false
|
AustereCuriosity/astropy
|
astropy/coordinates/builtin_frames/__init__.py
|
1
|
5052
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames actually implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric
from .lsr import LSR, GalacticLSR
from .supergalactic import Supergalactic
from .altaz import AltAz
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .ecliptic import (GeocentricTrueEcliptic, BarycentricTrueEcliptic,
HeliocentricTrueEcliptic, BaseEclipticFrame)
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
# we define an __all__ because otherwise the transformation modules get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'PrecessedGeocentric', 'GeocentricTrueEcliptic',
'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR',
'BaseEclipticFrame', 'BaseRADecFrame']
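# Illustrative usage sketch (not part of this module): converting a coordinate
# between two of the frames imported above. SkyCoord and units are part of the
# public astropy API.
#
#   from astropy import units as u
#   from astropy.coordinates import SkyCoord
#   c = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
#   c_gal = c.transform_to(Galactic)   # uses the registered transform graph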
def _make_transform_graph_docs():
"""
Generates a string for use with the coordinate package's docstring
to show the available transforms and coordinate systems
"""
import inspect
from textwrap import dedent
from ...extern import six
from ..baseframe import BaseCoordinateFrame, frame_transform_graph
isclass = inspect.isclass
coosys = [item for item in six.itervalues(globals())
if isclass(item) and issubclass(item, BaseCoordinateFrame)]
# currently, all of the priorities are set to 1, so we don't need to show
    # them in the transform graph.
graphstr = frame_transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the coordinate systems built into the
`~astropy.coordinates` package, their aliases (useful for converting
other coordinates to them using attribute-style access) and the
pre-defined transformations between them. The user is free to
override any of these transformations by defining new transformations
between these systems, but the pre-defined transformations should be
sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from ..transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = u"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{0}:</b>
<span style="font-size: 24px; color: {1};"><b>➝</b></span>
</p>
</li>
""".format(cls.__name__, color)
html_list_items.append(block)
graph_legend = u"""
.. raw:: html
<ul>
{}
</ul>
""".format("\n".join(html_list_items))
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = _make_transform_graph_docs()
|
bsd-3-clause
| 1,821,318,041,240,630,800
| 38.76378
| 118
| 0.70396
| false
| 3.899614
| false
| false
| false
|
RedBulli/CourseDeadlines
|
CourseDeadlines/settings.py
|
1
|
6055
|
# Django settings for CourseDeadlines project.
import os
settings_dir = os.path.dirname(__file__)
project_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0], os.path.pardir)
DEBUG = True
TASTYPIE_FULL_DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'database.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Helsinki'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
project_dir + '/static/',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fb4)u@=p!d4py3eqh_2bm%^f(d4!u5!$1rex(9e%6u%u8(xo_!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'CourseDeadlines.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'CourseDeadlines.wsgi.application'
TEMPLATE_DIRS = (
project_dir + '/templates'
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
AUTHENTICATION_BACKENDS = (
'django_openid_auth.auth.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
OPENID_CREATE_USERS = True
OPENID_UPDATE_DETAILS_FROM_SREG = True
LOGIN_URL = '/openid/login/'
LOGIN_REDIRECT_URL = '/'
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id'
OPENID_USE_EMAIL_FOR_USERNAME = True
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_openid_auth',
'tastypie',
'templatetag_handlebars',
'Course',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
mit
| 1,575,287,465,834,630,700
| 33.016854
| 127
| 0.688852
| false
| 3.623579
| false
| false
| false
|
nict-isp/scn-openflow-driver
|
src/ncps_openflow/scn/plugins/middleware/interface.py
|
1
|
6895
|
# -*- coding: utf-8 -*-
"""
scn.plugins.middleware.interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology.All rights reserved.
:license: GPL3, see LICENSE for more details.
"""
import json
from pox.core import core
from pox.lib.addresses import IPAddr
from pox.lib.revent import EventMixin
from events import (CmdResp,
InitializeReq, InitializeResp, CreateBiPathReq, CreateBiPathResp,
DeleteBiPathReq, DeleteBiPathResp, UpdatePathReq, UpdatePathResp,
OptimizeReq, OptimizeResp, PushReq, HeartBeatReq, DumpReq, DumpResp)
from utils.widgets import Transport, Peer
from utils.connection import MWTcpServer, MWUdpServer, MWTcpClient
log = core.getLogger()
def send_tcp_payload(dst_peer, payload):
"""send TCP message I/F using MWTcpClient(original TCP client for SCN).
    dst_peer is needed to determine the application port.
"""
    # get the registered node.
node = core.topology.getHost(dst_peer.ipaddr)
ofp = node.ofp
switch = ofp.ofs
dpid = switch.dpid
port = ofp.number
src_mac = ofp.hwAddr
src_ip = ofp.ipAddr
src = (src_mac, src_ip)
dst_mac = node.macAddr
dst_ip = dst_peer.ipaddr
dst_port = dst_peer.port
dst = (dst_mac, dst_ip, dst_port)
log.info("request : dpid=%s, port=%s,src=%s, dst=%s" % (dpid, port, src, dst))
log.debug("payload : %s" % str(payload))
tcp_client = MWTcpClient(dpid, port, src, dst, payload)
core.protocols.addClient(tcp_client)
tcp_client.start()
class Interface(EventMixin):
"""request and response I/F from/to node(SCN) or Switch(OFC)
"""
_eventMixin_events = [
CmdResp,
InitializeReq,
CreateBiPathReq,
UpdatePathReq,
DeleteBiPathReq,
OptimizeReq,
HeartBeatReq,
DumpReq
]
supported = {
Peer.TCP : send_tcp_payload
}
def __init__(self):
EventMixin.__init__(self)
udp_server = MWUdpServer(self.process_command, Transport.LPORT)
core.protocols.addServer(udp_server)
tcp_server = MWTcpServer(self.process_command, Transport.LPORT)
core.protocols.addServer(tcp_server,
needSend=True)
core.middleware.listenTo(self)
self.register_event_handler()
# register decode class for input (request/response) message.
        # Register the classes that decode input messages arriving at this OFC server.
        # (The NAME property of the incoming message body selects the corresponding class.)
self.decode_classes = {
# JSON CMD Name : called Class
InitializeReq.NAME : InitializeReq,
CreateBiPathReq.NAME : CreateBiPathReq,
UpdatePathReq.NAME : UpdatePathReq,
DeleteBiPathReq.NAME : DeleteBiPathReq,
OptimizeReq.NAME : OptimizeReq,
HeartBeatReq.NAME : HeartBeatReq,
DumpReq.NAME : DumpReq
}
def register_event_handler(self):
"""register handler for event raised middlewar.py
request handler is for innter domain request.
"""
for req in [InitializeReq, CreateBiPathReq, UpdatePathReq, \
DeleteBiPathReq, OptimizeReq, HeartBeatReq, DumpReq]:
core.middleware.addListenerByName(req.__name__, self.handle_request)
for resp in [InitializeResp, CreateBiPathResp, UpdatePathResp, \
DeleteBiPathResp, OptimizeResp, DumpResp, PushReq, CmdResp]:
core.middleware.addListenerByName(resp.__name__, self.handle_response)
def process_command(self, node, data):
"""input handler.
        called when MWTcpServer receives a payload.
@param [ScnOpenFlowHost] node input src node
@param [string] data JSON format
"""
log.debug('process_command = [%s]' % repr(data))
event = self.decode_json(data)
if not node:
            # if not node -> create registered node instance from listen_peer
node = core.topology.getHost(IPAddr(event.dst_peer.ipaddr))
self.raiseEvent(event, node)
def decode_json(self, data):
"""decode json protocol cmd.
        uses the registered classes (self.decode_classes)
"""
try:
kwargs = json.loads(data)
kwargs['buf'] = data
cls = self.decode_classes.get(kwargs['NAME'])
if not cls:
log.warn('Unknown Command Type')
return CmdResp()
decoded = cls.from_dict(kwargs)
if not decoded:
log.warn('No Data ? Class=%s' % str(cls))
return CmdResp()
except (TypeError, ValueError) as inst:
log.exception(inst)
log.error("Could not decode json : [%s]" % str(data))
return CmdResp()
log.info('\n--\n%s command received\n%s\n--\n' % (decoded.NAME, repr(data)))
return decoded
# ========= Handler from OFC Server(middlewar.py) raise (request/response) message =========== #
def handle_response(self, resp):
""" handler to send response.
"""
log.info("send response to node :%s" % str(resp))
self.__send_data__(resp, resp.dst_peer)
def handle_request(self, req):
""" handler to send resquest.
"""
log.info("send request to other OFC :%s" % str(req))
self.__send_data__(req, req.dst_peer)
def __send_data__(self, send_cls, dst_peer):
"""send data.
check protocol and convert data.
do send method.
"""
if not dst_peer:
log.warning('Peer is none. It might be a static service with no listen peer...')
return
if not self.__check_supported_protocol__(dst_peer.protocol):
log.warn("not supported protocol.%s" % str(dst_peer.protocol))
return
payload = None
try:
payload = send_cls.to_json() + MWTcpServer.DELIMITER
except (TypeError, ValueError) as inst:
log.exception(inst)
if not payload:
log.warn("no payload")
return
log.info('\n--\n%s: %s to\n%s\n--\n' % (send_cls.NAME, repr(payload), dst_peer))
self.__get_request_method__(dst_peer.protocol)(dst_peer, payload)
def __check_supported_protocol__(self, protocol):
"""check that protocol can use ?
"""
return self.supported.has_key(protocol)
def __get_request_method__(self, protocol):
"""get sender method.
"""
return self.supported[protocol]
def launch(**kwargs):
"""middlewareCmds launch
    **kwargs is needed for optional args.
see __init__.py also.
"""
log.debug(kwargs)
Interface()
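# Illustrative sketch (not part of the original plugin): decode_json() dispatches
# on the NAME property of the incoming JSON body; names not present in
# decode_classes fall back to a bare CmdResp. The payload below is hypothetical.
#   iface = Interface()                     # normally created by launch()
#   event = iface.decode_json('{"NAME": "no-such-command"}')
#   # -> CmdResp instance, because "no-such-command" is not a registered NAME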
|
gpl-3.0
| -4,565,760,055,426,813,400
| 32.009756
| 116
| 0.598493
| false
| 3.66974
| false
| false
| false
|
yeming233/rally
|
rally/plugins/openstack/context/quotas/quotas.py
|
1
|
4765
|
# Copyright 2014: Dassault Systemes
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.common import validation
from rally import consts
from rally import osclients
from rally.plugins.openstack.context.quotas import cinder_quotas
from rally.plugins.openstack.context.quotas import designate_quotas
from rally.plugins.openstack.context.quotas import manila_quotas
from rally.plugins.openstack.context.quotas import neutron_quotas
from rally.plugins.openstack.context.quotas import nova_quotas
from rally.task import context
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="quotas", platform="openstack", order=300)
class Quotas(context.Context):
"""Sets OpenStack Tenants quotas."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,
"cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,
"manila": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA,
"designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA,
"neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA
}
}
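    # Illustrative context configuration sketch (not from this file). The quota
    # names below are hypothetical examples of what the per-service sub-schemas
    # may accept in a task definition:
    #
    #   "context": {
    #       "quotas": {
    #           "nova": {"instances": 100, "cores": 200},
    #           "neutron": {"network": 50}
    #       }
    #   }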
def __init__(self, ctx):
super(Quotas, self).__init__(ctx)
self.clients = osclients.Clients(
self.context["admin"]["credential"],
api_info=self.context["config"].get("api_versions"))
self.manager = {
"nova": nova_quotas.NovaQuotas(self.clients),
"cinder": cinder_quotas.CinderQuotas(self.clients),
"manila": manila_quotas.ManilaQuotas(self.clients),
"designate": designate_quotas.DesignateQuotas(self.clients),
"neutron": neutron_quotas.NeutronQuotas(self.clients)
}
self.original_quotas = []
def _service_has_quotas(self, service):
return len(self.config.get(service, {})) > 0
@logging.log_task_wrapper(LOG.info, _("Enter context: `quotas`"))
def setup(self):
for tenant_id in self.context["tenants"]:
for service in self.manager:
if self._service_has_quotas(service):
# NOTE(andreykurilin): in case of existing users it is
# required to restore original quotas instead of reset
# to default ones.
if "existing_users" in self.context:
self.original_quotas.append(
(service, tenant_id,
self.manager[service].get(tenant_id)))
self.manager[service].update(tenant_id,
**self.config[service])
def _restore_quotas(self):
for service, tenant_id, quotas in self.original_quotas:
try:
self.manager[service].update(tenant_id, **quotas)
except Exception as e:
LOG.warning("Failed to restore quotas for tenant %(tenant_id)s"
" in service %(service)s \n reason: %(exc)s" %
{"tenant_id": tenant_id, "service": service,
"exc": e})
def _delete_quotas(self):
for service in self.manager:
if self._service_has_quotas(service):
for tenant_id in self.context["tenants"]:
try:
self.manager[service].delete(tenant_id)
except Exception as e:
LOG.warning("Failed to remove quotas for tenant "
"%(tenant_id)s in service %(service)s "
"\n reason: %(exc)s"
% {"tenant_id": tenant_id,
"service": service, "exc": e})
@logging.log_task_wrapper(LOG.info, _("Exit context: `quotas`"))
def cleanup(self):
if self.original_quotas:
# existing users
self._restore_quotas()
else:
self._delete_quotas()
|
apache-2.0
| 7,832,287,058,500,353,000
| 41.544643
| 79
| 0.590766
| false
| 4.213086
| false
| false
| false
|
akkana/scripts
|
wpnet.py
|
1
|
17033
|
#!/usr/bin/env python3
# A wrapper script to make it easier to use wpa_cli to connect.
# https://wiki.archlinux.org/index.php/WPA_supplicant#Connecting_with_wpa_cli
# was very helpful.
#
# For extending this to eth0, browse /etc/dhcpcd.conf
# and /usr/share/dhcpcd/hooks/10-wpa_supplicant on raspbian,
# where dhcpcd is the master and is in charge of stopping
# and starting wpa_supplicant.
#
# Copyright 2018 by Akkana Peck: share and enjoy under the GPLv2 or later.
import subprocess
import os, sys
import argparse
import getpass
import urllib.request
import time
verbose = False
"""
To run this as a normal user, not under sudo:
edit /etc/wpa_supplicant/wpa_supplicant.conf
and add a line like:
ctrl_interface_group=adm
using whatever group you think should have network permissions.
Commands this script runs:
** Get the wireless interface:
iw dev
** Start the daemon:
wpa_supplicant -B -i $iface -c /etc/wpa_supplicant/wpa_supplicant.conf
** List known networks:
wpa_cli list_networks
** List available networks:
wpa_cli scan
wpa_cli scan_results
** Define a new SSID:
wpa_cli add_network
(prints new $ID. Then:)
NOT : wpa_cli set_network $ID
** Connect to a new open SSID:
wpa_cli set_network $ID ssid $SSID key_mgmt NONE
** Connect to a new WPA SSID:
wpa_cli set_network $ID ssid $SSID psk $PASSWORD
wpa_cli enable_network $ID
wpa_cli save_config
WORKED:
wpa_supplicant -B -i wlp2s0 -c /etc/wpa_supplicant/wpa_supplicant.conf
wpa_cli list_networks
wpa_cli scan
wpa_cli scan_results
wpa_cli add_network
wpa_cli set_network 1 (this gave an error, I think)
wpa_cli set_network 1 ssid '"LAC-Public Library"'
wpa_cli set_network 1 key_mgmt NONE
(idiot bash lost this command, probably enable?)
wpa_cli save_config
dhclient -v wlp2s0
"""
def run_as_root(cmdargs):
"""Run cmdargs inside sudo, unless we're already root.
return (stdout, stderr) as strings.
"""
    if os.geteuid() != 0:  # not already root, so wrap the command in sudo
cmdargs = ["sudo"] + cmdargs
if verbose:
print("\n** Run:", ' '.join(cmdargs))
proc = subprocess.Popen(cmdargs, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# proc.communicate() returns bytes, so change them to strings:
return ( b.decode() for b in proc.communicate() )
def run_cmd(cmdargs):
"""Run and return (stdout, stderr) as strings.
"""
if verbose:
print("\n** Run:", ' '.join(cmdargs))
proc = subprocess.Popen(cmdargs, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# proc.communicate() returns bytes, so change them to strings:
return ( b.decode() for b in proc.communicate() )
def start_wpa_supplicant(iface):
# https://wiki.archlinux.org/index.php/WPA_supplicant
if is_wpa_running():
if verbose:
print("wpa_supplicant is already running")
return
args = ['sudo', 'wpa_supplicant', '-B', '-i', iface,
'-c', '/etc/wpa_supplicant/wpa_supplicant.conf']
if verbose:
print("Starting wpa_supplicant:", ' '.join(args), end='')
subprocess.call(args)
time.sleep(5)
def is_wpa_running():
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
args = open(os.path.join('/proc', pid, 'cmdline'),
'rb').read().decode().split('\0')
if args[0] == 'wpa_supplicant':
return True
except IOError: # proc has already terminated
continue
return False
def start_dhcp(iface):
if verbose:
print("Starting dhcp")
# Can't use run_cmd here because the output takes time
    # and the user might want to see it, especially if it fails.
return subprocess.call(['sudo', 'dhclient', '-v', iface])
def get_available_accesspoints(iface):
aps = {}
start_wpa_supplicant(iface)
run_cmd(["wpa_cli", "scan"])
out, err = run_cmd(["wpa_cli", "scan_results"])
stdout_lines = out.split('\n')
for line in stdout_lines:
if not line or line.startswith('Selected') \
or line.startswith('bssid /'):
continue
words = line.strip().split(maxsplit=4)
# Get the ssid if it's not hidden, else use the MAC
if len(words) == 4:
ssid = '[%s]' % words[0]
else:
ssid = words[4]
aps[ssid] = { 'MAC': words[0],
'flags': words[3],
'signal': int(words[2]),
}
return aps
def get_current():
"""
<iridum>- sudo wpa_cli list_networks
Selected interface 'wlp2s0'
network id / ssid / bssid / flags
0 clink any
1 LAC-Public Library any [CURRENT]
2 CommunityLab any [DISABLED]
3 COAFreeWireless any
4 LAC-Public Library any
"""
start_wpa_supplicant(iface)
networks = {}
out, err = run_cmd(["wpa_cli", "list_networks"])
stdout_lines = out.split('\n')
for line in stdout_lines:
line = line.strip()
if line.endswith('[CURRENT]'):
words = line.split('\t')
return words[1]
return None
def get_known_networks():
start_wpa_supplicant(iface)
networks = {}
out, err = run_cmd(["wpa_cli", "list_networks"])
stdout_lines = out.split('\n')
for line in stdout_lines:
line = line.strip()
if not line:
continue
words = line.split('\t')
if words[0].isdigit():
networks[int(words[0])] = words[1]
return networks
def match_ssid(pat, ssids):
for net in ssids:
if pat in net:
return net
return None
def get_wireless_ifaces():
# For a list of all devices, ls /sys/class/net
ifaces = []
# Get a list of wireless interfaces.
    # 'iw dev' lists each wireless interface on an "Interface" line.
proc = subprocess.Popen(["iw", "dev"], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_lines = proc.communicate()[0].decode().split('\n')
for line in stdout_lines:
line = line.strip()
if line.startswith('Interface'):
ifaces.append(line.split()[1])
# could get MAC and ssid if appropriate
return ifaces
def show_browser_if_redirect():
"""Try to fetch a test URL. If we're redirected to some other URL
(probably a stupid login page), pop up a browser.
"""
# Alas, there's no universal page everyone can use.
# So make one on your own website, or find a trusted page,
# and put that URL in ~/.config/netscheme/testurl
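    # Hypothetical example of the two files this function expects:
    #   ~/.config/netscheme/testurl       contains e.g. https://example.com/net-test.txt
    #   ~/.config/netscheme/testurl.out   contains the exact content that URL returns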
testurl = None
testurlfile = os.path.expanduser("~/.config/netscheme/testurl")
if not os.path.exists(testurlfile):
print("No test URL file; not testing connection")
return
with open(testurlfile) as tufile:
testurl = tufile.read().strip()
with open(testurlfile + ".out") as tufile:
content_from_file = tufile.read()
if verbose and not testurl:
print("No test URL set; not checking for redirects")
return
content_from_web = ''
print("Trying to fetch test URL", testurl)
try:
response = urllib.request.urlopen(testurl, timeout=100)
# Were we redirected? In theory response.geturl() will tell us that,
# but in practice, it doesn't, so we have to fetch the content
# of a page and compare it to the expected value.
content_from_web = response.read().decode('utf-8')
# Lots of ways this can fail.
# e.g. ValueError, "unknown url type"
# or BadStatusLine: ''
except Exception as e:
print("Couldn't fetch test URL %s: probably redirected." % testurl, e)
content_from_web = ''
if content_from_web == content_from_file:
print("Looks like we're really connected -- no redirect")
return
print("Couldn't make a test connection -- probably redirected.")
# Don't want to run a browser as root, so figure out if we're root
# and if so, de-escalate privilege.
# os.getuid(), os.geteuid() and psutil.uids() are all zero under sudo,
# but sudo helpfully sets an env variable we can use.
orig_uid = os.getenv("SUDO_UID")
if orig_uid:
print("De-escalating back to UID", orig_uid)
orig_uid = int(orig_uid)
os.setuid(orig_uid)
print("Calling quickbrowse", testurl)
try:
subprocess.call(["quickbrowse", testurl])
except Exception as e:
print("Problem starting a browser", e)
raise e
def show_available_networks():
accesspoints = get_available_accesspoints(iface)
aps = accesspoints.keys()
known_nets = get_known_networks()
# Print the ones we have saved already:
format = "%-20s %4s %7s %s"
print(format % ("SSID", "#", "Signal", "Encryption"))
print(format % ("----", "--", "------", "----------"))
known = []
for i in sorted(known_nets):
if known_nets[i] in aps:
print(format % (known_nets[i],
i,
accesspoints[known_nets[i]]['signal'],
accesspoints[known_nets[i]]['flags']))
known.append(known_nets[i])
'''
Sample flags:
SSID Signal # Encryption
---- ------ -- ----------
LAC-Wireless -86 [WPA2-EAP-CCMP][ESS]
Historical -84 [WPA-PSK-TKIP][WPA2-PSK-CCMP+TKIP][ESS]
LAC PUBLIC -85 [ESS]
Public-LAC -90 [ESS]
NMC-Main -79 [WPA2-PSK-CCMP][ESS]
<iridum>- wpa_cli scan_results ~
Selected interface 'wlp2s0'
bssid / frequency / signal level / flags / ssid
58:bf:ea:92:ba:c0 2437 -48 [WPA2-EAP-CCMP][ESS] LAC-Wireless
6c:70:9f:de:4d:7c 2462 -84 [WPA-PSK-TKIP][WPA2-PSK-CCMP+TKIP][ESS]Historical
58:bf:ea:92:ba:c2 2437 -56 [ESS] LAC PUBLIC
24:01:c7:3a:91:b0 2462 -64 [ESS] Public-LAC
Selected interface 'wlp2s0'
https://askubuntu.com/questions/541704/how-can-one-use-wpa-cli-to-connect-to-a-wpa-network-without-a-password
> scan
OK
CTRL-EVENT-SCAN-RESULTS
> scan_results
bssid / frequency / signal level / flags / ssid
f8:d1:11:23:c2:2f 2412 76 [ESS] BAYINET
f8:d1:11:23:c1:e9 2412 47 [ESS] BAYINET
> add_network
0
> set_network 0 ssid "Public-LAC"
OK
> set_network 0 key_mgmt NONE
OK
> enable_network 0
OK
CTRL-EVENT-SCAN-RESULTS
Trying to associate with f8:d1:11:23:c2:2f (SSID='BAYINET' freq=2412 MHz)
Association request to the driver failed
Associated with f8:d1:11:23:c2:2f
CTRL-EVENT-CONNECTED - Connection to f8:d1:11:23:c2:2f completed (auth) [id=1 id_str=]
> quit
'''
# Print the ones we don't know:
print()
for ap in aps:
if ap not in known:
print(format % (ap,
'',
accesspoints[ap]['signal'],
accesspoints[ap]['flags']))
def connect_to(to_ap):
if verbose:
print("Connecting to", to_ap)
accesspoints = get_available_accesspoints(iface)
aps = list(accesspoints.keys())
known_nets = get_known_networks()
known = [ known_nets[i] for i in known_nets ]
known_index = None
if to_ap not in aps:
# But maybe it's a number for a known network?
if to_ap.isdigit():
known_index = int(to_ap)
if known_index not in known_nets:
print("No network %d known" % known_index)
sys.exit(1)
to_ap = known_nets[known_index]
if to_ap not in aps:
print("Network %d, '%s', not visible" % (known_index,
to_ap))
sys.exit(1)
else:
matched = match_ssid(to_ap, accesspoints.keys())
if not matched:
print("'%s' isn't visible" % to_ap)
sys.exit(1)
to_ap = matched
print("Matched:", matched)
# Now to_ap is an SSID that's known.
if to_ap in known:
if verbose:
print("Great, we see", to_ap, "and we know it already")
if known_index is None:
for i in known_nets:
if known_nets[i] == to_ap:
known_index = i
break
if known_index is None:
print("Internal error, lost track of SSID %s" % to_ap)
if verbose:
print("Enabling network", to_ap)
run_cmd(["wpa_cli", "enable_network", str(known_index)])
if start_dhcp(iface):
print("DHCP failed")
else:
show_browser_if_redirect()
sys.exit(0)
# New network, hasn't been stored yet. But it is seen.
if verbose:
print(to_ap, "must be a new network")
thisap = accesspoints[to_ap]
out, err = run_cmd(["wpa_cli", "add_network"])
# The last (second) line of the output is the new network number.
# But split('\n') gives a bogus empty final line.
# To be safer, try iterating to find a line that's just a single number.
lines = out.split('\n')
netnum_str = None
for line in lines:
if not line:
continue
words = line.split()
if len(words) == 1 and words[0].isdigit():
netnum_str = words[0]
break
if not netnum_str:
print("Unexpected output from wpa_cli add_network:")
print(out)
print("---")
sys.exit(1)
if verbose:
print("new netnum:", netnum_str)
def check_fail(out, err, errmsg=None):
if 'FAIL' in out or 'FAIL' in err:
if errmsg:
print("Error:", errmsg)
if out:
print("==== FAIL: out")
print(out)
if err:
print("==== FAIL: err")
print(err)
sys.exit(1)
if out or err:
print("SUCCESS:")
if out:
print(out)
if err:
print(err)
out, err = run_cmd(["wpa_cli", "set_network", netnum_str, "ssid",
'"%s"' % to_ap])
check_fail(out, err, "Set network")
if 'WPA' in thisap['flags'] or 'PSK' in thisap['flags']:
password = getpass.getpass("Password: ")
out, err = run_cmd(["wpa_cli", "set_network", netnum_str,
"psk", '"%s"' % password])
check_fail(out, err, "Set password")
else:
if verbose:
print("Trying to connect to %s with no password" % to_ap)
out, err = run_cmd(["wpa_cli", "set_network", netnum_str,
"key_mgmt", "NONE"])
check_fail(out, err, "Set key management")
if verbose:
print("Waiting a little ...", end='')
time.sleep(5)
if verbose:
print()
if verbose:
print("Enabling network", netnum_str)
out, err = run_cmd(["wpa_cli", "enable_network", netnum_str])
check_fail(out, err, "Enable network")
if verbose:
print("Waiting a little ...", end='')
time.sleep(5)
if verbose:
print()
if verbose:
print("Saving configuration")
out, err = run_cmd(["wpa_cli", "save_config"])
check_fail(out, err, "Save configuration")
if verbose:
print(out, err, "Saved configuration")
start_dhcp(iface)
show_browser_if_redirect()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-k', "--known", dest="known",
action="store_true", help="List known networks")
parser.add_argument('-a', "--available", dest="available",
action="store_true", help="Show available accesspoints")
parser.add_argument('connect_to', nargs='?',
help="The essid or numeric specifier to connect to")
args = parser.parse_args(sys.argv[1:])
ifaces = get_wireless_ifaces()
if not ifaces:
print("No wireless interface, sorry")
sys.exit(1)
if len(ifaces) > 1:
print("Multiple wireless interfaces:", ' '.join(get_wireless_ifaces()))
print("Using", ifaces[0])
iface = ifaces[0]
if not iface:
print("No interface!")
sys.exit(1)
if verbose:
print("Interface:", iface)
if args.available:
show_available_networks()
sys.exit(0)
if args.known:
known_nets = get_known_networks()
for i in sorted(known_nets.keys()):
print('%3d: %s' % (i, known_nets[i]))
sys.exit(0)
# If no flags specified, then we should have one arg,
# either a numeric specifier or an essid.
if not args.connect_to:
current = get_current()
if current:
print("Connected to", current)
else:
print("Not connected")
sys.exit(0)
connect_to(args.connect_to)
|
gpl-2.0
| 2,445,823,374,214,298,000
| 29.634892
| 109
| 0.573358
| false
| 3.500411
| true
| false
| false
|
peterwilletts24/Monsoon-Python-Scripts
|
geopotential/plot_geopotential_diff_from_global.py
|
1
|
16864
|
"""
Load mean geopotential heights and plot in colour
"""
import os, sys
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from mpl_toolkits.basemap import Basemap
import iris
import numpy as np
import imp
import h5py
import cartopy.crs as ccrs
import scipy.interpolate
from textwrap import wrap
model_name_convert_title = imp.load_source('util', '/home/pwille/python_scripts/model_name_convert_title.py')
def main():
def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat):
"""
Convert rotated-pole lons and lats to unrotated ones.
Example::
lons, lats = unrotate_pole(grid_lons, grid_lats, pole_lon, pole_lat)
.. note:: Uses proj.4 to perform the conversion.
"""
src_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon,
pole_latitude=pole_lat)
target_proj = ccrs.Geodetic()
res = target_proj.transform_points(x=rotated_lons, y=rotated_lats,
src_crs=src_proj)
unrotated_lon = res[..., 0]
unrotated_lat = res[..., 1]
return unrotated_lon, unrotated_lat
# Set rotated pole longitude and latitude, not ideal but easier than trying to find how to get iris to tell me what it is.
plot_levels = [925, 850, 700, 500]
#plot_levels = [925]
experiment_id = 'djznw'
p_levels = [1000, 950, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20, 10]
expmin1 = experiment_id[:-1]
plot_type='mean'
# for pl in plot_diags:
plot_diag='temp'
fname_h = '/projects/cascade/pwille/temp/408_pressure_levels_interp_pressure_%s_%s' % (experiment_id, plot_type)
fname_d = '/projects/cascade/pwille/temp/%s_pressure_levels_interp_%s_%s' % (plot_diag, experiment_id, plot_type)
print fname_h
print fname_d
# Height data file
with h5py.File(fname_h, 'r') as i:
mh = i['%s' % plot_type]
        mean_heights = mh[...]
print mean_heights.shape
with h5py.File(fname_d, 'r') as i:
mh = i['%s' % plot_type]
        mean_var = mh[...]
print mean_var.shape
#lon_low= 60
#lon_high = 105
#lat_low = -10
#lat_high = 30
f_oro = '/projects/cascade/pwille/moose_retrievals/%s/%s/33.pp' % (expmin1, experiment_id)
oro = iris.load_cube(f_oro)
print oro
for i, coord in enumerate (oro.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_oro = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_oro = i
fu = '/projects/cascade/pwille/moose_retrievals/%s/%s/30201_mean.pp' % (expmin1, experiment_id)
u_wind,v_wind = iris.load(fu)
# Wind may have different number of grid points so need to do this twice
lat_w = u_wind.coord('grid_latitude').points
lon_w = u_wind.coord('grid_longitude').points
p_levs = u_wind.coord('pressure').points
lat = oro.coord('grid_latitude').points
lon = oro.coord('grid_longitude').points
cs_w = u_wind.coord_system('CoordSystem')
cs = oro.coord_system('CoordSystem')
if isinstance(cs_w, iris.coord_systems.RotatedGeogCS):
print ' Wind - %s - Unrotate pole %s' % (experiment_id, cs_w)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
csur_w=cs_w.ellipsoid
for i, coord in enumerate (u_wind.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_uwind = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_uwind = i
u_wind.remove_coord('grid_latitude')
u_wind.remove_coord('grid_longitude')
u_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w),lat_dim_coord_uwind )
u_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w), lon_dim_coord_uwind)
v_wind.remove_coord('grid_latitude')
v_wind.remove_coord('grid_longitude')
v_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w), lat_dim_coord_uwind)
v_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w),lon_dim_coord_uwind )
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' 33.pp - %s - Unrotate pole %s' % (experiment_id, cs)
lons, lats = np.meshgrid(lon, lat)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lons,lats = unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_corner_u,lat_corner_u = unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
#lon_highu,lat_highu = unrotate_pole(lon_high, lat_high, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
for i, coord in enumerate (oro.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_oro = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_oro = i
csur=cs.ellipsoid
oro.remove_coord('grid_latitude')
oro.remove_coord('grid_longitude')
oro.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_oro)
oro.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_oro)
else:
lons, lats = np.meshgrid(lon, lat)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
######## Regrid to global, and difference #######
############################################################################
## Heights
f_glob_h = '/projects/cascade/pwille/temp/408_pressure_levels_interp_pressure_djznw_%s' % (plot_type)
f_glob_d = '/projects/cascade/pwille/temp/%s_pressure_levels_interp_djznw_%s' % (plot_diag, plot_type)
with h5py.File(f_glob_h, 'r') as i:
mh = i['%s' % plot_type]
        mean_heights_global = mh[...]
with h5py.File(f_glob_d, 'r') as i:
mh = i['%s' % plot_type]
        mean_var_global = mh[...]
# Wind
fw_global = '/projects/cascade/pwille/moose_retrievals/djzn/djznw/30201_mean.pp'
fo_global = '/projects/cascade/pwille/moose_retrievals/djzn/djznw/33.pp'
u_global,v_global = iris.load(fw_global)
oro_global = iris.load_cube(fo_global)
# Unrotate global coordinates
cs_glob = u_global.coord_system('CoordSystem')
cs_glob_v = v_global.coord_system('CoordSystem')
cs_glob_oro = oro_global.coord_system('CoordSystem')
lat_g = u_global.coord('grid_latitude').points
lon_g = u_global.coord('grid_longitude').points
lat_g_oro = oro_global.coord('grid_latitude').points
lon_g_oro = oro_global.coord('grid_longitude').points
if cs_glob!=cs_glob_v:
print 'Global model u and v winds have different poles of rotation'
# Unrotate global winds
if isinstance(cs_glob, iris.coord_systems.RotatedGeogCS):
print ' Global Model - Winds - djznw - Unrotate pole %s' % cs_glob
lons_g, lats_g = np.meshgrid(lon_g, lat_g)
lons_g,lats_g = unrotate_pole(lons_g,lats_g, cs_glob.grid_north_pole_longitude, cs_glob.grid_north_pole_latitude)
lon_g=lons_g[0]
lat_g=lats_g[:,0]
for i, coord in enumerate (u_global.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_uglobal = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_uglobal = i
csur_glob=cs_glob.ellipsoid
u_global.remove_coord('grid_latitude')
u_global.remove_coord('grid_longitude')
u_global.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_uglobal)
u_global.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_uglobal)
#print u_global
v_global.remove_coord('grid_latitude')
v_global.remove_coord('grid_longitude')
v_global.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_uglobal)
v_global.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_uglobal)
#print v_global
# Unrotate global model
if isinstance(cs_glob_oro, iris.coord_systems.RotatedGeogCS):
print ' Global Model - Orography - djznw - Unrotate pole %s - Winds and other diagnostics may have different number of grid points' % cs_glob_oro
lons_go, lats_go = np.meshgrid(lon_g_oro, lat_g_oro)
lons_go,lats_go = unrotate_pole(lons_go,lats_go, cs_glob_oro.grid_north_pole_longitude, cs_glob_oro.grid_north_pole_latitude)
lon_g_oro=lons_go[0]
lat_g_oro=lats_go[:,0]
for i, coord in enumerate (oro_global.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_og = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_og = i
csur_glob_oro=cs_glob_oro.ellipsoid
oro_global.remove_coord('grid_latitude')
oro_global.remove_coord('grid_longitude')
oro_global.add_dim_coord(iris.coords.DimCoord(points=lat_g_oro, standard_name='grid_latitude', units='degrees', coord_system=csur_glob_oro), lat_dim_coord_og)
oro_global.add_dim_coord(iris.coords.DimCoord(points=lon_g_oro, standard_name='grid_longitude', units='degrees', coord_system=csur_glob_oro), lon_dim_coord_og)
############## Regrid and Difference #################################
# Regrid Height and Temp/Specific humidity to global grid
h_regrid = np.empty((len(lat_g_oro), len(lon_g_oro), len(p_levels)))
v_regrid = np.empty((len(lat_g_oro), len(lon_g_oro), len(p_levels)))
for y in range(len(p_levels)):
h_regrid[:,:,y] = scipy.interpolate.griddata((lats.flatten(),lons.flatten()),mean_heights[:,:,y].flatten() , (lats_go,lons_go),method='cubic')
v_regrid[:,:,y] = scipy.interpolate.griddata((lats.flatten(),lons.flatten()),mean_var[:,:,y].flatten() , (lats_go,lons_go),method='cubic')
# Difference heights
mean_heights = np.where(np.isnan(h_regrid), np.nan, h_regrid - mean_heights_global)
#Difference temperature/specific humidity
mean_var = np.where(np.isnan(v_regrid), np.nan, v_regrid - mean_var_global)
# Difference winds
u_wind_regrid = iris.analysis.interpolate.regrid(u_wind, u_global, mode='bilinear')
v_wind_regrid = iris.analysis.interpolate.regrid(v_wind, v_global, mode='bilinear')
u_wind=u_wind_regrid-u_global
v_wind=v_wind_regrid-v_global
#######################################################################################
# 2 degree lats lon lists for wind regridding on plot
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
lons_w,lats_w = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
for p in plot_levels:
m_title = 'Height of %s-hPa level (m)' % (p)
# Set pressure height contour min/max
if p == 925:
clev_min = -24.
clev_max = 24.
elif p == 850:
clev_min = -24.
clev_max = 24.
elif p == 700:
clev_min = -24.
clev_max = 24.
elif p == 500:
clev_min = -24.
clev_max = 24.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p == 925:
clevpt_min = -3.
clevpt_max = 3.
elif p == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.linspace(clev_min, clev_max, num=24)
s = np.searchsorted(p_levels[::-1], p)
sc = np.searchsorted(p_levs, p)
# Set plot contour lines for pressure levels
plt_h = mean_heights[:,:,-(s+1)]
#plt_h[plt_h==0] = np.nan
# Set plot colours for variable
plt_v = mean_var[:,:,-(s+1)]
#plt_v[plt_v==0] = np.nan
# Set u,v for winds, linear interpolate to approx. 1 degree grid
u_interp = u_wind[sc,:,:]
v_interp = v_wind[sc,:,:]
sample_points = [('grid_latitude', lat_wind_1deg), ('grid_longitude', lon_wind_1deg)]
u = iris.analysis.interpolate.linear(u_interp, sample_points).data
v = iris.analysis.interpolate.linear(v_interp, sample_points).data
lons_w, lats_w = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high,projection='mill')
#x, y = m(lons, lats)
x, y = m(lons_go, lats_go)
x_w, y_w = m(lons_w, lats_w)
fig=plt.figure(figsize=(8,8))
ax = fig.add_axes([0.05,0.05,0.9,0.85])
m.drawcoastlines(color='gray')
m.drawcountries(color='gray')
m.drawcoastlines(linewidth=0.5)
#m.fillcontinents(color='#CCFF99')
#m.drawparallels(np.arange(-80,81,10),labels=[1,1,0,0])
#m.drawmeridians(np.arange(0,360,10),labels=[0,0,0,1])
cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='k',linewidths=0.5)
#cs_lin = m.contour(x,y, plt_h,colors='k',linewidths=0.5)
#wind = m.barbs(x_w,y_w, u, v, length=6)
if plot_diag=='temp':
cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max), cmap=plt.cm.RdBu_r, colorbar_extend='both')
#cs_col = m.contourf(x,y, plt_v, cmap=plt.cm.RdBu_r)
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%d')
cbar.set_label('K')
plt.suptitle('Difference from Global Model (Model - Global Model ) of Height, Potential Temperature and Wind Vectors at %s hPa'% (p), fontsize=10)
elif plot_diag=='sp_hum':
cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max), cmap=plt.cm.RdBu_r)
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f')
cbar.set_label('kg/kg')
plt.suptitle('Difference from Global Model (Model - Global Model ) of Height, Specific Humidity and Wind Vectors at %s hPa'% (p), fontsize=10)
wind = m.quiver(x_w,y_w, u, v, scale=150)
qk = plt.quiverkey(wind, 0.1, 0.1, 5, '5 m/s', labelpos='W')
plt.clabel(cs_lin, fontsize=10, fmt='%d', color='black')
#plt.title('%s\n%s' % (m_title, model_name_convert_title.main(experiment_id)), fontsize=10)
plt.title('\n'.join(wrap('%s' % (model_name_convert_title.main(experiment_id)), 80)), fontsize=10)
#plt.show()
if not os.path.exists('/home/pwille/figures/%s/%s' % (experiment_id, plot_diag)): os.makedirs('/home/pwille/figures/%s/%s' % (experiment_id, plot_diag))
plt.savefig('/home/pwille/figures/%s/%s/geop_height_difference_%shPa_%s_%s.tiff' % (experiment_id, plot_diag, p, experiment_id, plot_diag), format='tiff', transparent=True)
if __name__ == '__main__':
main()
|
mit
| -4,436,724,991,769,473,000
| 39.057007
| 180
| 0.59968
| false
| 2.987422
| false
| false
| false
|
mr-martian/potential-doodle
|
doodle.py
|
1
|
78676
|
#!/usr/bin/env python3
import re, itertools, random, copy, os
from collections import defaultdict
from subprocess import Popen, PIPE
from os.path import isfile
from types import SimpleNamespace
Globals = SimpleNamespace(path=os.path.abspath(__file__)[:-9], unknown_error=True, flat=False, partial=True, keepmeta=True, spacing=1, blob=True, treebank=[], output=None, usetreebank=False)
#path: The directory containing the program
#unknown_error: Should an error be raised when trying to parse a non-existent morpheme?
#flat: Read |[XP] as [XP a b c d] rather than [XP a [Xmod b [Xbar c d]]]
#partial: Return incomplete translations
#keepmeta: Copy glosses and metadata from input to output
#spacing: Number of newlines to put between segments of output
#blob: Write all lines in one operation rather than as they are generated
#treebank: Local storage for trees to be reused by later commands
#output: Where to write output (use None for treebank or stdout)
#usetreebank: Write to treebank rather than stdout
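#Illustrative usage (added sketch, not part of the original script): these
#switches are plain attributes, so callers can override them before running,
#for example:
#    Globals.flat = True            # keep |[XP] trees as flat [XP a b c d]
#    Globals.unknown_error = False  # log unknown morphemes instead of raising
#    Globals.spacing = 2            # two newlines between output segments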
class PatternElement:
"""Base class for elements of trees (both sentences and rule patterns)"""
CheckType = False
#by default, items of different subclasses can be equivalent
#for pattern-matching purposes
def __init__(self, ntype, props=None, loc=None):
self.ntype = ntype
self.props = props or {}
self.loc = loc or ''
#the file line that generated the object, for debugging
def __getitem__(self, key):
return self.props[key]
def __setitem__(self, key, value):
self.props[key] = value
def __contains__(self, key):
return key in self.props
def __str__(self):
return type(self).__name__ + self.ntype + str(self.props)
def __repr__(self):
return self.__str__()
def matchcondlist(self, cndls):
"""Given a list of (key, value) pairs, check that they all appear in self.props."""
return all(k in self and self[k] == v for k,v in cndls)
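    # Illustrative example (hypothetical data, not from the original): for a node
    # with props {'case': 'nom', 'num': 'sg'}, matchcondlist([('case', 'nom')])
    # returns True and matchcondlist([('case', 'acc')]) returns False.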
def getvars(self, tree, vrs):
"""Use self as a pattern and check against tree,
storing any variable values in vrs.
If tree does not match self,
a failure message is stored in vrs[' failed'].
"""
if self.CheckType and type(tree) != type(self):
vrs[' failed'] = 'type'
return vrs
if tree == None:
vrs[' failed'] = 'tree is None'
return vrs
if self.ntype and self.ntype != tree.ntype:
vrs[' failed'] = 'ntype'
return vrs
for p in self.props:
if p not in tree:
vrs[' failed'] = 'nonexistent property %s' % p
return vrs
if isinstance(self.props[p], str) or self.props[p] == None:
if self.props[p] != tree[p]:
vrs[' failed'] = 'property value mismatch'
return vrs
else:
self.props[p].getvars(tree[p], vrs)
if vrs[' failed']:
return vrs
return vrs
def putvars(self, vrs):
"""Reinsert variables (vrs) into pattern (self), inverse of getvars()."""
return self
def check(self, tree):
        return not self.getvars(tree, {' failed': False})[' failed']  # True when matching did not fail
class DataElement(PatternElement):
"""Base class for elements of sentences"""
CheckType = True
def trans(self, tr):
"""Apply a translation tr to self and return the result
extract variables from context and then from form,
apply operations and reinsert variables
"""
vrs = tr.context.getvars(self, {' failed': False})
if vrs[' failed'] or not isinstance(vrs[' '], DataElement):
if tr.debug:
print('Debugging rule failure on context for %s' % tr.name)
print(' Tree was: %s' % self)
print(' Reason was: %s' % vrs[' failed'])
print(' @ was: %s\n\n' % vrs[' '])
return []
vrs = tr.form.getvars(vrs[' '], vrs)
if vrs[' failed']:
if tr.debug:
print('Debugging rule failure on form for %s' % tr.name)
print(' Tree was: %s' % vrs[' '])
print(' Reason was: %s\n\n' % vrs[' failed'])
return []
applyrules(tr.result, vrs)
return copy.deepcopy(tr.context).putvars(vrs)
def transmulti(self, tr):
"""Apply a multirule (tr) to self and return the result
extract variables at each level and then apply operations in reverse order
"""
if tr.ntypelist and self.ntype not in tr.ntypelist:
return []
vrs = {' failed': False, ' ': self}
path = []
for l in tr.layers:
for i, f in enumerate(l):
vrs2 = f[0].getvars(vrs[' '], vrs.copy())
if not vrs2[' failed']:
vrs = vrs2
path.append(f[1:])
break
else:
return []
for result in reversed(path):
applyrules(result, vrs)
return vrs[' ']
def transform(self, pats, returnself=True):
"""Apply a set of rules to self.
If none of the rules produce output, return self if returnself is True.
Otherwise return [].
All returned nodes will either be self or self after 1 rule application.
"""
if len(pats) > 0:
nodes = []
retstr = ['[]']
for i, p in enumerate(pats):
if isinstance(p, Translation):
x = self.trans(p)
else:
x = self.transmulti(p)
s = str(x)
if s not in retstr:
nodes.append(x)
retstr.append(s)
if not nodes and returnself:
nodes = [self]
return nodes
elif returnself:
return [self]
else:
return []
###VARIABLES
class Variable(PatternElement):
"""Pattern element for extracting data"""
pattern = re.compile('^\\$?([^:?!+\\.&]*):?([^:?!+\\.&]*)\\.?([^:?!+\\.&]*)([?!+&]*)$')
def __init__(self, label, ntype=None, prop=None, opt=False, neg=False, group=False, descend=False, cond=None, loc=None):
PatternElement.__init__(self, ntype, loc=loc)
self.label = label
self.prop = prop
self.opt = opt
self.neg = neg
self.group = group
self.descend = descend
self.cond = cond
def fromstring(s):
"""Convert a string into a into a Variable object."""
m = Variable.pattern.match(s)
if m:
g = m.groups()
return Variable(g[0], g[1], g[2], '?' in g[3], '!' in g[3], '+' in g[3], '&' in g[3])
else:
print('no match with %s' % s)
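    # Illustrative example (hypothetical input, not from the original):
    # Variable.fromstring('$subj:NP.case?') yields a Variable with
    # label='subj', ntype='NP', prop='case' and opt=True, per the pattern above.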
def checkset(self, vrs):
"""Given a set of variable values, verify that self's conditions are met."""
if self.label not in vrs:
return self.neg or self.opt
if self.group:
return all(self.check(x) for x in vrs[self.label])
else:
return self.check(vrs[self.label])
def check(self, v):
"""Check whether an element satisfies self's conditions."""
if self.neg:
return v == None
if v == None:
return self.opt
if not PatternElement.check(self, v):
return False
if self.cond:
return self.cond.check(v)
return True
def retrieve(self, vrs):
"""Extract property values or children from a set of values."""
if self.label in vrs:
node = vrs[self.label]
if not node:
if not self.prop or self.opt:
return node
else:
raise Exception('Variable %s cannot retrieve properties from None.' % self)
if self.descend:
while True:
if node.ntype == 'conjP':
node = node.children[0]
elif node.ntype[-1] == 'P' and len(node.children) in [2,4]:
if len(node.children) == 4:
node = node.children[2]
else:
node = node.children[1].children[1].children[0]
else:
break
if self.prop:
if self.prop in node:
return node[self.prop]
elif self.opt:
return None
else:
raise Exception('Error with variable %s and node %s, property does not exist.' % (self, node))
else:
return node
elif self.opt:
return None
else:
print(vrs)
print(self.label)
raise Exception('Variable %s does not exist.' % self)
def place(self, vrs, val):
"""Insert a value into a dictionary."""
if self.label in vrs and vrs[self.label]:
if self.group:
for v in vrs[self.label]:
v.props[self.prop] = val
else:
vrs[self.label].props[self.prop] = val
def getvars(self, node, vrs):
PatternElement.getvars(self, node, vrs)
if not vrs[' failed'] and self.cond:
self.cond.getvars(node, vrs)
if node == None and (self.opt or self.neg):
vrs[' failed'] = False
if self.neg and node:
vrs[' failed'] = 'node is not None'
if not vrs[' failed']:
if self.label in vrs:
if self.group:
vrs[self.label].append(node)
else:
#perhaps overwriting the previous value is the wrong approach
#but since this hasn't yet come up in practice I'm inclined to ignore it
# -D.S. 2018-07-27
vrs[self.label] = node
else:
vrs[self.label] = [node] if self.group else node
return vrs
def putvars(self, vrs):
if self.label not in vrs:
return None
else:
return vrs[self.label]
def __str__(self):
return '$'+self.label + \
((':'+self.ntype) if self.ntype else '') + \
(('.'+self.prop) if self.prop else '') + \
('?' if self.opt else '') + \
('!' if self.neg else '') + \
('+' if self.group else '') + \
('&' if self.descend else '') + \
(('(' + str(self.cond) + ')') if self.cond else '')
def __deepcopy__(self, memo):
return self
#Variables aren't modified, so we don't care about copying them
class Unknown(Variable):
"""Variable that will match anything at all"""
count = 0
def __init__(self):
Variable.__init__(self, ' '+str(Unknown.count), opt=True)
Unknown.count += 1
def getvars(self, tree, vrs):
vrs[self.label] = tree
return vrs
def check(self, v):
return True
def __str__(self):
return '*'
###DATA STRUCTURES
class Morpheme(DataElement):
"""Word, tense, punctuation mark, or other non-structural sentence element"""
__AllMorphemes = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: None)))
def __init__(self, lang, ntype, root, props=None, isref=False, loc=None):
PatternElement.__init__(self, ntype, props, loc)
self.lang = lang
self.root = root
self.isref = isref
if not isref:
roots = [root]
if 'searchkey' in self.props:
roots.append(self.props['searchkey'])
pos = [ntype]
if 'altpos' in self.props:
pos.append(self.props['altpos'])
for p in pos:
for r in roots:
Morpheme.__AllMorphemes[lang][p][r] = self
else:
try:
Morpheme.get(lang, ntype, root, loc or '(see stacktrace)')
except:
if Globals.unknown_error:
raise
else:
f = open(Globals.path + 'missing_morphemes.txt', 'a')
f.write(str(lang) + ': ' + ntype + '=' + root + '\n')
f.close()
def __str__(self):
return self.ntype + '=' + self.root
def getref(self):
"""Create a separate Morpheme that points back to self."""
return Morpheme(self.lang, self.ntype, self.root, isref=True)
def itermorph(lang):
return Morpheme.__AllMorphemes[lang]
def tagify(self, regex=False):
"""Produce input for a morphological transducer.
If regex is True the output will be a regex to be used in parse()
"""
lang = Language.getormake(self.lang)
format = ''
tagset = []
defaults = {}
for typ in lang.tags:
if typ['ntype'] != self.ntype:
continue
if not self.matchcondlist(typ['conds']):
continue
format = typ['format']
tagset = typ['tags']
defaults = typ['defaults']
break
else:
format = '{root[0]}<%s>' % self.ntype
tagset = {}
defaults = {}
tags = {'root': self.root.split('#')[0].split(lang.tags_rootsplit)}
if 'root' in self:
tags['root'] = self['root'].split(lang.tags_rootsplit)
for tg in tagset:
if isinstance(tagset[tg], str):
if tagset[tg] in self:
t = self[tagset[tg]]
tags[tg] = '<' + t + '>' if t else ''
else:
for cs in tagset[tg]:
if self.matchcondlist(cs['conds']):
tags[tg] = cs['tag']
break
if tg not in tags:
if regex:
tags[tg] = '<[^<>]*>'
else:
tags[tg] = defaults[tg]
ret = format.format(**tags) or self.root
if regex:
ret = '\t' + ret.replace('+', '\\+')
return ret
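    # Illustrative example (assumption, not from the original): if lang.tags has
    # no entry for this ntype, tagify() falls back to '{root[0]}<ntype>', so a
    # Morpheme with ntype 'N' and root 'dog' would produce 'dog<N>'
    # (or '\tdog<N>' with '+' escaped when regex=True).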
def get(lang, ntype, root, loc):
"""Retrieve elements from Morpheme.__AllMorphemes."""
if lang not in Morpheme.__AllMorphemes:
raise Exception('Error at %s: Language %s not loaded.' % (loc, lang))
else:
d = Morpheme.__AllMorphemes[lang]
if ntype not in d:
raise Exception('Error at %s: Non-existent part of speech %s' % (loc, ntype))
else:
d = d[ntype]
if root not in d:
raise Exception('Error at %s: Undefined morpheme %s=%s' % (loc, ntype, root))
else:
return d[root]
def __getitem__(self, key):
if key in self.props:
return self.props[key]
elif self.isref:
ref = Morpheme.get(self.lang, self.ntype, self.root, None)
if key in ref.props:
return ref.props[key]
else:
raise KeyError('Morpheme %s does not have property %s.' % (self, key))
def __contains__(self, key):
if key in self.props:
return True
if self.isref:
return key in Morpheme.get(self.lang, self.ntype, self.root, None).props
return False
def getvars(self, tree, vrs):
PatternElement.getvars(self, tree, vrs)
if not vrs[' failed']:
if self.lang != tree.lang or self.root != tree.root:
vrs[' failed'] = 'lang or root'
return vrs
def putvars(self, vrs):
return self
class Node(DataElement):
"""Structural element of a sentence"""
def __init__(self, ntype, children, props=None, loc=None):
PatternElement.__init__(self, ntype, props, loc)
self.children = children
self.rotate = False
def swapchildren(self, ls):
"""Return a Node with the same properties but different children."""
return Node(self.ntype, ls, self.props.copy())
def getvars(self, tree, vrs):
PatternElement.getvars(self, tree, vrs)
if not vrs[' failed']:
if len(self.children) != len(tree.children):
vrs[' failed'] = 'number of children'
return vrs
for s,t in zip(self.children, tree.children):
if s:
s.getvars(t, vrs)
elif t:
vrs[' failed'] = 'non-null child'
if vrs[' failed']:
return vrs
return vrs
def putvars(self, vrs):
ch = []
for c in self.children:
try:
a = c.putvars(vrs)
if isinstance(a, list):
ch += a
else:
ch.append(a)
except AttributeError:
ch.append(c)
return Node(self.ntype, ch, self.props.copy())
def transform(self, pats, returnself=True):
"""Apply DataElement.transform() to children and then to self."""
chs = []
for c in self.children:
if c:
chs.append(c.transform(pats, True))
else:
chs.append([c])
swap = map(lambda x: self.swapchildren(list(x)), itertools.product(*chs))
ret = list(itertools.chain.from_iterable(map(lambda x: DataElement.transform(x, pats, True), swap)))
if returnself and not ret:
ret = [self]
return ret
def __str__(self):
if isinstance(self.children, list):
s = '[' + ' '.join([str(x) for x in self.children]) + ']'
else:
s = str(self.children)
return '%s%s%s' % (self.ntype, s, str(self.props))
def debug(self, depth=0):
"""Convert self to a multi-line indented string."""
ls = [(' '*depth) + ('%s[' % self.ntype)]
for c in self.children:
if isinstance(c, Node):
                ls.append(c.debug(depth+1))
            else:
                ls.append(' '*(depth+1) + str(c))
ls.append(' '*depth + ']' + str(self.props))
return '\n'.join(ls)
def writecompile(self):
"""Convert self to a string that can be parsed back to self."""
if len(self.children) == 1 and isinstance(self.children[0], str):
return self.ntype + '=' + self.children[0]
l = [self.ntype]
for c in self.children:
if isinstance(c, Node):
l.append(c.writecompile())
elif not c:
l.append('~')
else:
l.append(str(c))
return '[' + ' '.join(l) + ']'
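    # Illustrative example (hypothetical data): a leaf like Node('N', ['dog'])
    # writes back as 'N=dog', while nested Nodes become bracketed forms such as
    # '[NP N=dog ~]', which toobj() below can parse again.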
def graph(self, name, ishead=False):
"""Convert self to a dot graph."""
ret = ''
if ishead:
ret += 'digraph {'
ret += '%s [label="%s"];' % (name, self.ntype)
for i, c in enumerate(self.children):
ret += '%s -> %s%d;' % (name, name, i)
if isinstance(c, Node):
ret += c.graph(name+str(i))
else:
ret += '%s%d [label="%s"];' % (name, i, str(c))
if ishead:
ret += '}'
return ret
def flatten(self):
"""Flatten X-bar phrases to single nodes (destructive).
Converts [XP specifier [Xmod modifier [Xbar head complement]]]
to [XP specifier modifier head complement]
"""
for c in self.children:
if isinstance(c, Node):
c.flatten()
if self.ntype[-1] == 'P':
n = self.ntype[:-1]
if len(self.children) != 2: return None
if not isinstance(self.children[1], Node): return None
m = self.children[1]
if m.ntype != n+'mod': return None
if len(m.children) != 2: return None
if not isinstance(m.children[1], Node): return None
b = m.children[1]
if b.ntype != n+'bar': return None
if len(b.children) != 2: return None
self.children = [self.children[0], m.children[0], b.children[0], b.children[1]]
def unflatten(self):
"""Transform nodes with 4 children to X-bar phrases (destructive).
Inverse of flatten()
Converts [XP specifier modifier head complement]
to [XP specifier [Xmod modifier [Xbar head complement]]]
"""
for c in self.children:
if isinstance(c, Node):
c.unflatten()
if self.ntype[-1] == 'P' and len(self.children) == 4:
ch = self.children
n = self.ntype[:-1]
self.children = [ch[0], Node(n+'mod', [ch[1], Node(n+'bar', [ch[2], ch[3]])])]
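    # Illustrative shape (restating the docstrings above, hypothetical labels):
    #   flatten():   [NP spec [Nmod mod [Nbar head comp]]] -> [NP spec mod head comp]
    #   unflatten(): the reverse transformation.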
def rotated(self, lang):
"""Determine whether the children should be reversed for sentence generation."""
        return (self.ntype in Language.getormake(lang).rotate) != self.rotate  # parenthesized to avoid comparison chaining
def tagify_all(self, lang):
"""Run Morpheme.tagify() on all Morphemes in a tree."""
rev = self.rotated(lang)
ret = []
for c in self.children:
if isinstance(c, Node):
                a = c.tagify_all(lang)
elif isinstance(c, Morpheme):
a = [c.tagify()]
else:
a = [c] if c else []
if rev:
ret = a + ret
else:
ret += a
return ret
def linear(self, lang):
"""Convert a tree to an ordered list of Morphemes."""
l = []
for c in self.children:
if isinstance(c, Node):
l.append(c.linear(lang))
elif c:
l.append([c])
if self.rotated(lang):
l.reverse()
r = []
for c in l:
r += c
return r
def iternest(self):
"""Iterate over all elements in a tree."""
yield self
for ch in self.children:
if isinstance(ch, Node):
yield from ch.iternest()
else:
yield ch
def roots(self):
"""Return the roots of all Morphemes in a tree."""
ret = []
for ch in self.children:
if isinstance(ch, Morpheme):
ret.append(ch.root)
elif isinstance(ch, Node):
ret += ch.roots()
return ret
def alllang(self, lang):
"""Verify that all Morphemes in a tree are in the target language."""
for n in self.iternest():
if isinstance(n, Morpheme) and n.lang != lang:
return False
return True
class UnorderedCollector(PatternElement):
"""Collection of Variables that matches the children of a Node
Matched children are associated with the first matching Variable.
Variables with .group and .opt both False will match exactly 1 child,
or the match will fail.
These are typically used to match [I] Nodes of verbal conjugations.
"""
def __init__(self, ntype, children, loc):
PatternElement.__init__(self, ntype, None, loc)
self.children = children
def getvars(self, tree, vrs):
PatternElement.getvars(self, tree, vrs)
if not vrs[' failed']:
if not isinstance(tree, Node):
vrs[' failed'] = 'UnorderedCollector only matches Nodes'
return vrs
found = set()
for c in tree.children:
if not c:
continue
for i, v in enumerate(self.children):
v.getvars(c, vrs)
if not vrs[' failed']:
found.add(i)
break
else:
vrs[' failed'] = False
else:
vrs[' failed'] = 'no matching variables found for %s' % c
break
else:
for i, v in enumerate(self.children):
if isinstance(v, Variable) and v.label not in vrs:
if v.opt:
vrs[v.label] = None
found.add(i)
else:
vrs[' failed'] = 'unmatched variable'
break
if len(found) < len(self.children):
vrs[' failed'] = 'unmatched element'
return vrs
def putvars(self, vrs):
ch = []
for v in self.children:
a = v.putvars(vrs)
if isinstance(a, list):
ch += a
else:
ch.append(a)
return Node(self.ntype, ch)
def __str__(self):
return '<%s %s>' % (self.ntype, ' '.join(str(x) for x in self.children))
###TRANSFORMATIONS
class Rule:
"""Base class for transformations
Rule applications are ordered by stage, starting with 0 and no guarantees
are made about ordering within a single stage.
"""
def __init__(self, langs, category='', mode='syntax', stage=0, name='', debug=False):
self.langs = langs
self.category = category
self.mode = mode
self.stage = stage
self.name = name
self.debug = debug
if self.langs[0] == self.langs[1]:
l = Language.getormake(self.langs[0])
if mode == 'linear':
l.linear[category].append(self)
elif mode == 'linear-text':
l.lineartext[category].append(self)
else:
x = len(l.movement[category])
l.movement[category].append(self)
assert(len(l.movement[category]) > x)
else:
l = LangLink.getormake(self.langs[0], self.langs[1])
if mode == 'syntax':
l.syntax.append(self)
else:
l.pats[category].append(self)
class Translation(Rule):
"""Transformation consisting of a context, form, and result
Applies result to form when form is embedded in context.
"""
def __init__(self, form, result, category, langs, context=None, mode='syntax', stage=0, name=''):
self.form = form
self.result = result
self.roots = [] #roots of all morphemes in form
if isinstance(form, Node):
self.roots = form.roots()
self.rootset = set(self.roots)
self.resultroots = []
if isinstance(result, Node):
self.resultroots = result.roots()
self.resultrootset = set(self.resultroots)
self.addedroots = self.resultrootset - self.rootset
self.context = context or Variable(' ')
Rule.__init__(self, langs, category, mode, stage, name)
def __str__(self):
return '{%s => %s}%s' % (self.form, self.result, self.roots)
def __repr__(self):
return self.__str__()
class MultiRule(Rule):
"""Multi-layer transformation
Each layer contains 1 or more forms, each of which has an associated
result and serves as a context for the next layer.
"""
def __init__(self, layers, category, langs, mode='syntax', stage=0, name=''):
self.layers = layers
self.roots = [] #roots of all morphemes in form
self.rootset = set(self.roots)
self.resultroots = []
self.resultrootset = set(self.resultroots)
self.addedroots = self.resultrootset - self.rootset
self.ntypelist = []
if all(isinstance(x[0], Node) for x in layers[0]):
self.ntypelist = [x[0].ntype for x in layers[0]]
Rule.__init__(self, langs, category, mode, stage, name)
def applyrules(rules, vrs):
"""Apply the output of a rule to a set of variables."""
putback = {}
for rule in rules:
if isinstance(rule, DataElement) or isinstance(rule, UnorderedCollector):
vrs[' '] = rule.putvars(vrs)
elif isinstance(rule, list):
if rule[0] == 'setlang':
vrs[' '].lang = rule[1]
elif rule[0] == 'setdisplay':
vrs[' '].props['display'] = rule[1]
elif rule[0] == 'set':
vrs[' '].props.update(rule[1])
elif rule[0] == 'setprop':
if isinstance(rule[2], str):
rule[1].place(vrs, rule[2])
else:
rule[1].place(vrs, rule[2].retrieve(vrs))
elif rule[0] == 'rotate':
vrs[' '].rotate = True
elif rule[0] == 'makevar':
vrs[rule[1]] = copy.deepcopy(rule[2])
elif rule[0] == 'order':
ch = []
for v in rule[2:]:
if v.label in vrs and vrs[v.label]:
ch.append(vrs[v.label])
vrs[' '] = Node(rule[1], ch)
elif rule[0] == 'node':
vrs[' '] = toobj(*rule[1:], at=vrs[' ']).putvars(vrs)
elif rule[0] == 'cond':
for op in rule[1:]:
if all(v.checkset(vrs) for v in op[0]):
applyrules(op[1:], vrs)
break
elif rule[0] == 'distribute':
src = rule[1]
dst = rule[2]
try:
val = vrs[rule[3].label][src]
except:
print(vrs[' '])
print(rule)
raise
for l in rule[4:]:
nv = None
for v in (l if isinstance(l, list) else [l]):
if v.label in vrs and vrs[v.label]:
vrs[v.label].props[dst] = val
if src in vrs[v.label]:
nv = vrs[v.label][src]
if nv:
val = nv
elif rule[0] == 'log':
print(rule[1].retrieve(vrs))
elif rule[0] == 'print':
print(rule[1])
elif rule[0] == 'pull':
putback[rule[1]] = vrs[rule[1]]
vrs[rule[1]] = None
elif rule[0] == 'replace':
putback[rule[1]] = vrs[rule[1]]
vrs[rule[1]] = vrs[rule[2]]
vrs.update(putback)
###GENERATION
class SyntaxPat:
"""Pattern for syntax tree generation
Contains a list of sets of conditions, each with an associated tree output
and an associated list of requirements that will stop generation in the
parser if unmet (intended to speed up the parser).
"""
def __init__(self, name, conds, opts, vrs, require):
self.name = name
self.conds = conds
self.opts = opts
self.vrs = vrs
self.require = require
def __str__(self):
return 'SyntaxPat(%s, %s, %s, %s)' % (self.name, self.conds, self.opts, self.vrs)
def __repr__(self):
return self.__str__()
class Language:
"""Collection of language-wide per-language settings"""
__alllangs = {}
def __init__(self, lang):
#Metadata
self.name = ''
self.names = {}
self.creator = ''
#General
self.lang = lang
self.syntax = {}
self.rotate = []
self.syntaxstart = None
#Movement
self.movement = defaultdict(list)
self.linear = defaultdict(list)
self.lineartext = defaultdict(list)
#Transducer
self.lexc = ''
self.lexc_lexicons = []
self.tags = []
self.tags_rootsplit = '' #for cases where it's easiest to have tags between parts of the root
self.morph_mode = '' #hfst or lttoolbox
self.capitalize = False
Language.__alllangs[lang] = self
def isloaded(lang):
"""Check whether a language has been loaded from its data file."""
return lang in Language.__alllangs
def getormake(lang):
"""Return the associate Language object, loading from file if needed."""
if lang in Language.__alllangs:
return Language.__alllangs[lang]
else:
return loadlang(lang)
def getpats(self):
"""Return a dictionary of patterns for sentence generation."""
r = {}
r.update(self.syntax)
for k, v in Morpheme.itermorph(self.lang).items():
r[k] = list(v.values())
return r
def movefind(self, roots):
"""Return a list of movement rules, sorted by stage."""
s = set(roots + [''])
ret = defaultdict(list)
for r in s:
for p in self.movement[r]:
if p.rootset < s:
ret[p.stage].append(p)
return [ret[k] for k in sorted(ret.keys())]
def domovement(self, sen):
"""Retrieve and apply movement rules to a sentence."""
pats = self.movefind(sen.roots())
tr = [sen]
for p in pats:
ntr = []
for s in tr:
ntr += s.transform(p, False)
tr = ntr or tr
return tr
def totext(self, sen):
"""Generate the default surface form of a sentence."""
return dolinear(self.domovement(sen)[0], self.lang)
def allnames():
"""Return the names of all loaded languages."""
return [(x, Language.__alllangs[x].name) for x in sorted(Language.__alllangs.keys())]
def iterlex(self):
"""Iterate over all Morphemes in this language."""
dct = Morpheme.itermorph(self.lang)
for ntype in dct:
for root in dct[ntype]:
yield dct[ntype][root]
class LangLink:
"""Container for translations for a language pair in a particular direction"""
__alllinks = {}
def __init__(self, fromlang, tolang):
self.fromlang = fromlang
self.tolang = tolang
self.syntax = []
self.pats = defaultdict(list)
LangLink.__alllinks['%s-%s' % (fromlang, tolang)] = self
def find(self, _roots):
"""Retrieve all rules applicable to a set of roots."""
roots = _roots + ['']
s = set(roots)
ret = defaultdict(list)
for r in roots:
for p in self.pats[r]:
if p.rootset < s:
ret[p.stage].append(p)
return [ret[k] for k in sorted(ret.keys())]
def getormake(fromlang, tolang):
"""Retrieve a LangLink, loading from file if needed."""
s = '%s-%s' % (fromlang, tolang)
if s in LangLink.__alllinks:
return LangLink.__alllinks[s]
else:
return loadtrans(fromlang, tolang)
def translate(self, sen):
"""Translate a sentence."""
pats = self.find(sen.roots())
tr = [sen]
for p in pats:
ntr = []
for s in tr:
ntr += s.transform(p, False)
if ntr:
tr = ntr
return tr
def run(prog, *args, data=None):
"""Launch an external program, pass data to it and return its output."""
proc = Popen([prog] + list(args), stdin=PIPE, stdout=PIPE, universal_newlines=True)
if data:
return proc.communicate(data)[0]
def transduce(data, lang, gen=True):
"""Pass data to a transducer.
gen=True for generation, gen=False for parsing
"""
mode = Language.getormake(lang).morph_mode
if mode not in ['hfst', 'lttoolbox']:
raise Exception('Unknown morphology mode %s' % mode)
path = Globals.path + 'langs/%d/.generated/' % lang
path += ('gen' if gen else 'parse') + ('hfst' if mode == 'hfst' else 'bin')
if gen:
data = '\n'.join(data) if mode == 'hfst' else '^'+('$\n^'.join(data))+'$'
if mode == 'hfst':
result = run('hfst-lookup', '-q', '-b', '0', '-i', path, data=data)
return [x.split('\t')[1] for x in result.strip().split('\n\n')]
else:
result = run('lt-proc', '-g', path, data=data)
return [x[1:] if x[0] == '~' else x for x in result.split('\n')]
else:
if mode == 'hfst':
result = run('hfst-proc', '-x', '-w', path, data=data+'\n').split('\n\n')
resplus = run('hfst-proc', '-x', '-w', path, data=data.replace(' ', '+')+'\n')
return result + [x for x in resplus.split('\n\n') if '+' in x]
def dolinear(sen, _lang):
"""Apply rules that manipulate adjacent Morphemes rather than trees."""
lin = sen.linear(_lang)
lang = Language.getormake(_lang)
for i, m in enumerate(lin):
for pat in lang.linear[m.root]:
if not pat.form.check(m):
continue
if isinstance(pat.context, list):
for d, p in pat.context:
if i+d < 0 or i+d >= len(lin):
break
if p.check(lin[i+d]):
break
else:
for d, r in pat.result:
if r == 'inaudible':
lin[i+d]['audible'] = 'false'
elif isinstance(r, list) and r[0] == 'display':
lin[i+d]['display'] = r[1]
else:
lin[i+d] = r
lintxt = transduce([x.tagify() for x in lin], _lang)
for i, m in enumerate(lin):
for pat in lang.lineartext[m.root]:
if isinstance(pat.context, list):
for d, p in pat.context:
if i+d < 0 or i+d >= len(lintxt):
break
if isinstance(p, str) and lintxt[i+d] != p:
break
if not isinstance(p, str) and not p.match(lintxt[i+d]):
break
else:
lintxt[i] = pat.result
final = []
for i, m in enumerate(lin):
if 'audible' in m and m['audible'] == 'false':
continue
elif 'display' in m:
final.append(m['display'])
else:
final.append(lintxt[i])
ret = ' '.join(final).replace('+', ' ').replace('- -', '').replace('- ', '').replace(' -', '')
if lang.capitalize:
for i, c in enumerate(ret):
if c.isalpha():
ret = ret[:i] + ret[i].capitalize() + ret[i+1:]
break
return ret
###PARSING
def tokenize(s):
"""Tokenize a string."""
ret = []
add = False
digraph = False
for c in s:
if c in '[]<>$(){}=@~*':
if digraph:
ret[-1] += c
digraph = False
else:
ret.append(c)
add = False
elif c == '|':
if digraph:
ret[-1] += c
else:
ret.append(c)
digraph = True
elif c.isspace():
add = False
digraph = False
elif add:
ret[-1] += c
else:
ret.append(c)
add = True
return ret
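# Illustrative example (hypothetical input, not from the original):
# tokenize('[NP $x:N ~]') -> ['[', 'NP', '$', 'x:N', '~', ']']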
def toobj(s, lang, loc):
"""Parse a string into language lang from original source loc."""
assert(isinstance(lang, int))
Language.getormake(lang)
rest = tokenize(s)
def destring():
nonlocal rest
cur = rest.pop(0)
def ok(th):
return th[0] not in '[]<>$(){}=@|~*'
if cur == '~':
return None
elif cur == '*':
return Unknown()
elif cur == '@':
return Variable(' ', loc=loc)
elif cur == '$': #Variable
ret = Variable.fromstring(rest.pop(0))
if not ret:
raise ParseError('Badly formed variable at %s' % loc)
ret.loc = loc
if rest and rest[0] == '{':
ret.props.update(destring())
if rest and rest[0] == '(':
rest.pop(0)
if len(rest) >= 2 and rest[1] == ')':
ret[rest.pop(0)] = Unknown()
rest.pop(0)
elif len(rest) >= 4 and ok(rest[0]) and rest[1] == '=' and ok(rest[2]) and rest[3] == ')':
ret[rest[0]] = rest[2]
rest = rest[4:]
else:
if rest[0] == '%': rest.pop(0) #@TODO total hack
# later go through and switch to {} for properties and have () be only .cond
ret.cond = destring()
if rest[0] != ')':
raise ParseError('Badly formed variable condition on line %s (remainder was %s).' % (loc, rest))
rest.pop(0)
return ret
elif cur == '[': #Syntax
ntype = rest.pop(0)
ch = []
while rest[0] != ']':
ch.append(destring())
d = {}
rest.pop(0)
if rest and rest[0] == '{':
d = destring()
return Node(ntype, ch, d, loc=loc)
        elif cur == '|[': #xbar Syntax
if rest[0][0] == '?':
rest = ['?', rest[0][1:]] + rest[1:]
if rest[0] not in '*?$~':
rest = ['~'] + rest
mode = rest.pop(0)
name = rest.pop(0)[:-1]
spots = ['spec', 'mod', 'head', 'comp']
sub = {'*': [Unknown(), Unknown(), Unknown(), Unknown()],
'?': [Variable(name+s, opt=True, loc=loc) for s in spots],
'$': [Variable(name+s, loc=loc) for s in spots],
'~': [None, None, None, None]}[mode]
ch = []
while rest and rest[0] != ']':
ch.append(destring())
if rest:
rest.pop(0)
else:
raise ParseError('Syntax mode is missing closing bracket at %s' % loc)
if len(ch) == 0: #nothing
ch.insert(0, sub[2]) #insert head
if len(ch) == 1: #just head
ch.insert(1, sub[3]) #insert comp
if len(ch) == 2: #head and comp
ch.insert(0, sub[0]) #insert spec
if len(ch) == 3: #spec, head, and comp
ch.insert(1, sub[1]) #insert mod
d = {}
if rest and rest[0] == '{':
d = destring()
if Globals.flat:
return Node(name+'P', ch, d, loc=loc)
else:
bar = Node(name+'bar', ch[2:], loc=loc)
mod = Node(name+'mod', [ch[1], bar], loc=loc)
return Node(name+'P', [ch[0], mod], d, loc=loc)
elif cur == '<': #UnorderedCollector
ntype = rest.pop(0)
ch = []
while rest and rest[0] != '>':
ch.append(destring())
if not rest:
raise ParseError('Incomplete Unordered Collector, missing > at %s' % loc)
rest.pop(0)
return UnorderedCollector(ntype, ch, loc=loc)
elif cur == '{': #props pattern
d = {}
while rest[0] != '}':
p = rest.pop(0)
assert(rest.pop(0) == '=')
d[p] = rest.pop(0)
rest.pop(0)
return d
else:
if rest[0] == '=': #Morpheme
pos = cur
root = rest.pop(1)
rest.pop(0)
d = {}
if rest and rest[0] == '{':
d = destring()
return Morpheme(lang, pos, root, isref=True, props=d, loc=loc)
else:
rest = ['$', ':'+cur] + rest
return destring()
try:
ret = destring()
except:
print('original line: %s' % s)
print('problem on line %s, add more checks, unparsed remainder was %s' % (loc, rest))
raise
if rest != []:
print('problem on line %s, unparsed remainder was %s' % (loc, rest))
assert(rest == [])
return ret
###FILES
class ParseError(Exception):
pass
class ParseLine:
"""Line from a data file, has label, arguments, value, and children"""
def __init__(self, num, label, args=None, val=None, children=None):
self.num = num
self.label = label
self.args = args or []
self.arg = '; '.join(self.args)
self.val = val or ''
self.vals = [val] if val else []
self.children = children or []
def fromstring(fstr, num):
"""Parse a line (without leading whitespace).
Allowed formats:
label (arg1; arg2): value
label: value
label (arg1; arg2)
label
"""
i = 0
r = ParseLine(num, '', [], '', [])
while i < len(fstr) and fstr[i] not in ' :(':
r.label += fstr[i]
i += 1
r.label = r.label.strip()
while i < len(fstr) and fstr[i] == ' ':
i += 1
p = 0
if i < len(fstr)-1 and fstr[i] == '(':
i += 1
s = ''
while fstr[i] != ')' or p != 0:
s += fstr[i]
if fstr[i] == '(':
p += 1
if fstr[i] == ')':
p -= 1
i += 1
i += 1
r.args = [x.strip() for x in s.split(';') if not x.isspace()]
r.arg = s.strip()
if i < len(fstr)-1 and fstr[i] == ':':
i += 2
r.val = fstr[i:].strip()
r.vals = [x.strip() for x in r.val.split(';') if not x.isspace()]
i = len(fstr)
if i != len(fstr):
raise ParseError('Something is wrong with line %s.\nString was "%s", position: %d' % (num, fstr, i))
else:
return r
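    # Illustrative example (hypothetical input): fromstring('rule (stage=2): $x', 1)
    # gives label='rule', args=['stage=2'] and val='$x', matching the formats
    # listed in the docstring above.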
def fromfile(fname):
"""Parse a file and return a list of ParseLines."""
r = ParseLine(-1, '', [], '', [])
depth = 0
with open(fname) as f:
for i, l in enumerate(f):
if l.isspace() or l.lstrip()[0] == '#':
continue
while not l.startswith(' '*depth):
depth -= 1
lobj = ParseLine.fromstring(l.rstrip()[depth*2:], 'line %s of %s' % (i+1, fname))
at = r
for d in range(depth):
at = at.children[-1]
at.children.append(lobj)
depth += 1
return r.children
def tofilestr(self, indent):
"""Convert self back to a string."""
r = ' '*indent + '%s' % self.label
if self.args:
r += ' (' + '; '.join(self.args) + ')'
if self.vals:
r += ': ' + '; '.join(self.vals)
r += '\n'
for c in self.children:
r += c.tofilestr(indent+1)
return r
def tofile(self, fname):
"""Convert self to string and write to a file."""
f = open(fname, 'w')
f.write(self.tofilestr(0))
f.close()
def __str__(self):
return '%d %s (%s): %s\n' % (self.num, self.label, '; '.join(self.args), self.val) + ''.join([str(x) for x in self.children])
def __getitem__(self, key):
"""Iterates of children that have label == key."""
for ch in self.children:
if ch.label == key:
yield ch
def __contains__(self, key):
for ch in self.children:
if ch.label == key:
return True
return False
def child_vals(self, key):
"""Iterate over values of self[key]."""
for ch in self[key]:
yield ch.val
def first(self, key):
"""Return first child with label == key."""
for ch in self.children:
if ch.label == key:
return ch
def firstval(self, key):
"""Return value of first child with label == key."""
return self.first(key).val
def fvo(self, key, lang, default=None):
"""Parse the value of the first child with label == key.
Use default if no value is found.
"""
f = self.first(key)
if f:
return toobj(f.val, lang, f.num)
elif default:
return toobj(default, lang, self.num)
else:
raise ParseError('Line %s does not have required child %s.' % (self.num, key))
def avo(self, key, lang, default=None): #all val objects
"""Parse the values of all children with label == key.
Use default if none are found.
"""
c = 0
for ch in self.children:
if ch.label == key:
c += 1
yield toobj(ch.val, lang, ch.num)
if c == 0:
if default:
yield toobj(default, lang, self.num)
else:
raise ParseError('Line %s does not have required child(ren) %s.' % (self.num, key))
def condlist(ch):
"""Parse the argument of a ParseLine.
Transforms "(a=b; c=d)" into [['a', 'b'], ['c', 'd']].
"""
ret = []
for s in ch.args:
k,v = s.split('=')
ret.append([k.strip(), v.strip()])
return ret
def readresult(node, lang):
"""Read the results section of a rule definition."""
ret = []
def mkvar(_s, loc):
if '$' in _s or '@' in _s:
s = _s.replace('@', ' ')
else:
            s = '$ .' + _s
r = Variable.fromstring(s)
if r == None:
raise ParseError('Cannot interpret variable %s on line %s.' % (_s, loc))
return r
for ch in node.children:
if ch.label == 'result':
ret.append(toobj(ch.val, lang, ch.num))
elif ch.label == 'setprop':
ret.append(['setprop', mkvar(ch.arg, ch.num), mkvar(ch.val, ch.num)])
elif ch.label == 'setval':
ret.append(['setprop', mkvar(ch.arg, ch.num), ch.val])
elif ch.label == 'setdisplay':
ret.append(['setdisplay', ch.val])
elif ch.label == 'setprops':
d = {}
for prop in ch.children:
d[prop.label] = prop.val
ret.append(['set', d])
elif ch.label == 'blank':
ret.append(['setdisplay', ''])
elif ch.label == 'set':
ret.append(['set', dict(condlist(ch))])
elif ch.label == 'rotate':
ret.append(['rotate'])
elif ch.label == 'cond':
com = ['cond']
for op in ch.children:
if op.label == 'option':
com.append([[toobj(x, lang, op.num) for x in op.args]] + readresult(op, lang))
ret.append(com)
elif ch.label == 'if':
ret.append(['cond', [[toobj(x, lang, ch.num) for x in ch.args]] + readresult(ch, lang)])
elif ch.label == 'distribute':
ret.append(['distribute'] + ch.args + [toobj(x, lang, ch.num) for x in ch.vals])
elif ch.label == 'order':
ret.append(['order', ch.arg] + [toobj(x, lang, ch.num) for x in ch.vals])
elif ch.label == 'log':
ret.append(['log', toobj(ch.val, lang, ch.num)])
elif ch.label == 'print':
ret.append(['print', ch.val])
elif ch.label == 'makevar':
ret.append(['makevar', ch.arg, toobj(ch.val, lang, ch.num)])
elif ch.label == 'pull':
ret.append(['pull', ch.val])
elif ch.label == 'replace':
ret.append(['replace', ch.arg, ch.val])
return ret
def readrule(node, lfrom, _lto, mode, category, _stage):
"""Read a rule definition."""
if 'samelang' in node:
lto = lfrom
else:
lto = _lto
if 'stage' in node:
stage = int(node.firstval('stage'))
elif node.arg:
stage = int(node.arg)
else:
stage = _stage
if node.label == 'rule':
con = node.fvo('context', lfrom, '@')
form = node.fvo('form', lfrom, '@')
res = readresult(node, lto)
return Translation(form, res, category, [lfrom, lto], context=con, mode=mode, stage=stage, name=node.val)
elif node.label == 'multirule':
layers = []
for ly in node.children:
if ly.val and 'form' not in ly and ly.label[-1] == '~':
ly.label = ly.label[:-1]
ly.children.append(ParseLine(ly.num, 'result', [], ly.val, []))
if ly.val and 'form' not in ly:
ly.children = [ParseLine(ly.num, 'form', [], ly.val, ly.children)]
if ly.label == 'layer?':
ly.children.append(ParseLine(-1, 'form', [], '@', [ParseLine(-1, 'result', [], '@', [])]))
ly.label = 'layer'
if ly.label != 'layer':
continue
for p in ly['form~']:
p.label = 'form'
p.children.append(ParseLine(p.num, 'result', [], p.val, []))
l = []
for p in ly['form']:
op = [toobj(p.val, lfrom, p.num)]
op += readresult(p, lfrom)
l.append(op)
layers.append(l)
return MultiRule(layers, category, [lfrom, lto], mode=mode, stage=stage, name=node.val)
elif node.label == 'linear':
pass
elif node.label == 'linear-text':
pass
def loadlexicon(lang):
"""Read a lexicon file."""
rootslist = ParseLine.fromfile(Globals.path + 'langs/%s/lexicon.txt' % lang)
defaults = defaultdict(lambda: defaultdict(dict))
if rootslist[0].label == 'defaults':
for pat in rootslist.pop(0).children:
defaults[pat.label][pat.val] = {ch.label:ch.val for ch in pat.children}
for root in rootslist:
m = Morpheme(lang, root.arg, root.label, isref=False, props=defaults[root.arg][root.val].copy())
if 'output' not in m.props:
m.props['output'] = []
for p in root.children:
if p.label in ['rule', 'multirule']:
readrule(p, lang, lang, 'lex', root.label, 1)
elif p.label == 'output':
o = [p.arg, p.val, '#']
if '=' in p.arg:
o[0] = condlist(p)
if 'lexicon' in p:
o[2] = p.firstval('lexicon')
m.props['output'].append(o)
elif p.label == 'linear':
con = []
res = []
if p.val:
res.append([0, toobj(p.val, lang, p.num)])
for ch in p.children:
try: idx = int(ch.label)
except: continue
con.append([idx, toobj(ch.val, lang, ch.num)])
if 'inaudible' in ch:
res.append([idx, 'inaudible'])
elif 'to' in ch:
res.append([idx, ch.fvo('to', lang)])
elif 'display' in ch:
res.append([idx, ['display', ch.firstval('display')]])
Translation(m.getref(), res, root.label, [lang, lang], context=con, mode='linear')
elif p.label == 'linear-text':
con = []
for ch in p.children:
if ch.label.isnumeric() or (ch.label[0] == '-' and ch.label[1:].isnumeric()):
if ch.val[0] == '/' and ch.val[-1] == '/':
con.append([int(ch.label), re.compile(ch.val[1:-1])])
else:
con.append([int(ch.label), ch.val])
Translation(m, p.val, root.label, [lang, lang], context=con, mode='linear-text')
else:
m.props[p.label] = p.val
for pos in root['altpos']:
p2 = m.props.copy()
for l in pos.children:
p2[l.label] = l.val
Morpheme(lang, pos.val, m.root, props=p2, isref=False)
def loadlang(lang):
"""Read a language file."""
things = ParseLine.fromfile(Globals.path + 'langs/%s/lang.txt' % lang)
ret = Language(lang)
loadlexicon(lang)
for th in things:
if th.label == 'syntax':
for ch in th.children:
if ch.label == 'start-with':
ret.syntaxstart = ch.val
elif ch.label == 'node-types':
for ty in ch.children:
vrs = [toobj(s, lang, ty.num) for s in ty.child_vals('variable')]
if not list(ty['option']):
ty.children = [ParseLine(-1, 'option', [], '', ty.children)]
conds = []
ops = []
require = []
for op in ty['option']:
if 'xbar' in op:
line = op.first('xbar')
nodes = line.vals
if len(nodes) != 4:
                                    raise ParseError('Wrong number of nodes given to xbar on line %s, expected 4, got %s' % (line.num, len(nodes)))
xargs = []
for s, arg in zip(nodes, ['spec', 'mod', 'head', 'comp']):
if s[0] == '$' or s == '~':
xargs.append(s)
else:
xargs.append('$%s:%s'%(arg,s))
node = toobj('|[%s %s]' % (ty.label, ' '.join(xargs)), lang, line.num)
else:
st = op.first('structure')
node = toobj(st.val, lang, st.num)
conds.append([toobj(x, lang, op.num) for x in op.args])
ops.append(node)
req = []
for r in op['require']:
req.append(r.val)
require.append(req)
ret.syntax[ty.label] = SyntaxPat(ty.label, conds, ops, vrs, require)
if th.label == 'transform':
for ch in th.children:
if ch.label == 'rotate':
ret.rotate.append(ch.val)
else:
readrule(ch, lang, lang, 'syntax', '', 0)
if th.label == 'metadata':
if 'creator' in th:
ret.creator = th.firstval('creator')
if 'name' in th:
for ch in th.first('name').children:
if ch.label == 'local':
ret.name = ch.val
ret.names[lang] = ch.val
else:
ret.names[int(ch.label)] = ch.val
if th.label == 'lexc':
ret.morph_mode = th.val
for ch in th.children:
if ch.label == 'split-root':
ret.tags_rootsplit = ch.val
continue
elif ch.label == 'capitalize-first-letter':
ret.capitalize = True
continue
cases = []
if 'lexicon' not in ch:
cases = [ch]
else:
cases = ch.first('lexicon').children
for cs in cases:
ap = {'ntype': ch.label, 'conds': condlist(cs)}
if 'bland' in cs:
ap['lexicon-in'] = ch.label + 'Root'
ap['lexicon-to'] = ch.label + 'Infl'
ap['bland'] = cs.firstval('bland')
ch.children.append(ParseLine(-1, 'format', '', '{root[0]}'+ap['bland'], []))
else:
ap['lexicon-in'] = cs.firstval('in')
ap['lexicon-to'] = cs.firstval('to')
ap['bland'] = False
if 'regex-match' in cs:
ap['regex'] = [cs.firstval('regex-match'), cs.firstval('regex-replace')]
ret.lexc_lexicons.append(ap)
tags = {}
defaults = {}
ls = ch.first('tags').children if 'tags' in ch else []
for tg in ls:
if tg.val:
tags[tg.label] = tg.val
else:
tags[tg.label] = []
for cs in tg['case']:
tags[tg.label].append({'conds': condlist(cs), 'tag': cs.val})
defaults[tg.label] = tg.firstval('default')
if defaults[tg.label] == '_':
defaults[tg.label] = ''
ret.tags.append({'format': ch.firstval('format'), 'tags': tags, 'ntype': ch.label, 'conds': condlist(ch), 'defaults': defaults})
return ret
def loadtrans(lfrom, lto):
"""Read a translation file."""
fname = Globals.path + 'langs/%s/translate/%s.txt' % (lfrom, lto)
ret = LangLink(lfrom, lto)
if isfile(fname):
trans = ParseLine.fromfile(fname)
if trans and trans[0].label != 'stage':
trans = [ParseLine(-1, 'stage', [], '', trans)]
for i, stage in enumerate(trans):
for lex in stage.children:
if lex.label in ['rule', 'multirule']:
readrule(lex, lfrom, lto, 'lex', '', i)
else:
m = toobj(lex.label, lfrom, lex.num)
if lex.val:
for g in lex.vals:
d = toobj(g, lto, lex.num)
Translation(m, [d], category=m.root, langs=[lfrom, lto], mode='lex', stage=i)
for tr in lex.children:
readrule(tr, lfrom, lto, 'lex', m.root, i)
return ret
def loadlangset(langs):
"""Given a set of languages, load them and all associated translation files."""
loaded = []
for l in langs:
if l not in loaded and l != 0:
loadlang(l)
loaded.append(l)
for lf in loaded:
for lt in loaded:
loadtrans(lf, lt)
def addmissing():
"""Add entries for everything in missing_morphemes.txt to the relevant
lexicon files.
"""
f = open('missing_morphemes.txt')
lns = list(set(f.readlines()))
lns.sort()
lang = ''
for _line in lns:
line = _line.strip()
if not line: continue
s = line.split()
l = s[0][:-1]
p,r = s[1].split('=')
if l != lang:
f.close()
f = open(Globals.path + 'langs/%s/lexicon.txt' % l, 'a')
f.write('\n\n#Generated from missing_morphemes.txt\n')
lang = l
print('Writing to langs/%s/lexicon.txt' % l)
f.write('%s (%s)\n' % (r,p))
f.close()
f = open('missing_morphemes.txt', 'w')
f.write('\n')
f.close()
def filltrans(lfrom, lto):
"""Generate empty translation rules for any words in source language
which do not have translation rules to the target language.
"""
Language.getormake(lfrom)
Language.getormake(lto)
LangLink.getormake(lfrom, lto)
fname = Globals.path + 'langs/%s/translate/%s.txt' % (lfrom, lto)
have = []
out = '#Automatically generated from langs/%s/lexicon.txt\n' % lfrom
joinby = '\n'
if isfile(fname):
pl = ParseLine.fromfile(fname)
for l in pl:
if l.label == 'stage':
have += [x.label for x in l.children]
else:
have.append(l.label)
out = '\n\n' + out + 'stage\n '
joinby += ' '
morphdict = Morpheme.itermorph(lfrom)
foundany = False
for pos in sorted(morphdict.keys()):
for root in sorted(morphdict[pos].keys()):
s = pos + '=' + root
if s not in have:
out += s + ': ~' + joinby
foundany = True
if foundany:
f = open(fname, 'a')
f.write(out)
f.close()
class Sentence:
def __init__(self, lang, name, trees, gloss):
self.lang = lang
self.name = name
self.trees = trees
self.gloss = gloss
def fromparseline(pl, lang):
trees = {'':None}
if pl.val:
trees[''] = toobj(pl.val, lang, pl.num)
for l in pl.children:
if l.label != 'gloss':
trees[l.label] = toobj(l.val, lang, l.num)
g = pl.first('gloss')
return Sentence(lang, pl.label, trees, g.val if g else '')
def toparseline(self):
ret = ParseLine(0, self.name, [], None, [])
if self.gloss:
ret.children.append(ParseLine(0, 'gloss', [], self.gloss, []))
for k in sorted(self.trees.keys()):
if not k:
ret.val = self.trees[k].writecompile()
else:
ret.children.append(ParseLine(0, k, [], self.trees[k].writecompile(), []))
return ret
def translate(self, tlang):
ret = Sentence(self.lang, self.name, {}, self.gloss if Globals.keepmeta else '')
if not self.trees:
return ret
tr = LangLink.getormake(self.lang, tlang)
for k in self.trees:
if not self.trees[k]:
continue
#if a sentence doesn't have a tree it will show up as None
for i, s in enumerate(tr.translate(self.trees[k])):
if Globals.partial or s.alllang(tlang):
ret.trees[k+'-'+str(i) if k else str(i)] = s
return ret
def totext(self):
lang = Language.getormake(self.lang)
for k in sorted(self.trees.keys()):
#this should default to tree ''
if self.trees[k]:
return lang.totext(self.trees[k])
return ''
def graph(self):
for k in sorted(self.trees.keys()):
self.trees[k].flatten()
f = open(Globals.path + 'test/%s-%s.dot' % (self.name, k), 'w')
f.write(self.trees[k].graph('n', True))
f.close()
yield '<h3>%s</h3>' % (k or '(default)'), '%s-%s.dot' % (self.name, k)
def readfile(fname):
"""Read in a .pdtxt file, return the Language and a list of Sentences."""
pl = ParseLine.fromfile(fname)
lang = int(pl[0].firstval('lang'))
Language.getormake(lang)
return lang, [Sentence.fromparseline(l, lang) for l in pl[1:]]
def graphtext(infile, outfile):
"""Use GraphViz to generate a set of images for the trees of a document."""
gls = []
f = open(outfile, 'w')
f.write('<html><head></head><body>\n')
for s in readfile(infile)[1]:
f.write('<h1>%s</h1>\n' % s.name)
for h3, fn in s.graph():
f.write('%s<img src="%s.svg"></img>\n' % (h3, fn))
gls.append('test/' + fn)
f.write('</body></html>')
f.close()
run('dot', '-Tsvg', '-O', *gls)
def translatefile(infile, outfile, tlang):
"""Read in a .pdtxt file, translate it, and write it out to another file."""
pl = ParseLine.fromfile(infile)
flang = int(pl[0].firstval('lang'))
if isinstance(outfile, str):
f = open(outfile, 'w')
else:
f = outfile
if Globals.keepmeta:
meta = pl[0]
for x in meta.children:
if x.label == 'lang':
x.vals = [str(tlang)]
else:
meta = ParseLine(0, 'metadata', children=[ParseLine(1, 'lang', val=str(tlang))])
f.write(meta.tofilestr(0))
for l in pl[1:]:
f.write(Sentence.fromparseline(l, flang).translate(tlang).toparseline().tofilestr(0))
if isinstance(outfile, str):
f.close()
class GeneratorError(Exception):
pass
def gen(pats, tree, depth, setvars):
"""Generate a random sentence."""
if isinstance(tree, Node):
r = copy.copy(tree)
rc = []
for c in copy.deepcopy(r.children):
rc.append(gen(pats, c, depth+1, setvars))
r.children = rc
return r
elif isinstance(tree, list):
return random.choice(tree)
elif isinstance(tree, Variable):
if not tree.opt or random.randint(1,100) < 10/depth:
if tree.label in setvars:
return setvars[tree.label]
else:
newtree = pats[tree.ntype]
if isinstance(newtree, list):
newtree = random.choice(newtree)
return gen(pats, newtree, depth+1, setvars)
elif isinstance(tree, SyntaxPat):
vrs = {}
for v in tree.vrs:
vrs[v.label] = gen(pats, v, depth, {})
il = []
for i, cl in enumerate(tree.conds):
for c in cl:
if not c.checkset(vrs):
break
else:
il.append(i)
if not il:
raise GeneratorError("None of the conditions for generation rule '%s' could be satisfied." % tree.name)
return gen(pats, tree.opts[random.choice(il)], depth, vrs)
else:
return tree
def make(lang):
"""Generate a random sentence. Wrapper for gen()"""
p = lang.getpats()
return gen(p, p[lang.syntaxstart], 1, {})
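# Illustrative usage (assumption: the langs/<id>/ data files exist; '7' is an
# arbitrary language id, not one from the original project):
#   tree = make(Language.getormake(7))
#   print(tree.writecompile())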
class LimitList:
"""List wrapper for tracking which Morphemes in it are in use"""
def __init__(self, few, many):
self.few = few
self.many = many
def each(self):
for i, x in enumerate(self.few):
yield x, LimitList(self.few[:i]+self.few[i+1:], self.many)
for x in self.many:
yield x, self
def __len__(self):
return len(self.few)+len(self.many)
def __str__(self):
return str(self.few + self.many)
def makeall(words):
"""Generate all possible trees containing a particular set of Morphemes."""
if not words:
return []
lang = Language.getormake(words[0].lang)
pats = lang.getpats()
for k in pats:
if isinstance(pats[k], list):
many = [x for x in pats[k] if 'audible' in x and x['audible'] == 'false']
few = [x for x in words if x.ntype == k]
pats[k] = LimitList(few, many)
def product(ls):
if len(ls) == 0:
yield ()
else:
for x in genall(*ls[0]):
for y in product(ls[1:]):
yield (x,) + y
def genall(tree, setvars):
nonlocal pats
if isinstance(tree, Node):
for ch in product([[c, setvars] for c in tree.children]):
yield tree.swapchildren(ch)
elif isinstance(tree, list):
yield from tree
elif isinstance(tree, Variable):
if tree.label in setvars:
yield setvars[tree.label]
elif isinstance(pats[tree.ntype], LimitList):
old = pats[tree.ntype]
for r, l in old.each():
pats[tree.ntype] = l
yield r
pats[tree.ntype] = old
else:
yield from genall(pats[tree.ntype], setvars)
if tree.opt:
yield None
elif isinstance(tree, SyntaxPat):
idx = []
for i, req in enumerate(tree.require):
if all(len(pats[x]) > 0 for x in req):
idx.append(i)
if idx:
labs = [v.label for v in tree.vrs]
for vrs in product([[v, {}] for v in tree.vrs]):
dct = dict(zip(labs, vrs))
for i in idx:
if all(c.checkset(dct) for c in tree.conds[i]):
for x in genall(tree.opts[i], dct):
yield x
else:
yield tree
return genall(pats[lang.syntaxstart], {})
def parse(lang, num, text):
"""Attempt to a parse a sentence by generating possible corresponding trees."""
ret = Sentence(lang, str(num), {}, text)
tags = transduce(text, lang, False)
w = []
    for m in Language.getormake(lang).iterlex():
r = re.compile(m.tagify(True))
for t in tags:
if r.search(t):
w.append(m)
ln = Language.getormake(lang)
n = 0
for x in makeall(w):
if ln.totext(x) == text:
n += 1
ret.trees[str(n)] = x
return ret
def trans(sen, flang, tlang):
"""Translate a sentence."""
tr = LangLink.getormake(flang, tlang).translate(sen)
ret = []
for s in tr:
if Globals.partial or s.alllang(tlang):
ret.append(s)
return ret
if __name__ == '__main__':
import argparse, sys
parser = argparse.ArgumentParser(description='Generate, translate, and parse sentences.')
def writelines(lines, where):
j = '\n'*Globals.spacing or ' '
if where:
f = open(where[0], 'w')
if Globals.blob:
f.write(j.join(lines) + '\n')
else:
for l in lines:
f.write(l + j)
f.close()
elif Globals.blob:
print(j.join(lines))
else:
for l in lines:
print(l, end=j)
def readline(lang, src):
if src.isnumeric():
pass
STDINLINE = 1
class TranslateAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if len(values) == 0:
print('Translate single tree: -t L1 [SRC] L2 [DEST]\n' + \
' read a tree from SRC (leave blank for stdin)\n' + \
' translate from L1 to L2\n' + \
' output to DEST (leave blank for stdout)\n' + \
'Translate .pdtxt file: -t SRC LANG [DEST]\n' + \
' translate contents of file SRC to LANG\n' + \
' output to DEST (leave blank for stdout)')
elif values[0].isnumeric():
flang = int(values.pop(0))
if values[0].isnumeric():
line = sys.stdin.readline()
global STDINLINE
where = 'standard input line %s' % STDINLINE
STDINLINE += 1
else:
where = values.pop(0)
f = open(where)
where += ' line 1'
line = f.readline()
f.close()
tree = toobj(line, flang, where)
                tr = trans(tree, flang, int(values.pop(0)))
writelines((t.writecompile() for t in tr), values)
else:
if len(values) >= 3:
translatefile(values[0], values[2], int(values[1]))
else:
translatefile(values[0], sys.stdout, int(values[1]))
class GenerateAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
lang = Language.getormake(int(values.pop(0)))
sen = make(lang)
writelines([sen.writecompile()], values)
class ParseAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values[0].isnumeric():
lines = [sys.stdin.readline()]
else:
f = open(values.pop(0))
lines = f.readlines()
f.close()
lang = int(values.pop(0))
if values:
out = open(values[0], 'w')
else:
out = sys.stdout
for i, t in enumerate(lines):
l = t.strip()
if l:
out.write(parse(lang, i+1, l).toparseline().tofilestr(0))
if values:
out.close()
class DisplayAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values[0].isnumeric() or (len(values) > 1 and values[1].isnumeric()):
if values[0].isnumeric():
line = sys.stdin.readline()
global STDINLINE
where = 'standard input line %s' % STDINLINE
STDINLINE += 1
else:
where = values.pop(0)
f = open(where)
where += ' line 1'
line = f.readline()
f.close()
lang = int(values.pop(0))
if values:
f = open(values[0], 'w')
else:
f = sys.stdout
txt = Language.getormake(lang).totext(toobj(line, lang, where))
f.write(txt + '\n')
if values:
f.close()
else:
lines = readfile(values.pop(0))[1]
writelines((l.totext() for l in lines), values)
class BlankAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values:
filltrans(int(values[0]), int(values[1]))
else:
addmissing()
class SetGlobal(argparse.Action):
def __init__(self, *args, **kwargs):
self.todo = kwargs['todo']
del kwargs['todo']
kwargs['nargs'] = 0
argparse.Action.__init__(self, *args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
global Globals
Globals.__dict__[self.todo[0]] = self.todo[1]
parser.add_argument('-t', '--translate', type=str, nargs='*', action=TranslateAction, metavar='ARG', help="Translate trees (run 'doodle.py -t' for detailed help)")
parser.add_argument('-g', '--generate', type=str, nargs='+', action=GenerateAction, metavar=('LANG', 'DEST'), help='Randomly generate a tree in LANG and output to DEST or stdout')
parser.add_argument('-p', '--parse', type=str, nargs='+', action=ParseAction, metavar=('[SRC] LANG', 'DEST'), help='Attempt to parse SRC or next line of std into trees in LANG, output to DEST or stdout')
parser.add_argument('-d', '--display', type=str, nargs='+', action=DisplayAction, metavar=('SRC [LANG]', 'DEST'), help='Get trees from SRC or stdin, convert to text and output to DEST or stdout')
parser.add_argument('-F', '--flatten', action=SetGlobal, todo=('flat', True), help='Start flattening phrases into single nodes')
parser.add_argument('-DF', '--dont-flatten', action=SetGlobal, todo=('flat', False), help='Stop flattening phrases')
parser.add_argument('-U', '--use-unknown', action=SetGlobal, todo=('unknown_error', False), help='Begin logging unknown morphemes to missing_morphemes.txt, don\'t error')
parser.add_argument('-am', '--add-missing', nargs=0, action=BlankAction, help='Append everything in missing_morphemes.txt to the relevant lexicon files')
parser.add_argument('-ft', '--fill-trans', nargs=2, action=BlankAction, metavar=('LANG1', 'LANG2'), help='Add blank entries in translation file from LANG1 to LANG2 for any morpheme not already listed')
args = parser.parse_args()
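# Editor's note: an illustrative command-line sketch for the parser configured
# above (not part of the original script). The file names and language ids are
# made-up placeholders; the argument order follows the actions defined above.
#
#   python doodle.py -g 1 out.txt            # generate a random tree in language 1
#   python doodle.py -p input.txt 1          # parse input.txt into language-1 trees
#   python doodle.py -t trees.txt 2 out.txt  # translate the trees into language 2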
|
mit
| 781,176,970,670,820,000
| 39.28469
| 207
| 0.488751
| false
| 3.893117
| false
| false
| false
|
dmerejkowsky/qibuild
|
python/qitoolchain/feed.py
|
1
|
3967
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Toolchain feeds
"""
import os
import sys
import hashlib
import urlparse
from xml.etree import ElementTree
from qisys import ui
import qisys
import qisys.archive
import qisys.remote
import qisys.version
import qibuild.config
import qitoolchain
def is_url(location):
""" Check that a given location is an URL """
return "://" in location
def raise_parse_error(package_tree, feed, message):
""" Raise a nice pasing error about the given
package_tree element.
"""
as_str = ElementTree.tostring(package_tree)
mess = "Error when parsing feed: '%s'\n" % feed
mess += "Could not parse:\t%s\n" % as_str
mess += message
raise Exception(mess)
def tree_from_feed(feed_location):
""" Returns an ElementTree object from an
feed location
"""
fp = None
tree = None
try:
if os.path.exists(feed_location):
fp = open(feed_location, "r")
else:
if is_url(feed_location):
fp = qisys.remote.open_remote_location(feed_location)
else:
raise Exception("Feed location is not an existing path nor an url")
tree = ElementTree.ElementTree()
tree.parse(fp)
except Exception:
ui.error("Could not parse", feed_location)
raise
finally:
if fp:
fp.close()
return tree
class ToolchainFeedParser:
""" A class to handle feed parsing
"""
def __init__(self):
self.packages = list()
# A dict name -> version used to only keep the latest
# version
self.blacklist = list()
self._versions = dict()
def get_packages(self):
""" Get the parsed packages """
res = [x for x in self.packages if not x.name in self.blacklist]
return res
def append_package(self, package_tree):
""" Add a package to self.packages.
If an older version of the package exists,
replace by the new version
"""
version = package_tree.get("version")
name = package_tree.get("name")
names = self._versions.keys()
if name not in names:
self._versions[name] = version
self.packages.append(qitoolchain.qipackage.from_xml(package_tree))
else:
if version is None:
# if version not defined, don't keep it
return
prev_version = self._versions[name]
if prev_version and qisys.version.compare(prev_version, version) > 0:
return
else:
self.packages = [x for x in self.packages if x.name != name]
self.packages.append(qitoolchain.qipackage.from_xml(package_tree))
self._versions[name] = version
def parse(self, feed):
""" Recursively parse the feed, filling the self.packages
"""
tree = tree_from_feed(feed)
package_trees = tree.findall("package")
package_trees.extend(tree.findall("svn_package"))
for package_tree in package_trees:
package_tree.set("feed", feed)
self.append_package(package_tree)
feeds = tree.findall("feed")
for feed_tree in feeds:
feed_url = feed_tree.get("url")
if feed_url:
# feed_url can be relative to feed:
if not "://" in feed_url:
feed_url = urlparse.urljoin(feed, feed_url)
self.parse(feed_url)
select_tree = tree.find("select")
if select_tree is not None:
blacklist_trees = select_tree.findall("blacklist")
for blacklist_tree in blacklist_trees:
name = blacklist_tree.get("name")
if name:
self.blacklist.append(name)
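# Editor's note: an illustrative usage sketch, not part of the original module.
# The feed path below is a made-up placeholder; parsing requires a readable
# feed file or URL.
if __name__ == "__main__":
    feed_parser = ToolchainFeedParser()
    feed_parser.parse("/path/to/feed.xml")  # also follows nested <feed url="..."> entries
    for package in feed_parser.get_packages():
        print package.name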
|
bsd-3-clause
| -1,921,144,499,196,732,200
| 29.05303
| 83
| 0.586589
| false
| 4.115145
| false
| false
| false
|
onyedikilo/tacotron
|
prepro.py
|
1
|
1722
|
# -*- coding: utf-8 -*-
#/usr/bin/python2
'''
By kyubyong park. kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/tacotron
'''
import numpy as np
import librosa
from hyperparams import Hyperparams as hp
import glob
import re
import os
import csv
import codecs
def load_vocab():
vocab = "E abcdefghijklmnopqrstuvwxyz'" # E: Empty
char2idx = {char:idx for idx, char in enumerate(vocab)}
idx2char = {idx:char for idx, char in enumerate(vocab)}
return char2idx, idx2char
def create_train_data():
# Load vocabulary
char2idx, idx2char = load_vocab()
texts, sound_files = [], []
reader = csv.reader(codecs.open(hp.text_file, 'rb', 'utf-8'))
for row in reader:
sound_fname, text, duration = row
sound_file = hp.sound_fpath + "/" + sound_fname + ".wav"
text = re.sub(r"[^ a-z']", "", text.strip().lower())
if (len(text) <= hp.max_len) and (1. < float(duration) <= hp.max_duration):
texts.append(np.array([char2idx[char] for char in text], np.int32).tostring())
sound_files.append(sound_file)
return texts, sound_files
def load_train_data():
"""We train on the whole data but the last mini-batch."""
texts, sound_files = create_train_data()
return texts[:-hp.batch_size], sound_files[:-hp.batch_size]
def load_eval_data():
"""We evaluate on the last mini-batch."""
texts, _ = create_train_data()
texts = texts[-hp.batch_size:]
X = np.zeros(shape=[hp.batch_size, hp.max_len], dtype=np.int32)
for i, text in enumerate(texts):
_text = np.fromstring(text, np.int32) # byte to int
X[i, :len(_text)] = _text
return X
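# Editor's note: an illustrative usage sketch, not part of the original module.
# Running it requires hp.text_file and hp.sound_fpath to point at a real
# transcript csv and wav directory.
if __name__ == "__main__":
    char2idx, idx2char = load_vocab()
    texts, sound_files = load_train_data()  # everything except the last mini-batch
    X = load_eval_data()                    # int32 array of shape [batch_size, max_len]
    print(X.shape)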
|
apache-2.0
| -1,971,481,082,590,631,700
| 28.689655
| 90
| 0.613821
| false
| 3.230769
| false
| false
| false
|
hroyrh/svt
|
applications_scalability/websockets_perf/test_scripts/v_user.py
|
1
|
2067
|
from websocket import create_connection
from ConfigParser import SafeConfigParser
import ssl
import gevent
import time
import json
class Transaction(object):
def __init__(self, varfile='ose_vars.cfg'):
"""
Gets instantiated once only
"""
parser = SafeConfigParser()
parser.read(varfile)
self.ose_server = parser.get('wss', 'ose_server')
self.ose_project = parser.get('wss', 'ose_project')
self.ose_resver = parser.get('wss', 'ose_resver')
self.ose_token = parser.get('wss', 'ose_token')
self.custom_timers = {}
def run(self):
"""
Each thread runs this method independently
"""
url = 'wss://{}/api/v1/namespaces/{}/events?watch={}&resourceVersion={}&access_token={}'.format(self.ose_server,
self.ose_project,
'true',
self.ose_resver,
self.ose_token)
start = time.time()
# Ignore self signed certificates
ws = create_connection(url, sslopt={"cert_reqs": ssl.CERT_NONE})
self.ws = ws
def _receive():
while True:
res = ws.recv()
start_at = time.time()
data = json.loads(res)
print(res, data)
end_at = time.time()
response_time = int((end_at - start_at))
gevent.spawn(_receive)
def on_quit(self):
self.ws.close()
if __name__ == '__main__':
trans = Transaction()
trans.run()
|
apache-2.0
| -8,251,251,100,229,687,000
| 34.637931
| 159
| 0.400581
| false
| 5.259542
| false
| false
| false
|
ztane/jaspyx
|
jaspyx/visitor/function.py
|
1
|
2365
|
from __future__ import absolute_import, division, print_function
import ast
from jaspyx.ast_util import ast_call, ast_load
from jaspyx.context.function import FunctionContext
from jaspyx.visitor import BaseVisitor
from jaspyx.compat import get_arg_id
class Function(BaseVisitor):
def visit_FunctionDef(self, node):
if node.name:
self.stack[-1].scope.declare(node.name)
args = [get_arg_id(arg) for arg in node.args.args]
if node.args.kwarg is not None:
raise Exception('**kwargs not supported')
func = FunctionContext(self.stack[-1], args)
self.push(func)
# Emit vararg
if node.args.vararg is not None:
self.visit(
ast.Assign(
[ast.Name(node.args.vararg, ast.Store())],
ast_call(
ast_load('Array.prototype.slice.call'),
ast.Name('arguments', ast.Load()),
ast.Num(len(args)),
)
)
)
# Emit default arguments
def_args = node.args.defaults
for arg_name, arg_val in zip(args[-len(def_args):], def_args):
self.block([
ast.If(
ast.Compare(
ast_call(
ast.Name('type', ast.Load()),
ast.Name(arg_name, ast.Load()),
),
[ast.Eq(), ],
[ast.Str('undefined'), ],
),
[
ast.Assign(
[ast.Name(arg_name, ast.Store())],
arg_val
),
],
[],
)
])
# Emit function body
self.block(node.body)
body = ast_call(
ast_load('JS'),
ast.Str(str(self.stack.pop())),
)
for decorator in node.decorator_list:
body = ast_call(
decorator,
body
)
if not node.name:
self.visit(body)
else:
self.visit(
ast.Assign(
[ast_load(node.name)],
body,
)
)
|
mit
| -2,808,383,720,302,337,500
| 28.936709
| 70
| 0.421564
| false
| 4.601167
| false
| false
| false
|
jacobian/valor
|
valor/link.py
|
1
|
6044
|
import re
import six
import json
import requests
from .model import model_factory
from .utils import is_ref, python_attr
PARAMETER_REGEX = re.compile(r'\{\([%\/a-zA-Z0-9_-]*\)\}')
class Link(object):
def __init__(self, schema, session, url, link_schema):
self._schema = schema
self._session = session
self._url = url
self._link = link_schema
self._name = python_attr(link_schema['title'])
def __call__(self, *args, **kwargs):
# Prepare a request object. We do this instead of using
# session.request() so that we can re-use the prepared request further
# down if the response is paginated.
request = requests.Request(
method = self._link['method'],
url = self.interpolate_args(args),
data = self.construct_body(kwargs)
)
request = self._session.prepare_request(request)
        # FIXME: verify SSL - don't want to just pass verify=True because that
# makes testing hard, but it should be true by default and overridable
# by passing in a different session. Not sure how to make that work
# though.
response = self._session.send(request)
# FIXME: are we 100% sure the response is always JSON?
response_body = response.json()
        # Handle 206 (partial content) by paginating.
# See https://devcenter.heroku.com/articles/platform-api-reference#ranges
if response.status_code == 206:
next_range = response.headers['Next-Range']
while next_range:
request.headers['range'] = next_range
response = self._session.send(request)
response_body.extend(response.json())
next_range = response.headers.get('Next-Range', None)
# FIXME: if-none-match???
elif response.status_code not in (200, 201, 202):
response.raise_for_status()
# targetSchema is the schema for the object(s) returned by the API call.
# It can either be an array, in which case the schema is actually
# link.targetSchema.items, or it can be a dict in which case the
# targetSchema itself is the schema.
model_schema = self._link['targetSchema']
if model_schema.get('type') == ['array']:
target_type = 'multi'
model_schema = model_schema['items']
else:
target_type = 'single'
# If the target schema was a ref, resolve it.
if is_ref(model_schema):
model_schema = self._schema.resolve_ref(model_schema['$ref'])
# If the target schema has patternProperties, the response is a plain
# old dict, so just return that. I'm not sure if this is the right way
# of handling this; we may want Model to understand patternProperties
# instead.
if 'patternProperties' in model_schema:
return response_body
# Create a Model subclass representing the expected return object.
# FIXME: this feels super jank for a name, but is there a better way?
name = model_schema['title'].split('-', 1)[-1]
name = re.sub(r'[^\w]', '', name)
        # Python 3 expects text class names; Python 2 expects bytes. No way
        # to work around it without version checking.
if six.PY2:
name = name.encode('ascii', 'ignore')
cls = model_factory(name, self._schema, model_schema)
if target_type == 'multi':
return [cls(**i) for i in response_body]
else:
return cls(**response_body)
def interpolate_args(self, args):
"""
Interpolate arguments into the link's URL.
"""
# This doesn't really validate the definition refs embedded in the URL
# patterns, but in practice that doesn't seem to matter much.
num_expected_args = len(PARAMETER_REGEX.findall(self._url))
if num_expected_args != len(args):
raise TypeError("%s() takes exactly %s arguments (%s given)" % (self._name, num_expected_args, len(args)))
# I can't figure out how to get the match number in a re.sub() callback,
# so sub one at a time. This feels inelegant, but I can't find a better
# option, so (shrug).
url = self._url
for i, arg in enumerate(args):
url = PARAMETER_REGEX.sub(format_path_parameter(arg), url, count=1)
return url
def construct_body(self, kwargs):
"""
Construct a request body based on given arguments.
"""
# This does do some light validation on the *keys* of the body params,
# but doesn't validate the contents of the body. I'm not sure if this
# will prove to matter in practice or not.
if 'schema' not in self._link:
if kwargs:
raise TypeError("%s() got unexpected keyword arguments: %s" % (self._name, kwargs.keys()))
return None
# If we've got patternProperties, then this API takes arbitrary params,
# so just punt on any sort of validation.
if 'patternProperties' in self._link['schema']:
return json.dumps(kwargs)
given_keys = set(kwargs.keys())
possible_keys = set(self._link['schema']['properties'].keys())
required_keys = set(self._link['schema'].get('required', []))
if required_keys - given_keys:
raise TypeError("%s() missing required arguments: %s")
if given_keys - possible_keys:
raise TypeError("%s() got unepected keyword arguments: %s" % (self._name, list(given_keys - possible_keys)))
# Is that really all?
return json.dumps(kwargs)
def format_path_parameter(val):
"""
    Format a path parameter.
Basically: convert to string, with a special rule for datetime objects.
"""
if hasattr(val, 'identity'):
val = val.identity()
if hasattr(val, 'strftime'):
val = val.strftime('%Y-%m-%dT%H:%M:%SZ')
return six.text_type(val)
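# Editor's note: an illustrative sketch of the helper above, not part of the
# original module; the values are arbitrary examples.
if __name__ == "__main__":
    import datetime
    print(format_path_parameter("my-app"))                       # u'my-app'
    print(format_path_parameter(datetime.datetime(2020, 1, 2)))  # u'2020-01-02T00:00:00Z'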
|
bsd-3-clause
| 8,497,193,895,521,270,000
| 39.02649
| 120
| 0.604732
| false
| 4.139726
| false
| false
| false
|
kevinpetersavage/BOUT-dev
|
examples/MMS/GBS/circle.py
|
3
|
3721
|
# Generates an input mesh for circular, large aspect-ratio
# simulations:
#
# o Constant magnetic field
# o Curvature output as a 3D logB variable
# o Z is poloidal direction
# o Y is parallel (toroidal)
#
# NOTE: This reverses the standard BOUT/BOUT++ convention
# so here Bt and Bp are reversed
#
from __future__ import division
from __future__ import print_function
from builtins import range
from numpy import zeros, ndarray, pi, cos, sin, outer, linspace,sqrt
from boututils import DataFile # Wrapper around NetCDF4 libraries
def generate(nx, ny,
R = 2.0, r=0.2, # Major & minor radius
dr=0.05, # Radial width of domain
Bt=1.0, # Toroidal magnetic field
q=5.0, # Safety factor
mxg=2,
file="circle.nc"
):
# q = rBt / RBp
Bp = r*Bt / (R*q)
# Minor radius as function of x. Choose so boundary
# is half-way between grid points
h = dr / (nx - 2.*mxg) # Grid spacing in r
rminor = linspace(r - 0.5*dr - (mxg-0.5)*h,
r + 0.5*dr + (mxg-0.5)*h,
nx)
# mesh spacing in x and y
dx = ndarray([nx,ny])
dx[:,:] = r*Bt*h # NOTE: dx is toroidal flux
dy = ndarray([nx,ny])
dy[:,:] = 2.*pi / ny
# LogB = log(1/(1+r/R cos(theta))) =(approx) -(r/R)*cos(theta)
logB = zeros([nx, ny, 3]) # (constant, n=1 real, n=1 imag)
# At y = 0, Rmaj = R + r*cos(theta)
logB[:,0,1] = -(rminor/R)
# Moving in y, phase shift by (toroidal angle) / q
for y in range(1,ny):
dtheta = y * 2.*pi / ny / q # Change in poloidal angle
logB[:,y,1] = -(rminor/R)*cos(dtheta)
logB[:,y,2] = -(rminor/R)*sin(dtheta)
# Shift angle from one end of y to the other
ShiftAngle = ndarray([nx])
ShiftAngle[:] = 2.*pi / q
Rxy = ndarray([nx,ny])
Rxy[:,:] = r # NOTE : opposite to standard BOUT convention
Btxy = ndarray([nx,ny])
Btxy[:,:] = Bp
Bpxy = ndarray([nx,ny])
Bpxy[:,:] = Bt
Bxy = ndarray([nx,ny])
Bxy[:,:] = sqrt(Bt**2 + Bp**2)
hthe = ndarray([nx,ny])
hthe[:,:] = R
print("Writing to file '"+file+"'")
f = DataFile()
f.open(file, create=True)
# Mesh size
f.write("nx", nx)
f.write("ny", ny)
# Mesh spacing
f.write("dx", dx)
f.write("dy", dy)
# Metric components
f.write("Rxy", Rxy)
f.write("Btxy", Btxy)
f.write("Bpxy", Bpxy)
f.write("Bxy", Bxy)
f.write("hthe", hthe)
# Shift
f.write("ShiftAngle", ShiftAngle);
# Curvature
f.write("logB", logB)
# Input parameters
f.write("R", R)
f.write("r", r)
f.write("dr", dr)
f.write("Bt", Bt)
f.write("q", q)
f.write("mxg", mxg)
f.close()
def coordinates(nx, ny, nz,
R = 2.0, r=0.2, # Major & minor radius
dr=0.05, # Radial width of domain
Bt=1.0, # Toroidal magnetic field
q=5.0, # Safety factor
mxg=2
):
"""
Returns coordinates (R,Z) as a pair of arrays
"""
h = dr / (nx - 2.*mxg) # Grid spacing in r
rminor = linspace(r - 0.5*dr - (mxg-0.5)*h,
r + 0.5*dr + (mxg-0.5)*h,
nx)
print("Grid spacing: Lx = %e, Lz = %e" % (h, 2.*pi*r/nz))
Rxyz = ndarray([nx, ny, nz])
Zxyz = ndarray([nx, ny, nz])
for y in range(0,ny):
dtheta = y * 2.*pi / ny / q # Change in poloidal angle
theta = linspace(0,2.*pi, nz, endpoint=False) + dtheta
Rxyz[:,y,:] = R + outer(rminor, cos(theta))
Zxyz[:,y,:] = outer(rminor, sin(theta))
return Rxyz, Zxyz
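# Editor's note: an illustrative usage sketch, not part of the original script.
# The grid sizes are arbitrary; writing the mesh requires the boututils
# DataFile NetCDF wrapper to be importable.
if __name__ == "__main__":
    generate(68, 32, file="circle.nc")  # write a BOUT++ input mesh
    R, Z = coordinates(68, 32, 16)      # matching (R, Z) coordinate arrays
    print(R.shape, Z.shape)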
|
gpl-3.0
| -8,111,180,562,648,416,000
| 24.312925
| 68
| 0.515453
| false
| 2.916144
| false
| false
| false
|
gurneyalex/odoo
|
addons/l10n_it_edi/models/ir_mail_server.py
|
4
|
18207
|
# -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import zipfile
import io
import re
import logging
import email
import dateutil
import pytz
import base64
try:
from xmlrpc import client as xmlrpclib
except ImportError:
import xmlrpclib
from lxml import etree
from datetime import datetime
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError, UserError
_logger = logging.getLogger(__name__)
class FetchmailServer(models.Model):
_name = 'fetchmail.server'
_inherit = 'fetchmail.server'
l10n_it_is_pec = fields.Boolean('PEC server', help="If PEC Server, only mail from '...@pec.fatturapa.it' will be processed.")
l10n_it_last_uid = fields.Integer(string='Last message UID', default=1)
@api.constrains('l10n_it_is_pec', 'server_type')
def _check_pec(self):
for record in self:
if record.l10n_it_is_pec and record.server_type != 'imap':
raise ValidationError(_("PEC mail server must be of type IMAP."))
def fetch_mail(self):
""" WARNING: meant for cron usage only - will commit() after each email! """
MailThread = self.env['mail.thread']
for server in self.filtered(lambda s: s.l10n_it_is_pec):
_logger.info('start checking for new emails on %s PEC server %s', server.server_type, server.name)
count, failed = 0, 0
imap_server = None
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.uid('search', None, '(FROM "@pec.fatturapa.it")', '(UID %s:*)' % (server.l10n_it_last_uid))
new_max_uid = server.l10n_it_last_uid
for uid in data[0].split():
if int(uid) <= server.l10n_it_last_uid:
                        # We always get at least 1 message. If there is no new message, we receive the newest one already managed.
continue
result, data = imap_server.uid('fetch', uid, '(RFC822)')
if not data[0]:
continue
message = data[0][1]
                    # Leave the mails in the state in which they were.
if "Seen" not in data[1].decode("utf-8"):
imap_server.uid('STORE', uid, '+FLAGS', '\\Seen')
else:
imap_server.uid('STORE', uid, '-FLAGS', '\\Seen')
# See details in message_process() in mail_thread.py
if isinstance(message, xmlrpclib.Binary):
message = bytes(message.data)
if isinstance(message, str):
message = message.encode('utf-8')
msg_txt = email.message_from_bytes(message)
try:
self._attachment_invoice(msg_txt)
new_max_uid = max(new_max_uid, int(uid))
except Exception:
_logger.info('Failed to process mail from %s server %s.', server.server_type, server.name, exc_info=True)
failed += 1
self._cr.commit()
count += 1
server.write({'l10n_it_last_uid': new_max_uid})
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.server_type, server.name, (count - failed), failed)
except Exception:
_logger.info("General failure when trying to fetch mail from %s server %s.", server.server_type, server.name, exc_info=True)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
server.write({'date': fields.Datetime.now()})
return super(FetchmailServer, self.filtered(lambda s: not s.l10n_it_is_pec)).fetch_mail()
def _attachment_invoice(self, msg_txt):
parsed_values = self.env['mail.thread']._message_parse_extract_payload(msg_txt)
body, attachments = parsed_values['body'], parsed_values['attachments']
from_address = tools.decode_smtp_header(msg_txt.get('from'))
for attachment in attachments:
split_attachment = attachment.fname.rpartition('.')
if len(split_attachment) < 3:
_logger.info('E-invoice filename not compliant: %s', attachment.fname)
continue
attachment_name = split_attachment[0]
attachment_ext = split_attachment[2]
split_underscore = attachment_name.rsplit('_', 2)
if len(split_underscore) < 2:
_logger.info('E-invoice filename not compliant: %s', attachment.fname)
continue
if attachment_ext != 'zip':
if split_underscore[1] in ['RC', 'NS', 'MC', 'MT', 'EC', 'SE', 'NE', 'DT']:
# we have a receipt
self._message_receipt_invoice(split_underscore[1], attachment)
elif re.search("([A-Z]{2}[A-Za-z0-9]{2,28}_[A-Za-z0-9]{0,5}.(xml.p7m|xml))", attachment.fname):
# we have a new E-invoice
self._create_invoice_from_mail(attachment.content, attachment.fname, from_address)
else:
if split_underscore[1] == 'AT':
                    # Attestazione di avvenuta trasmissione della fattura con impossibilità di recapito
                    # (attestation that the invoice was transmitted but could not be delivered)
self._message_AT_invoice(attachment)
else:
_logger.info('New E-invoice in zip file: %s', attachment.fname)
self._create_invoice_from_mail_with_zip(attachment, from_address)
def _create_invoice_from_mail(self, att_content, att_name, from_address):
if self.env['account.move'].search([('l10n_it_einvoice_name', '=', att_name)], limit=1):
            # invoice already exists
            _logger.info('E-invoice already exists: %s', att_name)
return
invoice_attachment = self.env['ir.attachment'].create({
'name': att_name,
'datas': base64.encodestring(att_content),
'type': 'binary',
})
try:
tree = etree.fromstring(att_content)
except Exception:
raise UserError(_('The xml file is badly formatted : {}').format(att_name))
invoice = self.env['account.move']._import_xml_invoice(tree)
invoice.l10n_it_send_state = "new"
invoice.source_email = from_address
self._cr.commit()
_logger.info('New E-invoice: %s', att_name)
def _create_invoice_from_mail_with_zip(self, attachment_zip, from_address):
with zipfile.ZipFile(io.BytesIO(attachment_zip.content)) as z:
for att_name in z.namelist():
if self.env['account.move'].search([('l10n_it_einvoice_name', '=', att_name)], limit=1):
                    # invoice already exists
                    _logger.info('E-invoice in zip file (%s) already exists: %s', attachment_zip.fname, att_name)
continue
att_content = z.open(att_name).read()
self._create_invoice_from_mail(att_content, att_name, from_address)
def _message_AT_invoice(self, attachment_zip):
with zipfile.ZipFile(io.BytesIO(attachment_zip.content)) as z:
for attachment_name in z.namelist():
split_name_attachment = attachment_name.rpartition('.')
if len(split_name_attachment) < 3:
continue
split_underscore = split_name_attachment[0].rsplit('_', 2)
if len(split_underscore) < 2:
continue
if split_underscore[1] == 'AT':
attachment = z.open(attachment_name).read()
_logger.info('New AT receipt for: %s', split_underscore[0])
try:
tree = etree.fromstring(attachment)
except:
_logger.info('Error in decoding new receipt file: %s', attachment_name)
return
elements = tree.xpath('//NomeFile')
if elements and elements[0].text:
filename = elements[0].text
else:
return
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename)])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', filename)
return
related_invoice.l10n_it_send_state = 'failed_delivery'
info = self._return_multi_line_xml(tree, ['//IdentificativoSdI', '//DataOraRicezione', '//MessageId', '//PecMessageId', '//Note'])
related_invoice.message_post(
body=(_("ES certify that it has received the invoice and that the file \
could not be delivered to the addressee. <br/>%s") % (info))
)
def _message_receipt_invoice(self, receipt_type, attachment):
try:
tree = etree.fromstring(attachment.content)
except:
_logger.info('Error in decoding new receipt file: %s', attachment.fname)
return {}
elements = tree.xpath('//NomeFile')
if elements and elements[0].text:
filename = elements[0].text
else:
return {}
if receipt_type == 'RC':
# Delivery receipt
# This is the receipt sent by the ES to the transmitting subject to communicate
# delivery of the file to the addressee
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'sent')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
related_invoice.l10n_it_send_state = 'delivered'
info = self._return_multi_line_xml(tree, ['//IdentificativoSdI', '//DataOraRicezione', '//DataOraConsegna', '//Note'])
related_invoice.message_post(
body=(_("E-Invoice is delivery to the destinatory:<br/>%s") % (info))
)
elif receipt_type == 'NS':
# Rejection notice
# This is the receipt sent by the ES to the transmitting subject if one or more of
# the checks carried out by the ES on the file received do not have a successful result.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'sent')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
related_invoice.l10n_it_send_state = 'invalid'
error = self._return_error_xml(tree)
related_invoice.message_post(
body=(_("Errors in the E-Invoice :<br/>%s") % (error))
)
activity_vals = {
'activity_type_id': self.env.ref('mail.mail_activity_data_todo').id,
'invoice_user_id': related_invoice.invoice_user_id.id if related_invoice.invoice_user_id else self.env.user.id
}
related_invoice.activity_schedule(summary='Rejection notice', **activity_vals)
elif receipt_type == 'MC':
# Failed delivery notice
# This is the receipt sent by the ES to the transmitting subject if the file is not
# delivered to the addressee.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'sent')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
info = self._return_multi_line_xml(tree, [
'//IdentificativoSdI',
'//DataOraRicezione',
'//Descrizione',
'//MessageId',
'//Note'])
related_invoice.message_post(
body=(_("The E-invoice is not delivered to the addressee. The Exchange System is\
unable to deliver the file to the Public Administration. The Exchange System will\
contact the PA to report the problem and request that they provide a solution. \
During the following 15 days, the Exchange System will try to forward the FatturaPA\
file to the Administration in question again. More informations:<br/>%s") % (info))
)
elif receipt_type == 'NE':
# Outcome notice
# This is the receipt sent by the ES to the invoice sender to communicate the result
# (acceptance or refusal of the invoice) of the checks carried out on the document by
# the addressee.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'delivered')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
elements = tree.xpath('//Esito')
if elements and elements[0].text:
if elements[0].text == 'EC01':
related_invoice.l10n_it_send_state = 'delivered_accepted'
elif elements[0].text == 'EC02':
related_invoice.l10n_it_send_state = 'delivered_refused'
info = self._return_multi_line_xml(tree,
['//Esito',
'//Descrizione',
'//IdentificativoSdI',
'//DataOraRicezione',
'//DataOraConsegna',
'//Note'
])
related_invoice.message_post(
body=(_("Outcome notice: %s<br/>%s") % (related_invoice.l10n_it_send_state, info))
)
if related_invoice.l10n_it_send_state == 'delivered_refused':
activity_vals = {
'activity_type_id': self.env.ref('mail.mail_activity_data_todo').id,
'invoice_user_id': related_invoice.invoice_user_id.id if related_invoice.invoice_user_id else self.env.user.id
}
related_invoice.activity_schedule(summary='Outcome notice: Refused', **activity_vals)
# elif receipt_type == 'MT':
# Metadata file
# This is the file sent by the ES to the addressee together with the invoice file,
# containing the main reference data of the file useful for processing, including
# the IdentificativoSDI.
# Useless for Odoo
elif receipt_type == 'DT':
# Deadline passed notice
# This is the receipt sent by the ES to both the invoice sender and the invoice
# addressee to communicate the expiry of the maximum term for communication of
# acceptance/refusal.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename), ('l10n_it_send_state', '=', 'delivered')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
related_invoice.l10n_it_send_state = 'delivered_expired'
info = self._return_multi_line_xml(tree, [
'//Descrizione',
'//IdentificativoSdI',
'//Note'])
related_invoice.message_post(
body=(_("Expiration of the maximum term for communication of acceptance/refusal:\
%s<br/>%s") % (filename, info))
)
def _return_multi_line_xml(self, tree, element_tags):
output_str = "<ul>"
for element_tag in element_tags:
elements = tree.xpath(element_tag)
if not elements:
continue
for element in elements:
if element.text:
text = " ".join(element.text.split())
output_str += "<li>%s: %s</li>" % (element.tag, text)
return output_str + "</ul>"
def _return_error_xml(self, tree):
output_str = "<ul>"
elements = tree.xpath('//Errore')
if not elements:
return
for element in elements:
descrizione = " ".join(element[1].text.split())
if descrizione:
output_str += "<li>Errore %s: %s</li>" % (element[0].text, descrizione)
return output_str + "</ul>"
class IrMailServer(models.Model):
_name = "ir.mail_server"
_inherit = "ir.mail_server"
def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
body_alternative=None, subtype_alternative='plain'):
if self.env.context.get('wo_bounce_return_path') and headers:
headers['Return-Path'] = email_from
return super(IrMailServer, self).build_email(email_from, email_to, subject, body, email_cc=email_cc, email_bcc=email_bcc, reply_to=reply_to,
attachments=attachments, message_id=message_id, references=references, object_id=object_id, subtype=subtype, headers=headers,
body_alternative=body_alternative, subtype_alternative=subtype_alternative)
|
agpl-3.0
| -5,329,390,525,778,321,000
| 47.420213
| 159
| 0.543502
| false
| 4.215328
| false
| false
| false
|
genome/flow-workflow
|
flow_workflow/parallel_id.py
|
1
|
1987
|
from collections import OrderedDict
import json
import logging
LOG = logging.getLogger(__name__)
class ParallelIdentifier(object):
def __init__(self, parallel_id=[]):
self._entries = OrderedDict([(int(op_id), int(par_idx))
for op_id, par_idx in parallel_id])
@property
def index(self):
if self._entries:
return self._entries.values()[-1]
def refers_to(self, operation):
return int(operation.operation_id) in self._entries
@property
def _parent_entries(self):
parent_entries = OrderedDict(self._entries)
parent_entries.popitem()
return parent_entries
@property
def parent_identifier(self):
return ParallelIdentifier(self._parent_entries.iteritems())
def _child_entries(self, operation_id, parallel_idx):
if int(operation_id) in self._entries:
raise ValueError('operation_id already in ParallelIdentifier '
'op_id (%r) in %r' % (operation_id, self._entries))
child_entries = OrderedDict(self._entries)
child_entries[int(operation_id)] = int(parallel_idx)
return child_entries
def child_identifier(self, operation_id, parallel_idx):
return ParallelIdentifier(self._child_entries(
operation_id, parallel_idx).iteritems())
@property
def stack_iterator(self):
current_id = self
while len(current_id):
yield current_id
current_id = current_id.parent_identifier
yield current_id
def __iter__(self):
return self._entries.iteritems()
def __len__(self):
return len(self._entries)
def __repr__(self):
return 'ParallelIdentifier(%r)' % list(self)
def __cmp__(self, other):
return cmp(self._entries, other._entries)
def serialize(self):
return json.dumps(list(self))
@classmethod
def deserialize(cls, data='[]'):
return cls(json.loads(data))
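# Editor's note: an illustrative usage sketch, not part of the original module;
# the operation id and parallel index below are made up.
if __name__ == "__main__":
    root = ParallelIdentifier()
    child = root.child_identifier(operation_id=7, parallel_idx=2)
    assert child.index == 2
    assert child.parent_identifier == root
    data = child.serialize()                        # '[[7, 2]]'
    assert ParallelIdentifier.deserialize(data) == child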
|
agpl-3.0
| -4,437,954,520,125,725,700
| 26.985915
| 74
| 0.618017
| false
| 4.183158
| false
| false
| false
|
jaredhoney/pyrad
|
pyrad/client.py
|
1
|
6822
|
# client.py
#
# Copyright 2002-2007 Wichert Akkerman <wichert@wiggy.net>
__docformat__ = "epytext en"
import select
import socket
import time
import six
from pyrad import host
from pyrad import packet
class Timeout(Exception):
"""Simple exception class which is raised when a timeout occurs
while waiting for a RADIUS server to respond."""
class Client(host.Host):
"""Basic RADIUS client.
This class implements a basic RADIUS client. It can send requests
to a RADIUS server, taking care of timeouts and retries, and
validate its replies.
:ivar retries: number of times to retry sending a RADIUS request
:type retries: integer
:ivar timeout: number of seconds to wait for an answer
:type timeout: integer
"""
def __init__(self, server, authport=1812, acctport=1813,
coaport=3799, discport=1700, secret=six.b(''), dict=None):
"""Constructor.
:param server: hostname or IP address of RADIUS server
:type server: string
:param authport: port to use for authentication packets
:type authport: integer
:param acctport: port to use for accounting packets
:type acctport: integer
:param coaport: port to use for CoA packets
:type coaport: integer
        :param discport: port to use for disconnect packets
:type discport: integer
:param secret: RADIUS secret
:type secret: string
:param dict: RADIUS dictionary
:type dict: pyrad.dictionary.Dictionary
"""
host.Host.__init__(self, authport, acctport, coaport, discport, dict)
self.server = server
self.secret = secret
self._socket = None
self.retries = 3
self.timeout = 5
def bind(self, addr):
"""Bind socket to an address.
Binding the socket used for communicating to an address can be
        useful when working on a machine with multiple addresses.
:param addr: network address (hostname or IP) and port to bind to
:type addr: host,port tuple
"""
self._CloseSocket()
self._SocketOpen()
self._socket.bind(addr)
def _SocketOpen(self):
if not self._socket:
self._socket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
def _CloseSocket(self):
if self._socket:
self._socket.close()
self._socket = None
def CreateAuthPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateAuthPacket(self, secret=self.secret, **args)
def CreateAcctPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateAcctPacket(self, secret=self.secret, **args)
def CreateCoAPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateCoAPacket(self, secret=self.secret, **args)
def CreateDiscPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateDiscPacket(self, secret=self.secret, **args)
def _SendPacket(self, pkt, port):
"""Send a packet to a RADIUS server.
:param pkt: the packet to send
:type pkt: pyrad.packet.Packet
:param port: UDP port to send packet to
:type port: integer
:return: the reply packet received
:rtype: pyrad.packet.Packet
:raise Timeout: RADIUS server does not reply
"""
self._SocketOpen()
for attempt in range(self.retries):
if attempt and pkt.code == packet.AccountingRequest:
if "Acct-Delay-Time" in pkt:
pkt["Acct-Delay-Time"] = \
pkt["Acct-Delay-Time"][0] + self.timeout
else:
pkt["Acct-Delay-Time"] = self.timeout
self._socket.sendto(pkt.RequestPacket(), (self.server, port))
now = time.time()
waitto = now + self.timeout
while now < waitto:
ready = select.select([self._socket], [], [],
(waitto - now))
if ready[0]:
rawreply = self._socket.recv(4096)
else:
now = time.time()
continue
try:
reply = pkt.CreateReply(packet=rawreply)
if pkt.VerifyReply(reply, rawreply):
return reply
except packet.PacketError:
pass
now = time.time()
raise Timeout
def SendPacket(self, pkt):
"""Send a packet to a RADIUS server.
:param pkt: the packet to send
:type pkt: pyrad.packet.Packet
:return: the reply packet received
:rtype: pyrad.packet.Packet
:raise Timeout: RADIUS server does not reply
"""
if isinstance(pkt, packet.AuthPacket):
return self._SendPacket(pkt, self.authport)
elif isinstance(pkt, packet.CoAPacket):
return self._SendPacket(pkt, self.coaport)
elif isinstance(pkt, packet.DiscPacket):
return self._SendPacket(pkt, self.discport)
else:
return self._SendPacket(pkt, self.acctport)
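# Editor's note: an illustrative usage sketch, not part of the original module.
# The server address, shared secret and dictionary path are placeholders, and a
# reachable RADIUS server is needed for SendPacket() to return a reply.
if __name__ == "__main__":
    from pyrad.dictionary import Dictionary
    client = Client(server="10.0.0.1", secret=six.b("s3cret"),
                    dict=Dictionary("dictionary"))
    request = client.CreateAuthPacket(code=packet.AccessRequest,
                                      User_Name="alice", NAS_Identifier="localhost")
    request["User-Password"] = request.PwCrypt("password")
    try:
        reply = client.SendPacket(request)
        print(reply.code)
    except Timeout:
        print("RADIUS server did not respond")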
|
bsd-3-clause
| 2,550,055,677,371,712,000
| 34.34715
| 77
| 0.595133
| false
| 4.320456
| false
| false
| false
|
Swappsco/koalixerp
|
crm_core/admin.py
|
1
|
6050
|
import reversion
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from crm_core.models import (UserExtension, Customer, Invoice, PurchaseOrder,
Quote, Supplier, HTMLFile, TemplateSet,
CustomerBillingCycle, CustomerGroup, Contract,
Unit, TaxRate, UnitTransform, CompanyContactData)
# Define an inline admin descriptor
# which acts a bit like a singleton
class CRMUserProfileInline(admin.TabularInline):
model = UserExtension
can_delete = False
extra = 1
max_num = 1
verbose_name_plural = _('User Profile Extensions')
# Define a new User admin
class NewUserAdmin(UserAdmin):
inlines = (CRMUserProfileInline,)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
class CustomerBillingCycleAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (u'id', 'name', 'days_to_payment')
search_fields = ('name',)
admin.site.register(CustomerBillingCycle, CustomerBillingCycleAdmin)
class CustomerGroupAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (u'id', 'name')
search_fields = ('name',)
admin.site.register(CustomerGroup, CustomerGroupAdmin)
class CustomerAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'prefix',
'name',
'firstname',
'default_currency',
'billingcycle',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
list_filter = (
'billingcycle',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
raw_id_fields = ('ismemberof',)
search_fields = ('name',)
exclude = ('lastmodifiedby',)
admin.site.register(Customer, CustomerAdmin)
class SupplierAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'prefix',
'name',
'default_currency',
'direct_shipment_to_customers',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
list_filter = (
'direct_shipment_to_customers',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
search_fields = ('name',)
admin.site.register(Supplier, SupplierAdmin)
class ContractAdmin(admin.ModelAdmin):
list_display = (
u'id',
'state',
'default_customer',
'default_supplier',
'description',
'default_currency',
'staff',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
list_filter = (
'default_customer',
'default_supplier',
'staff',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
admin.site.register(Contract, ContractAdmin)
class PurchaseOrderAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'contract',
'customer',
'validuntil',
'discount',
'staff',
'lastmodifiedby',
)
list_filter = (
'validuntil',
'contract',
'customer',
'staff',
'lastmodifiedby',
)
admin.site.register(PurchaseOrder, PurchaseOrderAdmin)
class QuoteAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'contract',
'customer',
'validuntil',
'discount',
'staff',
'lastmodifiedby',
)
list_filter = (
'validuntil',
'contract',
'customer',
'staff',
'lastmodifiedby',
)
admin.site.register(Quote, QuoteAdmin)
class InvoiceAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'contract',
'customer',
'payableuntil',
'discount',
'staff',
'lastmodifiedby',
)
list_filter = (
'payableuntil',
'contract',
'customer',
'staff',
'lastmodifiedby',
)
admin.site.register(Invoice, InvoiceAdmin)
class UnitAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'shortname',
'description',
'fractionof',
'factor',
)
list_filter = ('fractionof',)
admin.site.register(Unit, UnitAdmin)
class TaxRateAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'name',
'taxrate_in_percent',
)
search_fields = ('name',)
admin.site.register(TaxRate, TaxRateAdmin)
class UnitTransformAdmin(admin.ModelAdmin):
list_display = (u'id', 'from_unit', 'to_unit', 'product', 'factor')
list_filter = ('from_unit', 'to_unit', 'product')
admin.site.register(UnitTransform, UnitTransformAdmin)
class HTMLFileAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (u'id', 'title', 'file')
admin.site.register(HTMLFile, HTMLFileAdmin)
class TemplateSetAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'invoice_html_file',
'quote_html_file',
'purchaseorder_html_file',
)
list_filter = (
'invoice_html_file',
'quote_html_file',
'purchaseorder_html_file',
)
admin.site.register(TemplateSet, TemplateSetAdmin)
class CompanyContactDataAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
'name',
)
admin.site.register(CompanyContactData, CompanyContactDataAdmin)
|
bsd-3-clause
| -2,307,273,416,514,383,000
| 22.003802
| 78
| 0.614711
| false
| 3.736875
| false
| false
| false
|
nearlg/greenPi
|
relays/relays/log.py
|
1
|
2044
|
#!/usr/bin/env python
import time
import json
from datetime import datetime
class Log:
fileName = "/home/pi/.greenPi/relays/log.json"
#fileName = os.environ['HOME'] + "/.greenPi/relays/log.json"
@staticmethod
def writeLog(key, cycleName, numRelays, mode, seconds=None):
log = Log.getLog()
strNumRelays = '%s' % ' '.join(map(str, numRelays))
state = "on" if mode else "off"
dicc = {"date": time.strftime('%b %d %Y %H:%M:%S'),
"key": key, "cycleName": cycleName, "numRelays": strNumRelays,
"mode": state}
if seconds is not None and seconds > 0:
dicc["lapsedSeconds"] = seconds
log.append(dicc)
with open(Log.fileName, 'w') as outfile:
json.dump(log, outfile)
@staticmethod
def getLog():
try:
with open(Log.fileName, "r") as data_file:
return json.load(data_file)
except:
Log.resetLog()
return []
@staticmethod
def resetLog():
f = open(Log.fileName, 'w')
f.write("[]")
f.close()
@staticmethod
def getLastLog():
log = Log.getLog()
lenLog = len(log)
if lenLog > 0:
return log[lenLog - 1]
return []
@staticmethod
def readLastLog():
lastLog = Log.getLastLog()
if len(lastLog) > 3:
date = datetime.strptime(lastLog["date"], '%b %d %Y %H:%M:%S')
seconds = (datetime.now() - date).total_seconds()
seconds = int(round(seconds))
finalLog = {"lapsedSeconds": seconds}
for item in lastLog:
if item == "date":
continue
elif item == "lapsedSeconds":
finalLog[item] += lastLog[item]
elif item == "mode":
finalLog[item] = True if lastLog[item] == "on" else False
else:
finalLog[item] = lastLog[item]
return finalLog
return {}
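# Editor's note: an illustrative usage sketch, not part of the original module;
# Log.fileName must point at a writable path for this to run.
if __name__ == "__main__":
    Log.writeLog("cycle-1", "daily watering", [1, 2], True, seconds=30)
    print(Log.getLastLog())   # most recent raw entry
    print(Log.readLastLog())  # same entry with the elapsed seconds recomputed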
|
gpl-3.0
| 5,454,230,350,684,585,000
| 29.507463
| 77
| 0.51272
| false
| 3.849341
| false
| false
| false
|
AntSharesSDK/antshares-python
|
sdk/AntShares/Network/RemoteNode.py
|
1
|
2537
|
# -*- coding:utf-8 -*-
"""
Description:
    Remote Node, used to broadcast tx
Usage:
from AntShares.Network.RemoteNode import RemoteNode
"""
#from AntShares.Network.RPC.RpcClient import RpcClient
from RPC.RpcClient import RpcClient
class RemoteNode(object):
"""docstring for RemoteNode"""
def __init__(self, url="http://localhost:20332/"):
super(RemoteNode, self).__init__()
self.rpc = RpcClient(url)
def sendRawTransaction(self, tx):
"""
Send Transaction
"""
return self.rpc.call(method="sendrawtransaction",
params=[tx])
def getBestBlockhash(self):
"""
Get Best BlockHash from chain
"""
return self.rpc.call(method="getbestblockhash",
params=[]).get("result", "")
def getBlock(self, hint, verbose=1):
"""
Get Block from chain with hash or index
hint : blockhash or index
Verbose: 0-Simple, 1-Verbose
"""
if verbose not in (0, 1):
raise ValueError, 'verbose, should be 0 or 1.'
return self.rpc.call(method="getblock",params=[hint, verbose])
def getBlockCount(self):
"""
Get Block Count from chain
"""
return self.rpc.call(method="getblockcount",
params=[]).get('result', 0)
def getBlockHash(self, index):
"""
Get BlockHash from chain by index
"""
return self.rpc.call(method="getblockhash",
params=[index]).get('result', '')
def getConnectionCount(self):
"""
Get Connection Count from chain
"""
return self.rpc.call(method="getconnectioncount",
params=[]).get('result', 0)
def getRawMemPool(self):
"""
        Get Unconfirmed tx in Memory Pool
"""
return self.rpc.call(method="getrawmempool",
params=[])
def getRawTransaction(self, txid, verbose=0):
"""
        Get confirmed tx from chain
Verbose: 0-Simple, 1-Verbose
"""
if verbose not in (0, 1):
raise ValueError, 'verbose, should be 0 or 1.'
return self.rpc.call(method="getrawtransaction",
params=[txid, verbose])
def getTxOut(self, txid, n=0):
"""
Get Tx Output from chain
"""
return self.rpc.call(method="gettxout",
params=[txid, n])
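# Editor's note: an illustrative usage sketch, not part of the original module.
# It assumes a local node exposing the JSON-RPC port; the raw transaction hex
# is a placeholder.
if __name__ == '__main__':
    node = RemoteNode("http://localhost:20332/")
    print node.getBlockCount()
    print node.getBlock(0, verbose=1)
    print node.sendRawTransaction("<signed-tx-hex>")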
|
apache-2.0
| 1,516,765,743,379,418,400
| 27.829545
| 70
| 0.53449
| false
| 4.389273
| false
| false
| false
|
freundTech/deepl-cli
|
test/translator.py
|
1
|
1489
|
import unittest
import requests
import deepl
paragraph_text = """This is a text with multiple paragraphs. This is still the first one.
This is the second one.
This is the third paragraph."""
paragraph_list = [
'This is a text with multiple paragraphs. This is still the first one.',
'This is the second one.',
'This is the third paragraph.'
]
sentence_list = [
'This is a text with multiple paragraphs.',
'This is still the first one.',
'This is the second one.',
'This is the third paragraph.'
]
class TestOfflineMethods(unittest.TestCase):
def test_split_paragraphs(self):
self.assertListEqual(deepl.translator._split_paragraphs(paragraph_text), paragraph_list)
@unittest.skip("Not yet implemented")
def test_insert_translation(self):
pass
class TestOnlineMethods(unittest.TestCase):
def setUp(self):
try:
requests.get("https://www.deepl.com/jsonrpc")
except ConnectionError:
self.skipTest("Can't contact deepl API. Skipping online tests")
def test_split_sentences(self):
self.assertListEqual(deepl.translator._request_split_sentences(paragraph_list, "EN", ["EN"]),
sentence_list)
def test_translate(self):
self.assertListEqual(
deepl.translator._request_translate(["This is a test"], "EN", "DE", ["EN", "DE"])["translations"],
["Das ist ein Test"])
if __name__ == '__main__':
unittest.main()
|
mit
| -7,055,180,190,798,253,000
| 28.196078
| 110
| 0.650772
| false
| 4.057221
| true
| false
| false
|
nelsonmonteiro/django-sage-api
|
sage_api/models.py
|
1
|
9916
|
from __future__ import unicode_literals
try:
from urllib import urlencode, quote
except ImportError:
from urllib.parse import urlencode, quote
import json
import pytz
import datetime
import base64
import requests
import hashlib
import hmac
import urlparse
from collections import OrderedDict
from uuid import uuid4
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.core.exceptions import PermissionDenied
from .settings import SageSettings
sage_settings = SageSettings()
@python_2_unicode_compatible
class Sage(models.Model):
"""
Model to connect and save tokens from SAGE API related with a specific user.
"""
class Meta:
verbose_name = 'Sage account'
verbose_name_plural = 'Sage accounts'
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='sage')
access_token_key = models.CharField(max_length=2048, blank=True, null=True)
access_token_type = models.CharField(max_length=20)
access_token_expires_on = models.DateTimeField(null=True, blank=True)
refresh_token = models.CharField(max_length=200, blank=True, null=True)
refresh_token_expires_on = models.DateTimeField(null=True, blank=True)
def __str__(self):
return '%s' % self.user
@classmethod
def get_authorization_url(cls, user):
"""
        Return the link to use for OAuth authentication.
"""
state_code, created = AuthStateCode.objects.get_or_create(user=user, defaults={'code': uuid4()})
params = {
'client_id': sage_settings.CLIENT_ID,
'response_type': 'code',
'state': state_code.code,
'redirect_uri': sage_settings.AUTH_REDIRECT_URL,
'scope': sage_settings.SCOPE,
}
return '%s?%s' % (sage_settings.AUTH_URL, urlencode(params))
@classmethod
def create_for_user(cls, user, auth_code, state_code):
"""
        Create a Sage model for a user and generate the first access token.
        Verify that the state code is valid to protect against attacks.
"""
try:
state_code = AuthStateCode.objects.get(user=user, code=state_code)
state_code.delete()
sage_auth, created = cls.objects.get_or_create(user=user)
sage_auth.__get_access_token(auth_code)
except AuthStateCode.DoesNotExist:
raise PermissionDenied('State code is invalid for this user')
def __set_access_token(self, response):
"""
        Save the access_token JSON response fields to the database for later use.
"""
if not ('error' in response):
now = datetime.datetime.now(tz=pytz.utc)
self.access_token_key = response['access_token']
self.access_token_type = response['token_type']
self.access_token_expires_on = now + datetime.timedelta(seconds=response['expires_in'])
self.refresh_token = response['refresh_token']
self.refresh_token_expires_on = now + datetime.timedelta(seconds=response['refresh_token_expires_in'])
self.save()
def __get_access_token(self, code):
"""
Make an API call to get the access_token from the authorization_code.
"""
params = urlencode({
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': sage_settings.AUTH_REDIRECT_URL,
})
authorization = base64.b64encode('%s:%s' % (sage_settings.CLIENT_ID, sage_settings.SECRET_KEY))
request = requests.post(sage_settings.ACCESS_TOKEN_URL, params, headers={
'Authorization': 'Basic %s' % authorization,
'ContentType': 'application/x-www-form-urlencoded;charset=UTF-8',
})
self.__set_access_token(request.json())
def __refresh_access_token(self):
"""
Make an API call to renew the access_token.
"""
params = urlencode({
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token,
})
authorization = base64.b64encode('%s:%s' % (sage_settings.CLIENT_ID, sage_settings.SECRET_KEY))
request = requests.post(sage_settings.ACCESS_TOKEN_URL, params, headers={
'Authorization': 'Basic %s' % authorization,
'ContentType': 'application/x-www-form-urlencoded;charset=UTF-8',
})
self.__set_access_token(request.json())
@property
def access_token(self):
"""
Return a valid access_token.
"""
now = datetime.datetime.now(tz=pytz.utc)
if self.access_token_expires_on < now:
if self.refresh_token_expires_on > now:
self.__refresh_access_token()
else:
return None
return self.access_token_key
def __get_signature(self, url, params, data, method, nonce):
"""
Return the signature to put in the API request's headers.
"""
if method in ['POST', 'PUT']:
params['body'] = base64.b64encode(json.dumps(data))
ordered_params = OrderedDict(sorted(params.items()))
encoded_params = quote(urlencode(ordered_params), safe='')
raw_string = '%s&%s&%s&%s' % (method, quote(url.lower(), safe=''), encoded_params, nonce)
signing_key = '%s&%s' % (quote(sage_settings.SIGNING_KEY, safe=''), quote(self.access_token, safe=''))
signature = hmac.new(signing_key, raw_string, hashlib.sha1).digest().encode('base64').rstrip('\n')
return signature
def __get_headers(self, url, params, data, method, site_id=None, company_id=None):
"""
Return the API request's headers already with signature.
"""
nonce = str(uuid4().hex)
return {
'Authorization': '%s %s' % (self.access_token_type.capitalize(), self.access_token),
'ocp-apim-subscription-key': sage_settings.SUBSCRIPTION_KEY,
'X-Site': site_id or '',
'X-Company': company_id or '',
'X-Signature': self.__get_signature(url, params, data, method, nonce),
'X-Nonce': nonce,
'Accept': 'application/json',
'Content-Type': 'application/json',
}
@staticmethod
def __get_absolute_url(relative_url):
"""
        Return the absolute URL for an API call.
"""
return urlparse.urljoin(sage_settings.API_URL, relative_url)
@staticmethod
def __clean_response(response):
if response.status_code != 200:
error_msg = """
STATUS_CODE:
%(status_code)s
URL:
%(url)s
REQUEST HEADERS:
%(request_headers)s
REQUEST BODY:
%(request_body)s
RESPONSE HEADERS:
%(response_headers)s
RESPONSE BODY:
%(response_body)s
""" % {
'status_code': response.status_code,
'url': response.request.url,
'request_headers': response.request.headers,
'request_body': response.request.body,
'response_headers': response.headers,
'response_body': response.content,
}
raise Exception(error_msg)
return response.json()
def api_get(self, relative_url, params=None, site_id=None, company_id=None):
"""
Make an API GET request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
headers = self.__get_headers(url, params or {}, {}, 'GET', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.get(url, headers=headers)
return self.__clean_response(response)
def api_post(self, relative_url, params=None, data=None, site_id=None, company_id=None):
"""
Make an API POST request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
data = data or {}
headers = self.__get_headers(url, params, data, 'POST', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.post(url, json.dumps(data), headers=headers)
return self.__clean_response(response)
def api_put(self, relative_url, params=None, data=None, site_id=None, company_id=None):
"""
Make an API PUT request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
data = data or {}
headers = self.__get_headers(url, params or {}, data, 'PUT', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.put(url, json.dumps(data), headers=headers)
return self.__clean_response(response)
def api_delete(self, relative_url, params=None, site_id=None, company_id=None):
"""
Make an API DELETE request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
headers = self.__get_headers(url, params, {}, 'DELETE', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.delete(url, headers=headers)
return self.__clean_response(response)
def get_sites(self):
return self.api_get('accounts/v1/sites')
@python_2_unicode_compatible
class AuthStateCode(models.Model):
"""
    Model to save a random code for a user to prevent external attacks.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='sage_state_code')
code = models.CharField(max_length=50)
def __str__(self):
return '%s' % self.user
|
mit
| 354,929,042,206,438,900
| 35.725926
| 114
| 0.590056
| false
| 3.983929
| false
| false
| false
|
scoin/redis-py-datamapper
|
redislist.py
|
1
|
1913
|
class RedisList:
import redis
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def __init__(self, key):
self.key = key
def append(self, *values):
self.r.rpush(self.key, *values)
def unshift(self, *values):
self.r.lpush(self.key, *values)
def insert(self, pivot, value):
self.r.linsert(self.key, 'before', pivot, value)
def pop(self):
return self.r.rpop(self.key).decode()
def shift(self):
return self.r.lpop(self.key).decode()
def sort(self):
return [w.decode() for w in self.r.sort(self.key, alpha = True)]
def clear(self):
self.r.delete(self.key)
def __len__(self):
return self.r.llen(self.key)
def __getitem__(self, index):
if(type(index) == int):
if(index >= len(self)): raise IndexError('Out of Range')
return self.r.lindex(self.key, index).decode()
elif(type(index) == slice):
return [w.decode() for w in self.r.lrange(self.key, index.start or 0, (index.stop or len(self))-1)]
def __setitem__(self, index, value):
if(type(index) == int):
if(index >= len(self)): raise IndexError('Out of Range')
self.r.lset(self.key, index, value)
elif(type(index) == slice):
if(type(value) != tuple and type(value) != list): raise TypeError('Assignment must be iterable')
            # mirror Python list slicing defaults: start at 0, stop at len(self)
            start, stop = index.start or 0, index.stop or len(self)
            if (stop - start) != len(value): raise TypeError("Incorrect number of arguments")
            pipe = self.r.pipeline()
            for vindex, rindex in enumerate(range(start, stop)):
pipe.lset(self.key, rindex, value[vindex])
pipe.execute()
def __repr__(self):
return "RedisList(" + str([w.decode() for w in self.r.lrange(self.key, 0, -1)]) + ")"
|
mit
| 1,374,646,387,132,393,700
| 35.788462
| 111
| 0.573968
| false
| 3.403915
| false
| false
| false
|
chippey/gaffer
|
python/GafferSceneUI/OutputsUI.py
|
1
|
8592
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import re
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferScene.Outputs,
"description",
"""
Defines the image outputs to be created by the renderer. Arbitrary
outputs can be defined within the UI and also via the
`Outputs::addOutput()` API. Commonly used outputs may also
be predefined at startup via a config file - see
$GAFFER_ROOT/startup/gui/outputs.py for an example.
""",
plugs = {
"outputs" : [
"description",
"""
The outputs defined by this node.
""",
"plugValueWidget:type", "GafferSceneUI.OutputsUI.OutputsPlugValueWidget",
],
"outputs.*.parameters.quantize.value" : [
"description",
"""
The bit depth of the image.
""",
"preset:8 bit", IECore.IntVectorData( [ 0, 255, 0, 255 ] ),
"preset:16 bit", IECore.IntVectorData( [ 0, 65535, 0, 65535 ] ),
"preset:Float", IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"outputs.*.fileName" : [
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"pathPlugValueWidget:bookmarks", "image",
"pathPlugValueWidget:leaf", True,
],
"outputs.*.active" : [
"boolPlugValueWidget:displayMode", "switch",
],
}
)
##########################################################################
# Custom PlugValueWidgets for listing outputs
##########################################################################
class OutputsPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
column = GafferUI.ListContainer( spacing = 6 )
GafferUI.PlugValueWidget.__init__( self, column, plug )
with column :
# this will take care of laying out our list of outputs, as
# each output is represented as a child plug of the main plug.
GafferUI.PlugLayout( plug )
# now we just need a little footer with a button for adding new outputs
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
GafferUI.MenuButton(
image="plus.png", hasFrame=False, menu = GafferUI.Menu( Gaffer.WeakMethod( self.__addMenuDefinition ) )
)
GafferUI.Spacer( IECore.V2i( 1 ), maximumSize = IECore.V2i( 100000, 1 ), parenting = { "expand" : True } )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
pass
def __addMenuDefinition( self ) :
node = self.getPlug().node()
currentNames = set( [ output["name"].getValue() for output in node["outputs"].children() ] )
m = IECore.MenuDefinition()
registeredOutputs = node.registeredOutputs()
for name in registeredOutputs :
menuPath = name
if not menuPath.startswith( "/" ) :
menuPath = "/" + menuPath
m.append(
menuPath,
{
"command" : IECore.curry( node.addOutput, name ),
"active" : name not in currentNames
}
)
if len( registeredOutputs ) :
m.append( "/BlankDivider", { "divider" : True } )
m.append( "/Blank", { "command" : IECore.curry( node.addOutput, "", IECore.Display( "", "", "" ) ) } )
return m
# A widget for representing an individual output.
class _ChildPlugWidget( GafferUI.PlugValueWidget ) :
def __init__( self, childPlug ) :
column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=4 )
GafferUI.PlugValueWidget.__init__( self, column, childPlug )
with column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=4 ) as header :
collapseButton = GafferUI.Button( image = "collapsibleArrowRight.png", hasFrame=False )
collapseButton.__clickedConnection = collapseButton.clickedSignal().connect( Gaffer.WeakMethod( self.__collapseButtonClicked ) )
GafferUI.PlugValueWidget.create( childPlug["active"] )
self.__label = GafferUI.Label( self.__namePlug().getValue() )
GafferUI.Spacer( IECore.V2i( 1 ), maximumSize = IECore.V2i( 100000, 1 ), parenting = { "expand" : True } )
self.__deleteButton = GafferUI.Button( image = "delete.png", hasFrame=False )
self.__deleteButton.__clickedConnection = self.__deleteButton.clickedSignal().connect( Gaffer.WeakMethod( self.__deleteButtonClicked ) )
self.__deleteButton.setVisible( False )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing= 4 ) as self.__detailsColumn :
GafferUI.PlugWidget( self.__namePlug() )
GafferUI.PlugWidget( self.__fileNamePlug() )
GafferUI.PlugWidget( childPlug["type"] )
GafferUI.PlugWidget( childPlug["data"] )
GafferUI.CompoundDataPlugValueWidget( childPlug["parameters"] )
GafferUI.Divider( GafferUI.Divider.Orientation.Horizontal )
self.__detailsColumn.setVisible( False )
self.__enterConnection = header.enterSignal().connect( Gaffer.WeakMethod( self.__enter ) )
self.__leaveConnection = header.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ) )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
with self.getContext() :
enabled = self.getPlug()["active"].getValue()
self.__label.setEnabled( enabled )
self.__detailsColumn.setEnabled( enabled )
self.__label.setText( self.__namePlug().getValue() )
def __namePlug( self ) :
plug = self.getPlug()
# backwards compatibility with old plug layout
return plug.getChild( "label" ) or plug.getChild( "name" )
def __fileNamePlug( self ) :
plug = self.getPlug()
# backwards compatibility with old plug layout
return plug.getChild( "fileName" ) or plug.getChild( "name" )
def __enter( self, widget ) :
self.__deleteButton.setVisible( True )
def __leave( self, widget ) :
self.__deleteButton.setVisible( False )
def __collapseButtonClicked( self, button ) :
visible = not self.__detailsColumn.getVisible()
self.__detailsColumn.setVisible( visible )
button.setImage( "collapsibleArrowDown.png" if visible else "collapsibleArrowRight.png" )
def __deleteButtonClicked( self, button ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().parent().removeChild( self.getPlug() )
## \todo This regex is an interesting case to be considered during the string matching unification for #707. Once that
# is done, intuitively we want to use an "outputs.*" glob expression, but because the "*" will match anything
# at all, including ".", it will match the children of what we want too. We might want to prevent wildcards from
# matching "." when we come to use them in this context.
GafferUI.PlugValueWidget.registerCreator( GafferScene.Outputs, re.compile( r"outputs\.[^\.]+$" ), _ChildPlugWidget )
|
bsd-3-clause
| -2,263,930,681,211,126,500
| 31.91954
| 140
| 0.670507
| false
| 3.818667
| false
| false
| false
|
chewse/djangorestframework-signed-permissions
|
signedpermissions/permissions.py
|
1
|
2401
|
# -*- coding: utf-8 -*-
from django.core import signing
from rest_framework import permissions
from .signing import unsign_filters_and_actions
class SignedPermission(permissions.BasePermission):
"""
Allow access to a particular set of filters if the sign is valid.
    This permission allows access to sets of items based on JSON-encoded
    filters. It takes these filters and applies them to the proper queryset
    using **kwargs expansion, or, in the case of a create (POST), it checks
    the POST data against them.
"""
def has_permission(self, request, view):
"""Check list and create permissions based on sign and filters."""
if view.suffix == 'Instance':
return True
filter_and_actions = self._get_filter_and_actions(
request.query_params.get('sign'),
view.action,
'{}.{}'.format(
view.queryset.model._meta.app_label,
view.queryset.model._meta.model_name
)
)
if not filter_and_actions:
return False
if request.method == 'POST':
for key, value in request.data.iteritems():
# Do unicode conversion because value will always be a
# string
if (key in filter_and_actions['filters'] and not
unicode(filter_and_actions['filters'][key]) == unicode(value)):
return False
return True
def has_object_permission(self, request, view, obj=None):
"""Check object permissions based on filters."""
filter_and_actions = self._get_filter_and_actions(
request.query_params.get('sign'),
view.action,
'{}.{}'.format(obj._meta.app_label, obj._meta.model_name))
if not filter_and_actions:
return False
qs = view.queryset.filter(**filter_and_actions['filters'])
return qs.filter(id=obj.id).exists()
@staticmethod
def _get_filter_and_actions(sign, action, dotted_model_name):
try:
filters_and_actions = unsign_filters_and_actions(
sign,
dotted_model_name
)
except signing.BadSignature:
return {}
for filtered_action in filters_and_actions:
if action in filtered_action['actions']:
return filtered_action
return {}
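# --- Usage sketch (illustrative, not part of the original module) ---
# The class docstring above describes the mechanism; wiring the permission into
# a view set follows the usual DRF pattern. The model, serializer and view set
# names below are hypothetical:
#
#     from rest_framework import viewsets
#
#     class DocumentViewSet(viewsets.ModelViewSet):
#         queryset = Document.objects.all()
#         serializer_class = DocumentSerializer
#         permission_classes = [SignedPermission]
#
# Requests then carry the signed filter set as a `sign` query parameter,
# e.g. GET /documents/?sign=<signed-payload>.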
|
mit
| -497,435,760,355,794,200
| 35.378788
| 87
| 0.588088
| false
| 4.530189
| false
| false
| false
|
zmughal/xerox-parc-uplib-mirror
|
win32/stopStartUpLibServices.py
|
1
|
3546
|
#
# This file is part of the "UpLib 1.7.11" release.
# Copyright (C) 2003-2011 Palo Alto Research Center, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys, os, string, traceback, time
import win32serviceutil
import win32service
import win32event
# Stop all uplib services
def stopUplibServices():
try:
manH = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
sList = win32service.EnumServicesStatus(manH,win32service.SERVICE_WIN32,win32service.SERVICE_ACTIVE)
for svc in sList:
name = svc[0]
if (name.lower().startswith("uplib")):
serveH = win32service.OpenService(manH,name,win32service.SERVICE_ALL_ACCESS)
if (win32service.QueryServiceStatus(serveH)[1] == win32service.SERVICE_RUNNING):
win32service.ControlService(serveH, win32service.SERVICE_CONTROL_STOP)
while (win32service.QueryServiceStatus(serveH)[1] != win32service.SERVICE_STOPPED):
time.sleep(5)
win32service.CloseServiceHandle(serveH)
win32service.CloseServiceHandle(manH)
except:
t, v, b = sys.exc_info()
sys.stderr.write("Problem Stopping UpLib Services. %s."% string.join(traceback.format_exception(t, v, b)))
# Start all uplib services
def startUplibServices():
try:
manH = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
sList = win32service.EnumServicesStatus(manH,win32service.SERVICE_WIN32,win32service.SERVICE_INACTIVE)
for svc in sList:
name = svc[0]
if (name.lower().startswith("uplib")):
serveH = win32service.OpenService(manH,name,win32service.SERVICE_ALL_ACCESS)
if (win32service.QueryServiceStatus(serveH)[1] == win32service.SERVICE_STOPPED and win32service.QueryServiceConfig(serveH)[1] == win32service.SERVICE_AUTO_START):
win32service.StartService(serveH, None)
win32service.CloseServiceHandle(serveH)
win32service.CloseServiceHandle(manH)
except:
t, v, b = sys.exc_info()
sys.stderr.write("Problem Starting UpLib Services. %s."% string.join(traceback.format_exception(t, v, b)))
if __name__ == "__main__":
usage = False
if (len(sys.argv) == 2):
if (sys.argv[1].lower() == "stop"):
stopUplibServices()
elif (sys.argv[1].lower() == "start"):
startUplibServices()
else:
usage = True
else:
usage = True
if (usage):
print "Usage: "+sys.argv[0]+" OPTION"
print "Where OPTION includes:"
print "stop - Stop All UpLib services"
print "start - Start All UpLib services"
|
gpl-2.0
| -4,299,353,383,845,718,000
| 39.905882
| 178
| 0.64608
| false
| 3.796574
| false
| false
| false
|
mozman/ezdxf
|
examples/tiled_window_setup.py
|
1
|
2522
|
# Purpose: tiled window model space setup for AutoCAD
# Copyright (c) 2018 Manfred Moitzi
# License: MIT License
import ezdxf
FILENAME = r'C:\Users\manfred\Desktop\Outbox\tiled_windows_R2000.dxf'
# FILENAME = 'tiled_windows_R2000.dxf'
def draw_raster(doc):
marker = doc.blocks.new(name='MARKER')
attribs = {'color': 2}
marker.add_line((-1, 0), (1, 0), dxfattribs=attribs)
marker.add_line((0, -1), (0, 1), dxfattribs=attribs)
marker.add_circle((0, 0), .4, dxfattribs=attribs)
marker.add_attdef('XPOS', (0.5, -1.0), dxfattribs={'height': 0.25, 'color': 4})
marker.add_attdef('YPOS', (0.5, -1.5), dxfattribs={'height': 0.25, 'color': 4})
modelspace = doc.modelspace()
for x in range(10):
for y in range(10):
xcoord = x * 10
ycoord = y * 10
values = {
'XPOS': f"x = {xcoord}",
'YPOS': f"y = {ycoord}",
}
modelspace.add_auto_blockref('MARKER', (xcoord, ycoord), values)
def setup_active_viewport(doc):
# delete '*Active' viewport configuration
doc.viewports.delete_config('*ACTIVE')
# the available display area in AutoCAD has the virtual lower-left corner (0, 0) and the virtual upper-right corner
# (1, 1)
# first viewport, uses the left half of the screen
viewport = doc.viewports.new('*ACTIVE')
viewport.dxf.lower_left = (0, 0)
viewport.dxf.upper_right = (.5, 1)
viewport.dxf.target = (0, 0, 0) # target point defines the origin of the DCS, this is the default value
viewport.dxf.center = (40, 30) # move this location (in DCS) to the center of the viewport
viewport.dxf.height = 15 # height of viewport in drawing units, this parameter works
viewport.dxf.aspect_ratio = 1.0 # aspect ratio of viewport (x/y)
# second viewport, uses the right half of the screen
viewport = doc.viewports.new('*ACTIVE')
viewport.dxf.lower_left = (.5, 0)
viewport.dxf.upper_right = (1, 1)
viewport.dxf.target = (60, 20, 0) # target point defines the origin of the DCS
viewport.dxf.center = (0, 0) # move this location (in DCS, model space = 60, 20) to the center of the viewport
viewport.dxf.height = 15 # height of viewport in drawing units, this parameter works
viewport.dxf.aspect_ratio = 2.0 # aspect ratio of viewport (x/y)
if __name__ == '__main__':
doc = ezdxf.new('R2000')
draw_raster(doc)
setup_active_viewport(doc)
doc.saveas(FILENAME)
print(f"DXF file '{FILENAME}' created.")
|
mit
| -3,377,911,553,956,720,600
| 39.031746
| 119
| 0.635607
| false
| 3.09828
| false
| false
| false
|
godiard/pathagar
|
books/management/commands/addbooks.py
|
1
|
3644
|
# Copyright (C) 2010, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from django.db.utils import IntegrityError
import sys
import os
import csv
import json
from optparse import make_option
from books.models import Book, Status
class Command(BaseCommand):
help = "Adds a book collection (via a CSV file)"
args = 'Absolute path to CSV file'
option_list = BaseCommand.option_list + (
make_option('--json',
action='store_true',
dest='is_json_format',
default=False,
help='The file is in JSON format'),
)
def _handle_csv(self, csvpath):
"""
Store books from a file in CSV format.
WARN: does not handle tags
"""
csvfile = open(csvpath)
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
#TODO: Figure out if this is a valid CSV file
for row in reader:
path = row[0]
title = row[1]
author = row[2]
summary = row[3]
f = open(path)
book = Book(book_file = File(f), a_title = title, a_author = author, a_summary = summary)
book.save()
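    # Illustrative CSV row in the column order _handle_csv expects
    # (path, title, author, summary); the path and values are made up:
    #
    #     /srv/books/alice.epub,Alice in Wonderland,Lewis Carroll,A girl falls down a rabbit hole.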
def _handle_json(self, jsonpath):
"""
Store books from a file in JSON format.
"""
jsonfile = open(jsonpath)
data_list = json.loads(jsonfile.read())
for d in data_list:
# Get a Django File from the given path:
f = open(d['book_path'])
d['book_file'] = File(f)
del d['book_path']
if d.has_key('cover_path'):
f_cover = open(d['cover_path'])
d['cover_img'] = File(f_cover)
del d['cover_path']
if d.has_key('a_status'):
d['a_status'] = Status.objects.get(status = d['a_status'])
tags = d['tags']
del d['tags']
book = Book(**d)
try:
book.save() # must save item to generate Book.id before creating tags
[book.tags.add(tag) for tag in tags]
book.save() # save again after tags are generated
except IntegrityError as e:
if str(e) == "column file_sha256sum is not unique":
print "The book (", d['book_file'], ") was not saved because the file already exsists in the database."
else:
raise CommandError('Error adding file %s: %s' % (d['book_file'], sys.exc_info()[1]))
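    # Illustrative JSON record for _handle_json. As written, the code requires
    # 'book_path' and 'tags'; 'cover_path' and 'a_status' are optional, and any
    # remaining keys are passed straight to the Book model (e.g. a_title,
    # a_author). The paths and values below are made up:
    #
    #     [
    #       {
    #         "book_path": "/srv/books/alice.epub",
    #         "cover_path": "/srv/covers/alice.jpg",
    #         "a_title": "Alice in Wonderland",
    #         "a_author": "Lewis Carroll",
    #         "a_status": "Published",
    #         "tags": ["fiction", "classic"]
    #       }
    #     ]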
def handle(self, filepath='', *args, **options):
if not os.path.exists(filepath):
raise CommandError("%r is not a valid path" % filepath)
if options['is_json_format']:
self._handle_json(filepath)
else:
self._handle_csv(filepath)
|
gpl-2.0
| -4,907,735,612,702,819,000
| 31.828829
| 123
| 0.585071
| false
| 4.053393
| false
| false
| false
|
manastech/de-bee
|
index.py
|
1
|
3273
|
from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext.webapp import template
from model import Membership
from util import membershipsOfUser
from util import descriptionOfBalanceInGroup
from util import descriptionOfTotalBalance
from comparators import compareMembershipsByGroupNick
from i18n import getDefaultLanguage
from i18n import getLanguage
from i18n import addMasterKeys
from i18n import _
import os
class IndexHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
lang = getLanguage(self, user)
userMemberships = membershipsOfUser(user)
userMemberships.sort(cmp = compareMembershipsByGroupNick)
hasUserMemberships = len(userMemberships) > 0
if hasUserMemberships:
group = userMemberships[0].group
else:
group = 0
debts = self.getDebts(user, userMemberships, lang)
message = self.request.get('msg')
hasMessage = len(message) > 0
model = {
'username': user.nickname(),
'signout_url': users.create_logout_url("/"),
'debts': debts,
'hasUserMemberships': hasUserMemberships,
'userMemberships': userMemberships,
'group': group,
'hasMessage': hasMessage,
'message': message,
# i18n
'DontBelong': _("You don't belong to any group. You can create your own and invite your friends.", lang),
'Name': _('Name', lang),
'YouOweNobody': _('You owe nobody, and nobody owes you. Hurray!', lang),
'GoToGroup': _('Go to group', lang),
'SelectGroup': _('select group', lang),
'CreateGroup': _('Create Group', lang),
}
addMasterKeys(model, lang)
path = os.path.join(os.path.dirname(__file__), 'dashboard.html')
self.response.out.write(template.render(path, model))
else:
lang = getDefaultLanguage(self)
model = {
'loginurl': users.create_login_url("/"),
# i18n
'introduction': _('introduction', lang),
}
addMasterKeys(model, lang)
path = os.path.join(os.path.dirname(__file__), 'introduction.html')
self.response.out.write(template.render(path, model))
def getDebts(self, user, memberships, lang):
total = 0
items = []
for m in memberships:
if abs(m.balance) <= 1e-07:
continue
link = '/group?group=%s' % m.group.key()
total += m.balance
items.append({
'isOweToSelf' : m.balance > 0.0,
'desc': descriptionOfBalanceInGroup(m, link, lang)
})
return {
'isZero': abs(total) <= 1e-07,
'isOweToSelf' : total > 0.0,
'items' : items,
'desc': descriptionOfTotalBalance(total, lang),
'hasMoreThanOneItem' : len(items) > 1,
}
|
mit
| 6,012,039,414,046,494,000
| 31.418367
| 117
| 0.545066
| false
| 4.164122
| false
| false
| false
|
steinwurf/bongo
|
bongo/settings.py
|
1
|
3513
|
#! /usr/bin/env python
# encoding: utf-8
"""
Django settings for bongo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
from config import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Application definition
INSTALLED_APPS = (
'django.contrib.messages',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'file_server',
'utils',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
ROOT_URLCONF = 'bongo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'bongo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Cache
# https://docs.djangoproject.com/en/1.3/ref/settings/#std:setting-CACHES
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
# 'LOCATION': '/var/tmp/bongo_cache',
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = '/var/www/bongo/static/'
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
'social.backends.github.GithubTeamOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
STATICFILES_DIRS = (
# Twitter Bootstrap stuff
os.path.join(BASE_DIR, "bootstrap/dist"),
os.path.join(BASE_DIR, "bootstrap/assets")
)
SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['read:org']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_URL = '/'
|
bsd-3-clause
| 921,367,275,572,019,300
| 27.330645
| 75
| 0.674637
| false
| 3.502493
| false
| false
| false
|
dedeco/cnddh-denuncias
|
cnddh/models.py
|
1
|
25670
|
# coding=latin-1
from database import db
from sqlalchemy.orm import relationship, backref, with_polymorphic
from sqlalchemy import Sequence, ForeignKey, UniqueConstraint
from cnddh.uploadsets import anexos_upload
import datetime
class Denuncia(db.Model):
__tablename__ = 'denuncias'
id = db.Column(db.Integer, Sequence('denuncias_id_seq'), primary_key=True)
numero = db.Column(db.Integer, unique=True, nullable=False)
dtcriacao = db.Column(db.DateTime, unique=False, nullable=False)
dtdenuncia = db.Column(db.DateTime, unique=False, nullable=False)
status_id = db.Column(db.Integer, ForeignKey('status.id'), nullable=False)
status = relationship("Status")
tipofonte_id = db.Column(db.Integer, ForeignKey('tipofontes.id'),nullable=False)
fonte = db.Column(db.String(240), unique=False, nullable=False)
protocolo = db.Column(db.Integer)
resumo = db.Column(db.String(1024), unique=False, nullable=False)
descricao = db.Column(db.String(8192), unique=False, nullable=False)
observacao = db.Column(db.String(8192), unique=False)
tipolocal = db.Column(db.String(240), unique=False, nullable=False)
endereco = db.Column(db.String(240), unique=False, nullable=False)
num = db.Column(db.String(60), unique=False)
complemento = db.Column(db.String(240), unique=False)
referencia = db.Column(db.String(240), unique=False)
bairro = db.Column(db.String(240), unique=False)
cidade = db.Column(db.String(60), unique=False, nullable=False)
cep = db.Column(db.String(60), unique=False, nullable=False)
estado = db.Column(db.String(2), unique=False, nullable=False)
pais = db.Column(db.String(60), unique=False, nullable=False)
vitimas = relationship("Vitima", backref="denuncia")
suspeitos = relationship("Suspeito", backref="denuncia")
violacoes = relationship("Violacao", backref="denuncia")
anexos = relationship("Anexo", backref="denuncia")
historico = relationship("Historico", backref="denuncia",order_by="asc(Historico.dtcriacao)")
def __init__(self, numero):
self.numero = numero
self.status_id = 1
self.dtcriacao = datetime.datetime.today()
class Vitima(db.Model):
__tablename__ = 'vitimas'
id = db.Column(db.Integer, Sequence('vitimas_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
tipovitima_id = db.Column(db.Integer, ForeignKey('tipovitimas.id'), nullable=False)
qtdevitimas = db.Column(db.Integer, unique=False, nullable=False, default=1)
nomenaoidentificado = db.Column(db.String(1), unique=False)
nome = db.Column(db.String(100), unique=False, nullable=False)
idade = db.Column(db.Integer, unique=False, nullable=False, default=0)
sexo = db.Column(db.String(20), unique=False, nullable=False)
cor = db.Column(db.String(20), unique=False, nullable=False)
violacoes = relationship("Violacao", backref="vitima")
tipovitima = relationship("TipoVitima")
def __init__(self, tipovitima_id):
self.tipovitima_id = tipovitima_id
class TipoVitima(db.Model):
__tablename__ = 'tipovitimas'
id = db.Column(db.Integer, Sequence('tipovitimas_id_seq'), primary_key=True)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, tipo, legenda):
self.tipo = tipo
self.legenda = legenda
class Suspeito(db.Model):
__tablename__ = 'suspeitos'
id = db.Column(db.Integer, Sequence('suspeitos_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
tiposuspeito_id =db.Column(db.Integer, ForeignKey('tiposuspeitos.id'), nullable=False)
qtdesuspeitos = db.Column(db.Integer, unique=False, nullable=False, default=1)
nomeinstituicao = db.Column(db.String(255), unique=False, nullable=False)
nomenaoidentificado = db.Column(db.String(1), unique=False)
nome = db.Column(db.String(255), unique=False, nullable=False)
idade = db.Column(db.Integer, unique=False, nullable=False, default=0)
sexo = db.Column(db.String(20), unique=False, nullable=False)
cor = db.Column(db.String(20), unique=False, nullable=False)
violacoes = relationship("Violacao", backref="suspeito")
tiposuspeito = relationship("TipoSuspeito")
def __init__(self, tiposuspeito_id):
self.tiposuspeito_id = tiposuspeito_id
class TipoSuspeito(db.Model):
__tablename__ = 'tiposuspeitos'
id = db.Column(db.Integer, Sequence('tiposuspeitos_id_seq'), primary_key=True)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
instituicao = db.Column(db.String(255), unique=False, nullable=False)
classificacao = db.Column(db.String(255), unique=False, nullable=False)
def __init__(self, tipo, legenda, instituicao, classificacao):
self.tipo = tipo
self.legenda = legenda
self.instituicao = instituicao
self.classificacao = classificacao
class TipoViolacao(db.Model):
__tablename__ = 'tipoviolacoes'
id = db.Column(db.Integer, Sequence('tipoviolacoes_id_seq'), primary_key=True)
macrocategoria = db.Column(db.String(255), unique=False, nullable=False)
microcategoria = db.Column(db.String(255), unique=False, nullable=False)
violacoes = relationship("Violacao", backref="tipoviolacao")
def __init__(self, macrocategoria, microcategoria):
self.macrocategoria = macrocategoria
self.microcategoria = microcategoria
class Violacao(db.Model):
__tablename__ = 'violacoes'
id = db.Column(db.Integer, Sequence('violacoes_id_seq'), primary_key=True)
tipoviolacoes_id = db.Column(db.Integer, ForeignKey('tipoviolacoes.id'), nullable=False)
tipo = db.Column(db.String(20))
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
vitima_id = db.Column(db.Integer, ForeignKey('vitimas.id'), nullable=False)
suspeito_id = db.Column(db.Integer, ForeignKey('suspeitos.id'), nullable=False)
__table_args__ = (UniqueConstraint('tipoviolacoes_id', 'denuncia_id', 'vitima_id', 'suspeito_id', name='uix_violacao'),)
def __init__(self, denuncia_id, tipoviolacoes_id, suspeito_id, vitima_id):
self.denuncia_id = denuncia_id
self.tipoviolacoes_id = tipoviolacoes_id
self.suspeito_id = suspeito_id
self.vitima_id = vitima_id
__mapper_args__ = {
'polymorphic_on':tipo,
'polymorphic_identity':'violacoes',
'with_polymorphic':'*'
}
class Homicidio(Violacao):
__tablename__ = 'homicidios'
id = db.Column(db.Integer, ForeignKey('violacoes.id'),primary_key=True)
rco = db.Column(db.String(100), unique=False)
bo = db.Column(db.String(100), unique=False)
ip = db.Column(db.String(100), unique=False)
reds = db.Column(db.String(100), unique=False)
dtfato = db.Column(db.DateTime, unique=False)
prfato = db.Column(db.String(20), unique=False)
situacao = db.Column(db.String(20), unique=False)
obs = db.Column(db.String(255), unique=False)
arquivo = db.Column(db.String(255), unique=False)
meiosutilizados = relationship("HomicidioMeioUtilizado",cascade="all,delete")
__mapper_args__ = {'polymorphic_identity':'homicidios'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
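# --- Query sketch (illustrative, not part of the original module) ---
# Violacao/Homicidio use SQLAlchemy joined-table inheritance via
# 'polymorphic_on'/'polymorphic_identity'. A query that also loads the
# Homicidio columns could look roughly like this (the session call and the
# filter value are examples only):
#
#     viol = with_polymorphic(Violacao, [Homicidio])
#     abertos = (db.session.query(viol)
#                .filter(viol.Homicidio.situacao == u'aberto')
#                .all())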
class TipoMeioUtilizado(db.Model):
__tablename__ = 'tipomeioutilizados'
id = db.Column(db.Integer, Sequence('tipomeioutilizados_id_seq'), primary_key=True)
meio = db.Column(db.String(255), unique=False, nullable=False)
def __init__(self, meio):
self.meio = meio
class HomicidioMeioUtilizado(db.Model):
__tablename__ = 'homicidiomeioutilizado'
homicidio_id = db.Column(db.Integer, ForeignKey('homicidios.id'), primary_key=True)
tipomeioutilizado_id = db.Column(db.Integer, ForeignKey('tipomeioutilizados.id'), primary_key=True)
__table_args__ = (UniqueConstraint('homicidio_id', 'tipomeioutilizado_id', name='uix_meioutilizado'),)
def __init__(self, homicidio_id, tipomeioutilizado_id):
self.homicidio_id = homicidio_id
self.tipomeioutilizado_id = tipomeioutilizado_id
class TipoFonte(db.Model):
__tablename__ = 'tipofontes'
id = db.Column(db.Integer, Sequence('tipofontes_id_seq'), primary_key=True)
tipofonte = db.Column(db.String(255), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
exemplo = db.Column(db.String(255), unique=False)
def __init__(self, tipofonte, legenda, exemplo):
self.tipofonte = tipofonte
self.legenda = legenda
self.exemplo = exemplo
class Status(db.Model):
__tablename__ = 'status'
id = db.Column(db.Integer, Sequence('status_id_seq'), primary_key=True)
status = db.Column(db.String(255), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, status, legenda):
self.status = status
self.legenda = legenda
class Acao(db.Model):
__tablename__ = 'acoes'
id = db.Column(db.Integer, Sequence('acoes_id_seq'), primary_key=True)
acao = db.Column(db.String(255), unique=False, nullable=False,)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, acao, legenda):
self.acao = acao
self.legenda = legenda
class Historico(db.Model):
__tablename__ = 'historico'
id = db.Column(db.Integer, Sequence('historico_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
acao_id = db.Column(db.Integer, ForeignKey('acoes.id'), nullable=False)
dtcriacao = db.Column(db.DateTime, unique=False, nullable=False)
motivo = db.Column(db.String(1024), unique=False)
acao = relationship("Acao")
encaminhamento = relationship("Encaminhamento", backref="historico", lazy='joined',cascade="all,delete")
def __init__(self, denuncia_id):
self.denuncia_id = denuncia_id
self.dtcriacao = datetime.datetime.today()
class TipoEncaminhamento(db.Model):
__tablename__ = 'tipoencaminhamentos'
id = db.Column(db.Integer, Sequence('tipoencaminhamentos_id_seq'), primary_key=True)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, tipo):
self.tipo = tipo
class Orgao(db.Model):
__tablename__ = 'orgaos'
id = db.Column(db.Integer, Sequence('orgaos_id_seq'), primary_key=True)
orgao = db.Column(db.String(200), unique=False, nullable=False)
def __init__(self, orgao):
self.orgao = orgao
class Encaminhamento(db.Model):
__tablename__ = 'encaminhamentos'
id = db.Column(db.Integer, Sequence('encaminhamentos_id_seq'), primary_key=True)
historico_id = db.Column(db.Integer, ForeignKey('historico.id'), nullable=False)
orgao_id = db.Column(db.Integer, ForeignKey('orgaos.id'), nullable=False)
tipo_id = db.Column(db.Integer, ForeignKey('tipoencaminhamentos.id'), nullable=False)
dtenvio = db.Column(db.DateTime, unique=False, nullable=False)
dtlimite = db.Column(db.DateTime, unique=False)
dtretorno = db.Column(db.DateTime, unique=False)
dtcriacao = db.Column(db.DateTime, unique=False, nullable=False)
tipo = db.Column(db.String(20))
orgao = relationship("Orgao")
tipo_encaminhamento = relationship("TipoEncaminhamento")
retorno = relationship("Retorno",cascade="all,delete")
__mapper_args__ = {
'polymorphic_on':tipo,
'polymorphic_identity':'encaminhamentos',
'with_polymorphic':'*'
}
def __init__(self, historico_id):
self.historico_id = historico_id
        self.dtcriacao = datetime.datetime.today()
class Oficio(Encaminhamento):
__tablename__ = 'oficios'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
numero = db.Column(db.String(255), nullable=False)
assunto = db.Column(db.String(255), nullable=False)
obs = db.Column(db.String(255), nullable=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'oficios'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Telefonema(Encaminhamento):
__tablename__ = 'telefonemas'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
numero = db.Column(db.String(255), unique=False, nullable=False)
destinatario = db.Column(db.String(255), nullable=True, unique=False)
obs = db.Column(db.String(255), nullable=True, unique=False)
__mapper_args__ = {'polymorphic_identity':'telefonemas'}
class Reuniao(Encaminhamento):
__tablename__ = 'reunioes'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
pauta = db.Column(db.String(255), nullable=False, unique=False)
participantes = db.Column(db.String(4000), nullable=False, unique=False)
obs = db.Column(db.String(255), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'reunioes'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Email(Encaminhamento):
__tablename__ = 'emails'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
para = db.Column(db.String(255), nullable=False, unique=False)
de = db.Column(db.String(255), nullable=False, unique=False)
assunto = db.Column(db.String(255), nullable=False, unique=False)
texto = db.Column(db.String(4000), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'emails'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Generico(Encaminhamento):
__tablename__ = 'genericos'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
obs = db.Column(db.String(255), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'genericos'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Retorno(db.Model):
__tablename__ = 'retornos'
id = db.Column(db.Integer, Sequence('retornos_id_seq'), primary_key=True)
encaminhamento_id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'), nullable=False)
descricao = db.Column(db.String(255), nullable=False, unique=False)
dtretorno = db.Column(db.Date, unique=False)
dtcriacao = db.Column(db.DateTime, unique=False)
tiporetorno_id = db.Column(db.Integer, ForeignKey('tiporetornos.id'), nullable=False)
tipo = db.Column(db.String(80))
tiporetorno = relationship("TipoRetorno")
arquivo = db.Column(db.String(255), unique=False)
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
__mapper_args__ = {
'polymorphic_on':tipo,
'polymorphic_identity':'retornos',
'with_polymorphic':'*'
}
def __init__(self, encaminhamento_id):
self.encaminhamento_id = encaminhamento_id
        self.dtcriacao = datetime.datetime.today()
class RetornoGenerico(Retorno):
__tablename__ = 'retornogenerico'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
observacao = db.Column(db.String(255), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornogenerico'}
class RetornoPessoasassistidas(Retorno):
__tablename__ = 'retornopessoasassistidas'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
tipoassistencia = db.Column(db.String(255), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornopessoasassistidas'}
class RetornoInquerito(Retorno):
__tablename__ = 'retornoinquerito'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
ip = db.Column(db.String(100), unique=False)
situacao = db.Column(db.String(20), unique=False)
motivo = db.Column(db.String(80), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornoinquerito'}
class RetornoProcesso(Retorno):
__tablename__ = 'retornoprocesso'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
np = db.Column(db.String(100), unique=False)
situacao = db.Column(db.String(20), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornoprocesso'}
class RetornoBO(Retorno):
__tablename__ = 'retornobo'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
bo = db.Column(db.String(100), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornobo'}
class RetornoRCO(Retorno):
__tablename__ = 'retornorco'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
rco = db.Column(db.String(100), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornorco'}
class RetornoREDS(Retorno):
__tablename__ = 'retornoreds'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
reds = db.Column(db.String(100), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornoreds'}
class RetornoPoliticaPSR(Retorno):
__tablename__ = 'retornopoliticapsr'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
tipopolitica = db.Column(db.String(255), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornopoliticapsr'}
class TipoRetorno(db.Model):
__tablename__ = 'tiporetornos'
id = db.Column(db.Integer, Sequence('tiporetorno_id_seq'), primary_key=True)
nome = db.Column(db.String(255), unique=False, nullable=False)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, nome, tipo, legenda):
self.nome = nome
self.tipo = tipo
self.legenda = legenda
class Anexo(db.Model):
__tablename__ = 'anexos'
id = db.Column(db.Integer, Sequence('anexos_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
descricaoanexo = db.Column(db.String(255), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
def __init__(self, denuncia_id):
self.denuncia_id = denuncia_id
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Cidade(db.Model):
__tablename__ = 'cidades'
id = db.Column(db.Integer, primary_key=True)
estado = db.Column(db.String(2), nullable=False)
cidade = db.Column(db.String(200), nullable=False)
def __init__(self, estado, cidade):
self.estado = estado
self.cidade = cidade
class TipoLocal(db.Model):
__tablename__ = 'tipolocais'
id = db.Column(db.Integer, Sequence('tipolocais_id_seq'), primary_key=True)
local = db.Column(db.String(100), unique=False, nullable=False)
def __init__(self, local):
self.local = local
class Usuario(db.Model):
__tablename__ = 'usuarios'
id = db.Column(db.Integer, Sequence('usuarios_id_seq'), primary_key=True)
login = db.Column(db.String(16), nullable=False, unique=True, index=True)
nome = db.Column(db.String(80), nullable=False, unique=False)
ddd = db.Column(db.String(2), nullable=False, unique=False)
telefone = db.Column(db.String(10), nullable=False, unique=False)
senhahash = db.Column(db.String(80), nullable=False, unique=False)
email = db.Column(db.String(200), nullable=False, unique=True, index=True)
dtregistro = db.Column(db.DateTime, nullable=False, unique=False)
dtultlogin = db.Column(db.DateTime, nullable=True, unique=False)
permissoes = relationship("PermissaoUsuario", backref="usuario")
perfis = relationship("PerfilUsuario", backref="usuario")
__table_args__ = (UniqueConstraint('login', 'email', name='uix_usuario'),)
def __init__(self, login, nome, ddd, telefone, senhahash, email):
self.login = login
self.nome = nome
self.ddd = ddd
self.telefone = telefone
self.senhahash = senhahash
self.email = email
self.dtregistro = datetime.datetime.today()
    def checa_permissao(self, permissao):
        # check direct permissions first, then permissions granted via profiles
        for p in self.permissoes:
            if permissao == p.permissao.nome:
                return True
        for pf in self.perfis:
            for pp in pf.perfil.permissoesperfis:
                if permissao == pp.permissao.nome:
                    return True
        return False
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
return '<Usuário %r>' % (self.login)
class Permissao(db.Model):
__tablename__ = 'permissoes'
id = db.Column(db.Integer, Sequence('permissoes_id_seq'), primary_key=True)
nome = db.Column(db.String(80), nullable=False, unique=True)
descricao = db.Column(db.String(255), nullable=False, unique=False)
permissoesusuario = relationship("PermissaoUsuario", backref="permissao")
permissoesperfis = relationship("PermissaoPerfil", backref="permissao")
class PermissaoUsuario(db.Model):
__tablename__ = 'permissoesusuarios'
id = db.Column(db.Integer, Sequence('permissoesusuarios_id_seq'), primary_key=True)
usuario_id = db.Column(db.Integer, ForeignKey('usuarios.id'), nullable=False)
permissao_id = db.Column(db.Integer, ForeignKey('permissoes.id'), nullable=False)
tipo = db.Column(db.Integer)
__table_args__ = (UniqueConstraint('usuario_id', 'permissao_id', name='uix_permmissao_usuario'),)
class Perfil(db.Model):
__tablename__ = 'perfis'
id = db.Column(db.Integer, Sequence('perfis_id_seq'), primary_key=True)
nome = db.Column(db.String(80), nullable=False, unique=True)
descricao = db.Column(db.String(255), nullable=False, unique=False)
permissoesperfis = relationship("PermissaoPerfil", backref="perfil")
perfisusuarios = relationship("PerfilUsuario", backref="perfil")
class PermissaoPerfil(db.Model):
__tablename__ = 'permissoesperfis'
id = db.Column(db.Integer, Sequence('permissoesperfis_id_seq'), primary_key=True)
permissao_id = db.Column(db.Integer, ForeignKey('permissoes.id'), nullable=False)
perfil_id = db.Column(db.Integer, ForeignKey('perfis.id'), nullable=False)
tipo = db.Column(db.Integer)
__table_args__ = (UniqueConstraint('permissao_id', 'perfil_id', name='uix_permissaoperfil'),)
class PerfilUsuario(db.Model):
__tablename__ = 'perfisusuarios'
id = db.Column(db.Integer, Sequence('permissoesusuarios_id_seq'), primary_key=True)
perfil_id = db.Column(db.Integer, ForeignKey('perfis.id'), unique=False)
usuario_id = db.Column(db.Integer, ForeignKey('usuarios.id'), unique=False)
tipo = db.Column(db.Integer)
__table_args__ = (UniqueConstraint('perfil_id', 'usuario_id', name='uix_perfisusuario'),)
|
apache-2.0
| -2,305,448,814,111,855,600
| 38.36478
| 124
| 0.628321
| false
| 3.194375
| false
| false
| false
|
yanikou19/pymatgen
|
pymatgen/io/abinitio/eos.py
|
1
|
10838
|
# coding: utf-8
"""Tools to compute equations of states with different models."""
from __future__ import unicode_literals, division, print_function
import collections
import numpy as np
import pymatgen.core.units as units
from pymatgen.core.units import FloatWithUnit
import logging
logger = logging.getLogger(__file__)
__all__ = [
"EOS",
]
def quadratic(V, a, b, c):
"""Quadratic fit"""
return a*V**2 + b*V + c
def murnaghan(V, E0, B0, B1, V0):
"""From PRB 28,5480 (1983)"""
E = E0 + B0*V/B1*(((V0/V)**B1)/(B1-1)+1) - V0*B0/(B1-1)
return E
def birch(V, E0, B0, B1, V0):
"""
    From Intermetallic Compounds: Principles and Practice, Vol. I: Principles,
    Chapter 9, pages 195-210, by M. Mehl, B. Klein, and D. Papaconstantopoulos
    (paper downloaded from the Web); case where n=0.
"""
E = (E0
+ 9.0/8.0*B0*V0*((V0/V)**(2.0/3.0) - 1.0)**2
+ 9.0/16.0*B0*V0*(B1-4.)*((V0/V)**(2.0/3.0) - 1.0)**3)
return E
def birch_murnaghan(V, E0, B0, B1, V0):
"""BirchMurnaghan equation from PRB 70, 224107"""
eta = (V/V0)**(1./3.)
E = E0 + 9.*B0*V0/16.*(eta**2-1)**2*(6 + B1*(eta**2-1.) - 4.*eta**2)
return E
def pourier_tarantola(V, E0, B0, B1, V0):
"""Pourier-Tarantola equation from PRB 70, 224107"""
eta = (V/V0)**(1./3.)
squiggle = -3.*np.log(eta)
E = E0 + B0*V0*squiggle**2/6.*(3. + squiggle*(B1 - 2))
return E
def vinet(V, E0, B0, B1, V0):
    """Vinet equation from PRB 70, 224107"""
eta = (V/V0)**(1./3.)
E = (E0 + 2.*B0*V0/(B1-1.)**2
* (2. - (5. +3.*B1*(eta-1.)-3.*eta)*np.exp(-3.*(B1-1.)*(eta-1.)/2.)))
return E
def deltafactor_polyfit(volumes, energies):
"""
This is the routine used to compute V0, B0, B1 in the deltafactor code.
Taken from deltafactor/eosfit.py
"""
fitdata = np.polyfit(volumes**(-2./3.), energies, 3, full=True)
ssr = fitdata[1]
sst = np.sum((energies - np.average(energies))**2.)
residuals0 = ssr/sst
deriv0 = np.poly1d(fitdata[0])
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
v0 = 0
x = 0
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x**(-3./2.)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4./9. * x**5. * deriv2(x)
derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. * x**(15./2.) * deriv3(x))
b0 = derivV2 / x**(3./2.)
b1 = -1 - x**(-3./2.) * derivV3 / derivV2
#print('deltafactor polyfit:')
#print('e0, b0, b1, v0')
#print(fitdata[0], b0, b1, v0)
n = collections.namedtuple("DeltaFitResults", "v0 b0 b1 poly1d")
return n(v0, b0, b1, fitdata[0])
class EOSError(Exception):
"""Exceptions raised by EOS."""
class EOS(object):
"""
Fit equation of state for bulk systems.
The following equation is used::
murnaghan
PRB 28, 5480 (1983)
birch
        Intermetallic Compounds: Principles and Practice, Vol. I: Principles, pages 195-210
birchmurnaghan
PRB 70, 224107
pouriertarantola
PRB 70, 224107
vinet
PRB 70, 224107
Use::
eos = EOS(eos_name='murnaghan')
fit = eos.fit(volumes, energies)
print(fit)
fit.plot()
"""
Error = EOSError
#: Models available.
MODELS = {
"quadratic": quadratic,
"murnaghan": murnaghan,
"birch": birch,
"birch_murnaghan": birch_murnaghan,
"pourier_tarantola": pourier_tarantola,
"vinet": vinet,
"deltafactor": deltafactor_polyfit,
}
def __init__(self, eos_name='murnaghan'):
self._eos_name = eos_name
self._func = self.MODELS[eos_name]
@staticmethod
def Quadratic():
return EOS(eos_name="quadratic")
@staticmethod
def Murnaghan():
return EOS(eos_name='murnaghan')
@staticmethod
def Birch():
return EOS(eos_name='birch')
@staticmethod
def Birch_Murnaghan():
return EOS(eos_name='birch_murnaghan')
@staticmethod
def Pourier_Tarantola():
return EOS(eos_name='pourier_tarantola')
@staticmethod
def Vinet():
return EOS(eos_name='vinet')
@staticmethod
def DeltaFactor():
return EOS(eos_name='deltafactor')
def fit(self, volumes, energies, vol_unit="ang^3", ene_unit="eV"):
"""
        Fit energies [eV] as a function of volumes [Angstrom**3].
        Returns an `EOS_Fit` instance that gives access to the optimal volume,
        the minimum energy, and the bulk modulus.
        Notice that the unit for the bulk modulus is eV/Angstrom^3.
"""
# Convert volumes to Ang**3 and energies to eV (if needed).
volumes = units.ArrayWithUnit(volumes, vol_unit).to("ang^3")
energies = units.EnergyArray(energies, ene_unit).to("eV")
return EOS_Fit(volumes, energies, self._func, self._eos_name)
class EOS_Fit(object):
"""Performs the fit of E(V) and provides method to access the results of the fit."""
def __init__(self, volumes, energies, func, eos_name):
"""
args:
energies: list of energies in eV
volumes: list of volumes in Angstrom^3
func: callable function
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
assert len(self.volumes) == len(self.energies)
self.func = func
self.eos_name = eos_name
self.exceptions = []
self.ierr = 0
if eos_name == "deltafactor":
try:
results = deltafactor_polyfit(self.volumes, self.energies)
self.e0 = None
self.v0 = results.v0
self.b0 = results.b0
self.b1 = results.b1
self.p0 = results.poly1d
self.eos_params = results.poly1d
except EOSError as exc:
self.ierr = 1
logger.critical(str(exc))
self.exceptions.append(exc)
raise
elif eos_name == "quadratic":
# Quadratic fit
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.v0 = v0 = -b/(2*a)
self.e0 = a*v0**2 + b*v0 + c
self.b0 = 2*a*v0
self.b1 = np.inf
self.p0 = [a, b, c]
self.eos_params = [a, b, c]
vmin, vmax = self.volumes.min(), self.volumes.max()
            if not vmin < v0 < vmax:
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes.\n')
logger.critical(str(exc))
self.exceptions.append(exc)
else:
# Objective function that will be minimized
def objective(pars, x, y):
return y - self.func(x, *pars)
# Quadratic fit to get an initial guess for the parameters
a, b, c = np.polyfit(self.volumes, self.energies, 2)
v0 = -b/(2*a)
e0 = a*v0**2 + b*v0 + c
b0 = 2*a*v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = self.volumes.min(), self.volumes.max()
            if not vmin < v0 < vmax:
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes.\n')
logger.critical(str(exc))
self.exceptions.append(exc)
# Initial guesses for the parameters
self.p0 = [e0, b0, b1, v0]
from scipy.optimize import leastsq
self.eos_params, self.ierr = leastsq(objective, self.p0, args=(self.volumes, self.energies))
if self.ierr not in [1, 2, 3, 4]:
exc = EOSError("Optimal parameters not found")
logger.critical(str(exc))
self.exceptions.append(exc)
raise exc
self.e0 = self.eos_params[0]
self.b0 = self.eos_params[1]
self.b1 = self.eos_params[2]
self.v0 = self.eos_params[3]
print('EOS_fit:', func)
print('e0, b0, b1, v0')
print(self.eos_params)
def __str__(self):
lines = []
app = lines.append
app("Equation of State: %s" % self.name)
app("Minimum volume = %1.2f Ang^3" % self.v0)
app("Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa, b1 = %1.2f" % (self.b0, self.b0_GPa, self.b1))
return "\n".join(lines)
@property
def name(self):
return self.func.__name__
@property
def b0_GPa(self):
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
def plot(self, ax=None, **kwargs):
"""
Uses Matplotlib to plot the energy curve.
Args:
ax:
Axis object. If ax is None, a new figure is produced.
show:
True to show the figure
savefig:
'abc.png' or 'abc.eps' to save the figure to a file.
Returns:
Matplotlib figure.
"""
import matplotlib.pyplot as plt
vmin, vmax = self.volumes.min(), self.volumes.max()
emin, emax = self.energies.min(), self.energies.max()
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
emin, emax = (emin - 0.01 * abs(emin), emax + 0.01 * abs(emax))
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
else:
fig = plt.gcf()
lines, legends = [], []
# Plot input data.
line, = ax.plot(self.volumes, self.energies, "ro")
lines.append(line)
legends.append("Input Data")
# Plot EOS.
vfit = np.linspace(vmin, vmax, 100)
if self.eos_name == "deltafactor":
xx = vfit**(-2./3.)
line, = ax.plot(vfit, np.polyval(self.eos_params, xx), "b-")
else:
line, = ax.plot(vfit, self.func(vfit, *self.eos_params), "b-")
lines.append(line)
legends.append(self.name + ' fit')
# Set xticks and labels.
ax.grid(True)
ax.set_xlabel("Volume $\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(lines, legends, loc='upper right', shadow=True)
# Add text with fit parameters.
text = []; app = text.append
app("Min Volume = %1.2f $\AA^3$" % self.v0)
app("Bulk modulus = %1.2f eV/$\AA^3$ = %1.2f GPa" % (self.b0, self.b0_GPa))
app("B1 = %1.2f" % self.b1)
fig.text(0.4, 0.5, "\n".join(text), transform=ax.transAxes)
if kwargs.pop("show", True):
plt.show()
savefig = kwargs.pop("savefig", None)
if savefig is not None:
fig.savefig(savefig)
return fig
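# --- Usage sketch (not part of the original module) ---
# A minimal, self-contained exercise of the classes above. The E(V) points are
# synthetic (a parabola with its minimum at V0 = 20 Ang^3), chosen only to keep
# the example runnable; real data would come from total-energy calculations.
if __name__ == "__main__":
    test_volumes = np.linspace(18.0, 22.0, 9)
    test_energies = 0.05 * (test_volumes - 20.0) ** 2 - 10.0
    eos_fit = EOS.Quadratic().fit(test_volumes, test_energies)
    print(eos_fit)        # reports the fitted minimum volume and bulk modulus
    # eos_fit.plot()      # uncomment to visualise the fit with matplotlib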
|
mit
| 272,119,726,745,626,140
| 27.150649
| 104
| 0.537738
| false
| 3.116159
| false
| false
| false
|
salspaugh/queryutils
|
queryutils/csvparser.py
|
1
|
6137
|
import csv
import dateutil.parser
import os
import splparser.parser
from user import *
from query import *
from logging import getLogger as get_logger
from os import path
from splparser.exceptions import SPLSyntaxError, TerminatingSPLSyntaxError
BYTES_IN_MB = 1048576
LIMIT = 2000*BYTES_IN_MB
logger = get_logger("queryutils")
def get_users_from_file(filename, users):
"""Populate the users dictionary with users and their queris from the given file.
:param filename: The .csv file containing user queries
:type filename: str
:param users: The user dict into which to place the users
:type users: dict
:rtype: None
"""
logger.debug("Reading from file:" + filename)
first = True
with open(filename) as datafile:
reader = csv.DictReader(datafile)
for row in reader:
logger.debug("Attempting to read row.")
# Get basic user information.
username = row.get('user', None)
if username is not None:
username = unicode(username.decode("utf-8"))
case = row.get('case_id', None)
if case is not None:
case = unicode(case.decode("utf-8"))
# Check if we've seen this user before.
user = None
userhash = None
if username is not None and case is not None:
userhash = ".".join([username, case])
user = users.get(userhash, None)
elif username is not None and case is None:
userhash = username
user = users.get(userhash, None)
else:
userhash = ""
user = users.get(userhash, None)
if user is None:
user = User(username)
users[userhash] = user
user.case_id = case
# Get basic query information.
timestamp = row.get('_time', None)
if timestamp is not None:
timestamp = float(dateutil.parser.parse(timestamp).strftime('%s.%f'))
querystring = row.get('search', None)
if querystring is not None:
querystring = unicode(querystring.decode("utf-8")).strip()
# Tie the query and the user together.
query = Query(querystring, timestamp)
user.queries.append(query)
query.user = user
# Get additional query information and add it to the query.
runtime = row.get('runtime', None)
if runtime is None:
runtime = row.get('total_run_time', None)
if runtime is not None:
try:
runtime = float(runtime.decode("utf-8"))
except:
runtime = None
query.execution_time = runtime
search_et = row.get('search_et', None)
if search_et is not None:
try:
search_et = float(search_et.decode("utf-8"))
except:
search_et = None
query.earliest_event = search_et
search_lt = row.get('search_lt', None)
if search_lt is not None:
try:
search_lt = float(search_lt.decode("utf-8"))
except:
search_lt = None
query.latest_event = search_lt
range = row.get('range', None)
if range is not None:
try:
range = float(range.decode("utf-8"))
except:
range = None
query.range = range
is_realtime = row.get('is_realtime', None)
if is_realtime is not None and is_realtime == "false":
is_realtime = False
if is_realtime is not None and is_realtime == "true":
is_realtime = True
query.is_realtime = is_realtime
searchtype = row.get('searchtype', None)
if searchtype is None:
searchtype = row.get('search_type', None)
if searchtype is not None:
searchtype = unicode(searchtype.decode("utf-8"))
query.search_type = searchtype
if query.search_type == "adhoc":
query.is_interactive = True
splunk_id = row.get('search_id', None)
if splunk_id is not None:
splunk_id = unicode(splunk_id.decode("utf-8"))
query.splunk_search_id = splunk_id
savedsearch_name = row.get('savedsearch_name', None)
if savedsearch_name is not None:
savedsearch_name = unicode(savedsearch_name.decode("utf-8"))
query.saved_search_name = savedsearch_name
logger.debug("Successfully read query.")
def get_users_from_directory(directory, users, limit=LIMIT):
"""Populate the users dict with users from the .csv files.
:param directory: The path to the directory containing the .csv files
:type directory: str
:param users: The dict to contain the users read from the .csv files
:type users: dict
:param limit: The approximate number of bytes to read in (for testing)
:type limit: int
:rtype: None
"""
raw_data_files = get_csv_files(directory, limit=limit)
for f in raw_data_files:
get_users_from_file(f, users)
def get_csv_files(dir, limit=LIMIT):
"""Return the paths to all the .csv files in the given directory.
:param dir: The path to the given directory
:type dir: str
:param limit: The approximate number of bytes to read in (for testing)
:type limit: int
:rtype: list
"""
csv_files = []
bytes_added = 0.
for (dirpath, dirnames, filenames) in os.walk(dir):
for filename in filenames:
if filename[-4:] == '.csv':
full_filename = path.join(path.abspath(dir), filename)
csv_files.append(full_filename)
bytes_added += path.getsize(full_filename)
if bytes_added > limit:
return csv_files
return csv_files
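# --- Hedged usage sketch (not part of the original module) ---
# How the two entry points above might be combined: walk a directory of .csv
# query logs and report how many queries were parsed per user. The directory
# path below is a placeholder, not a path from the original project.
if __name__ == "__main__":
    users = {}
    get_users_from_directory("/path/to/csv/logs", users)
    for userhash, user in users.items():
        print "%s: %d queries" % (userhash, len(user.queries))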
|
bsd-3-clause
| -6,412,215,798,137,213,000
| 34.473988
| 85
| 0.556135
| false
| 4.309691
| false
| false
| false
|
Lilykos/inspire-next
|
setup.py
|
1
|
2682
|
# -*- coding: utf-8 -*-
#
## This file is part of INSPIRE.
## Copyright (C) 2012, 2013 CERN.
##
## INSPIRE is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
"""
INSPIRE overlay
----------------
INSPIRE overlay repository for Invenio.
"""
import os
from setuptools import setup, find_packages
packages = find_packages(exclude=['docs'])
# Load __version__, should not be done using import.
# http://python-packaging-user-guide.readthedocs.org/en/latest/tutorial.html
g = {}
with open(os.path.join('inspire', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='Inspire',
version=version,
url='https://github.com/inspirehep/inspire-next',
license='GPLv2',
author='CERN',
author_email='admin@inspirehep.net',
description=__doc__,
long_description=open('README.rst', 'rt').read(),
packages=packages,
namespace_packages=["inspire", "inspire.ext", ],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
"rt",
"HarvestingKit>=0.3",
"mixer==4.9.5",
"requests==2.3",
"raven==5.0.0",
"orcid",
"retrying"
],
extras_require={
'development': [
'Flask-DebugToolbar>=0.9',
'ipython',
'ipdb',
'kwalitee'
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GPLv2 License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
entry_points={
'invenio.config': [
"inspire = inspire.config"
]
},
test_suite='inspire.testsuite',
tests_require=[
'nose',
'Flask-Testing'
]
)
|
gpl-2.0
| 5,499,976,328,954,376,000
| 27.83871
| 78
| 0.62267
| false
| 3.709544
| false
| false
| false
|
mimischi/django-clock
|
config/settings/local.py
|
1
|
2439
|
# -*- coding: utf-8 -*-
import socket
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool("DJANGO_DEBUG", default=True)
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="CHANGEME!!!^e8je^d8+us-s9!j3ks@h2h1(*^kr$-jocui3wam6%i=+^mti9",
)
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# Database
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "db_app",
"USER": "db_user" if not env("TRAVIS_CI", default=False) else "postgres",
"PASSWORD": "db_pass",
"HOST": "db" if env("PYTHONBUFFERED", default=False) else "localhost",
"PORT": 5432,
}
}
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ("debug_toolbar.middleware.DebugToolbarMiddleware",)
INSTALLED_APPS += ("debug_toolbar",)
INTERNAL_IPS = ["127.0.0.1", "192.168.99.100", "192.168.99.101"]
# Fix django-debug-toolbar when running Django in a Docker container
if env("INSIDE_DOCKER", default=False):
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
"JQUERY_URL": "",
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("django_extensions", "rosetta")
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# Your local stuff: Below this line define 3rd party library settings
ALLOWED_HOSTS = ["*"]
|
mit
| 5,362,180,584,308,313,000
| 31.092105
| 84
| 0.507995
| false
| 3.859177
| false
| false
| false
|
libyal/libexe
|
tests/pyexe_test_support.py
|
1
|
3236
|
#!/usr/bin/env python
#
# Python-bindings support functions test script
#
# Copyright (C) 2011-2021, Joachim Metz <joachim.metz@gmail.com>
#
# Refer to AUTHORS for acknowledgements.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import sys
import unittest
import pyexe
class SupportFunctionsTests(unittest.TestCase):
"""Tests the support functions."""
def test_get_version(self):
"""Tests the get_version function."""
version = pyexe.get_version()
self.assertIsNotNone(version)
def test_check_file_signature(self):
"""Tests the check_file_signature function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
result = pyexe.check_file_signature(test_source)
self.assertTrue(result)
def test_check_file_signature_file_object(self):
"""Tests the check_file_signature_file_object function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
with open(test_source, "rb") as file_object:
result = pyexe.check_file_signature_file_object(file_object)
self.assertTrue(result)
def test_open(self):
"""Tests the open function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
exe_file = pyexe.open(test_source)
self.assertIsNotNone(exe_file)
exe_file.close()
with self.assertRaises(TypeError):
pyexe.open(None)
with self.assertRaises(ValueError):
pyexe.open(test_source, mode="w")
def test_open_file_object(self):
"""Tests the open_file_object function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
if not os.path.isfile(test_source):
raise unittest.SkipTest("source not a regular file")
with open(test_source, "rb") as file_object:
exe_file = pyexe.open_file_object(file_object)
self.assertIsNotNone(exe_file)
exe_file.close()
with self.assertRaises(TypeError):
pyexe.open_file_object(None)
with self.assertRaises(ValueError):
pyexe.open_file_object(file_object, mode="w")
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"source", nargs="?", action="store", metavar="PATH",
default=None, help="path of the source file.")
options, unknown_options = argument_parser.parse_known_args()
unknown_options.insert(0, sys.argv[0])
setattr(unittest, "source", options.source)
unittest.main(argv=unknown_options, verbosity=2)
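# --- Hedged usage note (not part of the original test script) ---
# The tests take the path of a PE file as the positional "source" argument,
# e.g. (the path below is a placeholder):
#
#     python pyexe_test_support.py /path/to/sample.exe
#
# When no source is supplied, the signature/open tests skip themselves via
# unittest.SkipTest, as shown above.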
|
lgpl-3.0
| -7,019,984,351,821,116,000
| 28.962963
| 77
| 0.704883
| false
| 3.838671
| true
| false
| false
|
relic7/prodimages
|
python/drafts/walk_scraps/walkdir_exiv2.py
|
1
|
2162
|
#!/usr/bin/env python
import os,sys
from PIL import Image
from PIL.ExifTags import TAGS  # TAGS is needed by get_exif() below to decode tag ids
def recursive_dirlist(rootdir):
walkedlist = []
for dirname, dirnames, filenames in os.walk(rootdir):
# print path to all subdirectories first.
#for subdirname in dirnames:
#print os.path.join(dirname, subdirname)
# print path to all filenames.
for filename in filenames:
file_path = os.path.abspath(os.path.join(dirname, filename))
if os.path.isfile(file_path):
walkedlist.append(file_path)
# Advanced usage:
# editing the 'dirnames' list will stop os.walk() from recursing into there.
if '.git' in dirnames:
# don't go into any .git directories.
dirnames.remove('.git')
walkedset = list(set(sorted(walkedlist)))
return walkedset
def get_exif(filepath):
ret = {}
i = Image.open(filepath)
info = i._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
#######
from PIL import Image
import pyexiv2
#Exif.Photo.DateTimeOriginal
#for k,v in mdata.iteritems():
# print k,v
rootdir = sys.argv[1]
walkedout = recursive_dirlist(rootdir)  # build the file list before iterating over it
for line in walkedout:
file_path = line
filename = file_path.split('/')[-1]
colorstyle = filename.split('_')[0]
alt = file_path.split('_')[-1]
alt = alt.rsplit('.jpg', 1)[0]  # drop the extension (str.strip would remove characters, not the suffix)
metadata = pyexiv2.ImageMetadata(file_path)
metadata.read()  # the metadata must be read before tags can be accessed
photodate = metadata['Exif.Photo.DateTimeOriginal'].value
print "{0},{1},{2},{3}".format(colorstyle, photodate, file_path, alt)
def resize_image(source_path, dest_path, size):
from PIL import *
import pyexiv2
# resize image
image = Image.open(source_path)
image.thumbnail(size, Image.ANTIALIAS)
image.save(dest_path, "JPEG")
# copy EXIF data
source_image = pyexiv2.Image(source_path)
source_image.readMetadata()
dest_image = pyexiv2.Image(dest_path)
dest_image.readMetadata()
source_image.copyMetadataTo(dest_image)
# set EXIF image size info to resized size
dest_image["Exif.Photo.PixelXDimension"] = image.size[0]
dest_image["Exif.Photo.PixelYDimension"] = image.size[1]
dest_image.writeMetadata()
|
mit
| 3,377,195,295,547,329,000
| 26.730769
| 84
| 0.637835
| false
| 3.420886
| false
| false
| false
|
netgroup/svef
|
computepsnr.py
|
1
|
1285
|
#!/usr/bin/env python
# take a psnr file and compute the average psnr on a specified range
import sys
if len(sys.argv) < 4:
print >> sys.stderr, """
Usage:
%s <beginning frame> <ending frame> <psnr file1> [<psnr file 2>]
""" % (sys.argv[0])
sys.exit(1)
beginningframe = int(sys.argv[1])
endingframe = int(sys.argv[2])
psnrfilename = sys.argv[3]
try:
psnrfilename2 = sys.argv[4]
except IndexError:
psnrfilename2 = None
class PsnrEntry:
frameno = -1
value = 0.0
def psnrFile2List(filename):
psnrs = []
psnrfile = open(filename)
try:
for line in psnrfile:
words = line.split()
p = PsnrEntry()
p.frameno = int(words[0])
p.value = float(words[1].replace(",","."))
psnrs.append(p)
except IndexError:
pass
psnrfile.close()
return psnrs
totpsnr = 0.0
psnrs = psnrFile2List(psnrfilename)
pvalues = [p.value for p in psnrs if beginningframe <= p.frameno < endingframe]
psnr1 = sum(pvalues)/len(pvalues)
print "PSNR 1: %f" % psnr1
totpsnr += psnr1
if psnrfilename2 != None:
psnrs2 = psnrFile2List(psnrfilename2)
pvalues = [p.value for p in psnrs2 if beginningframe <= p.frameno < endingframe]
psnr2 = sum(pvalues)/len(pvalues)
print "PSNR 2: %f" % psnr2
totpsnr += psnr2
print "Total PSNR: %f" % totpsnr
|
gpl-3.0
| -3,360,558,190,853,625,000
| 21.946429
| 83
| 0.661479
| false
| 2.495146
| false
| false
| false
|
BhallaLab/moose-thalamocortical
|
pymoose/tests/randnum/kstest.py
|
1
|
2706
|
#!/usr/bin/env python
#
# This is a simple implementation of KS-test.
from math import *
from numpy import *
# Values taken from Knuth, TAOCP II: 3.3.1, Table 2
test_table = {1: [0.01000, 0.0500, 0.2500, 0.5000, 0.7500, 0.9500, 0.9900],
2: [0.01400, 0.06749, 0.2929, 0.5176, 0.7071, 1.0980, 1.2728],
5: [0.02152, 0.09471, 0.3249, 0.5242, 0.7674, 1.1392, 1.4024],
10: [0.02912, 0.1147, 0.3297, 0.5426, 0.7845, 1.1658, 1.444],
20: [0.03807, 0.1298, 0.3461, 0.5547, 0.7975, 1.1839, 1.4698],
30: [0.04354, 0.1351, 0.3509, 0.5605, 0.8036, 1.1916, 1.4801]}
p_list = [1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0] # percentage points the table entries correspond to
def ks_distribution(xx, nn):
"""Calculate P(Knn+ <= xx). See Knuth TAOCP Vol II for details."""
if nn < 30:
print "!! Larger sample size is recommended."
return (1 - exp(-2.0*xx*xx)*(1-2.0*xx/(3.0*sqrt(1.0 * nn))))
def ks_test(rand_num_list, distr_fn):
"""Execute a ks test on the given list of random numbers and tests if they have the distribution defined by distr_fn.
parameters:
rand_num_list - list containing the random sequence to be tested.
distr_fn - a function that calculates the distribution function for this sequence. TODO: allow another sample list to check if they are from same distribution.
Note that, in theory, the KS test requires the distribution to be continuous."""
result = True
nn = len(rand_num_list)
inp_list = array(rand_num_list)
inp_list.sort()
distr_list = map(distr_fn, inp_list)
sample_distr = arange(nn+1) * 1.0/nn
k_plus = sqrt(nn) * max(sample_distr[1:] - distr_list)
k_minus = sqrt(nn) * max(distr_list - sample_distr[:nn])
p_k_plus = ks_distribution(k_plus, nn)
if p_k_plus < 0.05 or p_k_plus > 0.95:
print "ERROR: outside 5%-95% range. The P( K", nn, "+ <=", k_plus, ") is", p_k_plus
result = False
p_k_minus = ks_distribution(k_minus, nn)
if p_k_minus < 0.05 or p_k_minus > 0.95:
print "ERROR: outside 5%-95% range. The P( K", nn, "- <=", k_minus, ") is", p_k_minus
result = False
return result
def test_ks_distribution():
for key in test_table.keys():
values = test_table[key]
for ii in range(len(p_list)):
print "... Testing n =", key,
value = ks_distribution(values[ii], key)
print ", expected =", p_list[ii]/100.0, ", calculated =", value
if (fabs( value - p_list[ii]/100.0) <= 0.005):
print "... OK"
else:
print "FAILED"
if __name__ == "__main__":
test_ks_distribution()
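# --- Hedged usage sketch (not part of the original file) ---
# Checking a sample of uniform(0, 1) random numbers against the uniform CDF,
# which is simply F(x) = x; this exercises ks_test() defined above and returns
# True when both K+ and K- fall inside the 5%-95% band.
def example_uniform_ks_test(n=1000):
    from random import random
    sample = [random() for _ in range(n)]
    return ks_test(sample, lambda x: x)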
|
lgpl-2.1
| 144,061,998,017,744,800
| 41.28125
| 160
| 0.585366
| false
| 2.833508
| true
| false
| false
|
michaelaye/pyciss
|
pyciss/solitons.py
|
1
|
1916
|
from datetime import datetime as dt
import pandas as pd
import pkg_resources as pr
from astropy import units as u
from numpy import poly1d
from . import io
from .ringcube import RingCube
def get_year_since_resonance(ringcube):
"Calculate the fraction of the year since moon swap."
t0 = dt(2006, 1, 21)
td = ringcube.imagetime - t0
return td.days / 365.25
def create_polynoms():
"""Create and return poly1d objects.
Uses the parameters from Morgan to create poly1d objects for
calculations.
"""
fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv')
res_df = pd.read_csv(fname)
polys = {}
for resorder, row in zip('65 54 43 21'.split(),
range(4)):
p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']])
polys['janus ' + ':'.join(resorder)] = p
return polys
def check_for_soliton(img_id):
"""Workhorse function.
Creates the prediction polynomials and calculates radius constraints
from attributes of the `ringcube` object.
Parameters
----------
ringcube : pyciss.ringcube.RingCube
A container class for a ring-projected ISS image file.
Returns
-------
dict
Dictionary with all solitons found. It is a dict because more than
one soliton can appear in a single image.
"""
pm = io.PathManager(img_id)
try:
ringcube = RingCube(pm.cubepath)
except FileNotFoundError:
ringcube = RingCube(pm.undestriped)
polys = create_polynoms()
minrad = ringcube.minrad.to(u.km)
maxrad = ringcube.maxrad.to(u.km)
delta_years = get_year_since_resonance(ringcube)
soliton_radii = {}
for k, p in polys.items():
current_r = p(delta_years) * u.km
if minrad < current_r < maxrad:
soliton_radii[k] = current_r
return soliton_radii if soliton_radii else None
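# --- Hedged usage sketch (not part of the original module) ---
# check_for_soliton() maps an ISS image id to a dict of resonance label ->
# predicted soliton radius, or None when no prediction falls inside the image.
# The image id below is a placeholder, and the call assumes the corresponding
# cube can be found locally through pyciss.io.PathManager:
#
#     radii = check_for_soliton("N1234567890_1")
#     if radii:
#         for resonance, radius in radii.items():
#             print(resonance, radius)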
|
isc
| -6,622,842,903,339,667,000
| 28.030303
| 89
| 0.640919
| false
| 3.385159
| false
| false
| false
|
euanlau/django-umessages
|
umessages/fields.py
|
1
|
2111
|
from django import forms
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from umessages.utils import get_user_model
class CommaSeparatedUserInput(widgets.Input):
input_type = 'text'
def render(self, name, value, attrs=None):
if value is None:
value = ''
elif isinstance(value, (list, tuple)):
value = (', '.join([user.username for user in value]))
return super(CommaSeparatedUserInput, self).render(name, value, attrs)
class CommaSeparatedUserField(forms.Field):
"""
A :class:`CharField` that consists of comma-separated usernames.
:param recipient_filter:
Optional function which receives as :class:`User` as parameter. The
function should return ``True`` if the user is allowed or ``False`` if
the user is not allowed.
:return:
A list of :class:`User`.
"""
widget = CommaSeparatedUserInput
def __init__(self, *args, **kwargs):
recipient_filter = kwargs.pop('recipient_filter', None)
self._recipient_filter = recipient_filter
super(CommaSeparatedUserField, self).__init__(*args, **kwargs)
def clean(self, value):
super(CommaSeparatedUserField, self).clean(value)
names = set(value.split(','))
names_set = set([name.strip() for name in names])
users = list(get_user_model().objects.filter(username__in=names_set))
# Check for unknown names.
unknown_names = names_set ^ set([user.username for user in users])
recipient_filter = self._recipient_filter
invalid_users = []
if recipient_filter is not None:
for r in users:
if recipient_filter(r) is False:
users.remove(r)
invalid_users.append(r.username)
if unknown_names or invalid_users:
humanized_usernames = ', '.join(list(unknown_names) + invalid_users)
raise forms.ValidationError(_("The following usernames are incorrect: %(users)s.") % {'users': humanized_usernames})
return users
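# --- Hedged usage sketch (not part of the original module) ---
# A minimal form built on the field above; form.cleaned_data["to"] then holds a
# list of User instances. The recipient filter shown (drop inactive accounts)
# is an assumption for illustration, not part of umessages itself.
class ExampleComposeForm(forms.Form):
    to = CommaSeparatedUserField(label=_("To"),
                                 recipient_filter=lambda u: u.is_active)
    body = forms.CharField(label=_("Message"), widget=forms.Textarea)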
|
bsd-3-clause
| -4,658,207,798,081,575,000
| 34.779661
| 128
| 0.632875
| false
| 4.196819
| false
| false
| false
|
liavkoren/djangoDev
|
django/db/models/base.py
|
1
|
60943
|
from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
import warnings
from django.apps import apps
from django.apps.config import MODELS_MODULE_NAME
from django.conf import settings
from django.core import checks
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.db import (router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.deletion import Collector
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ForeignObjectRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models import signals
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase) and
not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
# If the model is imported before the configuration for its
# application is created (#21719), or isn't in an installed
# application (#21680), use the legacy logic to figure out the
# app_label by looking one level up from the package or module
# named 'models'. If no such package or module exists, fall
# back to looking one level up from the module this model is
# defined in.
# For 'django.contrib.sites.models', this would be 'sites'.
# For 'geo.models.places' this would be 'geo'.
msg = (
"Model class %s.%s doesn't declare an explicit app_label "
"and either isn't in an application in INSTALLED_APPS or "
"else was imported before its application was loaded. " %
(module, name))
if abstract:
msg += "Its app_label will be set to None in Django 1.9."
else:
msg += "This will no longer be supported in Django 1.9."
warnings.warn(msg, RemovedInDjango19Warning, stacklevel=2)
model_module = sys.modules[new_class.__module__]
package_components = model_module.__name__.split('.')
package_components.reverse() # find the last occurrence of 'models'
try:
app_label_index = package_components.index(MODELS_MODULE_NAME) + 1
except ValueError:
app_label_index = 1
kwargs = {"app_label": package_components[app_label_index]}
else:
kwargs = {"app_label": app_config.label}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = (
new_class._meta.local_fields +
new_class._meta.local_many_to_many +
new_class._meta.virtual_fields
)
field_names = set(f.name for f in new_fields)
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
parent_links[field.rel.to] = field
# Do the appropriate setup for any model parents.
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in parent_links:
field = parent_links[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' % (field.name, name, base.__name__)
)
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers: # NOQA (redefinition of _)
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.attname for f in opts.fields))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
ensure_default_manager(cls)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(self._meta.concrete_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(self._meta.fields)
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
(isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
or field.column is None)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
if not self._deferred:
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id, [], simple_class_factory), data
defers = []
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
model = self._meta.proxy_for_model
class_id = model._meta.app_label, model._meta.object_name
return (model_unpickle, (class_id, defers, deferred_class_factory), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and self._deferred and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
deferred_fields = [
f.attname for f in self._meta.fields
if (f.attname not in self.__dict__ and
isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
]
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None
and getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = cls._base_manager.using(using).filter(
**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if not isinstance(f, AutoField)]
update_pk = bool(meta.has_auto_field and not pk_set)
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
filtered._update(values)
return True
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
# case if that makes it's way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
hint=None,
obj=None,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
("'%s' references '%s.%s', which has not been installed, or is abstract.") % (
cls._meta.swappable, app_label, model_name
),
hint=None,
obj=None,
id='models.E002',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
managers = cls._meta.concrete_managers + cls._meta.abstract_managers
for (_, _, manager) in managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.rel.to, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.rel.through, ModelBase))
for f in fields:
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
("The model has two many-to-many relations through "
"the intermediate model '%s.%s'.") % (
f.rel.through._meta.app_label,
f.rel.through._meta.object_name
),
hint=None,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields
if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
("'id' can only be used as a field name if the field also "
"sets 'primary_key=True'."),
hint=None,
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.parents:
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
("The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'.") % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
hint=None,
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents.
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = (f.name == "id" and
clash and clash.name == "id" and clash.model == cls)
if clash and not id_conflict:
errors.append(
checks.Error(
("The field '%s' clashes with the field '%s' "
"from model '%s'.") % (
f.name, clash.name, clash.model._meta
),
hint=None,
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
errors = []
for field_name in fields:
try:
field = cls._meta.get_field(field_name,
many_to_many=True)
except models.FieldDoesNotExist:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (option, field_name),
hint=None,
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.rel, models.ManyToManyRel):
errors.append(
checks.Error(
("'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'.") % (
option, field_name, option
),
hint=None,
obj=cls,
id='models.E013',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of lists and do all fields
exist? """
from django.db.models import FieldDoesNotExist
if not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
("'ordering' must be a tuple or list "
"(even if you want to order by only one field)."),
hint=None,
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
fields = (f for f in fields if
f != '_order' or not cls._meta.order_with_respect_to)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if '__' not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = (f for f in fields if f != 'pk')
for field_name in fields:
try:
cls._meta.get_field(field_name, many_to_many=False)
except FieldDoesNotExist:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % field_name,
hint=None,
obj=cls,
id='models.E015',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.model_name), func)(self, *args, **kwargs)
########
# MISC #
########
def simple_class_factory(model, attrs):
"""
Needed for dynamic classes.
"""
return model
def model_unpickle(model_id, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
|
bsd-3-clause
| 566,368,153,932,182,660
| 40.42964
| 166
| 0.545034
| false
| 4.456852
| false
| false
| false
|
kctan0805/vdpm
|
share/gdal/gdal-2.0.0/swig/python/samples/gdalinfo.py
|
1
|
23758
|
#!/usr/bin/env python
#/******************************************************************************
# * $Id: gdalinfo.py 28391 2015-01-30 19:57:31Z rouault $
# *
# * Project: GDAL Utilities
# * Purpose: Python port of Commandline application to list info about a file.
# * Author: Even Rouault, <even dot rouault at mines dash paris dot org>
# *
# * Port from gdalinfo.c whose author is Frank Warmerdam
# *
# ******************************************************************************
# * Copyright (c) 2010-2011, Even Rouault <even dot rouault at mines-paris dot org>
# * Copyright (c) 1998, Frank Warmerdam
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
import gdal
import osr
#/************************************************************************/
#/* Usage() */
#/************************************************************************/
def Usage():
print( "Usage: gdalinfo [--help-general] [-mm] [-stats] [-hist] [-nogcp] [-nomd]\n" + \
" [-norat] [-noct] [-nofl] [-checksum] [-mdd domain]* datasetname" )
return 1
def EQUAL(a, b):
return a.lower() == b.lower()
#/************************************************************************/
#/* main() */
#/************************************************************************/
def main( argv = None ):
bComputeMinMax = False
bShowGCPs = True
bShowMetadata = True
bShowRAT=True
bStats = False
bApproxStats = True
bShowColorTable = True
bComputeChecksum = False
bReportHistograms = False
pszFilename = None
papszExtraMDDomains = [ ]
pszProjection = None
hTransform = None
bShowFileList = True
#/* Must process GDAL_SKIP before GDALAllRegister(), but we can't call */
#/* GDALGeneralCmdLineProcessor before it needs the drivers to be registered */
#/* for the --format or --formats options */
#for( i = 1; i < argc; i++ )
#{
# if EQUAL(argv[i],"--config") and i + 2 < argc and EQUAL(argv[i + 1], "GDAL_SKIP"):
# {
# CPLSetConfigOption( argv[i+1], argv[i+2] );
#
# i += 2;
# }
#}
#
#GDALAllRegister();
if argv is None:
argv = sys.argv
argv = gdal.GeneralCmdLineProcessor( argv )
if argv is None:
return 1
nArgc = len(argv)
#/* -------------------------------------------------------------------- */
#/* Parse arguments. */
#/* -------------------------------------------------------------------- */
i = 1
while i < nArgc:
if EQUAL(argv[i], "--utility_version"):
print("%s is running against GDAL %s" %
(argv[0], gdal.VersionInfo("RELEASE_NAME")))
return 0
elif EQUAL(argv[i], "-mm"):
bComputeMinMax = True
elif EQUAL(argv[i], "-hist"):
bReportHistograms = True
elif EQUAL(argv[i], "-stats"):
bStats = True
bApproxStats = False
elif EQUAL(argv[i], "-approx_stats"):
bStats = True
bApproxStats = True
elif EQUAL(argv[i], "-checksum"):
bComputeChecksum = True
elif EQUAL(argv[i], "-nogcp"):
bShowGCPs = False
elif EQUAL(argv[i], "-nomd"):
bShowMetadata = False
elif EQUAL(argv[i], "-norat"):
bShowRAT = False
elif EQUAL(argv[i], "-noct"):
bShowColorTable = False
elif EQUAL(argv[i], "-mdd") and i < nArgc-1:
i = i + 1
papszExtraMDDomains.append( argv[i] )
elif EQUAL(argv[i], "-nofl"):
bShowFileList = False
elif argv[i][0] == '-':
return Usage()
elif pszFilename is None:
pszFilename = argv[i]
else:
return Usage()
i = i + 1
if pszFilename is None:
return Usage()
#/* -------------------------------------------------------------------- */
#/* Open dataset. */
#/* -------------------------------------------------------------------- */
hDataset = gdal.Open( pszFilename, gdal.GA_ReadOnly )
if hDataset is None:
print("gdalinfo failed - unable to open '%s'." % pszFilename )
return 1
#/* -------------------------------------------------------------------- */
#/* Report general info. */
#/* -------------------------------------------------------------------- */
hDriver = hDataset.GetDriver();
print( "Driver: %s/%s" % ( \
hDriver.ShortName, \
hDriver.LongName ))
papszFileList = hDataset.GetFileList();
if papszFileList is None or len(papszFileList) == 0:
print( "Files: none associated" )
else:
print( "Files: %s" % papszFileList[0] )
if bShowFileList:
for i in range(1, len(papszFileList)):
print( " %s" % papszFileList[i] )
print( "Size is %d, %d" % (hDataset.RasterXSize, hDataset.RasterYSize))
#/* -------------------------------------------------------------------- */
#/* Report projection. */
#/* -------------------------------------------------------------------- */
pszProjection = hDataset.GetProjectionRef()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
print( "Coordinate System is `%s'" % pszProjection )
#/* -------------------------------------------------------------------- */
#/* Report Geotransform. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
if adfGeoTransform[2] == 0.0 and adfGeoTransform[4] == 0.0:
print( "Origin = (%.15f,%.15f)" % ( \
adfGeoTransform[0], adfGeoTransform[3] ))
print( "Pixel Size = (%.15f,%.15f)" % ( \
adfGeoTransform[1], adfGeoTransform[5] ))
else:
print( "GeoTransform =\n" \
" %.16g, %.16g, %.16g\n" \
" %.16g, %.16g, %.16g" % ( \
adfGeoTransform[0], \
adfGeoTransform[1], \
adfGeoTransform[2], \
adfGeoTransform[3], \
adfGeoTransform[4], \
adfGeoTransform[5] ))
#/* -------------------------------------------------------------------- */
#/* Report GCPs. */
#/* -------------------------------------------------------------------- */
if bShowGCPs and hDataset.GetGCPCount() > 0:
pszProjection = hDataset.GetGCPProjection()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
print( "GCP Projection = \n%s" % pszPrettyWkt )
else:
print( "GCP Projection = %s" % \
pszProjection )
gcps = hDataset.GetGCPs()
i = 0
for gcp in gcps:
print( "GCP[%3d]: Id=%s, Info=%s\n" \
" (%.15g,%.15g) -> (%.15g,%.15g,%.15g)" % ( \
i, gcp.Id, gcp.Info, \
gcp.GCPPixel, gcp.GCPLine, \
gcp.GCPX, gcp.GCPY, gcp.GCPZ ))
i = i + 1
#/* -------------------------------------------------------------------- */
#/* Report metadata. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
for extra_domain in papszExtraMDDomains:
papszMetadata = hDataset.GetMetadata_List(extra_domain)
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata (%s):" % extra_domain)
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report "IMAGE_STRUCTURE" metadata. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report subdatasets. */
#/* -------------------------------------------------------------------- */
papszMetadata = hDataset.GetMetadata_List("SUBDATASETS")
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Subdatasets:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report geolocation. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("GEOLOCATION")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Geolocation:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report RPCs */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("RPC")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "RPC Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Setup projected to lat/long transform if appropriate. */
#/* -------------------------------------------------------------------- */
if pszProjection is not None and len(pszProjection) > 0:
hProj = osr.SpatialReference( pszProjection )
if hProj is not None:
hLatLong = hProj.CloneGeogCS()
if hLatLong is not None:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
hTransform = osr.CoordinateTransformation( hProj, hLatLong )
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find( 'Unable to load PROJ.4 library' ) != -1:
hTransform = None
#/* -------------------------------------------------------------------- */
#/* Report corners. */
#/* -------------------------------------------------------------------- */
print( "Corner Coordinates:" )
GDALInfoReportCorner( hDataset, hTransform, "Upper Left", \
0.0, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Left", \
0.0, hDataset.RasterYSize);
GDALInfoReportCorner( hDataset, hTransform, "Upper Right", \
hDataset.RasterXSize, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Right", \
hDataset.RasterXSize, \
hDataset.RasterYSize );
GDALInfoReportCorner( hDataset, hTransform, "Center", \
hDataset.RasterXSize/2.0, \
hDataset.RasterYSize/2.0 );
#/* ==================================================================== */
#/* Loop over bands. */
#/* ==================================================================== */
for iBand in range(hDataset.RasterCount):
hBand = hDataset.GetRasterBand(iBand+1 )
#if( bSample )
#{
# float afSample[10000];
# int nCount;
#
# nCount = GDALGetRandomRasterSample( hBand, 10000, afSample );
# print( "Got %d samples.\n", nCount );
#}
(nBlockXSize, nBlockYSize) = hBand.GetBlockSize()
print( "Band %d Block=%dx%d Type=%s, ColorInterp=%s" % ( iBand+1, \
nBlockXSize, nBlockYSize, \
gdal.GetDataTypeName(hBand.DataType), \
gdal.GetColorInterpretationName( \
hBand.GetRasterColorInterpretation()) ))
if hBand.GetDescription() is not None \
and len(hBand.GetDescription()) > 0 :
print( " Description = %s" % hBand.GetDescription() )
dfMin = hBand.GetMinimum()
dfMax = hBand.GetMaximum()
if dfMin is not None or dfMax is not None or bComputeMinMax:
line = " "
if dfMin is not None:
line = line + ("Min=%.3f " % dfMin)
if dfMax is not None:
line = line + ("Max=%.3f " % dfMax)
if bComputeMinMax:
gdal.ErrorReset()
adfCMinMax = hBand.ComputeRasterMinMax(False)
if gdal.GetLastErrorType() == gdal.CE_None:
line = line + ( " Computed Min/Max=%.3f,%.3f" % ( \
adfCMinMax[0], adfCMinMax[1] ))
print( line )
stats = hBand.GetStatistics( bApproxStats, bStats)
# Dirty hack to recognize if stats are valid. If invalid, the returned
# stddev is negative
if stats[3] >= 0.0:
print( " Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
if bReportHistograms:
hist = hBand.GetDefaultHistogram(force = True, callback = gdal.TermProgress)
if hist is not None:
dfMin = hist[0]
dfMax = hist[1]
nBucketCount = hist[2]
panHistogram = hist[3]
print( " %d buckets from %g to %g:" % ( \
nBucketCount, dfMin, dfMax ))
line = ' '
for bucket in panHistogram:
line = line + ("%d " % bucket)
print(line)
if bComputeChecksum:
print( " Checksum=%d" % hBand.Checksum())
dfNoData = hBand.GetNoDataValue()
if dfNoData is not None:
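            # NaN is the only value that compares unequal to itself, so this
            # detects a NaN NoData value without needing math.isnan().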
if dfNoData != dfNoData:
print( " NoData Value=nan" )
else:
print( " NoData Value=%.18g" % dfNoData )
if hBand.GetOverviewCount() > 0:
line = " Overviews: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0 :
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%dx%d" % (hOverview.XSize, hOverview.YSize))
pszResampling = \
hOverview.GetMetadataItem( "RESAMPLING", "" )
if pszResampling is not None \
and len(pszResampling) >= 12 \
and EQUAL(pszResampling[0:12],"AVERAGE_BIT2"):
line = line + "*"
else:
line = line + "(null)"
print(line)
if bComputeChecksum:
line = " Overviews checksum: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
print(line)
if hBand.HasArbitraryOverviews():
print( " Overviews: arbitrary" )
nMaskFlags = hBand.GetMaskFlags()
if (nMaskFlags & (gdal.GMF_NODATA|gdal.GMF_ALL_VALID)) == 0:
hMaskBand = hBand.GetMaskBand()
line = " Mask Flags: "
if (nMaskFlags & gdal.GMF_PER_DATASET) != 0:
line = line + "PER_DATASET "
if (nMaskFlags & gdal.GMF_ALPHA) != 0:
line = line + "ALPHA "
if (nMaskFlags & gdal.GMF_NODATA) != 0:
line = line + "NODATA "
if (nMaskFlags & gdal.GMF_ALL_VALID) != 0:
line = line + "ALL_VALID "
print(line)
if hMaskBand is not None and \
hMaskBand.GetOverviewCount() > 0:
line = " Overviews of mask band: "
for iOverview in range(hMaskBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hMaskBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
if len(hBand.GetUnitType()) > 0:
print( " Unit Type: %s" % hBand.GetUnitType())
papszCategories = hBand.GetRasterCategoryNames()
if papszCategories is not None:
print( " Categories:" );
i = 0
for category in papszCategories:
print( " %3d: %s" % (i, category) )
i = i + 1
if hBand.GetScale() != 1.0 or hBand.GetOffset() != 0.0:
print( " Offset: %.15g, Scale:%.15g" % \
( hBand.GetOffset(), hBand.GetScale()))
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
hTable = hBand.GetRasterColorTable()
if hBand.GetRasterColorInterpretation() == gdal.GCI_PaletteIndex \
and hTable is not None:
print( " Color Table (%s with %d entries)" % (\
gdal.GetPaletteInterpretationName( \
hTable.GetPaletteInterpretation( )), \
hTable.GetCount() ))
if bShowColorTable:
for i in range(hTable.GetCount()):
sEntry = hTable.GetColorEntry(i)
print( " %3d: %d,%d,%d,%d" % ( \
i, \
sEntry[0],\
sEntry[1],\
sEntry[2],\
sEntry[3] ))
if bShowRAT:
pass
#hRAT = hBand.GetDefaultRAT()
#GDALRATDumpReadable( hRAT, None );
return 0
#/************************************************************************/
#/* GDALInfoReportCorner() */
#/************************************************************************/
def GDALInfoReportCorner( hDataset, hTransform, corner_name, x, y ):
line = "%-11s " % corner_name
#/* -------------------------------------------------------------------- */
#/* Transform the point into georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
dfGeoX = adfGeoTransform[0] + adfGeoTransform[1] * x \
+ adfGeoTransform[2] * y
dfGeoY = adfGeoTransform[3] + adfGeoTransform[4] * x \
+ adfGeoTransform[5] * y
else:
line = line + ("(%7.1f,%7.1f)" % (x, y ))
print(line)
return False
#/* -------------------------------------------------------------------- */
#/* Report the georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
if abs(dfGeoX) < 181 and abs(dfGeoY) < 91:
line = line + ( "(%12.7f,%12.7f) " % (dfGeoX, dfGeoY ))
else:
line = line + ( "(%12.3f,%12.3f) " % (dfGeoX, dfGeoY ))
#/* -------------------------------------------------------------------- */
#/* Transform to latlong and report. */
#/* -------------------------------------------------------------------- */
if hTransform is not None:
pnt = hTransform.TransformPoint(dfGeoX, dfGeoY, 0)
if pnt is not None:
line = line + ( "(%s," % gdal.DecToDMS( pnt[0], "Long", 2 ) )
line = line + ( "%s)" % gdal.DecToDMS( pnt[1], "Lat", 2 ) )
print(line)
return True
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of GetGeoTransform(can_return_null)
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
sys.exit(1)
sys.exit(main(sys.argv))
|
lgpl-2.1
| -3,859,931,217,219,883,500
| 37.883797
| 95
| 0.440946
| false
| 4.30945
| false
| false
| false
|
DolphinDream/sverchok
|
nodes/object_nodes/getsetprop.py
|
1
|
9587
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import ast
import traceback
import bpy
from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, FloatVectorProperty
from bpy.types import bpy_prop_array
import mathutils
from mathutils import Matrix, Vector, Euler, Quaternion, Color
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.nodes_mixins.sv_animatable_nodes import SvAnimatableNode
from sverchok.data_structure import Matrix_generate, updateNode, node_id
def parse_to_path(p):
'''
    Parse an expression into a path that can be looked up easily.
Return an array of tuples with op type and value
ops are:
name - global name to use
attr - attribute to get using getattr(obj,attr)
    key - key for accessing via obj[key]
'''
if isinstance(p, ast.Attribute):
return parse_to_path(p.value)+[("attr", p.attr)]
elif isinstance(p, ast.Subscript):
if isinstance(p.slice.value, ast.Num):
return parse_to_path(p.value) + [("key", p.slice.value.n)]
elif isinstance(p.slice.value, ast.Str):
return parse_to_path(p.value) + [("key", p.slice.value.s)]
elif isinstance(p, ast.Name):
return [("name", p.id)]
else:
raise NameError
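# For example (illustrative): parsing "bpy.data.objects['Cube'].location" yields
# [("name", "bpy"), ("attr", "data"), ("attr", "objects"), ("key", "Cube"), ("attr", "location")]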
def get_object(path):
'''
    access the object specified by a path
generated by parse_to_path
will fail if path is invalid
'''
curr_object = globals()[path[0][1]]
for t, value in path[1:]:
if t == "attr":
curr_object = getattr(curr_object, value)
elif t == "key":
curr_object = curr_object[value]
return curr_object
def apply_alias(eval_str):
'''
apply standard aliases
    will raise an error if it isn't a bpy path
'''
if not eval_str.startswith("bpy."):
for alias, expanded in aliases.items():
if eval_str.startswith(alias):
eval_str = eval_str.replace(alias, expanded, 1)
break
if not eval_str.startswith("bpy."):
raise NameError
return eval_str
def wrap_output_data(tvar):
'''
create valid sverchok socket data from an object
from ek node
'''
if isinstance(tvar, (Vector, Color)):
data = [[tvar[:]]]
elif isinstance(tvar, Matrix):
data = [[r[:] for r in tvar[:]]]
elif isinstance(tvar, (Euler, Quaternion)):
tvar = tvar.to_matrix().to_4x4()
data = [[r[:] for r in tvar[:]]]
elif isinstance(tvar, list):
data = [tvar]
elif isinstance(tvar, (int, float)):
data = [[tvar]]
else:
data = tvar
return data
def assign_data(obj, data):
'''
assigns data to the object
'''
if isinstance(obj, (int, float)):
# doesn't work
obj = data[0][0]
elif isinstance(obj, (Vector, Color)):
obj[:] = data[0][0]
elif isinstance(obj, (Matrix, Euler, Quaternion)):
mats = Matrix_generate(data)
mat = mats[0]
if isinstance(obj, Euler):
eul = mat.to_euler(obj.order)
obj[:] = eul
elif isinstance(obj, Quaternion):
quat = mat.to_quaternion()
obj[:] = quat
else: #isinstance(obj, Matrix)
obj[:] = mat
else: # super optimistic guess
obj[:] = type(obj)(data[0][0])
aliases = {
"c": "bpy.context",
"C" : "bpy.context",
"scene": "bpy.context.scene",
"data": "bpy.data",
"D": "bpy.data",
"objs": "bpy.data.objects",
"mats": "bpy.data.materials",
"M": "bpy.data.materials",
"meshes": "bpy.data.meshes",
"texts": "bpy.data.texts"
}
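# Example (illustrative): with these aliases, "objs['Cube'].location" is expanded
# by apply_alias() to "bpy.data.objects['Cube'].location" before being parsed.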
types = {
int: "SvStringsSocket",
float: "SvStringsSocket",
str: "SvStringsSocket", # I WANT A PROPER TEXT SOCKET!!!
mathutils.Vector: "SvVerticesSocket",
mathutils.Color: "SvVerticesSocket",
mathutils.Matrix: "SvMatrixSocket",
mathutils.Euler: "SvMatrixSocket",
mathutils.Quaternion: "SvMatrixSocket"
}
def secondary_type_assesment(item):
"""
we can use this function to perform more granular attr/type identification
"""
if isinstance(item, bpy_prop_array):
if hasattr(item, "path_from_id") and item.path_from_id().endswith('color'):
return "SvColorSocket"
return None
class SvGetPropNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode):
''' Get property '''
bl_idname = 'SvGetPropNode'
bl_label = 'Get property'
bl_icon = 'FORCE_VORTEX'
sv_icon = 'SV_PROP_GET'
bad_prop: BoolProperty(default=False)
def verify_prop(self, context):
try:
obj = self.obj
except:
traceback.print_exc()
self.bad_prop = True
return
self.bad_prop = False
with self.sv_throttle_tree_update():
s_type = types.get(type(self.obj))
if not s_type:
s_type = secondary_type_assesment(self.obj)
outputs = self.outputs
if s_type and outputs:
outputs[0].replace_socket(s_type)
elif s_type:
outputs.new(s_type, "Data")
updateNode(self, context)
prop_name: StringProperty(name='', update=verify_prop)
@property
def obj(self):
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
return get_object(path)
def draw_buttons(self, context, layout):
layout.alert = self.bad_prop
if len(self.outputs) > 0:
self.draw_animatable_buttons(layout, icon_only=True)
layout.prop(self, "prop_name", text="")
def process(self):
# print(">> Get process is called")
self.outputs[0].sv_set(wrap_output_data(self.obj))
class SvSetPropNode(bpy.types.Node, SverchCustomTreeNode):
''' Set property '''
bl_idname = 'SvSetPropNode'
bl_label = 'Set property'
bl_icon = 'FORCE_VORTEX'
sv_icon = 'SV_PROP_SET'
ok_prop: BoolProperty(default=False)
bad_prop: BoolProperty(default=False)
@property
def obj(self):
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
return get_object(path)
def verify_prop(self, context):
# test first
try:
obj = self.obj
except:
traceback.print_exc()
self.bad_prop = True
return
# execute second
self.bad_prop = False
with self.sv_throttle_tree_update():
s_type = types.get(type(self.obj))
if not s_type:
s_type = secondary_type_assesment(self.obj)
p_name = {
float: "float_prop",
int: "int_prop",
bpy_prop_array: "color_prop"
}.get(type(self.obj),"")
inputs = self.inputs
if inputs and s_type:
socket = inputs[0].replace_socket(s_type)
socket.prop_name = p_name
elif s_type:
inputs.new(s_type, "Data").prop_name = p_name
if s_type == "SvVerticesSocket":
inputs[0].use_prop = True
updateNode(self, context)
def local_updateNode(self, context):
# no further interaction with the nodetree is required.
self.process()
prop_name: StringProperty(name='', update=verify_prop)
float_prop: FloatProperty(update=updateNode, name="x")
int_prop: IntProperty(update=updateNode, name="x")
color_prop: FloatVectorProperty(
name="Color", description="Color", size=4,
min=0.0, max=1.0, subtype='COLOR', update=local_updateNode)
def draw_buttons(self, context, layout):
layout.alert = self.bad_prop
layout.prop(self, "prop_name", text="")
def process(self):
# print("<< Set process is called")
data = self.inputs[0].sv_get()
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
obj = get_object(path)
#with self.sv_throttle_tree_update():
# changes here should not reflect back into the nodetree?
if isinstance(obj, (int, float, bpy_prop_array)):
obj = get_object(path[:-1])
p_type, value = path[-1]
if p_type == "attr":
setattr(obj, value, data[0][0])
else:
obj[value] = data[0][0]
else:
assign_data(obj, data)
def register():
bpy.utils.register_class(SvSetPropNode)
bpy.utils.register_class(SvGetPropNode)
def unregister():
bpy.utils.unregister_class(SvSetPropNode)
bpy.utils.unregister_class(SvGetPropNode)
|
gpl-3.0
| 4,452,298,741,122,461,000
| 29.826367
| 99
| 0.593616
| false
| 3.573239
| false
| false
| false
|
maxhutch/nek-analyze
|
interfaces/nek/slice.py
|
1
|
2125
|
from interfaces.abstract import AbstractSlice
import numpy as np
class DenseSlice(AbstractSlice):
""" Uninspired dense slice """
def __init__(self, shape, op=None):
self.shape = shape
self.op = op
    if self.op == 'int' or self.op is None:
self.op = np.add
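    # Seed the buffer with the identity element of the reduction: the smallest
    # finite float for max, the largest for min, and zeros for add.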
if self.op is np.maximum:
self.sl = np.zeros(self.shape) + np.finfo(np.float64).min
elif self.op is np.minimum:
self.sl = np.zeros(self.shape) + np.finfo(np.float64).max
else:
self.sl = np.zeros(self.shape)
def to_array(self):
return self.sl
def merge(self, sl2):
if isinstance(sl2, SparseSlice):
for pos,patch in sl2.patches.items():
self.add(pos, patch)
else:
self.sl = self.op(self.sl, sl2.to_array())
def add(self, pos, data):
block = data.shape
idx = tuple([np.s_[pos[j]:pos[j]+block[j]] for j in range(len(pos))])
self.sl[idx] = self.op(self.sl[idx], data)
class SparseSlice(AbstractSlice):
def __init__(self, shape, op=None):
self.shape = shape
self.op = op
    if self.op == 'int' or self.op is None:
self.op = np.add
self.patches = {}
def to_array(self):
if self.op is np.maximum:
res = np.zeros(self.shape) + np.finfo(np.float64).min
elif self.op is np.minimum:
res = np.zeros(self.shape) + np.finfo(np.float64).max
else:
res = np.zeros(self.shape)
for pos,patch in self.patches.items():
shp = patch.shape
idx = tuple([np.s_[pos[j]:pos[j]+shp[j]] for j in range(len(pos))])
res[idx] = self.op(res[idx], patch)
return res
def merge(self, sl2):
for pos,patch in sl2.patches.items():
self.add(pos, patch)
def add(self, pos, data):
key = tuple(pos)
if key in self.patches:
self.patches[key] = self.op(self.patches[key], data)
else:
self.patches[key] = np.copy(data)
def __add__(self, other):
res = SparseSlice(self.shape, op=np.add)
for pos,patch in self.patches.items():
res.add(pos, patch)
for pos,patch in other.patches.items():
res.add(pos, patch)
return res
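# Minimal usage sketch (shapes and values are assumed for illustration only):
#   sl = SparseSlice((64, 64), op=np.add)
#   sl.add((0, 0), np.ones((16, 16)))
#   sl.add((16, 16), np.ones((16, 16)))
#   dense = sl.to_array()  # materialize the patches into a full 64x64 array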
|
gpl-3.0
| -7,998,525,873,178,120,000
| 26.597403
| 75
| 0.600941
| false
| 3.08418
| false
| false
| false
|
dirmeier/dataframe
|
dataframe/dataframe.py
|
1
|
7810
|
# dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = 'mail@simon-dirmeier.net'
import dataframe
from ._dataframe_abstract import ADataFrame
from ._dataframe_column_set import DataFrameColumnSet
from ._check import is_none, is_callable, has_elements
from ._piping_exception import PipingException
class DataFrame(ADataFrame):
"""
The base DataFrame class.
"""
def __init__(self, **kwargs):
"""
Constructor for DataFrame.
        :param kwargs: standard keyword arguments, i.e. a list of named lists
:type kwargs: list of named lists
:return: returns a new DataFrame object
:rtype: DataFrame
"""
self.__data_columns = DataFrameColumnSet(**kwargs)
def __iter__(self):
"""
Iterator implementation for DataFrame.
Every iteration yields one row of the DataFrame.
:return: returns a row from the DataFrame
:rtype: DataFrameRow
"""
for i in range(self.nrow):
yield self.__row(i)
def __getitem__(self, item):
"""
Getter method for DataFrame. Returns the column with name item.
:param item: the name of a column
:type item: str
:return: returns a column from the DataFrame
:rtype: DataFrameColumn
"""
if isinstance(item, str) and item in self.colnames:
return self.__data_columns[self.colnames.index(item)]
elif isinstance(item, int):
return self.__row(item)
elif isinstance(item, slice):
return self.__rows(list(range(*item.indices(self.nrow))))
elif isinstance(item, tuple):
return self.__rows(list(item))
elif isinstance(item, list):
return self.__rows(item)
return None
def __repr__(self):
"""
String representation of DataFrame when print is called.
:return: returns the string representation
:rtype: str
"""
return self.__str__()
def __str__(self):
"""
ToString method for DataFrame.
:return: returns the string representation
:rtype: str
"""
return self.__data_columns.__str__()
def __rrshift__(self, other):
raise PipingException("")
def aggregate(self, clazz, new_col, *args):
"""
Aggregate the rows of the DataFrame into a single value.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
:param args: list of column names of the object that function
should be applied to
:type args: tuple
:return: returns a new dataframe object with the aggregated value
:rtype: DataFrame
"""
if is_callable(clazz) and not is_none(new_col) and has_elements(*args):
return self.__do_aggregate(clazz, new_col, *args)
def __do_aggregate(self, clazz, new_col, *col_names):
# get columns
colvals = [self[x] for x in col_names]
if colvals is None:
return None
# instantiate class and call
res = [clazz()(*colvals)]
if len(res) != 1:
raise ValueError("The function you provided " +
"yields an array of false length!")
return DataFrame(**{new_col: res})
def subset(self, *args):
"""
Subset only some of the columns of the DataFrame.
:param args: list of column names of the object that should be subsetted
:type args: tuple
:return: returns dataframe with only the columns you selected
:rtype: DataFrame
"""
cols = {}
for k in self.colnames:
if k in args:
cols[str(k)] = \
self.__data_columns[self.colnames.index(k)].values
return DataFrame(**cols)
def group(self, *args):
"""
Group the dataframe into row-subsets.
        :param args: list of column names that should be used for grouping
:type args: tuple
:return: returns a dataframe that has grouping information
:rtype: GroupedDataFrame
"""
return dataframe.GroupedDataFrame(self, *args)
def modify(self, clazz, new_col, *args):
"""
Modify some columns (i.e. apply a function) and add the
result to the table.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
:param args: list of column names of the object that
function should be applied to
:type args: tuple
        :return: returns a new dataframe object with the modified values,
i.e. the new column
:rtype: DataFrame
"""
if is_callable(clazz) and not is_none(new_col) and has_elements(*args):
return self.__do_modify(clazz, new_col, *args)
def __do_modify(self, clazz, new_col, *col_names):
colvals = [self[x] for x in col_names]
if colvals is None:
return None
# instantiate class and call
res = clazz()(*colvals)
res = [res] if not isinstance(res, list) else res
if len(res) != len(colvals[0].values):
raise ValueError("The function you provided " +
"yields an array of false length!")
cols = {column.colname: column.values for column in self.__data_columns}
cols[new_col] = res
return DataFrame(**cols)
@property
def nrow(self):
"""
Getter for the number of rows in the DataFrame.
:return: returns the number of rows
:rtype: int
"""
return self.__data_columns.nrow
@property
def ncol(self):
"""
Getter for the number of columns in the DataFrame.
:return: returns the number of columns
:rtype: int
"""
return self.__data_columns.ncol
@property
def colnames(self):
"""
        Getter for the column names of the DataFrame.
:return: returns a list of column names
:rtype: list(str)
"""
return self.__data_columns.colnames
def which_colnames(self, *args):
"""
Computes the indexes of the columns in the DataFrame.
:param args: list of column names
:type args: tuple
:return: returns a list of indexes
:rtype: list(int)
"""
return self.__data_columns.which_colnames(*args)
def cbind(self, **kwargs):
"""
Bind a column to the DataFrame.
:param kwargs: named list of elements you want to add
:type kwargs: keyword tuple
:return: self
:rtype: DataFrame
"""
self.__data_columns.cbind(**kwargs)
return self
def __rows(self, idxs):
return self.__data_columns.rows(idxs)
def __row(self, idx):
return self.__data_columns.row(idx)
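# Minimal usage sketch (column names and values are assumed for illustration only):
#   df = DataFrame(a=[1, 2, 3], b=["x", "y", "z"])
#   df.subset("a")                  # new DataFrame containing only column "a"
#   df.nrow, df.ncol, df.colnames   # basic shape and metadata accessors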
|
gpl-3.0
| -6,358,612,851,086,372,000
| 30.365462
| 80
| 0.596927
| false
| 4.286498
| false
| false
| false
|
LabProdam/LabDiario
|
ChefeDeGabinete/Exoneracao.py
|
1
|
2777
|
#!/usr/bin/python
#coding: utf-8
from DiarioTools.Parser import *
from DiarioTools.Process import *
from DiarioTools.Search import *
import re
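# Parses Diário Oficial entries that dismiss ("Exonerar") a Chefe de Gabinete
# (chief of staff), extracting the person's name, the office and the date.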
class ParseExoneracaoChefeDeGabinete(GenericParser):
def Initialize(self):
self.AddExpression("^\s*Exonerar.{0,1000}?(senhora|senhor)([^,]+).{0,400}?Chefe de Gabinete.(.+)", [2,3,0], re.I|re.M)
class SearchExoneracaoChefeDeGabinete(DlSearch):
def SetOptions(self):
self.options["sort"] = u"data desc"
self.query = "exonerar \"chefe de gabinete\""
class ProcessorExoneracaoChefeDeGabinete(ResponseProcessor):
def __init__(self, configInstance, searchObject, parseObject, fileName, sessionName):
super(ProcessorExoneracaoChefeDeGabinete, self).__init__(configInstance, searchObject, parseObject, sessionName)
self.fileName = fileName
self.records = []
with open(self.fileName, "a") as fd:
fd.write("*** Exonerações ***\r\n")
def Persist(self, data):
if len(data) > 0:
strOut = """Em """ + self.ProcessDate(data) + """, """ + self.ProcessName(data) + """ foi exonerado do cargo Chefe de Gabinete """ + self.ProcessGabinete(data) + "\n"
self.records.append(strOut.encode("utf-8"))
with open(self.fileName, "a") as fd:
fd.write(strOut.encode("utf-8"))
def ProcessEnd(self):
message = "*** Exonerações ***\r\n"
if (len(self.records) == 0):
message += """Nenhum Chefe de Gabinete exonerado neste período\r\n\r\n"""
Log.Log("Sem Alterações")
else:
message += "\r\n".join(self.records)
message += "\r\n"
return message
def ProcessName(self, data):
return data[0]
def ProcessGabinete(self, data):
gabineteRe = re.search("(Funda..o|Controladoria|Secretaria|Subprefeitura|Superintend.ncia)\s*,?\s*(([^\.](?! constante))*)", data[1], re.I)
if gabineteRe is not None:
gabineteFromData = gabineteRe.group(0)
gabineteFromData = "da " + gabineteFromData
else:
gabineteRe = re.search("(Instituto|Servi.o)\s*,?\s*([^,]*)", data[1], re.I)
if gabineteRe is not None:
gabineteFromData = gabineteRe.group(0)
gabineteFromData = "do " + gabineteFromData
else:
gabineteRe = re.search("^([^,]*).\s*s.mbolo", data[1], re.I)
if gabineteRe is not None:
gabineteFromData = gabineteRe.group(1)
else:
gabineteFromData = data[1]
gabineteFromData = re.sub("s.mbolo \w*,", "", gabineteFromData, re.I)
gabineteFromData = re.sub(",?\s*da Chefia de Gabinete[^,]*x", "", gabineteFromData, re.I)
gabineteFromData = re.sub(",?\s*constante.*$", "", gabineteFromData, re.I)
return gabineteFromData
def ProcessDate(self, data):
date = self.GetDateFromId()
dateRe = re.search("a partir de ([^,]*)", data[2], re.I)
if dateRe is not None:
date = dateRe.group(1)
return date
|
gpl-2.0
| -2,094,007,997,226,392,300
| 37.472222
| 169
| 0.662094
| false
| 2.591207
| false
| false
| false
|
safchain/contrail-sandesh
|
library/python/pysandesh/sandesh_client.py
|
1
|
3139
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Sandesh Client
#
from sandesh_connection import SandeshConnection
from sandesh_logger import SandeshLogger
from transport import TTransport
from protocol import TXMLProtocol
from sandesh_uve import SandeshUVETypeMaps
class SandeshClient(object):
def __init__(self, sandesh, primary_collector, secondary_collector,
discovery_client):
self._sandesh_instance = sandesh
self._primary_collector = primary_collector
self._secondary_collector = secondary_collector
self._discovery_client = discovery_client
self._logger = sandesh._logger
self._connection = None
#end __init__
# Public functions
def initiate(self):
self._connection = SandeshConnection(self._sandesh_instance,
self,
self._primary_collector,
self._secondary_collector,
self._discovery_client)
#end initiate
def connection(self):
return self._connection
#end connection
def send_sandesh(self, sandesh):
if (self._connection.session() is not None) and \
(self._sandesh_instance._module is not None) and \
(self._sandesh_instance._module != ""):
self._connection.session().enqueue_sandesh(sandesh)
else:
if (self._connection.session() is None):
error_str = "No Connection"
else:
error_str = "No ModuleId"
if self._sandesh_instance.is_logging_dropped_allowed(sandesh):
self._logger.error(
"SANDESH: %s: %s" % (error_str, sandesh.log()))
return 0
#end send_sandesh
def send_uve_sandesh(self, uve_sandesh):
self._connection.statemachine().on_sandesh_uve_msg_send(uve_sandesh)
#end send_uve_sandesh
def handle_sandesh_msg(self, sandesh_name, sandesh_xml):
transport = TTransport.TMemoryBuffer(sandesh_xml)
protocol_factory = TXMLProtocol.TXMLProtocolFactory()
protocol = protocol_factory.getProtocol(transport)
sandesh_req = self._sandesh_instance.get_sandesh_request_object(sandesh_name)
if sandesh_req:
if sandesh_req.read(protocol) == -1:
self._logger.error('Failed to decode sandesh request "%s"' \
% (sandesh_name))
else:
self._sandesh_instance.enqueue_sandesh_request(sandesh_req)
#end handle_sandesh_msg
def handle_sandesh_ctrl_msg(self, sandesh_ctrl_msg):
uve_type_map = {}
self._logger.debug('Number of uve types in sandesh control message is %d' % (len(sandesh_ctrl_msg.type_info)))
for type_info in sandesh_ctrl_msg.type_info:
uve_type_map[type_info.type_name] = type_info.seq_num
self._sandesh_instance._uve_type_maps.sync_all_uve_types(uve_type_map, self._sandesh_instance)
#end handle_sandesh_ctrl_msg
#end class SandeshClient
|
apache-2.0
| -5,720,518,425,684,812,000
| 37.280488
| 118
| 0.606881
| false
| 3.692941
| false
| false
| false
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/puls4.py
|
1
|
1992
|
# coding: utf-8
from __future__ import unicode_literals
from .prosiebensat1 import ProSiebenSat1BaseIE
from ..utils import (
unified_strdate,
parse_duration,
compat_str,
)
class Puls4IE(ProSiebenSat1BaseIE):
_VALID_URL = r'https?://(?:www\.)?puls4\.com/(?P<id>[^?#&]+)'
_TESTS = [{
'url': 'http://www.puls4.com/2-minuten-2-millionen/staffel-3/videos/2min2miotalk/Tobias-Homberger-von-myclubs-im-2min2miotalk-118118',
'md5': 'fd3c6b0903ac72c9d004f04bc6bb3e03',
'info_dict': {
'id': '118118',
'ext': 'flv',
'title': 'Tobias Homberger von myclubs im #2min2miotalk',
'description': 'md5:f9def7c5e8745d6026d8885487d91955',
'upload_date': '20160830',
'uploader': 'PULS_4',
},
}, {
'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident.-Norbert-Hofer',
'only_matching': True,
}, {
'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident-Analyse-des-Interviews-mit-Norbert-Hofer-416598',
'only_matching': True,
}]
_TOKEN = 'puls4'
_SALT = '01!kaNgaiNgah1Ie4AeSha'
_CLIENT_NAME = ''
def _real_extract(self, url):
path = self._match_id(url)
content_path = self._download_json(
'http://www.puls4.com/api/json-fe/page/' + path, path)['content'][0]['url']
media = self._download_json(
'http://www.puls4.com' + content_path,
content_path)['mediaCurrent']
player_content = media['playerContent']
info = self._extract_video_info(url, player_content['id'])
info.update({
'id': compat_str(media['objectId']),
'title': player_content['title'],
'description': media.get('description'),
'thumbnail': media.get('previewLink'),
'upload_date': unified_strdate(media.get('date')),
'duration': parse_duration(player_content.get('duration')),
'episode': player_content.get('episodePartName'),
'show': media.get('channel'),
'season_id': player_content.get('seasonId'),
'uploader': player_content.get('sourceCompany'),
})
return info
|
gpl-3.0
| 8,175,801,211,725,513,000
| 33.947368
| 147
| 0.677209
| false
| 2.444172
| false
| false
| false
|
aspiers/gertty
|
gertty/alembic/versions/50344aecd1c2_add_files_table.py
|
1
|
3437
|
"""add files table
Revision ID: 50344aecd1c2
Revises: 1bb187bcd401
Create Date: 2015-04-13 08:08:08.682803
"""
# revision identifiers, used by Alembic.
revision = '50344aecd1c2'
down_revision = '1bb187bcd401'
import re
import sys
from alembic import op, context
import sqlalchemy as sa
import git.exc
import gertty.db
import gertty.gitrepo
def upgrade():
op.create_table('file',
sa.Column('key', sa.Integer(), nullable=False),
sa.Column('revision_key', sa.Integer(), nullable=False, index=True),
sa.Column('path', sa.Text(), nullable=False, index=True),
sa.Column('old_path', sa.Text(), index=True),
sa.Column('status', sa.String(length=1)),
sa.Column('inserted', sa.Integer()),
sa.Column('deleted', sa.Integer()),
sa.PrimaryKeyConstraint('key')
)
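    # Matches git diffstat rename notation, e.g. "dir/{old => new}/file.py" or a
    # whole-path rename "old.py => new.py"; groups 2-4 and 6 recover old/new paths.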
pathre = re.compile('((.*?)\{|^)(.*?) => (.*?)(\}(.*)|$)')
insert = sa.text('insert into file (key, revision_key, path, old_path, status, inserted, deleted) '
' values (NULL, :revision_key, :path, :old_path, :status, :inserted, :deleted)')
conn = op.get_bind()
countres = conn.execute('select count(*) from revision')
revisions = countres.fetchone()[0]
if revisions > 50:
print('')
print('Adding support for searching for changes by file modified. '
'This may take a while.')
qres = conn.execute('select p.name, c.number, c.status, r.key, r.number, r."commit", r.parent from project p, change c, revision r '
'where r.change_key=c.key and c.project_key=p.key order by p.name')
count = 0
for (pname, cnumber, cstatus, rkey, rnumber, commit, parent) in qres.fetchall():
count += 1
sys.stdout.write('Diffstat revision %s / %s\r' % (count, revisions))
sys.stdout.flush()
ires = conn.execute(insert, revision_key=rkey, path='/COMMIT_MSG', old_path=None,
status=None, inserted=None, deleted=None)
repo = gertty.gitrepo.get_repo(pname, context.config.gertty_app.config)
try:
stats = repo.diffstat(parent, commit)
except git.exc.GitCommandError:
# Probably a missing commit
if cstatus not in ['MERGED', 'ABANDONED']:
print("Unable to examine diff for %s %s change %s,%s" % (cstatus, pname, cnumber, rnumber))
continue
for stat in stats:
try:
(added, removed, path) = stat
except ValueError:
if cstatus not in ['MERGED', 'ABANDONED']:
print("Empty diffstat for %s %s change %s,%s" % (cstatus, pname, cnumber, rnumber))
m = pathre.match(path)
status = gertty.db.File.STATUS_MODIFIED
old_path = None
if m:
status = gertty.db.File.STATUS_RENAMED
pre = m.group(2) or ''
post = m.group(6) or ''
old_path = pre+m.group(3)+post
path = pre+m.group(4)+post
try:
added = int(added)
except ValueError:
added = None
try:
removed = int(removed)
except ValueError:
removed = None
conn.execute(insert, revision_key=rkey, path=path, old_path=old_path,
status=status, inserted=added, deleted=removed)
print('')
def downgrade():
pass
|
apache-2.0
| -1,252,221,657,392,162,000
| 35.56383
| 136
| 0.571429
| false
| 3.617895
| false
| false
| false
|
KeithSloan/PressureClickBarometer
|
ReadSensor.py
|
1
|
1544
|
import smbus
import datetime
def GetTime ():
now = datetime.datetime.now()
return (str(now.hour)+":"+str(now.minute)+"."+str(now.second))
#init bus
bus = smbus.SMBus(1)
print GetTime()
# power up LPS331AP pressure sensor & set BDU
bus.write_byte_data(0x5d, 0x20, 0b10000100)
#write value 0b1 to register 0x21 on device at address 0x5d
# one shot enable
bus.write_byte_data(0x5d,0x21, 0b1)
Temp_LSB = bus.read_byte_data(0x5d, 0x2b)
Temp_MSB = bus.read_byte_data(0x5d, 0x2c)
#combine LSB & MSB
count = (Temp_MSB << 8) | Temp_LSB
# Convert the 16-bit two's complement reading to a signed value
comp = count - (1 << 16) if (count & 0x8000) else count
#calc temp according to data sheet
Temp = 42.5 + (comp/480.0)
print "Temperature: %.2f" % Temp
#print "Temp MSB ",format(Temp_MSB,'02x')
#print "Temp LSB ",format(Temp_LSB,'02x')
#print "Temp 2 comp ",format(count,'04x')
#print "Temp : ",format(comp,'04x')
#print "Temp MSB dec : ",Temp_MSB
#print "Temp_LSB dec : ",Temp_LSB
Pressure_LSB = bus.read_byte_data(0x5d, 0x29)
Pressure_MSB = bus.read_byte_data(0x5d, 0x2a)
Pressure_XLB = bus.read_byte_data(0x5d, 0x28)
count = (Pressure_MSB << 16) | ( Pressure_LSB << 8 ) | Pressure_XLB
#comp = count - (1 << 24)
#Pressure value is positive so just use value as decimal
Pressure = count/4096.0
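# e.g. (illustrative): count = 0x3F9C00 = 4168704 -> 4168704 / 4096 = 1017.75 mbar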
print "Pressure: %.2f" % Pressure
#print "Pressure MSB ",format(Pressure_MSB,'02x')
#print "Pressure LSB ",format(Pressure_LSB,'02x')
#print "Pressure XLB ",format(Pressure_XLB,'02x')
#print "Pressure 2 comp ",format(count,'06x')
#print "Pressure : ",format(comp,'04x')
|
gpl-2.0
| -7,952,943,462,252,300,000
| 25.62069
| 67
| 0.686528
| false
| 2.4704
| false
| false
| false
|
uclmr/inferbeddings
|
scripts/wn18/UCL_WN18_adv_hinge_v1.py
|
1
|
4537
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import os
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
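# Illustrative usage (not part of the original script): cartesian_product expands a
# dict of hyperparameter lists into every combination, e.g.
#   list(cartesian_product(dict(lr=[.1, .01], margin=[1, 2])))
#   -> [{'lr': 0.1, 'margin': 1}, {'lr': 0.1, 'margin': 2},
#       {'lr': 0.01, 'margin': 1}, {'lr': 0.01, 'margin': 2}]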
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/pminervi/workspace/inferbeddings/'
command = 'python3 {}/bin/kbp-cli.py' \
' --train {}/data/wn18/wordnet-mlj12-train.txt' \
' --valid {}/data/wn18/wordnet-mlj12-valid.txt' \
' --test {}/data/wn18/wordnet-mlj12-test.txt' \
' --clauses {}/data/wn18/clauses/clauses_0.9.pl' \
' --nb-epochs {}' \
' --lr {}' \
' --nb-batches {}' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {} --loss hinge' \
''.format(_path, _path, _path, _path, _path,
c['epochs'], c['lr'], c['batches'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_wn18_adv_hinge_v1.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the UCL cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space_transe = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['TransE'],
similarity=['l1', 'l2'],
margin=[1, 2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 10],
disc_epochs=[10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
hyperparameters_space_distmult_complex = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['DistMult', 'ComplEx'],
similarity=['dot'],
margin=[1, 2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 10],
disc_epochs=[10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
configurations_transe = cartesian_product(hyperparameters_space_transe)
configurations_distmult_complex = cartesian_product(hyperparameters_space_distmult_complex)
path = '/home/pminervi/workspace/inferbeddings/logs/ucl_wn18_adv_hinge_v1/'
if not os.path.exists(path):
os.makedirs(path)
configurations = list(configurations_transe) + list(configurations_distmult_complex)
for job_id, cfg in enumerate(configurations):
logfile = to_logfile(cfg, path)
completed = False
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
if args.debug:
print(line)
else:
file_name = 'ucl_wn18_adv_hinge_v1_{}.job'.format(job_id)
alias = ''
job_script = '#$ -S /bin/bash\n' \
'#$ -wd /tmp/\n' \
'#$ -l h_vmem=4G,tmem=4G\n' \
'#$ -l h_rt=24:00:00\n' \
'{}\n{}\n'.format(alias, line)
with open(file_name, 'w') as f:
f.write(job_script)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
|
mit
| -6,206,648,303,705,620,000
| 33.371212
| 112
| 0.517743
| false
| 3.476628
| true
| false
| false
|
florath/init4boot
|
init4boot/plugins/multipath.py
|
1
|
3244
|
#
# multipath iSCSI plugin
#
# (c) 2008-2009 by flonatel (sf@flonatel.org)
# (c) 2015 by Andreas Florath (andreas@florath.org)
#
# For licensing details see COPYING
#
import os
from init4boot.lib.FilesystemUtils import fsutils
class multipath(object):
def __init__(self, config, opts):
self.config = config
self.opts = opts
self.__root_dir = opts.root_dir
def check(self):
return fsutils.must_exist(self.__root_dir, ["sbin"], "multipath") \
and fsutils.must_exist(self.__root_dir, ["sbin"], "kpartx")
def go_CommandLineEvaluation(self):
class CommandLineEvaluation:
def output(self, ofile):
ofile.write("""
multipath:*)
bv_deps="${bv_deps} network multipath"
;;
""")
return CommandLineEvaluation()
def go_HandleInitialModuleSetup(self):
class HandleInitialModuleSetup:
def output(self, ofile):
ofile.write("""
if check_bv "multipath"; then
logp "Handling multipath"
modprobe dm-multipath
modprobe dm-emc
modprobe dm-round-robin
fi
""")
return HandleInitialModuleSetup()
def go_SetupHighLevelTransport(self):
class SetupHighLevelTransport:
# iSCSI must be done before multipath
def deps(self):
return ["iSCSI", ]
def output(self, ofile):
ofile.write("""
multipath:*)
maybe_break multipath
logp "Handling multipath"
if [ -e /bin/multipath ]; then
# Multipath needs in some situations more than one run
for i in 1 2 3 ; do
/bin/multipath
sleep 1
/bin/multipath -ll
done
log "Accessing all disk once to get the state corrected"
# Note that the following can take about 30 seconds for EACH disk.
# So the things executed in parallel
ddpids=""
for disk in /dev/mapper/*; do
[ "${disk}" = "/dev/mapper/control" ] && continue
log "... ${disk}"
dd if=${disk} of=/dev/null bs=1024 count=1 >/dev/null 2>&1 &
ddpids="${ddpids} $!"
done
log "Waiting for possible multipath switchover to end"
wait ${ddpids}
log "Creating block devices for partitions"
for disk in /dev/mapper/*; do
[ "${disk}" = "/dev/mapper/control" ] && continue
log "... ${disk}"
/bin/kpartx -a ${disk}
done
else
log "Multipath enabled, but binary not available - ignoring multipath"
fi
logpe
;;
""")
return SetupHighLevelTransport()
# ======================================================================
# === Create hooks
def mi_Copy(self):
class Copy:
def output(self, c):
c.copy_exec("sbin/multipath")
c.copy_exec("sbin/kpartx")
c.copy_exec_w_path("devmap_name", ["sbin", ])
c.copy_exec_w_path("dmsetup", ["sbin", ])
# Not available in Debian stable
# (Part of kpartx package which is only available in unstable)
# c.copy("lib/udev/dmsetup_env", "lib/udev")
# Copy all the dependend multipath so libs
c.copytree("lib/multipath", "lib/multipath")
return Copy()
|
gpl-3.0
| -1,211,690,267,482,373,000
| 25.809917
| 78
| 0.569667
| false
| 3.803048
| false
| false
| false
|
MjAbuz/foundation
|
foundation/organisation/migrations/0016_auto__add_projectlist.py
|
1
|
14482
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProjectList'
db.create_table(u'organisation_projectlist', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('theme', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Theme'], null=True, blank=True)),
('project_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.ProjectType'], null=True, blank=True)),
))
db.send_create_signal(u'organisation', ['ProjectList'])
def backwards(self, orm):
# Deleting model 'ProjectList'
db.delete_table(u'organisation_projectlist')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'organisation.board': {
'Meta': {'object_name': 'Board'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.BoardMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.boardmembership': {
'Meta': {'object_name': 'BoardMembership'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.featuredproject': {
'Meta': {'object_name': 'FeaturedProject', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['organisation.Project']"})
},
u'organisation.networkgroup': {
'Meta': {'unique_together': "(('country', 'region'),)", 'object_name': 'NetworkGroup'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_information': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.NetworkGroupMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.networkgroupmembership': {
'Meta': {'object_name': 'NetworkGroupMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'networkgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.NetworkGroup']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'sourcecode_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Theme']", 'symmetrical': 'False', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.ProjectType']", 'symmetrical': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.projectlist': {
'Meta': {'object_name': 'ProjectList', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.ProjectType']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'})
},
u'organisation.projecttype': {
'Meta': {'object_name': 'ProjectType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.theme': {
'Meta': {'object_name': 'Theme'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unit': {
'Meta': {'ordering': "['-order', 'name']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.UnitMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unitmembership': {
'Meta': {'object_name': 'UnitMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.workinggroup': {
'Meta': {'object_name': 'WorkingGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incubation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['organisation']
|
mit
| -6,140,818,890,270,693,000
| 79.016575
| 200
| 0.558348
| false
| 3.670046
| false
| false
| false
|
Mohamad1994HD/LinkArchiever
|
app/models/linkList.py
|
1
|
1189
|
from sets import Set
from interfaceDB import insert_link_with_tag, is_link, is_tag, get_tags_ids_of_link, get_tags_from_ids, \
get_links_ids_from_tag, get_link_data_from_id
class LinkList(list):
def __init__(self, link_name, link_desc=None, link_tags=[]):
list.__init__([])
self.name = link_name
self.desc = link_desc
self.extend(link_tags)
def save_to_db(self):
is_existed = is_link(self.name)
for tag in self:
insert_link_with_tag(self.name, tag, existed_link=is_existed, existed_tag=is_tag(tag_name=tag))
def get_tags_from_db(self):
del self[:]
self.extend(get_tags_from_ids(get_tags_ids_of_link(self.name)))
return self
def __repr__(self):
return str(self.repr())
def repr(self):
return {'name': self.name, 'desc': self.desc, 'tags': [i for i in self]}
def get_links_ids_from_tags_lst(tags):
l = []
for tag in tags:
l.extend(get_links_ids_from_tag(tag))
my_set = Set(l)
return list(my_set)
def get_links_from_tags_lst(tags):
ids = get_links_ids_from_tags_lst(tags)
return [(get_link_data_from_id(id)) for id in ids]
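# Illustrative usage (a sketch; assumes the interfaceDB backend is configured):
#   link = LinkList('https://example.org', 'An example link', ['python', 'archive'])
#   link.repr()       # {'name': 'https://example.org', 'desc': 'An example link',
#                     #  'tags': ['python', 'archive']}
#   link.save_to_db()                    # persists the link and its tags
#   get_links_from_tags_lst(['python'])  # -> link records for every link tagged 'python'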
|
gpl-3.0
| -3,530,685,358,061,000,700
| 26.651163
| 107
| 0.606392
| false
| 2.965087
| false
| false
| false
|
conda-forge/conda-forge-webservices
|
conda_forge_webservices/webapp.py
|
1
|
28798
|
import os
import asyncio
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.locks
import hmac
import hashlib
import json
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import atexit
# import functools
import logging
import requests
import github
from datetime import datetime
import conda_forge_webservices.linting as linting
import conda_forge_webservices.feedstocks_service as feedstocks_service
import conda_forge_webservices.update_teams as update_teams
import conda_forge_webservices.commands as commands
from conda_forge_webservices.update_me import get_current_versions
from conda_forge_webservices.feedstock_outputs import (
validate_feedstock_outputs,
copy_feedstock_outputs,
is_valid_feedstock_token,
comment_on_outputs_copy,
)
from conda_forge_webservices import status_monitor
STATUS_DATA_LOCK = tornado.locks.Lock()
LOGGER = logging.getLogger("conda_forge_webservices")
POOL = None
def _worker_pool():
global POOL
if POOL is None:
if "PYTEST_CURRENT_TEST" in os.environ:
# needed for mocks in testing
POOL = ThreadPoolExecutor(max_workers=2)
else:
POOL = ProcessPoolExecutor(max_workers=2)
return POOL
def _shutdown_worker_pool():
global POOL
if POOL is not None:
POOL.shutdown(wait=False)
atexit.register(_shutdown_worker_pool)
THREAD_POOL = None
def _thread_pool():
global THREAD_POOL
if THREAD_POOL is None:
THREAD_POOL = ThreadPoolExecutor(max_workers=2)
return THREAD_POOL
def _shutdown_thread_pool():
global THREAD_POOL
if THREAD_POOL is not None:
THREAD_POOL.shutdown(wait=False)
atexit.register(_shutdown_thread_pool)
def get_commit_message(full_name, commit):
return (
github.Github(os.environ['GH_TOKEN'])
.get_repo(full_name)
.get_commit(commit)
.commit
.message)
def print_rate_limiting_info_for_token(token, user):
# Compute some info about our GitHub API Rate Limit.
# Note that it doesn't count against our limit to
# get this info. So, we should be doing this regularly
# to better know when it is going to run out. Also,
# this will help us better understand where we are
# spending it and how to better optimize it.
# Get GitHub API Rate Limit usage and total
gh = github.Github(token)
gh_api_remaining = gh.get_rate_limit().core.remaining
gh_api_total = gh.get_rate_limit().core.limit
# Compute time until GitHub API Rate Limit reset
gh_api_reset_time = gh.get_rate_limit().core.reset
gh_api_reset_time -= datetime.utcnow()
msg = "{user} - remaining {remaining} out of {total}.".format(
remaining=gh_api_remaining,
total=gh_api_total, user=user,
)
LOGGER.info(
"github api requests: %s - %s",
msg,
"Will reset in {time}.".format(time=gh_api_reset_time)
)
def print_rate_limiting_info():
d = [(os.environ['GH_TOKEN'], "conda-forge-linter")]
LOGGER.info("")
LOGGER.info("GitHub API Rate Limit Info:")
for k, v in d:
print_rate_limiting_info_for_token(k, v)
LOGGER.info("")
def valid_request(body, signature):
our_hash = hmac.new(
os.environ['CF_WEBSERVICES_TOKEN'].encode('utf-8'),
body,
hashlib.sha1,
).hexdigest()
their_hash = signature.split("=")[1]
return hmac.compare_digest(their_hash, our_hash)
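# Illustrative only (not part of the webservice): GitHub builds the X-Hub-Signature
# header as "sha1=" plus an HMAC-SHA1 hex digest of the raw request body keyed with the
# shared webhook secret; valid_request() above recomputes that digest and compares, e.g.
#   signature = "sha1=" + hmac.new(secret.encode("utf-8"), body, hashlib.sha1).hexdigest()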
class LintingHookHandler(tornado.web.RequestHandler):
async def post(self):
headers = self.request.headers
event = headers.get('X-GitHub-Event', None)
if not valid_request(
self.request.body,
headers.get('X-Hub-Signature', ''),
):
self.set_status(403)
self.write_error(403)
return
if event == 'ping':
self.write('pong')
elif event == 'pull_request':
body = tornado.escape.json_decode(self.request.body)
repo_name = body['repository']['name']
owner = body['repository']['owner']['login']
pr_id = int(body['pull_request']['number'])
is_open = body['pull_request']['state'] == 'open'
if (
owner != 'conda-forge' or
not (
repo_name == 'staged-recipes' or
repo_name.endswith("-feedstock")
)
):
self.set_status(404)
self.write_error(404)
return
if body["action"] not in ["opened", "reopened", "synchronize", "unlocked"]:
return
if repo_name == 'staged-recipes':
stale = any(
label['name'] == 'stale'
for label in body['pull_request']['labels']
)
else:
stale = False
# Only do anything if we are working with conda-forge,
# and an open PR.
if is_open and owner == 'conda-forge' and not stale:
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("linting: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
lint_info = await tornado.ioloop.IOLoop.current().run_in_executor(
_worker_pool(),
linting.compute_lint_message,
owner,
repo_name,
pr_id,
repo_name == 'staged-recipes',
)
if lint_info:
msg = linting.comment_on_pr(
owner,
repo_name,
pr_id,
lint_info['message'],
search='conda-forge-linting service',
)
linting.set_pr_status(
owner,
repo_name,
lint_info,
target_url=msg.html_url,
)
print_rate_limiting_info()
else:
LOGGER.info('Unhandled event "{}".'.format(event))
self.set_status(404)
self.write_error(404)
class UpdateFeedstockHookHandler(tornado.web.RequestHandler):
async def post(self):
headers = self.request.headers
event = headers.get('X-GitHub-Event', None)
if not valid_request(
self.request.body,
headers.get('X-Hub-Signature', ''),
):
self.set_status(403)
self.write_error(403)
return
if event == 'ping':
self.write('pong')
return
elif event == 'push':
body = tornado.escape.json_decode(self.request.body)
repo_name = body['repository']['name']
owner = body['repository']['owner']['login']
ref = body['ref']
commit = body.get('head_commit', {}).get('id', None)
if commit:
commit_msg = get_commit_message(
body['repository']['full_name'],
commit,
)
else:
commit_msg = ""
# Only do anything if we are working with conda-forge, and a
# push to master.
if (
owner == 'conda-forge' and
ref == "refs/heads/master" and
"[cf admin skip feedstocks]" not in commit_msg and
"[cf admin skip]" not in commit_msg
):
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("feedstocks service: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
handled = await tornado.ioloop.IOLoop.current().run_in_executor(
_worker_pool(),
feedstocks_service.handle_feedstock_event,
owner,
repo_name,
)
if handled:
print_rate_limiting_info()
return
else:
LOGGER.info('Unhandled event "{}".'.format(event))
self.set_status(404)
self.write_error(404)
class UpdateTeamHookHandler(tornado.web.RequestHandler):
async def post(self):
headers = self.request.headers
event = headers.get('X-GitHub-Event', None)
if not valid_request(
self.request.body,
headers.get('X-Hub-Signature', ''),
):
self.set_status(403)
self.write_error(403)
return
if event == 'ping':
self.write('pong')
return
elif event == 'push':
body = tornado.escape.json_decode(self.request.body)
repo_name = body['repository']['name']
owner = body['repository']['owner']['login']
ref = body['ref']
commit = body.get('head_commit', {}).get('id', None)
if commit:
commit_msg = get_commit_message(
body['repository']['full_name'],
commit,
)
else:
commit_msg = ""
# Only do anything if we are working with conda-forge,
# and a push to master.
if (
owner == 'conda-forge' and
repo_name.endswith("-feedstock") and
ref == "refs/heads/master" and
"[cf admin skip teams]" not in commit_msg and
"[cf admin skip]" not in commit_msg
):
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("updating team: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
await tornado.ioloop.IOLoop.current().run_in_executor(
_thread_pool(), # always threads due to expensive lru_cache
update_teams.update_team,
owner,
repo_name,
commit,
)
print_rate_limiting_info()
return
else:
LOGGER.info('Unhandled event "{}".'.format(event))
self.set_status(404)
self.write_error(404)
class CommandHookHandler(tornado.web.RequestHandler):
async def post(self):
headers = self.request.headers
event = headers.get('X-GitHub-Event', None)
if not valid_request(
self.request.body,
headers.get('X-Hub-Signature', ''),
):
self.set_status(403)
self.write_error(403)
return
if event == 'ping':
self.write('pong')
return
elif (
event == 'pull_request_review' or
event == 'pull_request' or
event == 'pull_request_review_comment'
):
body = tornado.escape.json_decode(self.request.body)
action = body["action"]
repo_name = body['repository']['name']
owner = body['repository']['owner']['login']
# Only do anything if we are working with conda-forge
if (
owner != 'conda-forge' or
not (
repo_name == "staged-recipes" or
repo_name.endswith("-feedstock")
)
):
self.set_status(404)
self.write_error(404)
return
pr_repo = body['pull_request']['head']['repo']
pr_owner = pr_repo['owner']['login']
pr_repo = pr_repo['name']
pr_branch = body['pull_request']['head']['ref']
pr_num = body['pull_request']['number']
comment = None
if event == 'pull_request_review' and action != 'dismissed':
comment = body['review']['body']
elif (
event == 'pull_request' and
action in ['opened', 'edited', 'reopened']
):
comment = body['pull_request']['body']
elif (
event == 'pull_request_review_comment' and
action != 'deleted'
):
comment = body['comment']['body']
if comment:
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("PR command: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
await tornado.ioloop.IOLoop.current().run_in_executor(
_worker_pool(),
commands.pr_detailed_comment,
owner,
repo_name,
pr_owner,
pr_repo,
pr_branch,
pr_num,
comment,
)
print_rate_limiting_info()
return
elif event == 'issue_comment' or event == "issues":
body = tornado.escape.json_decode(self.request.body)
action = body["action"]
repo_name = body['repository']['name']
owner = body['repository']['owner']['login']
issue_num = body['issue']['number']
# Only do anything if we are working with conda-forge
if (
owner != 'conda-forge' or
not (
repo_name == "staged-recipes" or
repo_name.endswith("-feedstock")
)
):
self.set_status(404)
self.write_error(404)
return
pull_request = False
if "pull_request" in body["issue"]:
pull_request = True
if pull_request and action != 'deleted':
comment = body['comment']['body']
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("PR command: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
await tornado.ioloop.IOLoop.current().run_in_executor(
_worker_pool(),
commands.pr_comment,
owner,
repo_name,
issue_num,
comment,
)
print_rate_limiting_info()
return
if (
not pull_request and
action in ['opened', 'edited', 'created', 'reopened']
):
title = body['issue']['title'] if event == "issues" else ""
if 'comment' in body:
comment = body['comment']['body']
else:
comment = body['issue']['body']
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("issue command: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
await tornado.ioloop.IOLoop.current().run_in_executor(
_worker_pool(),
commands.issue_comment,
owner,
repo_name,
issue_num,
title,
comment,
)
print_rate_limiting_info()
return
else:
LOGGER.info('Unhandled event "{}".'.format(event))
self.set_status(404)
self.write_error(404)
class UpdateWebservicesVersionsHandler(tornado.web.RequestHandler):
async def get(self):
self.write(json.dumps(get_current_versions()))
def _repo_exists(feedstock):
r = requests.get("https://github.com/conda-forge/%s" % feedstock)
if r.status_code != 200:
return False
else:
return True
class OutputsValidationHandler(tornado.web.RequestHandler):
"""This is a stub that we keep around so that old CI jobs still work
    if they have not been rerendered. We should remove it eventually."""
async def post(self):
self.write(json.dumps({"deprecated": True}))
def _do_copy(feedstock, outputs, channel, git_sha, comment_on_error):
valid, errors = validate_feedstock_outputs(
feedstock,
outputs,
)
outputs_to_copy = {}
for o in valid:
if valid[o]:
outputs_to_copy[o] = outputs[o]
if outputs_to_copy:
copied = copy_feedstock_outputs(
outputs_to_copy,
channel,
delete=False,
)
# send for github releases copy
if True:
try:
gh = github.Github(os.environ["GH_TOKEN"])
repo = gh.get_repo("conda-forge/repodata-shards")
for dist in copied:
if not copied[dist]:
continue
_subdir, _pkg = os.path.split(dist)
if channel == "main":
_url = f"https://conda.anaconda.org/cf-staging/{dist}"
else:
_url = (
"https://conda.anaconda.org/cf-staging/label/"
+ f"{channel}/{dist}"
)
repo.create_repository_dispatch(
"release",
{
"artifact_url": _url,
"md5": outputs_to_copy[dist],
"subdir": _subdir,
"package": _pkg,
"url": _url,
"feedstock": feedstock,
"label": channel,
"git_sha": git_sha,
"comment_on_error": comment_on_error,
}
)
LOGGER.info(" artifact %s sent for copy", dist)
except Exception as e:
LOGGER.info(
" repo dispatch for artifact copy failed: %s", repr(e)
)
else:
copied = {}
for o in outputs:
if o not in copied:
copied[o] = False
if not all(copied[o] for o in outputs) and comment_on_error:
comment_on_outputs_copy(
feedstock, git_sha, errors, valid, copied)
return valid, errors, copied
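# Illustrative request payload (a sketch; the field and header names are the ones read
# by OutputsCopyHandler below, the concrete values are made up):
#   POST /feedstock-outputs/copy  with header  FEEDSTOCK_TOKEN: <token>  and JSON body
#   {
#     "feedstock": "some-pkg-feedstock",
#     "outputs": {"noarch/some-pkg-1.0-py_0.tar.bz2": "<md5 of the artifact>"},
#     "channel": "main",
#     "git_sha": "<commit sha used for error comments>",
#     "comment_on_error": true
#   }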
class OutputsCopyHandler(tornado.web.RequestHandler):
async def post(self):
headers = self.request.headers
feedstock_token = headers.get('FEEDSTOCK_TOKEN', None)
data = tornado.escape.json_decode(self.request.body)
feedstock = data.get("feedstock", None)
outputs = data.get("outputs", None)
channel = data.get("channel", None)
git_sha = data.get("git_sha", None)
# the old default was to comment only if the git sha was not None
# so we keep that here
comment_on_error = data.get("comment_on_error", git_sha is not None)
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("copy outputs for feedstock '%s'" % feedstock)
LOGGER.info("===================================================")
if feedstock is not None and len(feedstock) > 0:
feedstock_exists = _repo_exists(feedstock)
else:
feedstock_exists = False
valid_token = False
if (
feedstock_exists
and feedstock_token is not None
and len(feedstock_token) > 0
and is_valid_feedstock_token(
"conda-forge", feedstock, feedstock_token
)
):
valid_token = True
if (
(not feedstock_exists)
or outputs is None
or channel is None
or (not valid_token)
):
LOGGER.warning(' invalid outputs copy request for %s!' % feedstock)
LOGGER.warning(' feedstock exists: %s' % feedstock_exists)
LOGGER.warning(' outputs: %s' % outputs)
LOGGER.warning(' channel: %s' % channel)
LOGGER.warning(' valid token: %s' % valid_token)
err_msgs = []
if outputs is None:
err_msgs.append("no outputs data sent for copy")
if channel is None:
err_msgs.append("no channel sent for copy")
if not valid_token:
err_msgs.append("invalid feedstock token")
if feedstock_exists and comment_on_error:
comment_on_outputs_copy(
feedstock, git_sha,
err_msgs,
{}, {}
)
self.set_status(403)
self.write_error(403)
else:
(
valid,
errors,
copied,
) = await tornado.ioloop.IOLoop.current().run_in_executor(
_worker_pool(),
_do_copy,
feedstock,
outputs,
channel,
git_sha,
comment_on_error,
)
if not all(v for v in copied.values()):
self.set_status(403)
self.write(json.dumps(
{"errors": errors, "valid": valid, "copied": copied}))
LOGGER.info(" errors: %s", errors)
LOGGER.info(" valid: %s", valid)
LOGGER.info(" copied: %s", copied)
print_rate_limiting_info()
return
# code to pass everything through
# not used but can be to turn it all off if we need to
# if outputs is not None and channel is not None:
# copied = await tornado.ioloop.IOLoop.current().run_in_executor(
# _worker_pool(),
# copy_feedstock_outputs,
# outputs,
# channel,
# )
#
# if not all(v for v in copied.values()):
# self.set_status(403)
#
# if git_sha is not None and not all(copied[o] for o in outputs):
# comment_on_outputs_copy(
# feedstock, git_sha, ["some outputs did not copy"], {}, copied)
#
# self.write(json.dumps(
# {"errors": ["some outputs did not copy"],
# "valid": {},
# "copied": copied}))
#
# LOGGER.info(" errors: %s", ["some outputs did not copy"])
# LOGGER.info(" valid: %s", {})
# LOGGER.info(" copied: %s", copied)
#
# else:
# if git_sha is not None and feedstock is not None:
# comment_on_outputs_copy(
# feedstock, git_sha,
# ["invalid copy request (either bad data or bad feedstock token)"],
# {}, {}
# )
# self.set_status(403)
# self.write_error(403)
#
# return
class StatusMonitorPayloadHookHandler(tornado.web.RequestHandler):
async def post(self):
headers = self.request.headers
event = headers.get('X-GitHub-Event', None)
if not valid_request(
self.request.body,
headers.get('X-Hub-Signature', ''),
):
self.set_status(403)
self.write_error(403)
return
if event == 'ping':
self.write('pong')
return
body = tornado.escape.json_decode(self.request.body)
if event == 'check_run':
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("check run: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
async with STATUS_DATA_LOCK:
status_monitor.update_data_check_run(body)
return
elif event == 'check_suite':
self.write(event)
return
elif event == 'status':
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("status: %s", body['repository']['full_name'])
LOGGER.info("===================================================")
async with STATUS_DATA_LOCK:
status_monitor.update_data_status(body)
return
else:
LOGGER.info('Unhandled event "{}".'.format(event))
self.set_status(404)
self.write_error(404)
class StatusMonitorAzureHandler(tornado.web.RequestHandler):
async def get(self):
self.add_header("Access-Control-Allow-Origin", "*")
self.write(status_monitor.get_azure_status())
class StatusMonitorDBHandler(tornado.web.RequestHandler):
async def get(self):
self.add_header("Access-Control-Allow-Origin", "*")
self.write(status_monitor.dump_report_data())
class StatusMonitorReportHandler(tornado.web.RequestHandler):
async def get(self, name):
self.add_header("Access-Control-Allow-Origin", "*")
self.write(status_monitor.dump_report_data(name=name))
class StatusMonitorHandler(tornado.web.RequestHandler):
async def get(self):
self.write(status_monitor.render_status_index())
class AliveHandler(tornado.web.RequestHandler):
async def get(self):
self.add_header("Access-Control-Allow-Origin", "*")
self.write(json.dumps({"status": "operational"}))
def create_webapp():
application = tornado.web.Application([
(r"/conda-linting/org-hook", LintingHookHandler),
(r"/conda-forge-feedstocks/org-hook", UpdateFeedstockHookHandler),
(r"/conda-forge-teams/org-hook", UpdateTeamHookHandler),
(r"/conda-forge-command/org-hook", CommandHookHandler),
(r"/conda-webservice-update/versions", UpdateWebservicesVersionsHandler),
(r"/feedstock-outputs/validate", OutputsValidationHandler),
(r"/feedstock-outputs/copy", OutputsCopyHandler),
(r"/status-monitor/payload", StatusMonitorPayloadHookHandler),
(r"/status-monitor/azure", StatusMonitorAzureHandler),
(r"/status-monitor/db", StatusMonitorDBHandler),
(r"/status-monitor/report/(.*)", StatusMonitorReportHandler),
(r"/status-monitor", StatusMonitorHandler),
(r"/alive", AliveHandler),
])
return application
async def _cache_data():
LOGGER.info("")
LOGGER.info("===================================================")
LOGGER.info("caching status data")
LOGGER.info("===================================================")
async with STATUS_DATA_LOCK:
await tornado.ioloop.IOLoop.current().run_in_executor(
_thread_pool(),
status_monitor.cache_status_data,
)
def main():
# start logging and reset the log format to make it a bit easier to read
tornado.log.enable_pretty_logging()
from tornado.log import LogFormatter
my_log_formatter = LogFormatter(fmt='%(message)s', color=True)
root_logger = logging.getLogger()
root_streamhandler = root_logger.handlers[0]
root_streamhandler.setFormatter(my_log_formatter)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--local",
help="run the webserver locally on 127.0.0.1:5000",
action="store_true",
)
args = parser.parse_args()
application = create_webapp()
http_server = tornado.httpserver.HTTPServer(application, xheaders=True)
port = int(os.environ.get("PORT", 5000))
LOGGER.info("starting server")
if args.local:
LOGGER.info(
"server address: http://127.0.0.1:5000/")
http_server.listen(5000, address='127.0.0.1')
else:
http_server.listen(port)
pcb = tornado.ioloop.PeriodicCallback(
lambda: asyncio.create_task(_cache_data()),
status_monitor.TIME_INTERVAL * 1000,
)
pcb.start()
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
bsd-3-clause
| 1,596,594,711,162,535,400
| 32.721311
| 88
| 0.494514
| false
| 4.470351
| false
| false
| false
|