| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
psathyrella/partis-deprecated
|
python/compare.py
|
Python
|
gpl-3.0
| 2,531
| 0.003556
|
#!/usr/bin/env python
import argparse
import json
import csv
import sys
sys.path.append('python')
import plotting
import utils
from opener import opener
parser = argparse.ArgumentParser()
parser.add_argument('-b', action='store_true') # passed on to ROOT when plotting
parser.add_argument('--outdir', required=True)
parser.add_argument('--plotdirs', required=True)
parser.add_argument('--names', required=True)
parser.add_argument('--stats', default='')
parser.add_argument('--no-errors', action='store_true')
parser.add_argument('--plot-performance', action='store_true')
parser.add_argument('--scale-errors')
parser.add_argument('--rebin', type=int)
parser.add_argument('--colors')
parser.add_argument('--linestyles')
parser.add_argument('--datadir', default='data/imgt')
parser.add_argument('--leaves-per-tree')
parser.add_argument('--linewidths')
parser.add_argument('--markersizes')
parser.add_argument('--dont-calculate-mean-info', action='store_true')
parser.add_argument('--normalize', action='store_true')
parser.add_argument('--graphify', action='store_true')
parser.add_argument('--strings-to-ignore') # remove this string from the plot names in each dir (e.g. '-mean-bins') NOTE replaces '_' with '-'
args = parser.parse_args()
if args.strings_to_ignore is not None:
args.strings_to_ignore = args.strings_to_ignore.replace('_', '-')
args.plotdirs = utils.get_arg_list(args.plotdirs)
args.scale_errors = utils.get_arg_list(args.scale_errors)
args.colors = utils.get_arg_list(args.colors, intify=True)
args.linestyles = utils.get_arg_list(args.linestyles, intify=True)
args.names = utils.get_arg_list(args.names)
args.leaves_per_tree = utils.get_arg_list(args.leaves_per_tree, intify=True)
args.strings_to_ignore = utils.get_arg_list(args.strings_to_ignore)
args.markersizes = utils.get_arg_list(args.markersizes, intify=True)
args.linewidths = utils.get_arg_list(args.linewidths, intify=True)
for iname in range(len(args.names)):
args.names[iname] = args.names[iname].replace('@', ' ')
assert len(args.plotdirs) == len(args.names)
with opener('r')(args.datadir + '/v-meta.json') as json_file: # get location of <begin> cysteine in each v region
args.cyst_positions = json.load(json_file)
with opener('r')(args.datadir + '/j_tryp.csv') as csv_file: # get location of <end> tryptophan in each j region (TGG)
tryp_reader = csv.reader(csv_file)
args.tryp_positions = {row[0]:row[1] for row in tryp_reader} # WARNING: this doesn't filter out the header line
plotting.compare_directories(args)
|
WatSat-ADCS/Comm
|
test/test_comm.py
|
Python
|
mit
| 1,077
| 0.004643
|
"""
integration test for arduino
NOTE: requires the arduino to be plugged in
"""
import unittest
from comp.comm import ADCSArduino
class TestComm(unittest.TestCase):
def setUp(self):
self.ard = ADCSArduino(pr="/dev/ttyACM0")
def tearDown(self):
self.ard.close_arduino_port()
def test_activate(self):
self.ard.activate()
self.assertEqual(self.ard.arduino.isOpen(), False)
def test_get_sensor_data(self):
self.ard.open_arduino_port()
data = self.ard.get_sensor_data()
print data
self.assertIsNotNone(data)
self.ard.close_arduino_port()
def test_cont_sample(self):
self.ard.open_arduino_port()
for i in range (5):
data = self.ard.get_sensor_data()
print data
self.assertIsNotNone(data)
self.ard.close_arduino_port()
def test_post_change(self):
self.ard.post_change(7)
# def test_workflow(self):
# TODO: test full workflow
# pass
if __name__ == "__main__":
unittest.main()
|
openstack/barbican
|
barbican/hacking/checks.py
|
Python
|
apache-2.0
| 7,978
| 0
|
# Copyright (c) 2016, GohighSec
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import six
from hacking import core
import pycodestyle
"""
Guidelines for writing new hacking checks
- Use only for Barbican specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range B3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the B3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to barbican/tests/test_hacking.py
"""
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
assert_no_xrange_re = re.compile(r"\s*xrange\s*\(")
assert_True = re.compile(r".*assertEqual\(True, .*\)")
assert_None = re.compile(r".*assertEqual\(None, .*\)")
assert_Not_Equal = re.compile(r".*assertNotEqual\(None, .*\)")
assert_Is_Not = re.compile(r".*assertIsNot\(None, .*\)")
no_log_warn = re.compile(r".*LOG.warn\(.*\)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
CHECK_DESC = 'No check message specified'
def __init__(self, tree, filename):
"""This object is created automatically by pycodestyle.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pycodestyle."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
class CheckLoggingFormatArgs(BaseASTChecker):
"""Check for improper use of logging format arguments.
LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.",
('volume1', 500))
The format arguments should not be a tuple as it is easy to miss.
"""
name = "check_logging_format_args"
version = "1.0"
CHECK_DESC = 'B310 Log method arguments should not be a tuple.'
LOG_METHODS = [
'debug', 'info',
'warn', 'warning',
'error', 'exception',
'critical', 'fatal',
'trace', 'log'
]
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, six.string_types):
return node
else: # could be Subscript, Call or many more
return None
def visit_Call(self, node):
"""Look for the 'LOG.*' calls."""
# extract the obj_name and method_name
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
if isinstance(node.func.value, ast.Name):
method_name = node.func.attr
elif isinstance(node.func.value, ast.Attribute):
obj_name = self._find_name(node.func.value)
method_name = node.func.attr
else: # could be Subscript, Call or many more
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# obj must be a logger instance and method must be a log helper
if (obj_name != 'LOG'
or method_name not in self.LOG_METHODS):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# the call must have arguments
if not len(node.args):
return super(CheckLoggingFormatArgs, self).generic_visit(node)
# any argument should not be a tuple
for arg in node.args:
if isinstance(arg, ast.Tuple):
self.add_error(arg)
return super(CheckLoggingFormatArgs, self).generic_visit(node)
@core.flake8ext
def check_oslo_namespace_imports(physical_line, logical_line, filename):
"""'oslo_' should be used instead of 'oslo.'
B317
"""
if pycodestyle.noqa(physical_line):
return
if re.match(oslo_namespace_imports, logical_line):
msg = ("B317: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
@core.flake8ext
def dict_constructor_with_list_copy(logical_line):
"""Use a dict comprehension instead of a dict constructor
B318
"""
msg = ("B318: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
@core.flake8ext
def no_xrange(logical_line):
"""Do not use 'xrange'
B319
"""
if assert_no_xrange_re.match(logical_line):
yield(0, "B319: Do not use xrange().")
@core.flake8ext
def validate_assertTrue(logical_line):
"""Use 'assertTrue' instead of 'assertEqual'
B312
"""
if re.match(assert_True, logical_line):
msg = ("B312: Unit tests should use assertTrue(value) instead"
" of using assertEqual(True, value).")
yield(0, msg)
@core.flake8ext
def validate_assertIsNone(logical_line):
"""Use 'assertIsNone' instead of 'assertEqual'
B311
"""
if re.match(assert_None, logical_line):
msg = ("B311: Unit tests should use assertIsNone(value) instead"
" of using assertEqual(None, value).")
yield(0, msg)
@core.flake8ext
def no_log_warn_check(logical_line):
"""Disallow 'LOG.warn'
B320
"""
msg = ("B320: LOG.warn is deprecated, please use LOG.warning!")
if re.match(no_log_warn, logical_line):
yield(0, msg)
@core.flake8ext
def validate_assertIsNotNone(logical_line):
"""Use 'assertIsNotNone'
B321
"""
if re.match(assert_Not_Equal, logical_line) or \
re.match(assert_Is_Not, logical_line):
msg = ("B321: Unit tests should use assertIsNotNone(value) instead"
" of using assertNotEqual(None, value) or"
" assertIsNot(None, value).")
yield(0, msg)
|
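The snippet above documents how new Barbican hacking checks are added (pick the next free B3xx number, register with `@core.flake8ext`, keep the regex at module level). A minimal sketch of one more check in that style; the rule number B322, the regex, and the message are illustrative only, not an actual Barbican rule:

```python
import re

from hacking import core

# Hypothetical companion to B312: flag assertEqual(False, ...) as well.
assert_False = re.compile(r".*assertEqual\(False, .*\)")


@core.flake8ext
def validate_assertFalse(logical_line):
    """Use 'assertFalse' instead of 'assertEqual'

    B322 (illustrative number only)
    """
    if re.match(assert_False, logical_line):
        msg = ("B322: Unit tests should use assertFalse(value) instead"
               " of using assertEqual(False, value).")
        yield (0, msg)
```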
easyw/kicad-3d-models-in-freecad
|
cadquery/FCAD_script_generator/Button_Switch_Nidec/cq_base_model.py
|
Python
|
gpl-2.0
| 23,507
| 0.005658
|
#!/usr/bin/python
# -*- coding: utf8 -*-
#
#****************************************************************************
#* *
#* base classes for generating part models in STEP AP214 *
#* *
#* This is part of FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* Copyright (c) 2017 *
#* Terje Io https://github.com/terjeio *
#* Maurice https://launchpad.net/~easyw *
#* *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
# 2017-11-30
#
# parts of this code is based on work by other contributors
#
import cadquery as cq
import FreeCAD
from math import sin, tan, radians
class Polyline:
r"""A class for creating a polyline wire (including arcs) using **relative** moves (turtle graphics style)
:param plane: the workplane to add the polyline (as a wire)
:type plane: ``workplane``
:param origin: point
:type origin: ``point``
Most of the methods return a reference to the class instance (self), which allows method chaining
"""
def __init__(self, plane, origin=(0.0, 0.0)):
self.commands = []
self.plane = plane
self.origin = origin
self.x = 0.0
self.y = 0.0
self.addMoveTo(origin[0], origin[1])
def getCurrentPosition(self):
r"""get the current position in absolute coordinates
:rtype: Point
"""
return (self.x, self.y)
def addMoveTo(self, x, y):
r"""add a relative move (offset) from the current coordinate
.. note:: when issued as the first call after instantiating the class then the origin is moved accordingly
:param x: x distance from current position
:type x: ``float``
:param y: y distance from current position
:type y: ``float``
:rtype: self
"""
self.x += x
self.y += y
if len(self.commands) == 1:
self.commands = []
self.origin = (self.x, self.y)
self.commands.append((0, self.x, self.y))
return self
def addPoint(self, x, y):
r"""add a straight line to point
:param x: x distance from current position
:type x: ``float``
:param y: y distance from current position
:type y: ``float``
:rtype: self
"""
self.x += x
self.y += y
self.commands.append((1, self.x, self.y))
return self
def addPoints(self, pointList):
r"""add a list of new points
:param pointList:
:type pointList: list of points
:rtype: self
Example where first half is defined by points and then mirrored by adding points in reverse order::
ow = 0.6
pw = self.pin_width
c1 = (ow - pw) / 2.0
pin = Polyline(cq.Workplane("XY"), origin=(0.0, self.body_width / 2.0))\
.addPoints([
(ow / 2.0, 0),
(0.0, -self.body_width),
(-c1, -c1),
(0.0, -(self.pin_length - pw)),
(-pw / 4.0, -pw),
(-pw / 4.0, 0.0),
])\
.addMirror().make().extrude(self.pin_thickness)
.. figure:: ../images/pin.png
Rendering
"""
for point in pointList:
self.addPoint(point[0], point[1])
return self
def addArc(self, radius, angle=90, type=0):
o = sin(radians(abs(angle) / 2.0))
p = 1.0 - 1.0 * o
f = -1.0 if angle < 0.0 else 1.0
if type == 0:
ap1 = self.x + radius * (p if f == 1.0 else o)
ap2 = self.y + radius * (o if f == 1.0 else p) * f
else:
ap1 = self.x + radius * (p if f == -1.0 else o)
ap2 = self.y + radius * (o if f == -1.0 else p) * f
self.x += radius
self.y += radius * f
self.commands.append((2, self.x, self.y, ap1, ap2))
return self
def addThreePointArc(self, point1, point2):
r"""create a three point arc
The starting point is the current position, end point is *point2*, the arc will be drawn through point1
:param point1:
:type point1: ``point``
:param point2:
:type point2: ``point``
:rtype: self
Example::
l = 4
a = 0.2
w = 2 - a
body = Polyline(cq.Workplane("XY"))\
.addPoint(0, w)\
.addThreePointArc((l / 2, a), (l, 0))\
.addPoint(0,- w).make().extrude(1)
.. figure:: ../images/threepointarc.png
Rendering
"""
ap1 = self.x + point1[0]
ap2 = self.y + point1[1]
self.x += point2[0]
self.y += point2[1]
self.commands.append((2, self.x, self.y, ap1, ap2))
return self
def addChamferedRectangle(self, length, width, chamfer):
r"""create a chamfered rectangle centered at the current point
:param length:
:type length: ``float``
:param width:
:type width: ``float``
:param chamfer:
:type chamfer: ``float``
:rtype: self
See :func:`addRoundedRectangle` for an example
"""
self.addMoveTo(-length / 2.0, -width / 2.0 + chamfer)
length = length - chamfer * 2.0
width = width - chamfer * 2.0
self.addPoint(0.0, width)
self.addPoint(chamfer, chamfer)
self.addPoint(length, 0)
self.addPoint(chamfer, -chamfer)
self.addPoint(0.0, -width)
self.addPoint(-chamfer, -chamfer)
self.addPoint(-length, 0.0)
self.addPoint(-chamfer, chamfer)
return self
def addRoundedRectangle(self, length, width, radius):
r"""create a rounded rectangle centered at the current point
:param length:
:type length: ``float``
:param width:
:type width: ``float``
:param cornerRadius:
:type cornerRadius: ``float``
:rtype: self
Example with a chamfered rectangle cutout::
l = 4
w = 2
cutout = Polyline(cq.Workplane("XY"))\
.addChamferedRectangle(l - 0.3, w - 0.
|
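The Polyline docstrings above describe a chaining, turtle-style API where every `add*` method returns `self` and `make()`/`extrude()` turn the wire into a solid. A short usage sketch, assuming this module is importable in a FreeCAD/cadquery environment; the dimensions are arbitrary:

```python
import cadquery as cq

# Chamfered 4 x 2 outline extruded into a 1 mm thick pad; each add* call
# returns self, so the whole shape is built in one chained expression.
pad = (Polyline(cq.Workplane("XY"))
       .addChamferedRectangle(4.0, 2.0, 0.3)
       .make()
       .extrude(1.0))
```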
whiteclover/Medoly
|
medoly/config/hocon.py
|
Python
|
apache-2.0
| 38,051
| 0.000631
|
#!/usr/bin/env python
#
# Copyright 2016 Medoly
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Human-Optimized Config Object Notation"""
import re
from .errors import HoconParserException, HoconTokenizerException
from .select_config import SelectConfig
class BaseConfig(object):
"""Base Config
:param root: the real hocon root value
:type root: HoconRoot
:raises: AttributeError
"""
def __init__(self, root):
if root.value is None:
raise AttributeError(" error")
self.root = root.value # HoconValue
self.substitutions = root.substitutions # List<HoconSubstitution>
def get_node(self, path):
"""Gets the path data node"""
keys = path.split(".")
current_node = self.root
if current_node is None:
raise KeyError("Key does not exist: %s" % path)
for key in keys:
current_node = current_node.get_child_object(key)
return current_node
def __str__(self):
if self.root is None:
return ""
return str(self.root)
def to_dict(self):
"""Converts to dict"""
return self.root.get()
def to_select_config(self):
"""Converts to SelectConfig"""
return SelectConfig(self.root.get())
def has_path(self, path):
"""Check the config has the path node"""
return self.get_node(path) is not None
class Config(BaseConfig):
"""Hocon config
Extends:
BaseConfig
"""
def get_bool(self, path, default=False):
"""Gets the bool data value, defaults not found returns the default value"""
value = self.get_node(path)
if value is None:
return default
return value.get_bool()
def get_int(self, path, default=0):
"""Gets the integer data value, defaults not found returns the default value"""
value = self.get_node(path)
if value is None:
return default
return value.get_int()
def get(self, path, default=None):
"""Gets the string data value, defaults not found returns the default value"""
value = self.get_node(path)
if value is None:
return default
return value.get_string()
get_string = get
def get_float(self, path, default=0.0):
"""Gets the float data value, defaults not found returns the default value"""
value = self.get_node(path)
if value is None:
return default
return value.get_float()
def get_bool_list(self, path):
"""Gets the bool data value, defaults not found returns the default value"""
value = self.get_node(path)
return value.get_bool_list()
def get_float_list(self, path):
"""Gets the float list data value"""
value = self.get_node(path)
return value.get_float_list()
def get_int_list(self, path):
"""Gets the int list data value"""
value = self.get_node(path)
return value.get_int_list()
def get_list(self, path):
"""Gets the list object data value"""
value = self.get_node(path)
return value.get_list()
def get_value(self, path):
"""Gets the string data node, defaults not found returns the default value"""
return self.get_node(path)
class PyConfig(BaseConfig):
"""Python style config"""
def get(self, path, default=None):
"""Get real type value"""
value = self.get_node(path)
if value is None:
return default
return value.get()
class ConfigFactory(object):
"""Config create tool"""
@classmethod
def empty(cls):
"""Creates a empty hocon config"""
return cls.parse("")
@classmethod
def parse(cls, hocon, func=None, pystyle=False):
"""Parses and creates a hocon config from text string"""
res = Parser.parse(hocon, func, pystyle)
configCls = PyConfig if pystyle else Config
return configCls(res)
@classmethod
def parse_file(cls, path, pystyle=False):
"""Parses and creates a hocon config from the file path"""
with open(path) as f:
content = f.read()
return cls.parse(content, pystyle=pystyle)
@classmethod
def from_json(cls, jsonObj, pystyle=False):
"""Creates hocon from json data"""
import json
text = json.dumps(jsonObj)
return cls.parse(text, pystyle)
class HoconRoot(object):
"""Hocon config object"""
def __init__(self, value=None, substitutions=None):
self.value = value or HoconValue()
self.substitutions = substitutions or []
class MightBeAHoconObject(object):
"""Hocon Maybe a hocon object"""
pass
class HoconValue(MightBeAHoconObject):
"""Hocon data value node object
Extends:
MightBeAHoconObject
"""
def __init__(self, values=None):
self.values = values or []
def at_key(self, key):
"""Get data node by key"""
o = HoconObject()
o.get_or_create_key(key)
o[key] = self
r = HoconValue().append_value(o)
return Config(HoconRoot(r))
def get_object(self):
"""Get the real current object"""
raw = self.values[0] if len(self.values) >= 1 else None
if isinstance(raw, HoconObject):
return raw
if isinstance(raw, MightBeAHoconObject):
if raw.is_object():
return raw.get_object()
return raw
def is_object(self):
"""Check is an object"""
return self.get_object() is not None
def append_value(self, value):
"""Append a value to the current node"""
# if isinstance(value, HoconElement):
self.values.append(value)
return self
def clear(self):
"""Clear the sub nodes"""
self.values[:] = []
def new_value(self, value):
"""Clear the sub values and reset by the new value"""
self.clear()
self.append_value(value)
def is_string(self):
"""Check is string object"""
return all([v.is_string() for v in self.values])
def get_array(self):
"""Get the datas value as node list"""
x = []
for arr in self.values:
if arr.is_array():
x.extend(arr.get_array())
return x
def get_list(self):
"""Get the datas value as string list"""
return [e.get_string() for e in self.get_array()]
def is_array(self):
"""Is array?"""
return self.get_list() is not None
def get(self):
"""Get the the sub node"""
if len(self.values) == 1:
return self.values[0].get_object()
return [_.get_object() for _ in self.values]
def contat(self):
"""Contat the contain objects as string"""
return "".join([_.get_string() for _ in self.values])
def get_child_object(self, key):
"""Get child object"""
return self.get_object().get_key(key)
def get_bool(self):
"""Get the current object as bool value"""
v = self.get_string()
if v == 'on':
return True
if v == 'off':
return False
if v == 'true':
return True
if v == 'false':
return False
raise ValueError("Unknown boolean format: " + v)
def get_string(self):
"""Get the nodes as string"""
if self.is_string():
return self.contat()
return None
def _get_by_type(self, cast):
"""Get the value node by cast type
:param cast: the cast object (eg. int
|
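ConfigFactory.parse above wires the (not shown) Parser to either Config or PyConfig. A small sketch of how the typed getters would be driven, assuming the Parser accepts this simple input; the keys and values are made up:

```python
hocon_text = """
server.port = 8080
server.debug = on
"""
conf = ConfigFactory.parse(hocon_text)

port = conf.get_int("server.port")      # -> 8080
debug = conf.get_bool("server.debug")   # -> True, since get_bool maps 'on' to True
```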
breznak/ALife
|
alife/experiments/behavior/random_walk_map.py
|
Python
|
gpl-2.0
| 6,062
| 0.032662
|
#!/bin/env python2
from alife.worlds.world import World, Point
from alife.agents.UtilityAgent import SimpleAgent
from alife.utils.utils import dumpToArray, zeros
import math
import sys
import numpy
from nupic.encoders.extras.utility import SimpleUtilityEncoder
# common settings:
items=None
target=Point(4,9)
agent=None
foods = []
_hungerMax = 40
def main(targetX=4, targetY=9, dimX=250, dimY=250):
global target
global agent
target=Point(int(targetX), int(targetY))
if not(target.x< dimX and target.y < dimY):
raise Exception("Target coords unreachable!")
w = WorldWithResources(int(dimX), int(dimY), count=[2], types=['food']) # world with some food
ag = SimpleAgent(actions={'go' : go}, targets=[reachedTarget], world=w)
ag.verbose = 2
ag.util = SimpleUtilityEncoder(length=2, minval=0, maxval=max(int(dimX),int(dimY)), scoreMin=0, scoreMax=100, scoreResolution=0.1)
ag.util.setEvaluationFn(euclDistance)
ag.start=ag.world._getRandomPos()
ag.me['hunger']=0 # not hungry
ag.mem = zeros(['score'],ag.mem,ag.world.dimX, ag.world.dimY, zero=-1)
ag.mem = zeros(['hunger'],ag.mem,ag.world.dimX, ag.world.dimY, zero=-1)
ag.mem = zeros(['target'],ag.mem,ag.world.dimX, ag.world.dimY, zero=-1)
agent = ag
# walk it, baby
NUM_WALKS=1
NUM_STARTS=1
for _ in xrange(0, NUM_STARTS):
ag.me['x'],ag.me['y'] = ag.world._getRandomPos()
print "Starting from:", ag.me['x'], ag.me['y']
for _ in xrange(0,NUM_WALKS):
while not reachedTarget(ag, target):
x = ag.me['x']
y = ag.me['y']
moves = [-1,0,1] # for move left-right, front-back
dx = moves[numpy.random.randint(0,len(moves),1)[0]]
dy = moves[numpy.random.randint(0,len(moves),1)[0]]
if dx == 0 and dy == 0: # would stand still
continue
ag.actions['go'](ag, x+dx, y+dy)
utilityMap = dumpToArray('score', ag.mem, ag.world.dimX, ag.world.dimY)
# print utilityMap
targetMap = dumpToArray('target', ag.mem, ag.world.dimX, ag.world.dimY)
_visualize(targetMap)
hungerMap = dumpToArray('hunger', ag.mem, ag.world.dimX, ag.world.dimY)
_visualize(hungerMap)
_visualize(utilityMap)
#########################################
# defines target:
def reachedTarget(ag, target):
"""are we there yet?"""
if ag.verbose > 2:
print ag.me['x'], target.x, ag.me['y'], target.y
return ag.me['x']==target.x and ag.me['y']==target.y
# defines score:
def euclDistance(listCoords):
"""eval fn for agent is distance to target """
global target
global agent
x=listCoords[0]
y=float(listCoords[1])
tx=target.x
ty=target.y
if reachedTarget(agent, target):
return -10 # make target visible
dst_target = math.sqrt((tx-x)**2 + (ty-y)**2)
(_,dst_food) = _toNearestFood(x,y)
w=agent.world.tiles
if(w[x][y]['food']==1):
#print "Found some tasty food!"
agent.me['hunger']=0
hunger = agent.me['hunger']
if agent.mem[x][y]['target'] == -1:
agent.mem[x][y]['target'] = dst_target
else:
agent.mem[x][y]['target'] = min(dst_target,agent.mem[x][y]['target'])
if agent.mem[x][y]['hunger'] == -1:
agent.mem[x][y]['hunger']= dst_food
else:
agent.mem[x][y]['hunger'] = min(agent.mem[x][y]['hunger'], dst_food)
return agent.mem[x][y]['target'] +((agent.me['hunger']/_hungerMax)* agent.mem[x][y]['hunger'])
# return (1- agent.me['hunger']/_hungerMax)*dst_target + (agent.me['hunger']/_hungerMax)*dst_food # there's + bcs we're finding a min (target dist = 0)
# return dst_target + dst_food
# return agent.mem[x][y]['hunger'] + agent.mem[x][y]['target']
if hunger > _hungerMax*0.8: # hunger >80% --> eat!
return agent.mem[x][y]['hunger']
elif hunger < _hungerMax*0.33: # hunger < 33% --> pursuit life-goals
return agent.mem[x][y]['target']
else:
return agent.mem[x][y]['target'] +(_hungerMax- agent.mem[x][y]['hunger'])
def _toNearestFood(x,y):
global foods
max_idx = 0
max_val = 100000
for i,p in enumerate(foods):
fx,fy = p
d = math.sqrt((x-fx)**2 + (y-fy)**2)
if d < max_val:
max_idx = i
max_val = d
return (max_idx, max_val)
# defines action:
def go(ag, x, y):
"""go to x,y"""
# check bounds
if(x<0 or y<0 or x>=ag.world.dimX or y>=ag.world.dimY):
if(ag.verbose > 2):
print "Agent ", ag.name, " is crossing the borders! (",x,",",y,")"
return
ag.me['x']=x
ag.me['y']=y
ag.me['steps']+=1
if ag.me['hunger']<=_hungerMax:
ag.me['hunger']+=1 # walking is tiresome
if ag.mem[x][y]['score'] in [-1]: #default
ag.mem[x][y]['score'] = ag.util.getScoreIN([x, y])
else:
ag.mem[x][y]['score'] = ag.util.getScoreIN([x, y]) #min(ag.mem[x][y]['score'],ag.util.getScoreIN([x, y]))
ag.mem[x][y]['visited'] = 1
#########################################
# helper functions:
def _visualize(map):
try:
# from alife.simulators.mayavi.plot3D import plot3d
from mayavi import mlab
except:
print "Can't show you nice picture; couldn't import mayavi"
return
# plot3d(map)
mlab.figure()
mlab.barchart(map, scale_factor=0.6)
mlab.show()
#########################################
class WorldWithResources(World):
import numpy
def __init__(self, dimX, dimY, count=[5], types=['food']):
"""distributes (list) count of (list) types resources randomly;
"""
super(WorldWithResources, self).__init__(dimX, dimY, types)
global foods
for i,typee in enumerate(types):
for x in xrange(0,dimX): # zero-out
for y in xrange(0,dimY):
self.tiles[x][y][typee]=0
cnt = count[i]
for c in xrange(0,cnt):
rx = numpy.random.randint(0,dimX,1).tolist()[0]
ry = numpy.random.randint(0,dimY,1).tolist()[0]
self.tiles[rx][ry][typee] = 1
foods.append( (rx,ry) )
#########################################
if __name__ == "__main__":
if(len(sys.argv)==3):
main(sys.argv[1], sys.argv[2])
elif(len(sys.argv)==5):
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
else:
print "Use: \n python /path/to/utility_map.py [targetX targetY [dimX dimY]]"
main()
|
m-lab/mlab-ns
|
server/mlabns/util/util.py
|
Python
|
apache-2.0
| 2,015
| 0.000993
|
import json
import os
import jinja2
from mlabns.util import message
def _get_jinja_environment():
current_dir = os.path.dirname(__file__)
templates_dir = os.path.join(current_dir, '../templates')
return jinja2.Environment(loader=jinja2.FileSystemLoader(templates_dir),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def _get_jinja_template(template_filename):
return _get_jinja_environment().get_template(template_filename)
def send_no_content(request):
request.response.headers['Access-Control-Allow-Origin'] = '*'
request.response.headers['Content-Type'] = 'application/json'
request.response.set_status(204)
def send_not_found(request, output_type=message.FORMAT_HTML):
request.error(404)
if output_type == message.FORMAT_JSON:
data = {}
data['status_code'] = '404 Not found'
json_data = json.dumps(data)
request.response.headers['Content-Type'] = 'application/json'
request.response.out.write(json_data)
else:
request.response.out.write(_get_jinja_template('not_found.html').render(
))
def send_server_error(request, output_type=message.FORMAT_HTML):
request.error(500)
if output_type == message.FORMAT_JSON:
data = {}
data['status_code'] = '500 Internal Server Error'
json_data = json.dumps(data)
request.response.headers['Content-Type'] = 'application/json'
request.response.out.write(json_data)
else:
request.response.out.write(_get_jinja_template('not_found.html').render(
))
def send_success(request, output_type=message.FORMAT_JSON):
if output_type == message.FORMAT_JSON:
data = {}
data['status_code'] = '200 OK'
json_data = json.dumps(data)
request.response.headers['Content-Type'] = 'application/json'
request.response.out.write(json_data)
else:
request.response.out.write('<html> Success! </html>')
|
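The jinja2 helpers above only differ from stock jinja2 usage by where the templates directory lives. A self-contained sketch of the same environment setup and render call, with the template name taken from the handlers above and the directory layout treated as an assumption:

```python
import os

import jinja2

# Mirror _get_jinja_environment(): load templates from ../templates relative
# to this file and autoescape rendered output.
templates_dir = os.path.join(os.path.dirname(__file__), '../templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_dir),
                         autoescape=True)

html = env.get_template('not_found.html').render()
```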
PanDAWMS/panda-bigmon-core
|
core/reports/ObsoletedTasksReport.py
|
Python
|
apache-2.0
| 9,797
| 0.005818
|
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.db import connection
from collections import OrderedDict
from datetime import datetime
import time
import scipy.cluster.hierarchy as hcluster
import numpy as np
class ObsoletedTasksReport:
def __init__(self):
pass
def prepareReportTasksV4(self, request, type):
# 1. Select obsolete tasks
# 2. Select obsolete datasets
# 3. Select tasks related to obsolete datasets
# 4. Show datasets, their status, tasks, status
dataSetsSQLQuery = "SELECT t1.TASKID, t1.TIMESTAMP, t1.STATUS, t1.PR_ID, t2.STATUS, t2.NAME, t1.PARENT_TID FROM ATLAS_DEFT.T_PRODUCTION_TASK t1, ATLAS_DEFT.T_PRODUCTION_DATASET t2 WHERE t2.TASKID=t1.TASKID and t1.TIMESTAMP>add_months(sysdate,-1) and (t1.STATUS IN ('obsolete') or (t2.STATUS IN ('toBeDeleted', 'Deleted') and t1.PPTIMESTAMP > add_months(sysdate,-1)))and instr(t2.NAME,'.log.') = 0"
cur = connection.cursor()
cur.execute(dataSetsSQLQuery)
statsDataSets = cur.fetchall()
i = 0
timesecs = []
for taskEntry in statsDataSets:
timesecs.append(time.mktime(taskEntry[1].timetuple()))
i += 1
minT = min(timesecs)
timesecs[:] = [x - minT for x in timesecs]
thresh = 60
dataTmp = [
timesecs,
]
np.asarray(dataTmp)
clusters = hcluster.fclusterdata(np.transpose(np.asarray(dataTmp)), thresh, criterion="distance")
clustersSummary = {}
i = 0
for dsEntry in statsDataSets:
clusterID = clusters[i]
if clusterID in clustersSummary:
currCluster = clustersSummary[clusterID]
currCluster["req"].append(dsEntry[3])
currCluster["datasets"][dsEntry[5]]=dsEntry[4]
currCluster["tasks"][dsEntry[0]]=dsEntry[2]
currCluster["obsoleteStart"] = dsEntry[1]
currCluster["leastParent"] = dsEntry[6] if dsEntry[6] < currCluster["leastParent"] else currCluster["leastParent"]
else:
currCluster = {"req":[dsEntry[3]], "tasks":{dsEntry[0]:dsEntry[2]},
"datasets":{dsEntry[5]:dsEntry[4]}, "obsoleteStart":dsEntry[1], "leastParent":dsEntry[6]}
clustersSummary[clusterID] = currCluster
i+=1
clustersSummary = clustersSummary.values()
cluserssummaryList = sorted(clustersSummary, key=lambda k: k['obsoleteStart'], reverse=True)
data = {}
data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S")
data['type'] = type
data['clusters'] = cluserssummaryList
return render_to_response('reportObsoletedTasksv4.html', data, RequestContext(request))
def prepareReportTasksV1(self, request, type):
uniqueTasksCond = ""
if type == "tasksview":
uniqueTasksCond ="PART=1 and"
sqlRequest = '''
SELECT * FROM (
WITH RECONSTRUCTEDTASKCHAIN AS (
SELECT TASKID, PR_ID, TASKNAME, CHAIN_TID, PARENT_TID, STATUS as TASKSTATUS, LEVEL as LEV, PPFLAG, CASE WHEN PPGRACEPERIOD = -1 THEN 48 ELSE PPGRACEPERIOD END as PPGRACEPERIOD FROm ATLAS_DEFT.T_PRODUCTION_TASK
START WITH PPFLAG > 0
CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID ORDER SIBLINGS BY TASKID
) SELECT RECONSTRUCTEDTASKCHAIN.*, STATUS as DSSTATUS, TIMESTAMP, row_number() OVER(PARTITION BY RECONSTRUCTEDTASKCHAIN.TASKID order by t_production_dataset.TIMESTAMP) AS PART, t_production_dataset.NAME as dsname FROM ATLAS_DEFT.RECONSTRUCTEDTASKCHAIN, ATLAS_DEFT.t_production_dataset WHERE t_production_dataset.TASKID=RECONSTRUCTEDTASKCHAIN.TASKID
and instr(t_production_dataset.NAME,'.log.') = 0
) WHERE '''+uniqueTasksCond+''' PPFLAG>=0 ORDER BY LEV DESC
'''
cur = connection.cursor()
cur.execute(sqlRequest)
stats = cur.fetchall()
tasksInfoList = []
timesecs = []
i = 0
for taskEntry in stats:
timesecs.append(time.mktime(stats[i][10].timetuple()))
i += 1
minT = min(timesecs)
timesecs[:] = [x - minT for x in timesecs]
thresh = 21600
data_run = [
timesecs,
]
np.asarray(data_run)
clusters = hcluster.fclusterdata(np.transpose(np.asarray(data_run)), thresh, criterion="distance")
cluserssummary = {}
i = 0
for taskEntry in stats:
clusterID = clusters[i]
tmpDict = {"reqid": taskEntry[1], "taskid": taskEntry[0], "taskname": taskEntry[2], "dsname": taskEntry[12], "clusterid": clusterID}
tasksInfoList.append(tmpDict)
if clusterID not in cluserssummary:
cluserssummary[clusterID] = {"obsoleteStart":taskEntry[10], "obsoleteFinish":taskEntry[10], "requests":[taskEntry[1]], "tasks":[taskEntry[0]], "datasets":[taskEntry[12]]}
else:
if cluserssummary[clusterID]["obsoleteStart"] > taskEntry[10]:
cluserssummary[clusterID]["obsoleteStart"] = taskEntry[10]
if cluserssummary[clusterID]["obsoleteFinish"] < taskEntry[10]:
cluserssummary[clusterID]["obsoleteFinish"] = taskEntry[10]
if taskEntry[0] not in cluserssummary[clusterID]["tasks"]:
cluserssummary[clusterID]["tasks"].append(taskEntry[0])
if taskEntry[12] not in cluserssummary[clusterID]["datasets"]:
cluserssummary[clusterID]["datasets"].append(taskEntry[12])
if taskEntry[1] not in cluserssummary[clusterID]["requests"]:
cluserssummary[clusterID]["requests"].append(taskEntry[1])
i += 1
cluserssummaryList = []
for id, cluster in cluserssummary.items():
cluserssummaryList.append(cluster)
cluserssummaryList = sorted(cluserssummaryList, key=lambda k: k['obsoleteStart'], reverse=True)
data = {}
data['tasksInfo'] = tasksInfoList
data['built'] = datetime.now().strftime("%d %b %Y %H:%M:%S")
data['type'] = type
data['clusters'] = cluserssummaryList
return render_to_response('reportObsoletedTasksv3.html', data, RequestContext(request))
def prepareReportTasksV0(self, request):
sqlRequest = '''
SELECT * FROM (
WITH RECONSTRUCTEDTASKCHAIN AS (
SELECT TASKID, CHAIN_TID, PARENT_TID, STATUS as TASKSTATUS, LEVEL as LEV, PPFLAG, CASE WHEN PPGRACEPERIOD = -1 THEN 48 ELSE PPGRACEPERIOD END as PPGRACEPERIOD FROm ATLAS_DEFT.T_PRODUCTION_TASK
START WITH PPFLAG > 0
CONNECT BY NOCYCLE PRIOR TASKID=PARENT_TID ORDER SIBLINGS BY TASKID
) SELECT RECONSTRUCTEDTASKCHAIN.*, STATUS as DSSTATUS, TIMESTAMP, row_number() OVER(PARTITION BY RECONSTRUCTEDTASKCHAIN.TASKID order by t_production_dataset.TIMESTAMP) AS PART, t_production_dataset.NAME as dsname FROM ATLAS_DEFT.RECONSTRUCTEDTASKCHAIN, ATLAS_DEFT.t_production_dataset WHERE t_production_dataset.TASKID=RECONSTRUCTEDTASKCHAIN.TASKID
and instr(t_production_dataset.NAME,'.log.') = 0
) WHERE PART=1 and PPFLAG>=0 ORDER BY LEV ASC
'''
cur = connection.cursor()
cur.execute(sqlRequest)
stats = cur.fetchall()
tasksInfo = OrderedDict()
inversedMap = {}
for taskEntry in stats:
if taskEntry[4] == 1: #This is entry level of tasks chain
if taskEntry[5] == 1:
tmpDict = {"tofdel":"task force obsoleting"}
if taskEntry[5] == 2:
tmpDict = {"tofdel":"task chain obsoleting"}
tmpDict["date"] = taskEntry[8]
tmpDict["graceperiod"] = taskEntry[6]
tmpDict["dsname"] = taskEntry[10]
tmpDict["dsstatus"] = taskEntry[3]
tasksInfo[taskEntry[0]] = tmpDict
else:
if taskEntry[2] in inversedMap: #here we ch
|
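Both report variants above turn a flat list of timestamps into event groups with scipy hierarchical clustering (`fclusterdata` with a `distance` criterion and a threshold in seconds). A standalone sketch of that step with made-up epoch offsets:

```python
import numpy as np
import scipy.cluster.hierarchy as hcluster

# Two bursts of events, separated by well more than the 60 s threshold.
timesecs = np.array([0.0, 10.0, 25.0, 500.0, 520.0])
data = timesecs.reshape(-1, 1)  # fclusterdata expects a 2-D (n_samples, n_features) array

labels = hcluster.fclusterdata(data, t=60, criterion="distance")
print(labels)  # e.g. [1 1 1 2 2] -- one cluster id per burst
```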
shvets/etvnet-plex-plugin
|
test/test_helper.py
|
Python
|
mit
| 302
| 0.013245
|
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../src/lib/common')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../src/lib/etvnet')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../src/lib/youtube')))
|
ilhamwk/accounting
|
view_major.py
|
Python
|
cc0-1.0
| 4,006
| 0.005741
|
from flask import *
from playhouse.flask_utils import *
import string
from app import app
from model import Major, Minor, Store, Transaction, Item
@app.route('/major', methods=['GET', 'POST'])
def major_list():
query = Major \
.select(Major, Minor) \
.join(Minor, on=(Major.id == Minor.major).alias('minor')) \
.order_by(Major.id)
last = None
minors = []
majors = []
for major in query:
minor = { 'id': major.minor.id, 'name': major.minor.name }
if last != None and major.id != last.id:
majors.append({'id': last.id, 'income': last.income,
'name': last.name, 'minors': minors})
minors = [minor]
else:
minors.append(minor)
last = major
if last != None:
majors.append({'id': last.id, 'income': last.income,
'name': last.name, 'minors': minors})
return render_template('major.html', majors=majors)
@app.route('/major/add', methods=['GET', 'POST'])
def major_add():
if request.method == 'POST':
if request.form.get('major_id'):
major = get_object_or_404(Major, Major.id == request.form['major_id'])
minors = Minor.listWithStats(request.form['major_id'])
major.name = request.form['name']
major.income = bool(request.form.get('income'))
major.save()
flash('Category #%d updated successfully.' % major.id, 'success')
else:
major = Major.create(name=request.form['name'],
income=bool(request.form.get('income')))
minors = []
for minor_name in string.split(request.form['minors'], ','):
if len(minor_name) > 0:
minor = Minor.create(name=string.strip(minor_name), major=major)
minors.append(minor)
flash('A category created successfully.', 'success')
return render_template('major.html', major=major, minors=minors)
return render_template('major.html')
@app.route('/major/<int:id>', methods=['GET', 'POST'])
def major_detail(id):
major = get_object_or_404(Major, Major.id == id)
minors = Minor.listWithStats(id)
num_items = 0
for minor in minors:
num_items += minor.count
return render_template('major.html',
major=major, minors=minors, num_items=num_items)
@app.route('/major/delete/<int:id>', methods=['GET', 'POST'])
def major_delete(id):
major = get_object_or_404(Major, Major.id == id)
major.delete_instance()
minors = Minor.delete().where(Minor.major == id).execute()
flash('Category #%d is deleted.' % id, 'success')
return jsonify(success=True)
@app.route('/_minor/add', methods=['POST'])
def minor_add():
try:
major_id = request.form['major_id']
major = get_object_or_404(Major, Major.id == major_id)
minor = Minor.create(name=request.form['name'], major=major)
except:
flash('Category #%d not found.' % major_id, 'danger')
return jsonify(success=False)
flash('A new subcategory is added.', 'success')
return jsonify(success=True)
@app.route('/_minor/delete/<int:id>', methods=['GET'])
def minor_delete(id):
try:
minor = get_object_or_404(Minor, Minor.id == id)
minor.delete_instance()
except:
return jsonify(success=False)
return jsonify(success=True)
@app.route('/minor/<int:id>', methods=['GET'])
def minor_detail(id):
minor = get_object_or_404(Minor, Minor.id == id)
majors = Major.select().order_by(Major.id)
return render_template('minor.html', minor=minor, majors=majors)
@app.route('/_minor/edit/<int:id>', methods=['POST'])
def minor_edit(id):
try:
minor = Minor.get(Minor.id == id)
minor.name = request.form['name']
minor.major = request.form['major_id']
minor.save()
except:
return jsonify(success=False)
return jsonify(success=True)
|
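major_list above folds the joined Major/Minor rows into one dict per major by tracking the previous row. A sketch of the same fold written with itertools.groupby over plain tuples, purely to show the pattern; the field layout and sample rows are made up:

```python
from itertools import groupby

# (major_id, major_name, income, minor_id, minor_name), ordered by major_id
# as the ORDER BY in the query above guarantees.
rows = [
    (1, 'Salary', True, 10, 'Monthly'),
    (1, 'Salary', True, 11, 'Bonus'),
    (2, 'Food', False, 20, 'Groceries'),
]

majors = []
for major_id, group in groupby(rows, key=lambda r: r[0]):
    group = list(group)
    majors.append({
        'id': major_id,
        'name': group[0][1],
        'income': group[0][2],
        'minors': [{'id': r[3], 'name': r[4]} for r in group],
    })
```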
JaapJoris/autodidact
|
autodidact/migrations/0003_auto_20170116_1142.py
|
Python
|
agpl-3.0
| 2,079
| 0.00337
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('autodidact', '0002_auto_20161004_1251'),
]
operations = [
migrations.CreateModel(
name='RightAnswer',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('value', models.CharField(help_text='This value can either be a case-insensitive string or a numeric value. For numeric values you can use the <a target="_blank" href="https://docs.moodle.org/23/en/GIFT_format">GIFT notation</a> of "answer:tolerance" or "low..high".', max_length=255)),
('step', models.ForeignKey(related_name='right_answers', to='autodidact.Step')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WrongAnswer',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('value', models.CharField(help_text='Supplying one or more wrong answers will turn this into a multiple choice question.', max_length=255)),
('step', models.ForeignKey(related_name='wrong_answers', to='autodidact.Step')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='course',
name='slug',
field=models.SlugField(unique=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='answer_required',
field=models.BooleanField(default=False, help_text='If enabled, this step will show students an input field where they can enter their answer. Add one or more right answers below to have students’ answers checked for correctness.'),
preserve_default=True,
),
]
|
rasbt/protein-science
|
scripts-and-tools/grab_atom_radius/grab_atom_radius.py
|
Python
|
gpl-3.0
| 2,978
| 0.014775
|
# Sebastian Raschka 2014
# Script that extracts atoms within a radius from a PDB file
def grab_radius(file, radius, coordinates, include='ATOM,HETATM'):
"""
Grabs those atoms that are within a specified
radius of a provided 3d-coordinate.
Keyword arguments:
file: path to a PDB file
radius: radius in angstrom (float or integer)
coordinates: a list of x, y, z coordinates , e.g., [1.0, 2.4, 4.0]
include: coordinate lines to include (default: "ATOM,HETATM")
Returns:
A list that contains the pdb contents that are within the specified
radius.
"""
include = tuple(include.split(','))
with open(file, 'r') as pdb_file:
pdb_cont = [row.strip() for row in pdb_file.read().split('\n') if row.strip()]
in_radius = []
for line in pdb_cont:
if line.startswith(include):
xyz_coords = [float(line[30:38]),\
float(line[38:46]),\
float(line[46:54])]
distance = (sum([(coordinates[i]-xyz_coords[i])**2 for i in range(3)]))**0.5
if distance <= radius:
in_radius.append(line)
return in_radius
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Extracts atoms within a radius from a PDB file.\n'\
'By default, all atoms in the PDB file are included in the calculation.',
formatter_class=argparse.RawTextHelpFormatter
)
# positional arguments
parser.add_argument('PDBfile')
parser.add_argument('-r', '--radius',
type=float,
metavar='int/float',
default='10.0',
help='radius in Angstrom for atoms to extract (default 10.0)')
parser.add_argument('-c', '--coordinates',
type=str,
metavar='X,Y,Z',
default='0,0,0',
help='center for extracting atoms (default "0,0,0")')
# optional arguments
parser.add_argument('-i', '--include', type=str,
default='ATOM,HETATM',
metavar='coordinate-ID',
help='Coordinate lines to include (default: "ATOM,HETATM")')
parser.add_argument('-o', '--out', metavar='out.fasta', type=str,
help='writes atoms to an output file instead of printing it to the screen')
parser.add_argument('-v', '--version', action='version', version='grab_atom_radius v. 1.0')
args = parser.parse_args()
coords = args.coordinates.split(',')
coords = [float(i) for i in coords]
residues = grab_radius(args.PDBfile, args.radius, coords)
if args.out:
with open(args.out, 'w') as out:
for line in residues:
out.write(line + '\n')
else:
for line in residues:
print(line)
|
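A short usage sketch of calling grab_radius directly rather than through the CLI; the PDB path and the centre coordinates are placeholders:

```python
# Collect every ATOM/HETATM record within 8 Angstrom of an arbitrary point.
lines = grab_radius('example.pdb', radius=8.0, coordinates=[12.0, 4.5, -3.2])
for line in lines:
    print(line)
```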
bleepbloop/Pivy
|
scons/scons-local-1.2.0.d20090919/SCons/Scanner/Fortran.py
|
Python
|
isc
| 14,448
| 0.002422
|
"""SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/Fortran.py 4369 2009/09/19 15:58:29 scons"
import re
import string
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
apply(SCons.Scanner.Current.__init__, (self,) + args, kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes != None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
d = {}
for m in defmodules:
d[m] = 1
modules = filter(lambda m, d=d: not d.has_key(m), modules)
#modules = self.undefinedModules(modules, defmodules)
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = map(lambda x, s=suffix: string.lower(x) + s, modules)
# Remove unique items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specifed on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
nodes.sort()
nodes = map(lambda pair: pair[1], nodes)
return nodes
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will we there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that mus
|
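The commented breakdown above walks through the USE-statement regex piece by piece (the snippet is cut off mid-comment). A small sketch that assembles a pattern along the lines the comments describe and runs it over a few Fortran lines; the pattern string is reconstructed from the breakdown, not copied from SCons:

```python
import re

# Case-insensitive USE statement, anchored at start of line or after ';',
# with an optional ", INTRINSIC ::" / ", NON_INTRINSIC ::" form.
use_regex = (r"(?i)(?:^|;)\s*USE"
             r"(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)")
cre_use = re.compile(use_regex, re.M)

source = """\
      USE mod_a
      use, intrinsic :: iso_c_binding
      USE :: mod_b ; USE mod_c
"""
print(cre_use.findall(source))  # ['mod_a', 'iso_c_binding', 'mod_b', 'mod_c']
```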
nwokeo/supysonic
|
venv/lib/python2.7/site-packages/tests/wsgi.py
|
Python
|
agpl-3.0
| 4,317
| 0.001853
|
import Queue
from unittest import TestCase
import threading
import time
from storm.wsgi import make_app
class TestMakeApp(TestCase):
def stub_app(self, environ, start_response):
if getattr(self, 'in_request', None):
self.in_request()
getattr(self, 'calls', []).append('stub_app')
start_response('200 OK', [])
yield ''
if getattr(self, 'in_generator', None):
self.in_generator()
def stub_start_response(self, status, headers):
pass
def test_find_timeline_outside_request(self):
app, find_timeline = make_app(self.stub_app)
# outside a request, find_timeline returns nothing:
self.assertEqual(None, find_timeline())
def test_find_timeline_in_request_not_set(self):
# In a request, with no timeline object in the environ, find_timeline
# returns None:
app, find_timeline = make_app(self.stub_app)
self.in_request = lambda:self.assertEqual(None, find_timeline())
self.calls = []
list(app({}, self.stub_start_response))
# And we definitely got into the call:
self.assertEqual(['stub_app'], self.calls)
def test_find_timeline_set_in_environ(self):
# If a timeline object is known, find_timeline finds it:
app, find_timeline = make_app(self.stub_app)
timeline = FakeTimeline()
self.in_request = lambda:self.assertEqual(timeline, find_timeline())
list(app({'timeline.timeline': timeline}, self.stub_start_response))
def test_find_timeline_set_in_environ_during_generator(self):
# If a timeline object is known, find_timeline finds it:
app, find_timeline = make_app(self.stub_app)
timeline = FakeTimeline()
self.in_generator = lambda:self.assertEqual(timeline, find_timeline())
list(app({'timeline.timeline': timeline}, self.stub_start_response))
def test_timeline_is_replaced_in_subsequent_request(self):
app, find_timeline = make_app(self.stub_app)
timeline = FakeTimeline()
self.in_request = lambda:self.assertEqual(timeline, find_timeline())
list(app({'timeline.timeline': timeline}, self.stub_start_response))
# Having left the request, the timeline is left behind...
self.assertEqual(timeline, find_timeline())
# ... but only until the next request comes through.
timeline2 = FakeTimeline()
self.in_request = lambda:self.assertEqual(timeline2, find_timeline())
list(app({'timeline.timeline': timeline2}, self.stub_start_response))
def test_lookups_are_threaded(self):
# with two threads in a request at once, each only sees their own
# timeline.
app, find_timeline = make_app(self.stub_app)
errors = Queue.Queue()
sync = threading.Condition()
waiting = []
def check_timeline():
timeline = FakeTimeline()
def start_response(status, headers):
# Block on the condition, so all test threads are in
# start_response when the test resumes.
sync.acquire()
waiting.append('x')
sync.wait()
sync.release()
found_timeline = find_timeline()
if found_timeline != timeline:
errors.put((found_timeline, timeline))
list(app({'timeline.timeline': timeline}, start_response))
t1 = threading.Thread(target=check_timeline)
t2 = threading.Thread(target=check_timeline)
t1.start()
try:
t2.start()
try:
while True:
sync.acquire()
if len(waiting) == 2:
break
sync.release()
time.sleep(0)
sync.notify()
sync.notify()
sync.release()
finally:
t2.join()
finally:
t1.join()
if errors.qsize():
found_timeline, timeline = errors.get(False)
self.assertEqual(timeline, found_timeline)
class FakeTimeline(object):
"""A fake Timeline.
We need this because we can't use plain object instances as they can't be
weakreferenced.
"""
|
enableiot/iotanalytics-rule-engine
|
pydeps/db/dataDao.py
|
Python
|
apache-2.0
| 2,301
| 0.003911
|
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DataDao(object):
def __init__(self, spark_context, config):
self.spark_context = spark_context
self.zookeepers_uri = config.ZOOKEEPERS
self.device_measurement_table_name = config.DEVICE_MEASUREMENT_TABLE_NAME
def get_data_from_hbase(self, account_id, component_id, start_ts, stop_ts):
print("get_data_for_device", account_id, component_id, start_ts, stop_ts)
start = account_id + '\0' + component_id + '\0' + start_ts
stop = account_id + '\0' + component_id + '\0' + stop_ts
print("START: ", start.replace('\0', '\\0'))
print("STOP: ", stop.replace('\0', '\\0'))
# see https://hbase.apache.org/0.94/xref/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
conf = {
"hbase.zookeeper.quorum": self.zookeepers_uri,
"hbase.mapreduce.inputtable": self.device_measurement_table_name,
"hbase.mapreduce.scan.row.start": str(start),
"hbase.mapreduce.scan.row.stop": str(stop),
"hbase.mapreduce.scan.columns": "data:mea
|
sure_val"
}
key_conv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
value_conv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"
rdd = self.spark_context.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat",
"org.apache.hadoop.hbase.io.ImmutableBytesWritable",
"org.apache.hadoop.hbase.client.Result",
conf=conf, keyConverter=key_conv, valueConverter=value_conv)
return rdd
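# Hedged usage sketch (not part of the original module): it assumes a live
# SparkContext `sc` and a config object exposing ZOOKEEPERS and
# DEVICE_MEASUREMENT_TABLE_NAME; ids and timestamps below are placeholders.
# The scanned row keys are NUL-delimited composites of account id, component id
# and timestamp, as built above.
#
# dao = DataDao(sc, config)
# rdd = dao.get_data_from_hbase("acc-1", "comp-1", "1450000000000", "1450003600000")
# print(rdd.count())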
|
materials-commons/materialscommons.org
|
backend/tests/python_api_mulltiuser_check/DB.py
|
Python
|
mit
| 1,380
| 0.001449
|
from os import environ
import logging
import rethinkdb as r
from rethinkdb.errors import RqlDriverError, ReqlError
_MCDB = "materialscommons"
_MCDB_HOST = environ.get('MCDB_HOST') or 'localhost'
probe = environ.get('MCDB_PORT')
if not probe:
print("Unable to run without a setting for MCDB_PORT")
exit(-1)
_MCDB_PORT = int(environ.get('MCDB_PORT'))
class DbConnection:
def __init__(self):
self.log = logging.getLogger(__name__ + "." + self.__class__.__name__)
self.conn = None
def set_connection(self):
try:
if not self.conn:
self.conn = r.connect(host=_MCDB_HOST, port=_MCDB_PORT, db=_MCDB)
except RqlDriverError as excp:
self.conn = None
message = "Database connection could not be established: host, port, db = " + \
_MCDB_HOST + ", " + str(_MCDB_PORT) + ", " + _MCDB
self.log.error(message)
raise excp
def connection(self):
if not self.conn:
self.set_connection()
ret_value = None
if self.conn:
ret_value = self.conn
return ret_value
@staticmethod
def interface():
return r
def close_connection(self):
try:
if self.conn:
self.conn.close()
except ReqlError:
pass
self.conn = None
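# Hedged usage sketch (not part of the original module): assumes MCDB_HOST and
# MCDB_PORT are set and a rethinkdb server is reachable; the table name below
# is a placeholder.
#
# db = DbConnection()
# r_ = DbConnection.interface()
# first_row = r_.table('projects').limit(1).run(db.connection())
# db.close_connection()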
|
tvtsoft/odoo8
|
addons/sale_stock/report/sale_report.py
|
Python
|
agpl-3.0
| 1,213
| 0.00742
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp import tools
class sale_report(osv.osv):
_inherit = "sale.report"
_columns = {
'shipped': fields.boolean('Shipped', readonly=True),
'shipped_qty_1': fields.integer('# of Shipped Lines', readonly=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('waiting_date', 'Waiting Schedule'),
('manual', 'Sale to Invoice'),
('progress', 'Sale Order'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
], 'Order Status', readonly=True),
}
def _select(self):
return super(sale_report, self)._select() + ", s.warehouse_id as warehouse_id, s.shipped, s.shipped::integer as shipped_qty_1"
def _group_by(self):
return super(sale_report, self)._group_by() + ", s.warehouse_id, s.shipped"
|
rero/reroils-app
|
tests/api/test_permissions_patron.py
|
Python
|
gpl-2.0
| 2,746
| 0
|
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests REST API patrons."""
from copy import deepcopy
from flask import url_for
from invenio_accounts.testutils import login_user_via_session
from utils import postdata
def test_patron_permissions(
client, json_header, system_librarian_martigny,
patron_martigny,
librarian_fully):
"""Test patron permissions."""
# Login as patron
login_user_via_session(client, patron_martigny.user)
record = {
"$schema": "https://bib.rero.ch/schemas/patrons/patron-v0.0.1.json",
"first_name": "first_name",
"last_name": "Last_name",
"street": "Avenue Leopold-Robert, 132",
"postal_code": "1920",
"city": "Martigny",
"birth_date": "1967-06-07",
"patron": {
"expiration_date": "2023-10-07",
"type": {"$ref": "https://bib.rero.ch/api/patron_types/ptty1"},
"communication_channel": "email",
"communication_language": "ita"
},
"home_phone": "+41324993111"
}
# can not retrieve any type of users.
list_url = url_for('invenio_records_rest.ptrn_list')
res = client.get(list_url)
assert res.status_code == 403
# can not manage any types of patron roles
role_url = url_for('api_patrons.get_roles_management_permissions')
res = client.get(role_url)
assert res.status_code == 403
# can not create any type of users.
system_librarian = deepcopy(record)
librarian = deepcopy(record)
patron = deepcopy(record)
counter = 1
for record in [
{'data': patron, 'role': 'patron'},
{'data': librarian, 'role': 'librarian'},
{'data': system_librarian, 'role': 'system_librarian'}
]:
counter += 1
data = record['data']
data['roles'] = [record['role']]
data['patron']['barcode'] = ['barcode' + str(counter)]
data['email'] = str(counter) + '@domain.com'
res, _ = postdata(
client,
'invenio_records_rest.ptrn_list',
data
)
assert res.status_code == 403
|
t-brandt/acorns-adi
|
photometry/__init__.py
|
Python
|
bsd-2-clause
| 32
| 0
|
from calc_phot import calc_phot
|
botify-labs/python-simple-workflow
|
swf/querysets/workflow.py
|
Python
|
mit
| 25,485
| 0.001295
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from boto.swf.exceptions import SWFResponseError
from swf.constants import REGISTERED
from swf.querysets.base import BaseQuerySet
from swf.models import Domain
from swf.models.workflow import (WorkflowType, WorkflowExecution,
CHILD_POLICIES)
from swf.utils import datetime_timestamp, past_day, get_subkey
from swf.exceptions import (ResponseError, DoesNotExistError,
InvalidKeywordArgumentError, AlreadyExistsError)
class BaseWorkflowQuerySet(BaseQuerySet):
"""Base domain bounded workflow queryset objects
Amazon workflow types and executions are always bound
to a specific domain, so any queryset meant to deal
with workflows has to be built against a `domain`.
:param domain: domain the inheriting queryset belongs to
:type domain: swf.model.domain.Domain
"""
# Amazon response section corresponding
# to current queryset information
_infos = 'typeInfo'
_infos_plural = 'typeInfos'
def __init__(self, domain, *args, **kwargs):
super(BaseWorkflowQuerySet, self).__init__(*args, **kwargs)
Domain.check(domain)
self.domain = domain
@property
def domain(self):
if not hasattr(self, '_domain'):
self._domain = None
return self._domain
@domain.setter
def domain(self, value):
# Avoiding circular import
from swf.models.domain import Domain
if not isinstance(value, Domain):
err = "domain property has to be of"\
"swf.model.domain.Domain type, not %r"\
% type(value)
raise TypeError(err)
self._domain = value
def _list(self, *args, **kwargs):
raise NotImplementedError
def _list_items(self, *args, **kwargs):
response = {'nextPageToken': None}
while 'nextPageToken' in response:
response = self._list(
*args,
next_page_token=response['nextPageToken'],
**kwargs
)
for item in response[self._infos_plural]:
yield item
class WorkflowTypeQuerySet(BaseWorkflowQuerySet):
# Explicit is better than implicit, keep zen
_infos = 'typeInfo'
_infos_plural = 'typeInfos'
def to_WorkflowType(self, domain, workflow_info, **kwargs):
# Not using get_subkey in order for it to explicitly
# raise when workflowType name doesn't exist for example
return WorkflowType(
domain,
workflow_info['workflowType']['name'],
workflow_info['workflowType']['version'],
status=workflow_info['status'],
**kwargs
)
def get(self, name, version, *args, **kwargs):
"""Fetches the Workflow Type with `name` and `version`
:param name: name of the workflow type
:type name: String
:param version: workflow type version
:type version: String
:returns: matched workflow type instance
:rtype: swf.core.model.workflow.WorkflowType
A typical Amazon response looks like:
.. code-block:: json
{
"configuration": {
"defaultExecutionStartToCloseTimeout": "300",
"defaultTaskStartToCloseTimeout": "300",
"defaultTaskList": {
"name": "None"
},
"defaultChildPolicy": "TERMINATE"
},
"typeInfo": {
"status": "REGISTERED",
"creationDate": 1364492094.968,
"workflowType": {
"version": "1",
"name": "testW"
}
}
}
"""
try:
response = self.connection.describe_workflow_type(self.domain.name, name, version)
except SWFResponseError as e:
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(e.body['message'])
raise ResponseError(e.body['message'])
wt_info = response[self._infos]
wt_config = response['configuration']
task_list = kwargs.get('task_list')
if task_list is None:
task_list = get_subkey(wt_config, ['defaultTaskList', 'name'])
child_policy = kwargs.get('child_policy')
if child_policy is None:
child_policy = wt_config.get('defaultChildPolicy')
decision_task_timeout = kwargs.get('decision_task_timeout')
if decision_task_timeout is None:
decision_task_timeout = wt_config.get(
'defaultTaskStartToCloseTimeout')
execution_timeout = kwargs.get('execution_timeout')
if execution_timeout is None:
execution_timeout = wt_config.get(
'defaultExecutionStartToCloseTimeout')
decision_tasks_timeout = kwargs.get('decision_tasks_timeout')
if decision_tasks_timeout is None:
decision_tasks_timeout = wt_config.get(
'defaultTaskStartToCloseTimeout')
return self.to_WorkflowType(
self.domain,
wt_info,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
decision_tasks_timeout=decision_tasks_timeout,
)
def get_or_create(self, name, version,
status=REGISTERED,
creation_date=0.0,
deprecation_date=0.0,
task_list=None,
child_policy=CHILD_POLICIES.TERMINATE,
execution_timeout='300',
decision_tasks_timeout='300',
description=None,
*args, **kwargs):
"""Fetches, or creates the ActivityType with ``name`` and ``version``
When trying to fetch a matching workflow type, only the
name and version parameters are taken into account.
However, if you want to make sure that, in case the workflow type
has to be created, it is created with specific values, just provide them.
:param name: name of the workflow type
:type name: String
:param version: workflow type version
:type version: String
:param status: workflow type status
:type status: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}
:param creation_date: creation date of the current WorkflowType
:type creation_date: float (timestamp)
:param deprecation_date: deprecation date of WorkflowType
:type deprecation_date: float (timestamp)
:param task_list: task list to use for scheduling decision tasks for executions
of this workflow type
:type task_list: String
:param child_policy: policy to use for the child workflow executions
when a workflow execution of this type is terminated
:type child_policy: CHILD_POLICIES.{TERMINATE |
REQUEST_CANCEL |
ABANDON}
:param execution_timeout: maximum duration for executions of this workflow type
:type execution_timeout: String
:param decision_tasks_timeout: maximum duration of decision tasks for this workflow type
:type decision_tasks_timeout: String
:param description: Textual description of the workflow type
:type description: String
:returns: Fetched or created WorkflowType model object
:rtype: WorkflowType
"""
try:
return self.get(name,
version,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
|
GabrielBrascher/cloudstack
|
systemvm/debian/opt/cloud/bin/cs/__init__.py
|
Python
|
apache-2.0
| 1,000
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from cs.CsConfig import CsConfig
config = CsConfig()
logging.basicConfig(filename=config.get_logger(),
level=config.get_level(),
format=config.get_format())
|
williamalu/mimo_usrp
|
scripts/pll.py
|
Python
|
mit
| 3,454
| 0.001448
|
#!/usr/bin/env python
""" Class that reads in data from decoder.py and uses a PLL to correct for
phase offset. """
import numpy as np
import matplotlib.pyplot as plt
import helper
from decoder import Decoder
j = (0 + 1j)
class PLL(object):
def __init__(self, data, k_p, k_i, k_d):
""" Initialize data and PID coefficients. """
self.data = data
self.k_p = k_p
self.k_i = k_i
self.k_d = k_d
self.data_fixed = None
def correct_phase_offset(self):
""" Use PID control to estimate phase offset in self.data, correct for
it, and save the corrected data as self.data_fixed. """
phase = 0
prev_err = 0
err = 0
err_sum = 0
self.data_fixed = np.array([])
self.abs_val_list = np.array([])
self.err_list = np.array([])
self.phase_list = np.array([])
for x in self.data:
# Multiply input value by complex exponential of specified phase
y = x * np.exp(-phase * j)
self.data_fixed = np.append(self.data_fixed, y)
# Estimate error in phase for BPSK
err = -y.real * y.imag #if (np.absolute(x) > .004) else 0.0
# Estimate error in phase for QPSK
# A = y.real * np.sign(y.imag)
# B = y.imag * np.sign(y.real)
# err = (-1/2) * (A - B)
# Calculate integral of error
err_sum += err
# Calculate derivative of error
err_diff = err - prev_err
# Use PID control to find the phase offset for the next step
phase += self.k_p * err + self.k_i * err_sum + self.k_d * err_diff
phase = helper.wrap_radian(phase)
# Define error in previous step
prev_err = err
# Track data for plotting
self.err_list = np.append(self.err_list, err)
self.abs_val_list = np.append(self.abs_val_list, np.absolute(x))
self.phase_list = np.append(self.phase_list, phase)
def plot_data(self):
""" Visualize! """
# Original data and corrected data
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(self.data.real, linewidth=2.0, label='real')
plt.plot(self.data.imag, 'r-', linewidth=2.0, label='imag')
# plt.plot(self.err_list, label='err')
plt.title('Data')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(self.data_fixed.real, linewidth=2.0, label='real')
plt.plot(self.data_fixed.imag, 'r-', linewidth=2.0, label='imag')
plt.title('Data with phase correction')
plt.legend()
# Phase and error
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(self.phase_list)
plt.title('Phase Offset Over Time')
plt.subplot(2, 1, 2)
plt.plot(self.err_list)
plt.title('Error Over Time')
# Absolute value
plt.figure()
plt.plot(self.abs_val_list)
plt.title('Absolute Value Over Time')
plt.show()
if __name__ == "__main__":
data_path = '../data/'
input_filename = 'received_data_1.bin'
decoder = Decoder(data_path, input_filename)
decoder.read_file()
decoder.find_offsets_bpsk()
decoder.fix_offsets()
k_p = 0.7
k_i = 0.1
k_d = 0.0
pll = PLL(decoder.data_fixed, k_p, k_i, k_d)
pll.correct_phase_offset()
pll.plot_data()
|
colaftc/webtool
|
top/api/rest/SubuserEmployeeUpdateRequest.py
|
Python
|
mit
| 702
| 0.032764
|
'''
Created by auto_sdk on 2013.01.22
'''
from top.api.base import RestApi
class SubuserEmployeeUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.department_id = None
self.duty_id = None
self.employee_name = None
self.employee_nickname = None
self.employee_num = None
self.employee_turnover = None
self.entry_date = None
self.id_card_num = None
self.leader_id = None
self.office_phone = None
self.personal_email = None
self.personal_mobile = None
self.sex = None
self.sub_id = None
self.work_location = None
def getapiname(self):
return 'taobao.subuser.employee.update'
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/town/DDTownLoader.py
|
Python
|
apache-2.0
| 951
| 0.002103
|
import TownLoader
import DDStreet
from toontown.suit import Suit
class DDTownLoader(TownLoader.TownLoader):
def __init__(self, hood, parentFSM, doneEvent):
TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
self.streetClass = DDStreet.DDStreet
self.musicFile = 'phase_6/audio/bgm/DD_SZ.ogg'
self.activityMusicFile = 'phase_6/audio/bgm/DD_SZ_activity.ogg'
self.townStorageDNAFile = 'phase_6/dna/storage_DD_town.pdna'
def load(self, zoneId):
TownLoader.TownLoader.load(self, zoneId)
Suit.loadSuits(2)
dnaFile = 'phase_6/dna/donalds_dock_' + str(self.canonicalBranchZone) + '.pdna'
self.createHood(dnaFile)
def unload(self):
Suit.unloadSuits(2)
TownLoader.TownLoader.unload(self)
def enter(self, requestStatus):
TownLoader.TownLoader.enter(self, requestStatus)
def exit(self):
TownLoader.TownLoader.exit(self)
|
gazpachoking/Flexget
|
flexget/plugins/input/inputs.py
|
Python
|
mit
| 2,537
| 0.001971
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('inputs')
class PluginInputs(object):
"""
Allows the same input plugin to be configured multiple times in a task.
Example::
inputs:
- rss: http://feeda.com
- rss: http://feedb.com
"""
schema = {
'type': 'array',
'items': {
'allOf': [
{'$ref': '/schema/plugins?phase=input'},
{
'maxProperties': 1,
'error_maxProperties': 'Plugin options within inputs plugin must be indented 2 more spaces than '
'the first letter of the plugin name.',
'minProperties': 1,
},
]
},
}
def on_task_input(self, task, config):
entry_titles = set()
entry_urls = set()
for item in config:
for input_name, input_config in item.items():
input = plugin.get_plugin_by_name(input_name)
method = input.phase_handlers['input']
try:
result = method(task, input_config)
except plugin.PluginError as e:
log.warning('Error during input plugin %s: %s' % (input_name, e))
continue
if not result:
msg = 'Input %s did not return anything' % input_name
if getattr(task, 'no_entries_ok', False):
log.verbose(msg)
else:
log.warning(msg)
continue
for entry in result:
if entry['title'] in entry_titles:
log.debug('Title `%s` already in entry list, skipping.' % entry['title'])
continue
urls = ([entry['url']] if entry.get('url') else []) + entry.get('urls', [])
if any(url in entry_urls for url in urls):
log.debug('URL for `%s` already in entry list, skipping.' % entry['title'])
continue
yield entry
entry_titles.add(entry['title'])
entry_urls.update(urls)
@event('plugin.register')
def register_plugin():
plugin.register(PluginInputs, 'inputs', api_ver=2)
|
MyRookie/SentimentAnalyse
|
src/Algorithm/ScoreCaculating.py
|
Python
|
mit
| 1,593
| 0.041431
|
import math
import sys
sys.path.append('..')
import Analyse.AFX as AFX
class State:
def __init__(self):
self.SenShifterState = True
self.MoodStrength = 1.0
self.positive = 0.0
self.negative = 0.0
def Process(self, score):
if self.SenShifterState is True:
self.positive += score
else:
self.negative += score
def Clear(self):
self.SenShifterState = True
self.MoodStrength = 1.0
self.positive = 0.0
self.negative = 0.0
def ChangeMood(self,mood):
if mood.startswith('I'):
self.MoodStrength *= 2
if mood.startswith('D'):
self.MoodStrength /= 2
def returnScore(self):
score = self.positive - self.negative
score *= self.MoodStrength
return score
#calculating the score of a specific sentence
def CaculateASentence(Sentence):
S = State()
for word in Sentence:
tag = AFX.GetWord(word,'Tag')
#if the word has no orientation or it is a boring word, just ignore it
if tag == 0.0 or tag == "Bor":
continue
if tag == "Con":
S.Clear()
elif tag == "Neg":
#if there is a negative tag here, change the state of the Sentiment Shifter
S.SenShifterState = not S.SenShifterState
elif tag == "Inc" or tag == "Dow":
S.ChangeMood(tag)
else:
S.Process(tag)
return S.returnScore()
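# Hedged worked example (not part of the original file): with the rules above, a
# hypothetical sentence whose words carry the tags [2.0, "Neg", 1.5, "Inc"] would
# add 2.0 to the positive bucket, flip the sentiment shifter on "Neg", add 1.5 to
# the negative bucket, then double MoodStrength on "Inc", so returnScore() gives
# (2.0 - 1.5) * 2 = 1.0.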
#calculating the score of the document with specific rules
def Run(Data):
ScoreList = []
counter = 0
for Sen in Data:
if Sen != []:
if AFX.GetWord(Sen[0],'Tag') == "Con":
word = AFX.GetWord(Sen[0],'Word')
print Sen
print CaculateASentence(Sen)
counter += 1
pass
#Most people don't like rainy, even if I like the weather quite much.
|
land-pack/pyroom
|
pyroom/manage.py
|
Python
|
gpl-3.0
| 239
| 0
|
from pyroom.app import PyRoom
from options import options
from pyroom.urls import settings
from pyroom.urls import handlers
if __name__ == '__main__':
pyroom = PyRoom(options=options, handlers=handlers, **settings)
pyroom.start()
|
google/jws
|
jws/jwt.py
|
Python
|
apache-2.0
| 9,120
| 0.005482
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON Web Token (JWT) in compact serialization format.
Jwt uses jws underneath. The difference between jws and jwt is that
jws only verifies the signature while jwt verifies both the signature and
claims as defined at https://tools.ietf.org/html/rfc7519#section-4.1. In
particular, in addition to signature verification, jwt does the following:
1. Verify expected issuer, subjects and list of audiences. However, the
verification is **optional** because one, jwt does not know what your expected
issuer, subject and list of audiences are and second, RFCs do not mandate
these claims. As a consequence, when you construct the verifier:
+ If you do not specify these fields, jwt does **not** know how to verify
them, and hence does **not** verify them.
+ If you specify these fields, the verification is automatic and mandatory.
2. When 'exp', 'nbf' are in the claims, jwt automatically verifies them.
3. If you use your own claims that aren't defined at
https://tools.ietf.org/html/rfc7519#section-4.1, jwt does not know how to
verify them. You have to verify them yourselves after signature verification
and RFC claims verification.
"""
__author__ = "quannguyen@google.com (Quan Nguyen)"
import json
import jws
from . import jwsutil
from .exceptions import SecurityException
import six
import datetime
import calendar
class JwtPublicKeyVerify(object):
"""JWT Public Key Verifier which verifies both the signature and claims."""
def __init__(self,
jwk_set,
issuer=None,
subject=None,
audiences=None,
clock_skew_tolerance=0):
"""Constructor for JwtPublicKeyVerify.
Args:
jwk_set: a JwkSet.
issuer: string, the issuer claim as defined at
https://tools.ietf.org/html/rfc7519#section-4.1.1.
subject: string, the subject claim as defined at
https://tools.ietf.org/html/rfc7519#section-4.1.2.
audiences: list of string, the audiences claim as defined at
https://tools.ietf.org/html/rfc7519#section-4.1.3.
clock_skew_tolerance: integer, the clock skew that the verifier tolerates.
Raises:
UnsupportedAlgorithm: if the algorithm is not defined at
https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not Rsa or
Ecdsa key.
"""
self.verifier = jws.JwsPublicKeyVerify(jwk_set)
self.issuer = issuer
self.subject = subject
self.audiences = audiences
self.clock_skew_tolerance = clock_skew_tolerance
def verify(self, token):
"""Verifies whether the token is signed with the corresponding private key and whether the payload's claims are valid.
Args:
token: bytes, the JWS compact serialization token as defined at
https://tools.ietf.org/html/rfc7515#section-7.1.
Returns:
dict, the deserialized JSON payload in the token.
Raises:
SecurityException: when the token is invalid
"""
try:
payload = json.loads(self.verifier.verify(token).decode("utf-8"))
if _verify_claims(payload, self.issuer, self.subject, self.audiences,
self.clock_skew_tolerance):
return payload
else:
raise SecurityException("Invalid token")
except SecurityException as e:
raise e
except:
raise SecurityException("Invalid token")
class JwtPublicKeySign(object):
"""Jwt public key signer that suppports both Ecdsa and Rsa signature schemes.
"""
def __init__(self, jwk_set):
"""Constructor for JwtPublicKeySign.
Args:
jwk_set: a JwkSet.
Raises:
UnsupportedAlgorithm: if the algorithm is not defined at
https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not Rsa or
Ecdsa key.
"""
self.signer = jws.JwsPublicKeySign(jwk_set)
def sign(self, header, payload):
"""Computes the signed jwt as defined at rfc7515#section-7.1.
Args:
header: dict, dictionary of header to convert to JSON and sign.
payload: dict, dictionary of the payload to convert to JSON and sign.
Returns:
bytes, the signed token as defined at
https://tools.ietf.org/html/rfc7515#section-7.1.
Raises:
SecurityException: if the header's algorithm or kid does not match the
key's.
"""
return self.signer.sign(header, payload)
class JwtMacVerify(object):
"""Jwt Mac Verifier that verifies both message authentication code and claims."""
def __init__(self,
jwk_set,
issuer=None,
subject=None,
audiences=None,
clock_skew_tolerance=0):
"""Constructor for JwtMacVerify.
Args:
jwk_set: a JwkSet.
issuer: string, the issuer claim as defined at
https://tools.ietf.org/html/rfc7519#section-4.1.1.
subject: string, the subject claim as defined at
https://tools.ietf.org/html/rfc7519#section-4.1.2.
audiences: list of string, the audiences claim as defined at
https://tools.ietf.org/html/rfc7519#section-4.1.3.
clock_skew_tolerance: integer, the clock skew that the verifier tolerates.
Raises:
UnsupportedAlgorithm: if the algorithm is not defined at
https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not Rsa or
Ecdsa key.
"""
self.verifier = jws.JwsMacVerify(jwk_set)
self.issuer = issuer
self.subject = subject
self.audiences = audiences
self.clock_skew_tolerance = clock_skew_tolerance
def verify(self, token):
"""Verifies whether the token was authenticated with mac and whether the payload's claims are valid.
Args:
token: bytes, the JWS compact serialization token as defined at
https://tools.ietf.org/html/rfc7515#section-7.1.
Returns:
dict, the deserialized JSON payload in the token.
Raises:
SecurityException: when the token is not valid.
"""
try:
payload = json.loads(self.verifier.verify(token).decode("utf-8"))
if _verify_claims(payload, self.issuer, self.subject, self.audiences,
self.clock_skew_tolerance):
return payload
else:
raise SecurityException("Invalid token")
except SecurityException as e:
raise e
except:
raise SecurityException("Invalid token")
class JwtMacAuthenticator(object):
"""Jws Mac Authenticator that authenticates jwt token."""
def __init__(self, jwk_set):
"""Constructor for JwtMacAuthenticator.
Args:
jwk_set: a JwkSet.
Raises:
UnsupportedAlgorithm: if the key.algorithm is not defined at
https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not symmetric
Hmac key.
"""
self.authenticator = jws.JwsMacAuthenticator(jwk_set)
def authenticate(self, header, payload):
"""Computes the authenticated jwt as defined at rfc7515#section-7.1.
Args:
header: dict, dictionary of header to convert to JSON and sign.
payload: dict, dictionary of payload to convert to JSON and sign.
Returns:
bytes, the authenticated token as defined at
https://tools.ietf.org/html/rfc7515#section-7.1.
Raises:
SecurityException: if the header's algorithm or kid does not match the
key's.
"""
return self.authenticator.authenticate(header, payload)
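# Hedged usage sketch (not part of the original module): a MAC-based round trip,
# assuming a symmetric JwkSet shared by both sides; the header and payload values
# are placeholders, and a 'kid' matching the key may also be required.
#
# authenticator = JwtMacAuthenticator(jwk_set)
# token = authenticator.authenticate({"alg": "HS256", "typ": "JWT"},
#                                    {"iss": "https://issuer.example", "sub": "user-1"})
# verifier = JwtMacVerify(jwk_set, issuer="https://issuer.example")
# claims = verifier.verify(token)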
def _get_unix_timestamp():
return calendar.timegm(datetime.datetime.utcnow().utctimetuple())
def _verify_claims(payload, issuer, subject, audiences, clock_skew_tolerance):
if issuer is not None:
if payload.get("iss", None) is None:
return False
if not isinstanc
|
THM-TheoreM/Algorithm
|
tool/ImageProcessing/perspective/antialiased_do.py
|
Python
|
gpl-3.0
| 947
| 0.024287
|
from PIL import Image
import numpy
from antialiased import antialiased
im=Image.open('C:/Users/linzz/Desktop/pic/image_processing/ImageProcessing/perspective/find/line.jpg')
im=numpy.array(im)
node_site_u2 = [165, 145, 557, 83, 564, 333, 137, 359]
node_site_u3 = [124, 150, 524, 113, 540, 246, 106, 272]
node_site_u4 = [128, 154, 523, 181, 523, 380, 128, 406]
node_site_u5 = [118, 38, 351, 46, 355, 183, 118, 187]
node_site_u6 = [171, 72, 423, 78, 423, 208, 171, 205]
node_site_12 = [634, 617, 1900, 431, 1896, 1161, 491, 1269]
x0,y0,x1,y1,x2,y2,x3,y3=node_site_u2
x0=122
y0=266
x1=429
y1=161
im=antialiased(im,x0,y0,x1,y1)
'''
im=antialiased(im,x0,y0,x3,y3)
im=antialiased(im,x3,y3,x0,y0)
im=antialiased(im,x1,y1,x2,y2)
im=antialiased(im,x2,y2,x1,y1)
im=antialiased(im,x2,y2,x3,y3)
im=antialiased(im,x3,y3,x2,y2)
'''
im=Image.fromarray(im)
im.save('C:/Users/linzz/Desktop/pic/image_processing/ImageProcessing/perspective/find/anti_1.jpg')
|
jbassen/edx-platform
|
lms/djangoapps/dashboard/sysadmin.py
|
Python
|
agpl-3.0
| 38,209
| 0.001282
|
"""
This module creates a sysadmin dashboard for managing and viewing
courses.
"""
import csv
import json
import logging
import os
import subprocess
import time
import StringIO
from pymongo.errors import PyMongoError
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.management import call_command
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.generic.base import TemplateView
from django.views.decorators.http import condition
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
import mongoengine
from path import path
import sys
from courseware.courses import get_course_by_id
import dashboard.git_import as git_import
from django_comment_client.management_utils import rename_user as rename_user_util
from dashboard.git_import import GitImportError
from dashboard.models import CourseImportLog
from external_auth.models import ExternalAuthMap
from external_auth.views import generate_password
from instructor_task.models import InstructorTask
from student.models import CourseEnrollment, UserProfile, Registration
from student.roles import CourseStaffRole, CourseInstructorRole
import track.views
from util.json_request import JsonResponse
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class SysadminDashboardView(TemplateView):
"""Base class for sysadmin dashboard views with common methods"""
template_name = 'sysadmin_dashboard.html'
def __init__(self, **kwargs):
"""
Initialize base sysadmin dashboard class with modulestore,
modulestore_type and return msg
"""
self.def_ms = modulestore()
self.is_using_mongo = True
if self.def_ms.get_modulestore_type(None) == 'xml':
self.is_using_mongo = False
self.msg = u''
self.datatable = []
super(SysadminDashboardView, self).__init__(**kwargs)
@method_decorator(ensure_csrf_cookie)
@method_decorator(login_required)
@method_decorator(cache_control(no_cache=True, no_store=True,
must_revalidate=True))
@method_decorator(condition(etag_func=None))
def dispatch(self, *args, **kwargs):
return super(SysadminDashboardView, self).dispatch(*args, **kwargs)
def get_courses(self):
""" Get an iterable list of courses."""
return self.def_ms.get_courses()
def return_csv(self, filename, header, data):
"""
Convenient function for handling the http response of a csv.
data should be iterable and is used to stream object over http
"""
csv_file = StringIO.StringIO()
writer = csv.writer(csv_file, dialect='excel', quotechar='"',
quoting=csv.QUOTE_ALL)
writer.writerow(header)
# Setup streaming of the data
def read_and_flush():
"""Read and clear buffer for optimization"""
csv_file.seek(0)
csv_data = csv_file.read()
csv_file.seek(0)
csv_file.truncate()
return csv_data
def csv_data():
"""Generator for handling potentially large CSVs"""
for row in data:
writer.writerow(row)
csv_data = read_and_flush()
yield csv_data
response = HttpResponse(csv_data(), mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(
filename)
return response
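# Hedged usage sketch (not part of the original class): inside a dashboard view
# one might stream a report with the helper above; the filename, header and row
# generator are illustrative only.
#
# header = ['username', 'email']
# rows = ([u.username, u.email] for u in User.objects.iterator())
# return self.return_csv('users.csv', header, rows)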
class Users(SysadminDashboardView):
"""
The status view provides Web based user management, a listing of
courses loaded, and user statistics
"""
def fix_external_auth_map_passwords(self):
"""
This corrects any passwords that have drifted from eamap to
internal django auth. Needs to be removed when fixed in external_auth
"""
msg = ''
for eamap in ExternalAuthMap.objects.all():
euser = eamap.user
epass = eamap.internal_password
if euser is None:
continue
try:
testuser = authenticate(username=euser.username, password=epass)
except (TypeError, PermissionDenied, AttributeError), err:
# Translators: This message means that the user could not be authenticated (that is, we could
# not log them in for some reason - maybe they don't have permission, or their password was wrong)
msg += _('Failed in authenticating {username}, error {error}\n').format(
username=euser,
error=err
)
continue
if testuser is None:
# Translators: This message means that the user could not be authenticated (that is, we could
# not log them in for some reason - maybe they don't have permission, or their password was wrong)
msg += _('Failed in authenticating {username}\n').format(username=euser)
# Translators: this means that the password has been corrected (sometimes the database needs to be resynchronized)
# Translate this as meaning "the password was fixed" or "the password was corrected".
msg += _('fixed password')
euser.set_password(epass)
euser.save()
continue
if not msg:
# Translators: this means everything happened successfully, yay!
msg = _('All ok!')
return msg
def create_user(self, uname, name, password=None):
""" Creates a user (both SSL and regular)"""
if not uname:
return _('Must provide username')
if not name:
return _('Must provide full name')
email_domain = getattr(settings, 'SSL_AUTH_EMAIL_DOMAIN', 'MIT.EDU')
msg = u''
if settings.FEATURES['AUTH_USE_CERTIFICATES']:
if '@' not in uname:
email = '{0}@{1}'.format(uname, email_domain)
else:
email = uname
if not email.endswith('@{0}'.format(email_domain)):
# Translators: Domain is an email domain, such as "@gmail.com"
msg += _('Email address must end in {domain}').format(domain="@{0}".format(email_domain))
return msg
mit_domain = 'ssl:MIT'
if ExternalAuthMap.objects.filter(external_id=email,
external_domain=mit_domain):
msg += _('Failed - email {email_addr} already exists as {external_id}').format(
email_addr=email,
external_id="external_id"
)
return msg
new_password = generate_password()
else:
if not password:
return _('Password must be supplied if not using certificates')
email = uname
if '@' not in email:
msg += _('email address required (not username)')
return msg
new_password = password
user = User(username=uname, email=email, is_active=True)
user.set_password(new_password)
try:
user.save()
except IntegrityError:
msg += _('Oops, failed to create user {user}, {error}').format(
user=user,
error="IntegrityError"
)
return msg
reg = Registration()
reg.reg
|
hmoco/osf.io
|
admin/base/settings/defaults.py
|
Python
|
apache-2.0
| 6,690
| 0.001495
|
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
from django.contrib import messages
from api.base.settings import * # noqa
# TODO ALL SETTINGS FROM API WILL BE IMPORTED AND WILL NEED TO BE OVERRIDDEN
# TODO THIS IS A STEP TOWARD INTEGRATING ADMIN & API INTO ONE PROJECT
# import local # Build own local.py (used with postgres)
# TODO - remove duplicated items, as this is now using settings from the API
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# Don't allow migrations
DATABASE_ROUTERS = ['admin.base.db.router.NoMigrationRouter']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'admin'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'admin-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
# set to False: prereg uses a SPA and ajax and grabs the token to use it in requests
CSRF_COOKIE_HTTPONLY = False
ALLOWED_HOSTS = [
'.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 5,
}
},
]
USE_L10N = False
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the > dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# 3rd party
'raven.contrib.django.raven_compat',
'webpack_loader',
'django_nose',
'password_reset',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.wiki',
'addons.twofactor',
# Internal apps
'admin.common_auth',
'admin.base',
'admin.pre_reg',
'admin.spam',
'admin.metrics',
'admin.nodes',
'admin.users',
'admin.desk',
'admin.meetings',
)
MIGRATION_MODULES = {
'osf': None,
'addons_osfstorage': None,
'addons_wiki': None,
'addons_twofactor': None,
}
USE_TZ = True
# local development using https
if osf_settings.SECURE_MODE and osf_settings.DEBUG_MODE:
INSTALLED_APPS += ('sslserver',)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'osf.OSFUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'admin'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE_CLASSES = (
# TokuMX transaction support
# Needs to go before CommonMiddleware, so that transactions are always started,
# even in the event of a redirect. CommonMiddleware may cause other middlewares'
# process_request to be skipped, e.g. when a trailing slash is omitted
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
MESSAGE_TAGS = {
messages.SUCCESS: 'text-success',
messages.ERROR: 'text-danger',
messages.WARNING: 'text-warning',
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
}
}]
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'public/js/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
# Keen.io settings in local.py
KEEN_PROJECT_ID = osf_settings.KEEN['private']['project_id']
KEEN_READ_KEY = osf_settings.KEEN['private']['read_key']
KEEN_WRITE_KEY = osf_settings.KEEN['private']['write_key']
KEEN_CREDENTIALS = {
'keen_ready': False
}
if KEEN_CREDENTIALS['keen_ready']:
KEEN_CREDENTIALS.update({
'keen_project_id': KEEN_PROJECT_ID,
'keen_read_key': KEEN_READ_KEY,
'keen_write_key': KEEN_WRITE_KEY
})
ENTRY_POINTS = {'osf4m': 'osf4m', 'prereg_challenge_campaign': 'prereg',
'institution_campaign': 'institution'}
# Set in local.py
DESK_KEY = ''
DESK_KEY_SECRET = ''
|
uclouvain/OSIS-Louvain
|
base/models/enums/learning_container_year_types.py
|
Python
|
agpl-3.0
| 2,804
| 0.001784
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.utils.translation import gettext_lazy as _
from base.models.utils.utils import ChoiceEnum
COURSE = "COURSE"
INTERNSHIP = "INTERNSHIP"
DISSERTATION = "DISSERTATION"
OTHER_COLLECTIVE = "OTHER_COLLECTIVE"
OTHER_INDIVIDUAL = "OTHER_INDIVIDUAL"
MASTER_THESIS = "MASTER_THESIS"
EXTERNAL = "EXTERNAL"
LEARNING_CONTAINER_YEAR_TYPES_FOR_FACULTY = (
(OTHER_COLLECTIVE, _("Other collective")),
(OTHER_INDIVIDUAL, _("Other individual")),
(MASTER_THESIS, _("Thesis")),
)
class LearningContainerYearType(ChoiceEnum):
COURSE = _("Course")
INTERNSHIP = _("Internship")
DISSERTATION = _("Dissertation")
OTHER_COLLECTIVE = _("Other collective")
OTHER_INDIVIDUAL = _("Other individual")
MASTER_THESIS = _("Thesis")
EXTERNAL = _("External")
@classmethod
def for_faculty(cls) -> tuple:
return cls.OTHER_COLLECTIVE.name, cls.OTHER_INDIVIDUAL.name, cls.MASTER_THESIS.name, cls.INTERNSHIP.name
LCY_TYPES_WITH_FIXED_ACRONYM = [COURSE, INTERNSHIP, DISSERTATION]
LEARNING_CONTAINER_YEAR_TYPES_WITHOUT_EXTERNAL = LearningContainerYearType.choices()[:-1]
CONTAINER_TYPE_WITH_DEFAULT_COMPONENT = [COURSE, MASTER_THESIS, OTHER_COLLECTIVE, INTERNSHIP, EXTERNAL]
TYPE_ALLOWED_FOR_ATTRIBUTIONS = (OTHER_COLLECTIVE, OTHER_INDIVIDUAL, MASTER_THESIS, INTERNSHIP, EXTERNAL, DISSERTATION)
CONTAINER_TYPES_CREATION_PROPOSAL = (
(COURSE, _("Course")),
(DISSERTATION, _("Dissertation")),
(INTERNSHIP, _("Internship"))
)
IN_CHARGE_TYPES = [COURSE, INTERNSHIP, DISSERTATION]
|
Som-Energia/somenergia-tomatic
|
tomatic/pbx/pbxareavoip.py
|
Python
|
gpl-3.0
| 5,174
| 0.018748
|
# -*- coding: utf-8 -*-
import datetime
import requests
import json
from yamlns import namespace as ns
from .. import persons
class AreaVoip(object):
@staticmethod
def defaultQueue():
import dbconfig
return dbconfig.tomatic.get('areavoip',{}).get('queue', None)
def __init__(self):
import dbconfig
self.config = dbconfig.tomatic.areavoip
def _api(self, request, **kwds):
print(request,kwds)
result = requests.get(self.config.baseurl,
params=dict(
reqtype = request,
tenant = self.config.tenant,
key = self.config.apikey,
**kwds),
timeout=2, # seconds
)
print((result.request.url))
print(result.text)
if 'action' in kwds and kwds.get('format') != 'json':
if result.text.strip() != 'OK':
raise Exception(result.text.strip())
return True
if kwds.get('format') == 'json':
return result.json()
return result.text.strip()
def setQueue(self, queue, names):
self.clear(queue)
for name in names:
self.add(queue, name)
def queue(self, queue):
response = self._api('INFO', info='agentsconnected',
queue = queue,
format='json',
)
if not response: return []
return [
ns(
key = persons.byExtension(extension),
extension = extension,
name = persons.name(persons.byExtension(extension)),
paused = status.get('1') == 'paused',
disconnected = status['2'] is None or status['2'] == 'UNAVAILABLE',
available = status['2'] == 'NOT_INUSE',
ringing = status['2'] == 'RINGING',
incall = status['2'] == 'INUSE',
ncalls = int(status['0']),
secondsInCalls = int(status.get('3','0')),
secondsSinceLastCall = 0, # TODO
flags = [status['2']] if status['2'] and status['2'] not in (
'UNAVAILABLE', 'NOT_INUSE', 'RINGING', 'INUSE',
) else [],
)
for extension, status in response.items()
]
def pause(self, queue, name, paused=True):
extension = persons.extension(name)
if not extension: return
response = self._api('AGENT',
action='pause' if paused else 'unpause',
queue = queue,
extension = extension,
reason = 'notimplemented',
)
def resume(self, queue, name):
self.pause(queue, name, False)
def add(self, queue, name):
extension = persons.extension(name)
if not extension: return
response = self._api('QUEUE', action='add',
id = queue,
extension = extension,
type='NF', # agent type: non-follow
)
def clear(self, queue):
response = self._api('QUEUE', action='clean',
id = queue,
)
def stats(self, queue, date=None):
date = date or '{:%Y-%m-%d}'.format(datetime.date.today())
stats = ns(
self._api('INFO',
info='queue',
id=queue,
format='json',
),
DATE=date,
)
fields = [
'date',
'callsreceived',
'answeredcalls',
'abandonedcalls',
'timedoutcalls',
'talktime',
'averagetalktime',
'holdtime',
'averageholdtime',
'maxholdtime',
]
return ns([
(attr, stats[attr.upper()])
for attr in fields
])
def _allExtensions(self):
return self._api('MANAGEDB',
object='extension',
action='list',
format='json',
).items()
def addExtension(self, extension, fullname, email=''):
for id, extensionInfo in self._allExtensions():
if extensionInfo['ex_number'] == extension:
break
else:
return
jsondata = json.dumps(dict(
ex_name = fullname,
))
self._api('MANAGEDB',
object='extension',
action='update',
objectid=id,
jsondata=jsondata,
format='json',
)
def removeExtension(self, extension):
self.addExtension(extension,'')
def clearExtensions(self):
for id, extensionInfo in self._allExtensions():
if not extensionInfo.get('ex_name'):
continue
self._api('MANAGEDB',
object='extension',
action='update',
objectid=id,
jsondata='{"ex_name": ""}',
format='json',
)
def extensions(self):
return [(
extensionInfo['ex_number'],
extensionInfo['ex_name'],
)
for id, extensionInfo in self._allExtensions()
if extensionInfo['ex_name']
]
# vim: ts=4 sw=4 et
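# Hedged usage sketch (not part of the original module): assumes dbconfig with a
# tomatic.areavoip section and a persons table mapping names to extensions; the
# person keys below are placeholders.
#
# pbx = AreaVoip()
# queue = AreaVoip.defaultQueue()
# pbx.setQueue(queue, ['alice', 'bob'])
# for agent in pbx.queue(queue):
#     print(agent.name, agent.paused, agent.ncalls)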
|
lsbardel/python-stdnet
|
covrun.py
|
Python
|
bsd-3-clause
| 177
| 0
|
import sys
import os
from runtests import run
if __name__ == '__main__':
if sys.version_info > (3, 3):
run(coverage=True, coveralls=True)
else:
run()
|
to266/hyperspy
|
hyperspy/tests/signal/test_signal_subclass_conversion.py
|
Python
|
gpl-3.0
| 2,881
| 0
|
import nose.tools as nt
import numpy as np
from hyperspy.signal import Signal
from hyperspy import signals
from hyperspy.exceptions import DataDimensionError
class Test1d:
def setUp(self):
self.s = Signal(np.arange(2))
@nt.raises(DataDimensionError)
def test_as_image(self):
self.s.as_image((0, 1))
def test_as_spectrum(self):
np.testing.assert_array_equal(self.s.data, self.s.as_spectrum(0).data)
def test_set_EELS(self):
s = self.s.as_spectrum(0)
s.set_signal_type("EELS")
nt.assert_equal(s.metadata.Signal.signal_type, "EELS")
nt.assert_is_instance(s, signals.EELSSpectrum)
class Test2d:
def setUp(self):
self.s = Signal(np.random.random((2, 3)))
def test_as_image_T(self):
nt.assert_equal(
self.s.data.T.shape, self.s.as_image((0, 1)).data.shape)
def test_as_image(self):
nt.assert_equal(
self.s.data.shape, self.s.as_image((1, 0)).data.shape)
def test_as_spectrum_T(self):
nt.assert_equal(
self.s.data.T.shape, self.s.as_spectrum(0).data.shape)
def test_as_spectrum(self):
nt.assert_equal(
self.s.data.shape, self.s.as_spectrum(1).data.shape)
def test_s2EELS2im2s(self):
s = self.s.as_spectrum(0)
s.set_signal_type("EELS")
im = s.as_image((1, 0))
nt.assert_equal(im.metadata.Signal.signal_type, "EELS")
s = im.as_spectrum(0)
nt.assert_equal(s.metadata.Signal.signal_type, "EELS")
nt.assert_is_instance(s, signals.EELSSpectrum)
class Test3d:
def setUp(self):
self.s = Signal(np.random.random((2, 3, 4)))
def test_as_image_contigous(self):
nt.assert_true(self.s.as_image((0, 1)).data.flags['C_CONTIGUOUS'])
def test_as_image_1(self):
nt.assert_equal(
self.s.as_image((0, 1)).data.shape, (4, 2, 3))
def test_as_image_2(self):
nt.assert_equal(
self.s.as_image((1, 0)).data.shape, (4, 3, 2))
def test_as_image_3(self):
nt.assert_equal(
self.s.as_image((1, 2)).data.shape, (3, 4, 2))
def test_as_spectrum_contigous(self):
nt.assert_true(self.s.as_spectrum(0).data.flags['C_CONTIGUOUS'])
def test_as_spectrum_0(self):
nt.assert_equal(
self.s.as_spectrum(0).data.shape, (2, 4, 3))
def test_as_spectrum_1(self):
nt.assert_equal(
self.s.as_spectrum(1).data.shape, (3, 4, 2))
def test_as_spectrum_2(self):
nt.assert_equal(
self.s.as_spectrum(1).data.shape, (3, 4, 2))
def test_as_spectrum_3(self):
nt.assert_equal(
self.s.as_spectrum(2).data.shape, (2, 3, 4))
def test_remove_axis(self):
im = self.s.as_image((-2, -1))
im._remove_axis(-1)
nt.assert_is_instance(im, signals.Spectrum)
|
gatgui/pygl
|
python/test_gl.py
|
Python
|
lgpl-3.0
| 6,380
| 0.025549
|
# Copyright (C) 2009 Gaetan Guidet
#
# This file is part of pygl.
#
# luagl is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or (at
# your option) any later version.
#
# luagl is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import sys
import pygl
from pygl import gl
from pygl import glut
prog = None
mesh = None
mesh_idx = None
use_arrays = True
elapsed_time = 0 # in milliseconds
step = 1
time_param = 1
def checkVertex(element):
if element[0] != element.position.x:
raise Exception("position.x mismatch")
if element[1] != element.position.y:
raise Exception("position.y mismatch")
if element[2] != element.position.z:
raise Exception("position.z mismatch")
if element[3] != element.texcoord.s:
raise Exception("texcoord.s mismatch")
if element[4] != element.texcoord.t:
raise Exception("texcoord.t mismatch")
def initMesh():
global mesh, mesh_idx
mesh_fmt = pygl.buffer.MakeStructure(["position", ["x", "y", "z"]], ["texcoord", ["s", "t"]])
mesh = pygl.buffer.New(pygl.buffer.Float, mesh_fmt, 4)
# StructuredBuffer sucks ...
mesh_idx = pygl.buffer.New(pygl.buffer.Ushort, 4)
mesh_idx[0] = 0
mesh_idx[1] = 1
mesh_idx[2] = 2
mesh_idx[3] = 3
v = mesh.element(0)
v[0] = 0
v[1] = 0
v[2] = 0
v[3] = 0
v[4] = 0
print("check v0")
checkVertex(v)
v = mesh.element(1)
v.position.x = 1
v.position.y = 0
v.position.z = 0
v.texcoord.s = 1
v.texcoord.t = 0
print("check v1")
checkVertex(v)
v = mesh.element(2)
v[0] = 1
v[1] = 1
v[2] = 0
v[3] = 1
v[4] = 1
print("check v2")
checkVertex(v)
v = mesh.element(3)
v[0] = 0
v[1] = 1
v[2] = 0
v[3] = 0
v[4] = 1
print("check v3")
checkVertex(v)
print(mesh)
def drawMesh():
global mesh, mesh_idx, use_arrays
if use_arrays == True:
positions = mesh.field("position")
texcoords = mesh.field("texcoord")
stride = mesh.elementSize
gl.EnableClientState(gl.VERTEX_ARRAY)
gl.ClientActiveTexture(gl.TEXTURE0)
gl.EnableClientState(gl.TEXTURE_COORD_ARRAY)
gl.TexCoordPointer(2, gl.FLOAT, stride, texcoords.rawPtr)
gl.VertexPointer(3, gl.FLOAT, stride, positions.rawPtr)
#gl.DrawArrays(gl.QUADS, 0, 4)
gl.DrawElements(gl.QUADS, 4, gl.UNSIGNED_SHORT, mesh_idx.rawPtr)
gl.DisableClientState(gl.TEXTURE_COORD_ARRAY)
gl.DisableClientState(gl.VERTEX_ARRAY)
else:
gl.Begin(gl.QUADS)
for e in mesh:
gl.MultiTexCoord2f(gl.TEXTURE0, e.texcoord.s, e.texcoord.t);
gl.Color3f(1, 1, 1)
gl.Vertex3fv(e.position)
gl.End()
def initShaders():
global prog
vprog_src = [
"void main() {\n",
" gl_TexCoord[0] = gl_MultiTexCoord0;\n",
" gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;\n",
"}\n"
]
fprog_src = [
"uniform float time;\n",
"void main() {\n",
" gl_FragColor = time * vec4(gl_TexCoord[0].s, gl_TexCoord[0].t, 1.0, 0.0);\n",
"}\n"
]
prog = gl.CreateProgram()
vprog = gl.CreateShader(gl.VERTEX_SHADER)
gl.ShaderSource(vprog, vprog_src)
gl.CompileShader(vprog)
print("Compile vertex shader: %s" % gl.GetShaderInfoLog(vprog))
fprog = gl.CreateShader(gl.FRAGMENT_SHADER)
gl.ShaderSource(fprog, fprog_src)
gl.CompileShader(fprog)
print("Compile fragment shader: %s" % gl.GetShaderInfoLog(fprog))
gl.AttachShader(prog, vprog)
gl.AttachShader(prog, fprog)
gl.LinkProgram(prog)
print("Link: %s" % gl.GetProgramInfoLog(prog))
print("Vertex shader source: %s" % gl.GetShaderSource(vprog))
print("Fragment shader source: %s" % gl.GetShaderSource(fprog))
# callbacks
def display():
global prog, time_param
gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
gl.PushMatrix()
gl.UseProgram(prog)
loc = gl.GetUniformLocation(prog, "time")
gl.Uniform1f(loc, time_param)
drawMesh()
gl.UseProgram(0)
gl.PopMatrix()
glut.SwapBuffers()
def reshape(w, h):
print("reshape %dx%d" % (w, h))
gl.Viewport(0, 0, w, h)
gl.MatrixMode(gl.PROJECTION)
gl.LoadIdentity()
gl.Ortho(0, 1, 0, 1, -1, 1)
gl.MatrixMode(gl.MODELVIEW)
gl.LoadIdentity()
glut.PostRedisplay()
def keyboard(key, x, y):
global use_arrays
if (key == 27):
print("Quit")
glut.Exit(0)
elif key == 97: # 'A'
use_arrays = not use_arrays
glut.PostRedisplay()
else:
print("Key: %s" % key)
def menu(val):
print("MenuEntry %s selected" % val)
if val == 1:
print("Quit")
glut.Exit(0)
def submenu(val):
print("SubMenuEntry %s selected: %s" % (val, ["Hello", "Goodbye"][val]))
def fade():
global elapsed_time, step, time_param
time_param = elapsed_time * 0.01
elapsed_time = elapsed_time + step
if elapsed_time >= 100:
step = -1
elif elapsed_time == 0:
step = 1
glut.PostRedisplay()
#glut.TimerFunc(1, fade, 0)
def printMatrix(m):
s = ""
# line
for i in xrange(4):
s = s + "{ ";
# col
for j in xrange(4):
s = s + "%s " % m[i+j*4]
s = s + "}\n"
print(s)
def initGL():
gl.Init()
print("OpenGL version: %s" % gl.version)
gl.ClearColor(0, 0, 0, 1)
gl.ClearDepth(1)
gl.DepthFunc(gl.LESS)
gl.ShadeModel(gl.SMOOTH)
gl.Enable(gl.LIGHTING)
gl.Enable(gl.LIGHT0)
gl.Enable(gl.DEPTH_TEST)
gl.Enable(gl.CULL_FACE)
gl.FrontFace(gl.CCW)
initMesh()
initShaders()
glut.Init()
glut.InitWindowPosition(50, 50)
glut.InitWindowSize(640, 480)
dm = glut.RGBA|glut.DEPTH|glut.DOUBLE
glut.InitDisplayMode(dm)
glut.CreateWindow("PyGLUT")
initGL()
glut.DisplayFunc(display)
glut.ReshapeFunc(reshape)
glut.KeyboardFunc(keyboard)
#glut.TimerFunc(1, fade, 0)
glut.IdleFunc(fade)
smid = glut.CreateMenu(submenu)
glut.AddMenuEntry("Hello", 0)
glut.AddMenuEntry("Goodbye", 1)
mid = glut.CreateMenu(menu)
glut.AddSubMenu("Greetings", smid)
glut.AddMenuEntry("Quit", 1)
glut.AttachMenu(glut.RIGHT_BUTTON)
glut.MainLoop()
|
toddpalino/kafka-tools
|
kafka/tools/protocol/responses/describe_acls_v0.py
|
Python
|
apache-2.0
| 1,614
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.responses import BaseResponse
class DescribeAclsV0Response(BaseResponse):
schema = [
{'name': 'throttle_time_ms', 'type': 'int32'},
{'name': 'error_code', 'type': 'int16'},
{'name': 'error_message', 'type': 'string'},
{'name': 'resources',
'type': 'array',
'item_type': [
{'name': 'resource_type', 'type': 'int8'},
{'name': 'resource_name', 'type': 'string'},
{'name': 'acls',
'type': 'array',
'item_type': [
{'name': 'principal', 'type': 'string'},
{'name': 'host', 'type': 'string'},
{'name': 'operation', 'type': 'int8'},
{'name': 'permission_type', 'type': 'int8'},
]},
]},
]
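# Illustrative sketch (not part of the original module): a decoded DescribeAcls v0
# response following the schema above would nest like the dict below. All values
# here are hypothetical and only show the structure the schema implies.
_example_decoded_response = {
    'throttle_time_ms': 0,
    'error_code': 0,
    'error_message': None,
    'resources': [
        {'resource_type': 2,              # e.g. a topic resource
         'resource_name': 'example-topic',
         'acls': [
             {'principal': 'User:alice',
              'host': '*',
              'operation': 3,             # e.g. READ
              'permission_type': 3},      # e.g. ALLOW
         ]},
    ],
}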
|
wonder-sk/inasafe
|
safe/impact_functions/inundation/flood_raster_population/metadata_definitions.py
|
Python
|
gpl-3.0
| 5,408
| 0
|
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact on
Population.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Rizky Maulana Nugraha'
from safe.common.utilities import OrderedDict
from safe.defaults import (
default_minimum_needs,
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_continuous,
layer_geometry_raster,
hazard_flood,
hazard_category_single_event,
unit_metres,
unit_feet,
count_exposure_unit,
exposure_population
)
class FloodEvacuationRasterHazardMetadata(ImpactFunctionMetadata):
"""Metadata for FloodEvacuationFunction.
.. versionadded:: 2.1
    We only need to re-implement as_dict(); all other behaviours
    are inherited from the abstract base class.
"""
@staticmethod
def as_dict():
"""Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'FloodEvacuationRasterHazardFunction',
'name': tr('Raster flood on population'),
'impact': tr('Need evacuation'),
'title': tr('Need evacuation'),
'function_type': 'old-style',
'author': 'AIFDR',
'date_implemented': 'N/A',
'overview': tr(
'To assess the impacts of flood inundation in raster '
'format on population.'),
'detailed_description': tr(
'The population subject to inundation exceeding a '
'threshold (default 1m) is calculated and returned as a '
'raster layer. In addition the total number of affected '
'people and the required needs based on the user '
'defined minimum needs are reported. The threshold can be '
'changed and even contain multiple numbers in which case '
'evacuation and needs are calculated using the largest number '
'with population breakdowns provided for the smaller numbers. '
'The population raster is resampled to the resolution of the '
'hazard raster and is rescaled so that the resampled '
'population counts reflect estimates of population count '
'per resampled cell. The resulting impact layer has the '
'same resolution and reflects population count per cell '
'which are affected by inundation.'),
'hazard_input': tr(
'A hazard raster layer where each cell represents flood '
'depth (in meters).'),
'exposure_input': tr(
'An exposure raster layer where each cell represent '
'population count.'),
'output': tr(
'Raster layer contains people affected and the minimum '
'needs based on the people affected.'),
'actions': tr(
'Provide details about how many people would likely need '
'to be evacuated, where they are located and what '
'resources would be required to support them.'),
'limitations': [
tr('The default threshold of 1 meter was selected based '
'on consensus, not hard evidence.')
],
'citations': [],
'layer_requirements': {
'hazard': {
                    'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'hazard_categories': [hazard_category_single_event],
'hazard_types': [hazard_flood],
'continuous_hazard_units': [unit_feet, unit_metres],
'vector_hazard_classifications': [],
'raster_hazard_classifications': [],
'additional_keywords': []
},
                'exposure': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'exposure_types': [exposure_population],
'exposure_units': [count_exposure_unit],
'exposure_class_fields': [],
'additional_keywords': []
}
},
'parameters': OrderedDict([
('thresholds [m]', [1.0]),
('postprocessors', OrderedDict([
('Gender', default_gender_postprocessor()),
('Age', age_postprocessor()),
('MinimumNeeds', minimum_needs_selector()),
])),
('minimum needs', default_minimum_needs())
])
}
return dict_meta
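# Minimal illustrative sketch (not part of InaSAFE): the calculation described in
# 'detailed_description' above reduces to a threshold test on the hazard raster.
# Given a flood-depth array and a resampled population-count array of the same
# shape, the affected population is the sum of counts in cells at or above the
# threshold. The function name, argument names and default threshold are
# assumptions made only for this sketch.
def _affected_population_sketch(depth, population, threshold=1.0):
    import numpy
    depth = numpy.asarray(depth, dtype=float)
    population = numpy.asarray(population, dtype=float)
    # Sum population only where the flood depth reaches the threshold.
    return float(population[depth >= threshold].sum())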
|
jesseengel/magenta
|
magenta/pipelines/pipeline_test.py
|
Python
|
apache-2.0
| 8,715
| 0.004246
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from magenta.common import testing_lib
from magenta.pipelines import pipeline
from magenta.pipelines import statistics
import tensorflow as tf
MockStringProto = testing_lib.MockStringProto # pylint: disable=invalid-name
class MockPipeline(pipeline.Pipeline):
def __init__(self):
super(MockPipeline, self).__init__(
input_type=str,
output_type={'dataset_1': MockStringProto,
'dataset_2': MockStringProto})
def transform(self, input_object):
return {
'dataset_1': [
MockStringProto(input_object + '_A'),
MockStringProto(input_object + '_B')],
'dataset_2': [MockStringProto(input_object + '_C')]}
class PipelineTest(tf.test.TestCase):
def testFileIteratorRecursive(self):
target_files = [
('0.ext', b'hello world'),
('a/1.ext', b'123456'),
('a/2.ext', b'abcd'),
('b/c/3.ext', b'9999'),
('b/z/3.ext', b'qwerty'),
('d/4.ext', b'mary had a little lamb'),
('d/e/5.ext', b'zzzzzzzz'),
('d/e/f/g/6.ext', b'yyyyyyyyyyy')]
extra_files = [
('stuff.txt', b'some stuff'),
('a/q/r/file', b'more stuff')]
root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
for path, contents in target_files + extra_files:
abs_path = os.path.join(root_dir, path)
tf.gfile.MakeDirs(os.path.dirname(abs_path))
tf.gfile.GFile(abs_path, mode='w').write(contents)
file_iterator = pipeline.file_iterator(root_dir, 'ext', recurse=True)
self.assertEqual(set(contents for _, contents in target_files),
set(file_iterator))
def testFileIteratorNotRecursive(self):
target_files = [
('0.ext', b'hello world'),
('1.ext', b'hi')]
extra_files = [
('a/1.ext', b'123456'),
('a/2.ext', b'abcd'),
('b/c/3.ext', b'9999'),
('d/e/5.ext', b'zzzzzzzz'),
('d/e/f/g/6.ext', b'yyyyyyyyyyy'),
('stuff.txt', b'some stuff'),
('a/q/r/file', b'more stuff')]
root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
for path, contents in target_files + extra_files:
abs_path = os.path.join(root_dir, path)
tf.gfile.MakeDirs(os.path.dirname(abs_path))
tf.gfile.GFile(abs_path, mode='w').write(contents)
file_iterator = pipeline.file_iterator(root_dir, 'ext', recurse=False)
self.assertEqual(set(contents for _, contents in target_files),
set(file_iterator))
def testTFRecordIterator(self):
tfrecord_file = os.path.join(
tf.resource_loader.get_data_files_path(),
'../testdata/tfrecord_iterator_test.tfrecord')
self.assertEqual(
[MockStringProto(string)
for string in [b'hello world', b'12345', b'success']],
list(pipeline.tf_record_iterator(tfrecord_file, MockStringProto)))
def testRunPipelineSerial(self):
strings = ['abcdefg', 'helloworld!', 'qwerty']
root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
pipeline.run_pipeline_serial(
MockPipeline(), iter(strings), root_dir)
dataset_1_dir = os.path.join(root_dir, 'dataset_1.tfrecord')
dataset_2_dir = os.path.join(root_dir, 'dataset_2.tfrecord')
self.assertTrue(tf.gfile.Exists(dataset_1_dir))
self.assertTrue(tf.gfile.Exists(dataset_2_dir))
dataset_1_reader = tf.python_io.tf_record_iterator(dataset_1_dir)
self.assertEqual(
set([('serialized:%s_A' % s).encode('utf-8') for s in strings] +
[('serialized:%s_B' % s).encode('utf-8') for s in strings]),
set(dataset_1_reader))
dataset_2_reader = tf.python_io.tf_record_iterator(dataset_2_dir)
self.assertEqual(
set(('serialized:%s_C' % s).encode('utf-8') for s in strings),
set(dataset_2_reader))
def testPipelineIterator(self):
strings = ['abcdefg', 'helloworld!', 'qwerty']
result = pipeline.load_pipeline(MockPipeline(), iter(strings))
self.assertEqual(
set([MockStringProto(s + '_A') for s in strings] +
[MockStringProto(s + '_B') for s in strings]),
set(result['dataset_1']))
self.assertEqual(
set(MockStringProto(s + '_C') for s in strings),
set(result['dataset_2']))
def testPipelineKey(self):
    # This happens if PipelineKey() is used on a pipeline without a dictionary
# output, or the key is not in the output_type dict.
pipeline_inst = MockPipeline()
pipeline_key = pipeline_inst['dataset_1']
self.assertTrue(isinstance(pipeline_key, pipeline.PipelineKey))
self.assertEqual(pipeline_key.key, 'dataset_1')
self.assertEqual(pipeline_key.unit, pipeline_inst)
self.assertEqual(pipeline_key.output_type, MockStringProto)
with self.assertRaises(KeyError):
_ = pipeline_inst['abc']
class TestPipeline(pipeline.Pipeline):
def __init__(self):
super(TestPipeline, self).__init__(str, str)
def transform(self, input_object):
pass
pipeline_inst = TestPipeline()
with self.assertRaises(KeyError):
_ = pipeline_inst['abc']
with self.assertRaises(ValueError):
_ = pipeline.PipelineKey(1234, 'abc')
def testInvalidTypeSignatureError(self):
class PipelineShell(pipeline.Pipeline):
def transform(self, input_object):
pass
_ = PipelineShell(str, str)
_ = PipelineShell({'name': str}, {'name': str})
good_type = str
for bad_type in [123, {1: str}, {'name': 123},
{'name': str, 'name2': 123}, [str, int]]:
with self.assertRaises(pipeline.InvalidTypeSignatureError):
PipelineShell(bad_type, good_type)
with self.assertRaises(pipeline.InvalidTypeSignatureError):
PipelineShell(good_type, bad_type)
def testPipelineGivenName(self):
class TestPipeline123(pipeline.Pipeline):
def __init__(self):
super(TestPipeline123, self).__init__(str, str, 'TestName')
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('counter_1', 5),
statistics.Counter('counter_2', 10)])
return []
pipe = TestPipeline123()
self.assertEqual(pipe.name, 'TestName')
pipe.transform('hello')
stats = pipe.get_stats()
self.assertEqual(
set((stat.name, stat.count) for stat in stats),
set([('TestName_counter_1', 5), ('TestName_counter_2', 10)]))
def testPipelineDefaultName(self):
class TestPipeline123(pipeline.Pipeline):
def __init__(self):
super(TestPipeline123, self).__init__(str, str)
self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('counter_1', 5),
statistics.Counter('counter_2', 10)])
return []
pipe = TestPipeline123()
self.assertEqual(pipe.name, 'TestPipeline123')
    pipe.transform('hello')
stats = pipe.get_stats()
self.assertEqual(
set((stat.name, stat.count) for stat in stats),
set([('TestPipeline123_counter_1', 5),
('TestPipeline123_counter_2', 10)]))
def testInvalidStatisticsError(self):
class TestPipeline1(pipeline.Pipeline):
def __init__(self):
super(TestPipeline1, self).__init__(object, object)
        self.stats = []
def transform(self, input_object):
self._set_stats([statistics.Counter('counter_1', 5), 12345])
return []
class TestPipeline2(pipeline
|
ianawilson/BbQuick
|
docs/conf.py
|
Python
|
mit
| 6,974
| 0.006739
|
# -*- coding: utf-8 -*-
#
# BbQuick documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 10 20:55:10 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BbQuick'
copyright = u'2012, Ian A Wilson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BbQuickdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BbQuick.tex', u'BbQuick Documentation',
u'Ian A Wilson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bbquick', u'BbQuick Documentation',
[u'Ian A Wilson'], 1)
]
|
edx/edx-platform
|
cms/djangoapps/contentstore/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 1,882
| 0.004782
|
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PushNotificationConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                'ordering': ('-change_date',),
'abstract': False,
},
),
migrations.CreateModel(
name='VideoUploadConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('profile_whitelist', models.TextField(help_text='A comma-separated list of names of profiles to include in video encoding downloads.', blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
Code4SA/umibukela
|
umibukela/migrations/0029_auto_20170226_0745.py
|
Python
|
mit
| 1,763
| 0.002269
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import umibukela.models
class Migration(migrations.Migration):
dependencies = [
        ('umibukela', '0028_cycle_materials'),
]
operations = [
migrations.CreateModel(
name='ProgrammeKoboRefreshToken',
fields=[
                ('programme', models.OneToOneField(related_name='kobo_refresh_token', primary_key=True, serialize=False, to='umibukela.Programme')),
('token', models.TextField()),
],
),
migrations.RenameModel(
old_name='KoboRefreshToken',
new_name='UserKoboRefreshToken',
),
migrations.AddField(
model_name='cycle',
name='auto_import',
field=models.BooleanField(default=False),
preserve_default=False,
),
migrations.AddField(
model_name='cycleresultset',
name='site_option_name',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AlterField(
model_name='cycle',
name='materials',
field=models.FileField(null=True, upload_to=umibukela.models.cycle_materials_filename, blank=True),
),
migrations.AlterField(
model_name='cycleresultset',
name='cycle',
field=models.ForeignKey(related_name='cycle_result_sets', to='umibukela.Cycle'),
),
migrations.AlterField(
model_name='cycleresultset',
name='survey',
field=models.ForeignKey(related_name='cycle_result_sets', blank=True, to='umibukela.Survey', null=True),
),
]
|
gongleiarei/qemu
|
scripts/analyze-migration.py
|
Python
|
gpl-2.0
| 20,683
| 0.006479
|
#!/usr/bin/env python
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import numpy as np
import json
import os
import argparse
import collections
import pprint
def mkdir_p(path):
try:
os.makedirs(path)
except OSError:
pass
class MigrationFile(object):
def __init__(self, filename):
self.filename = filename
self.file = open(self.filename, "rb")
def read64(self):
return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])
def read32(self):
return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])
def read16(self):
return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])
def read8(self):
return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])
def readstr(self, len = None):
if len is None:
len = self.read8()
if len == 0:
return ""
return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]
def readvar(self, size = None):
if size is None:
size = self.read8()
if size == 0:
return ""
value = self.file.read(size)
if len(value) != size:
raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
return value
def tell(self):
return self.file.tell()
# The VMSD description is at the end of the file, after EOF. Look for
# the last NULL byte, then for the beginning brace of JSON.
def read_migration_debug_json(self):
QEMU_VM_VMDESCRIPTION = 0x06
# Remember the offset in the file when we started
entrypos = self.file.tell()
# Read the last 10MB
self.file.seek(0, os.SEEK_END)
endpos = self.file.tell()
self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
datapos = self.file.tell()
data = self.file.read()
# The full file read closed the file as well, reopen it
self.file = open(self.filename, "rb")
# Find the last NULL byte, then the first brace after that. This should
# be the beginning of our JSON data.
nulpos = data.rfind("\0")
jsonpos = data.find("{", nulpos)
# Check backwards from there and see whether we guessed right
self.file.seek(datapos + jsonpos - 5, 0)
if self.read8() != QEMU_VM_VMDESCRIPTION:
raise Exception("No Debug Migration device found")
jsonlen = self.read32()
# Seek back to where we were at the beginning
self.file.seek(entrypos, 0)
return data[jsonpos:jsonpos + jsonlen]
def close(self):
self.file.close()
class RamSection(object):
RAM_SAVE_FLAG_COMPRESS = 0x02
RAM_SAVE_FLAG_MEM_SIZE = 0x04
RAM_SAVE_FLAG_PAGE = 0x08
RAM_SAVE_FLAG_EOS = 0x10
RAM_SAVE_FLAG_CONTINUE = 0x20
RAM_SAVE_FLAG_XBZRLE = 0x40
RAM_SAVE_FLAG_HOOK = 0x80
def __init__(self, file, version_id, ramargs, section_key):
if version_id != 4:
raise Exception("Unknown RAM version %d" % version_id)
self.file = file
self.section_key = section_key
self.TARGET_PAGE_SIZE = ramargs['page_size']
self.dump_memory = ramargs['dump_memory']
self.write_memory = ramargs['write_memory']
self.sizeinfo = collections.OrderedDict()
self.data = collections.OrderedDict()
self.data['section sizes'] = self.sizeinfo
self.name = ''
if self.write_memory:
self.files = { }
if self.dump_memory:
self.memory = collections.OrderedDict()
self.data['memory'] = self.memory
def __repr__(self):
return self.data.__repr__()
def __str__(self):
return self.data.__str__()
def getDict(self):
return self.data
def read(self):
# Read all RAM sections
while True:
addr = self.file.read64()
flags = addr & (self.TARGET_PAGE_SIZE - 1)
addr &= ~(self.TARGET_PAGE_SIZE - 1)
if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
while True:
namelen = self.file.read8()
# We assume that no RAM chunk is big enough to ever
# hit the first byte of the address, so when we see
# a zero here we know it has to be an address, not the
# length of the next block.
if namelen == 0:
self.file.file.seek(-1, 1)
break
self.name = self.file.readstr(len = namelen)
len = self.file.read64()
self.sizeinfo[self.name] = '0x%016x' % len
if self.write_memory:
print self.name
mkdir_p('./' + os.path.dirname(self.name))
f = open('./' + self.name, "wb")
f.truncate(0)
f.truncate(len)
self.files[self.name] = f
flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE
if flags & self.RAM_SAVE_FLAG_COMPRESS:
if flags & self.RAM_SAVE_FLAG_CONTINUE:
flags &= ~self.RAM_SAVE_FLAG_CONTINUE
else:
self.name = self.file.readstr()
fill_char = self.file.read8()
# The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
self.files[self.name].seek(addr, os.SEEK_SET)
self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
flags &= ~self.RAM_SAVE_FLAG_COMPRESS
elif flags & self.RAM_SAVE_FLAG_PAGE:
if flags & self.RAM_SAVE_FLAG_CONTINUE:
flags &= ~self.RAM_SAVE_FLAG_CONTINUE
else:
self.name = self.file.readstr()
if self.write_memory or self.dump_memory:
data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
else: # Just skip RAM data
self.file.file.seek(self.TARGET_PAGE_SIZE, 1)
if self.write_memory:
self.files[self.name].seek(addr, os.SEEK_SET)
self.files[self.name].write(data)
if self.dump_memory:
hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata
flags &= ~self.RAM_SAVE_FLAG_PAGE
elif flags & self.RAM_SAVE_FLAG_XBZRLE:
raise Exception("XBZRLE RAM compression is not supported yet")
elif flags & self.RAM_SAVE_FLAG_HOOK:
raise Exception("RAM hooks don't make sense with files")
# End of RAM section
if flags & self.RAM_SAVE_FLAG_EOS:
break
if flags != 0:
raise Exception("Unknown RAM flags: %x" % flags)
def __del__(self):
if self.write_memory:
for key in self.files:
self.files[key].close()
class HTABSection(object):
HASH_PTE_SIZE_64 = 16
def __init__(self, file, version_id, device, section_
|
kennyjoseph/identity_extraction_pub
|
python/utility_code/dependency_parse_handlers.py
|
Python
|
mit
| 6,421
| 0.007164
|
__author__ = 'kjoseph'
import itertools
import Queue
from collections import defaultdict
from dependency_parse_object import DependencyParseObject, is_noun, is_verb
def get_parse(dp_objs):
term_map = {}
map_to_head = defaultdict(list)
for parse_object in dp_objs:
if parse_object.head > 0:
map_to_head[parse_object.head].append(parse_object.id)
term_map[parse_object.id] = parse_object
# first manually combine MWE
#mwe_to_combine = get_mwe_combinations(map_to_head,term_map)
#for mwe in mwe_to_combine:
# combine_terms(mwe,term_map,map_to_head)
#conj_to_combine = get_conj_combinations(map_to_head,term_map)
#for conj in conj_to_combine:
# combine_terms(conj,term_map,map_to_head)
# now manually chunk the nouns together
nouns_to_combine = get_noun_combinations(map_to_head,term_map)
for noun_set in nouns_to_combine:
combine_terms(noun_set,term_map, map_to_head)
verbs_to_combine = get_verb_combinations(map_to_head,term_map)
for verb_set in verbs_to_combine:
combine_terms(verb_set,term_map, map_to_head)
roots =[]
non_terms = []
for parse_object in term_map.values():
if parse_object.head == 0:
roots.append(parse_object)
elif parse_object.head == -1:
non_terms.append(parse_object)
# now build the parse tree
to_parse = Queue.LifoQueue()
for root in reversed(roots):
to_parse.put([root,0])
return to_parse, term_map, map_to_head, non_terms
def get_noun_combinations(map_to_head,term_map):
to_combine = []
for head_id, children in map_to_head.iteritems():
head = term_map[head_id]
if len(children) == 0 or not (is_noun(head.postag) or head.postag in ['D','@','A','R']) :
continue
for child_id in children:
child = term_map[child_id]
if is_noun(child.postag) or child.postag in ['D','@','A','R']:
to_combine.append({child.id, head.id})
return get_combinations(to_combine)
def get_verb_combinations(map_to_head,term_map):
to_combine = []
for head_id, children in map_to_head.iteritems():
head = term_map[head_id]
if len(children) == 0 or not is_verb(head.postag):
continue
for child_id in children:
child = term_map[child_id]
if is_verb(child.postag) and child.id == (head.id +1):
to_combine.append({child.id, head.id})
return get_combinations(to_combine)
def get_mwe_combinations(map_to_head,term_map):
to_combine = []
for head_id, children in map_to_head.iteritems():
head = term_map[head_id]
if len(children) == 0:
continue
for child_id in children:
child = term_map[child_id]
if child.deprel == 'MWE':
to_combine.append({child.id, head.id})
return get_combinations(to_combine)
def get_conj_combinations(map_to_head,term_map):
to_combine = []
    for head_id, children in map_to_head.iteritems():
        head = term_map[head_id]
if len(children) == 0:
continue
for child_id in children:
            child = term_map[child_id]
if child.deprel == 'CONJ':
to_combine.append({child.id, head.id})
return get_combinations(to_combine)
def get_combinations(to_combine):
combination_found = True
while combination_found:
combination_found = False
combos = itertools.combinations(to_combine,2)
removed = []
for d in combos:
            # Skip pairs involving a set that was already merged in this pass.
            if any(d[0] == r or d[1] == r for r in removed):
                continue
if len(d[0].intersection(d[1])) > 0:
combination_found = True
to_combine.append(set.union(d[0],d[1]))
[to_combine.remove(x) for x in to_combine if x == d[0]]
[to_combine.remove(x) for x in to_combine if x == d[1]]
removed.append(d[0])
removed.append(d[1])
return to_combine
def combine_terms(noun_set,term_map, map_to_head):
new_parse_obj = DependencyParseObject(object_ids=noun_set,term_map=term_map)
# okay, we've created a new parse object
# now we need to update the relations to it
for id in noun_set:
if id == new_parse_obj.id:
term_map[id] = new_parse_obj
if id in map_to_head:
for child_id in noun_set:
if child_id in map_to_head[id]:
map_to_head[id].remove(child_id)
else:
# things dependent on this thing need to become dependent on the new parse object
if id in map_to_head:
for child in map_to_head[id]:
if child not in noun_set:
map_to_head[new_parse_obj.id].append(child)
term_map[child].head = new_parse_obj.id
del map_to_head[id]
del term_map[id]
def print_parse(parse_out, term_map, map_to_head):
while not parse_out.empty():
curr_head,level = parse_out.get()
print " "*level + str(level) +" " + curr_head.__unicode__()
for child in reversed(map_to_head.get(curr_head.id,[])):
parse_out.put([term_map[child],level+1])
def get_entities_from_parse(term_map):
all_proper = []
all_entities = []
all_entities_original_ids = []
all_proper_original_ids = []
for k,v in term_map.iteritems():
if is_noun(v.postag) or v.postag == '@' or v.postag == '#':
text = []
split_text = v.text.split()
ent_ids = []
for x in range(len(split_text)):
t = split_text[x]#.strip(string.punctuation)
#if x == 0 and t in stopwords:
# continue
text.append(t)
ent_ids.append(v.all_original_ids[x])
if len(text) > 0 and v.postag != 'O':
if '^' in v.postag and v.text[0].isupper():
all_proper.append(" ".join(text))
all_proper_original_ids.append(sorted(v.all_original_ids))
all_entities.append(" ".join([t.lower() for t in text]))
all_entities_original_ids.append(sorted(ent_ids))
return all_entities, all_proper, all_entities_original_ids, all_proper_original_ids
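# Hedged usage sketch (not part of the original module): get_combinations merges
# any id sets that overlap, repeating until the remaining groups are disjoint.
if __name__ == '__main__':
    groups = [{1, 2}, {2, 3}, {5, 6}]
    merged = get_combinations(groups)
    # {1, 2} and {2, 3} share id 2, so they collapse into {1, 2, 3};
    # {5, 6} is left untouched.
    print(merged)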
|
axbaretto/beam
|
sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/oauth2/flow.py
|
Python
|
apache-2.0
| 9,887
| 0
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 Authorization Flow
.. warning::
    This module is experimental and is subject to change significantly
within major version releases.
This module provides integration with `requests-oauthlib`_ for running the
`OAuth 2.0 Authorization Flow`_ and acquiring user credentials.
Here's an example of using the flow with the installed application
authorization flow::
    import google.oauth2.flow
# Create the flow using the client secrets file from the Google API
# Console.
flow = google.oauth2.flow.Flow.from_client_secrets_file(
'path/to/client_secrets.json',
scopes=['profile', 'email'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
# Tell the user to go to the authorization URL.
auth_url, _ = flow.authorization_url(prompt='consent')
print('Please go to this URL: {}'.format(auth_url))
# The user will get an authorization code. This code is used to get the
# access token.
code = input('Enter the authorization code: ')
flow.fetch_token(code=code)
# You can use flow.credentials, or you can just get a requests session
# using flow.authorized_session.
session = flow.authorized_session()
print(session.get('https://www.googleapis.com/userinfo/v2/me').json())
.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/stable/
.. _OAuth 2.0 Authorization Flow:
https://tools.ietf.org/html/rfc6749#section-1.2
"""
import json
import google.auth.transport.requests
import google.oauth2.credentials
import google.oauth2.oauthlib
class Flow(object):
"""OAuth 2.0 Authorization Flow
This class uses a :class:`requests_oauthlib.OAuth2Session` instance at
:attr:`oauth2session` to perform all of the OAuth 2.0 logic. This class
just provides convenience methods and sane defaults for doing Google's
particular flavors of OAuth 2.0.
Typically you'll construct an instance of this flow using
:meth:`from_client_secrets_file` and a `client secrets file`_ obtained
from the `Google API Console`_.
.. _client secrets file:
https://developers.google.com/identity/protocols/OAuth2WebServer
#creatingcred
.. _Google API Console:
https://console.developers.google.com/apis/credentials
"""
def __init__(self, oauth2session, client_type, client_config):
"""
Args:
oauth2session (requests_oauthlib.OAuth2Session):
The OAuth 2.0 session from ``requests-oauthlib``.
client_type (str): The client type, either ``web`` or
``installed``.
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets
"""
self.client_type = client_type
"""str: The client type, either ``'web'`` or ``'installed'``"""
self.client_config = client_config[client_type]
"""Mapping[str, Any]: The OAuth 2.0 client configuration."""
self.oauth2session = oauth2session
"""requests_oauthlib.OAuth2Session: The OAuth 2.0 session."""
@classmethod
def from_client_config(cls, client_config, scopes, **kwargs):
"""Creates a :class:`requests_oauthlib.OAuth2Session` from client
configuration loaded from a Google-format client secrets file.
Args:
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
Raises:
ValueError: If the client configuration is not in the correct
format.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets
"""
if 'web' in client_config:
client_type = 'web'
elif 'installed' in client_config:
client_type = 'installed'
else:
raise ValueError(
'Client secrets must be for a web or installed app.')
session, client_config = (
google.oauth2.oauthlib.session_from_client_config(
client_config, scopes, **kwargs))
return cls(session, client_type, client_config)
@classmethod
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
"""Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
"""
with open(client_secrets_file, 'r') as json_file:
client_config = json.load(json_file)
return cls.from_client_config(client_config, scopes=scopes, **kwargs)
@property
def redirect_uri(self):
"""The OAuth 2.0 redirect URI. Pass-through to
``self.oauth2session.redirect_uri``."""
return self.oauth2session.redirect_uri
@redirect_uri.setter
def redirect_uri(self, value):
self.oauth2session.redirect_uri = value
def authorization_url(self, **kwargs):
"""Generates an authorization URL.
This is the first step in the OAuth 2.0 Authorization Flow. The user's
browser should be redirected to the returned URL.
This method calls
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
and specifies the client configuration's authorization URI (usually
Google's authorization server) and specifies that "offline" access is
desired. This is required in order to obtain a refresh token.
Args:
kwargs: Additional arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
Returns:
Tuple[str, str]: The generated authorization URL and state. The
user must visit the URL to complete the flow. The state is used
when completing the flow to verify that the request originated
from your application. If your application is using a different
:class:`Flow` instance to obtain the token, you will need to
specify the ``state`` when constructing the :class:`Flow`.
"""
url, state = self.oauth2session.authorization_url(
self.client_config['auth_uri'],
access_type='offline', **kwargs)
return url, state
def fetch_token(self, **kwargs):
"""Completes the Authorization Flow and obtains an access token.
This is the final step in the OAuth 2.0 Authorization Flow. This is
called after the user consents.
This method calls
:meth:`requests_oauthlib.OAuth2Session.fetch_token`
and specifies the client configuration's token URI (usually Google's
token server).
Args:
kwargs: Arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
one of ``code``
|
BinDigit1/EulerProjects
|
Problem 40/Champernownes_constant.py
|
Python
|
gpl-2.0
| 304
| 0.013158
|
import time
output = ''
i=1
start_time = time.time()
while len(output)<1000001:
output +=str(i)
    i += 1
print(int(output[9]) * int(output[99]) *
int(output[999]) * int(output[9999]) *
      int(output[99999]) * int(output[999999]))
print("--- %s seconds ---" % (time.time() - start_time))
|
destos/mfinstop
|
mfinstop/context_processors.py
|
Python
|
gpl-3.0
| 113
| 0
|
from django.conf import settings
def google_ua(request):
    return {'google_ua': settings.GOOGLE_TRACKING_ID}
|
rwl/PyCIM
|
CIM14/IEC61970/OperationalLimits/ActivePowerLimit.py
|
Python
|
mit
| 1,717
| 0.001747
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.OperationalLimits.OperationalLimit import OperationalLimit
class ActivePowerLimit(OperationalLimit):
"""Limit on active power flow.
"""
def __init__(self, value=0.0, *args, **kw_args):
"""Initialises a new 'ActivePowerLimit' instance.
@param value: Value of active power limit.
"""
#: Value of active power limit.
self.value = value
super(ActivePowerLimit, self).__init__(*args, **kw_args)
_attrs = ["value"]
_attr_types = {"value": float}
_defaults = {"
|
value": 0.0}
_enums = {}
_refs = []
_many_refs = []
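# Hedged usage note (not part of the original module): a typical construction,
# assuming the OperationalLimit base class needs no further arguments here.
#   limit = ActivePowerLimit(value=150.0)
#   limit.value  # -> 150.0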
|
GooeyComps/gooey-dist
|
gooeydist/interpreter/matrix.py
|
Python
|
mit
| 2,797
| 0.008223
|
from enum import Enum
# Takes string names or int indices of a type and an attribute.
# Returns None if that type does not have that attribute.
# Else returns the default value for that attribute.
def getDefault(typeName, attrName):
# Determine index of type
if type(typeName) == int:
typeIndex = typeName
elif type(typeName) == str:
typeNameStr = 'TypeName.'+typeName
typeIndex = eval(typeNameStr).value
else:
print("Oops, typeName arg is of the wrong type.")
#Determine index of attribute
if type(attrName) == int:
attrIndex = attrName
elif type(attrName) == str:
attrNameStr = 'AttrName.'+attrName
attrIndex = eval(attrNameStr).value
else:
print("Oops, attrName arg is of the wrong type.")
#Retrieve and return default value for given type and attribute
return matrix[attrIndex][typeIndex]
class TypeName(Enum):
Window = 0
Button = 1
Checkboxes = 2
RadioButtons = 3
DropDown = 4
Text = 5
FormattedText = 6
TextBox = 7
Menu = 8
MenuItem = 9
Search = 10
Image = 11
class AttrName(Enum):
title = 0
text = 1
options = 2
position = 3
size = 4
color = 5
action = 6
hidden = 7
font = 8
fontSize = 9
textColor = 10
source = 11
matrix = [["""Untitled Window""", """""", """Untitled Checkboxes""", """Untitled Radio Buttons""", """Untitled Drop Down""", None, None, """Untitled Text Box""", None, """Untitled Menu Item""", None, """"""],
[None, """Untitled Button""", None, None, None, """Text""", """Formatted Text""", """Type here""", None, None, """Search""", """Image Caption"""],
[None, None, "*""Option 1"" ""Option 2"" ""Option 3""", "*""Option 1"" ""Option 2"" ""Option 3""", "*""Option 1"" ""Option 2"" ""Option 3""", None, None, None, "menuItem1 menuItem2 menuItem3", """Option 1"" ""Option 2"" ""Option 3""", None, None],
[None, 'center', 'center', 'center', 'center', 'center', None, 'center', 'menuBar', None, 'center', 'center'],
['medium', 'medium', 'medium', 'medium', 'medium', 'medium', None, 'medium', None, None, 'medium', 'medium'],
['white', None, None, None, None, 'white', None, None, None, None, None, None],
["""""", """""", None, None, N
|
one, None, None, None, None, None, None, None],
[False, False, False, False, False, False, None, False, False, False, False, False],
["""Times New Roman""", None, None, None, None, None, """Tim
|
es New Roman""", None, None, None, None, None],
[12, None, None, None, None, None, 12, None, None, None, None, None],
['black', None, None, None, None, None, 'black', None, None, None, None, None],
[None, None, None, None, None, None, None, None, None, None, None, 'defaultIcon']]
NUM_ATTRIBUTES = len(matrix)
NUM_TYPES = len(matrix[0])
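# Hedged usage sketch (not part of the original module): getDefault accepts either
# names or indices for the type and the attribute, and returns the default stored
# in `matrix`, or None when that type has no such attribute.
if __name__ == '__main__':
    print(getDefault('Button', 'text'))                            # -> 'Untitled Button'
    print(getDefault(TypeName.Text.value, AttrName.color.value))   # -> 'white'
    print(getDefault('Window', 'source'))                          # -> None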
|
Alwnikrotikz/marinemap
|
lingcod/manipulators/urls.py
|
Python
|
bsd-3-clause
| 487
| 0.01232
|
from django.conf.urls.defaults import *
urlpatterns = patterns('lingcod.manipulators.views',
(r'^test/$', 'testView' ),
(r'^list/([A-Za-z0-9_,]+)/([A-Za-z0-9_,]+)/$', 'mpaManipulatorList' ),
url(r'^([A-Za-z0-9_,]+)/$', 'multi_generic_manipulator_view', name='manipulate'),
    url(r'^$', 'multi_generic_manipulator_view', {'manipulators': None}, name='manipulate-blank'),
    url(r'^/$', 'multi_generic_manipulator_view', {'manipulators': None}, name='manipulate-blank'),
)
|
p2pu/mechanical-mooc
|
groups/tests.py
|
Python
|
mit
| 2,657
| 0.006022
|
from django.test import TestCase
from groups import models as group_model
class SimpleTest(TestCase):
def test_create_group(self):
"""
Test group creation
"""
group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
self.assertTrue('address' in group)
self.assertTrue('description' in group)
self.assertTrue('members' in group)
        self.assertTrue('sequence' in group)
group_copy = group_model.get_group(group['uri'])
self.assertEqual(group, group_copy)
def test_add_group_member(self):
group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
group_model.add_group_member(group['uri'], 'bob@mail.com')
group = group_model.get_group(group['uri'])
        self.assertEqual(len(group['members']), 1)
self.assertEqual(group['members'][0], 'bob@mail.com')
def test_remove_group_member(self):
group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
group_model.add_group_member(group['uri'], 'bob@mail.com')
group_model.add_group_member(group['uri'], 'dick@mail.com')
group_model.remove_group_member(group['uri'], 'dick@mail.com')
group = group_model.get_group(group['uri'])
self.assertEqual(len(group['members']), 1)
self.assertEqual(group['members'][0], 'bob@mail.com')
def test_get_sequence_groups(self):
group = group_model.create_group('group-1-1@mechmooc.com', 'The A team', 1)
group = group_model.create_group('group-1-2@mechmooc.com', 'The B team', 1)
group = group_model.create_group('group-1-3@mechmooc.com', 'The C team', 1)
group = group_model.create_group('group-1-4@mechmooc.com', 'The D team', 1)
group = group_model.create_group('group-1-5@mechmooc.com', 'The E team', 1)
group = group_model.create_group('group-2-1@mechmooc.com', 'The A team', 2)
group = group_model.create_group('group-2-2@mechmooc.com', 'The B team', 2)
group = group_model.create_group('group-2-3@mechmooc.com', 'The C team', 2)
s_1_groups = group_model.get_groups('1')
self.assertIn('group-1-1@mechmooc.com', [group['address'] for group in s_1_groups])
def test_get_member_groups(self):
group = group_model.create_group('ateam@mechmooc.com', 'The A team', 1)
group_model.add_group_member(group['uri'], 'bob@mail.com')
group = group_model.get_group(group['uri'])
groups = group_model.get_member_groups('bob@mail.com')
self.assertEqual(len(groups), 1)
self.assertEqual(groups[0], group)
|
sfu-natlang/HMM-Aligner
|
src/support/proc_no_tag_to_clean.py
|
Python
|
mit
| 510
| 0
|
content =\
    [line.strip().split() for line in open("ut_align_no_tag.a")]
f = open("ut_align_no_tag_clean.a", "w")
for line in content:
for entry in line:
if entry.find('?') != -1:
l, rs = entry.split('?')
rs = rs.split(',')
for r in rs:
f.write(l + '?' + r + " ")
else:
l, rs = entry.split('-')
rs = rs.split(',')
for r in rs:
                f.write(l + '-' + r + " ")
f.write("\n")
f.close()
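# Hedged illustration (not part of the original script): the loop above expands
# each entry like "1?2,3" or "4-5,6" into one pair per right-hand index. The same
# mapping as a small pure function, under those input assumptions:
def expand_entry(entry):
    sep = '?' if '?' in entry else '-'
    left, rights = entry.split(sep)
    return [left + sep + r for r in rights.split(',')]
# expand_entry("1?2,3") -> ['1?2', '1?3'];  expand_entry("4-5,6") -> ['4-5', '4-6']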
|
jamesaud/se1-group4
|
jmatcher/job/migrations/0016_auto_20170411_0342.py
|
Python
|
mit
| 2,381
| 0.00252
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-11 03:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('students', '0007_auto_20170410_0523'),
('job', '0015_auto_20170410_0523'),
]
operations = [
migrations.CreateModel(
name='JobApplication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=200)),
('state', models.CharField(max_length=200)),
('country', models.CharField(max_length=200)),
],
),
migrations.RemoveField(
model_name='jobapply',
name='job_id',
),
migrations.RemoveField(
model_name='jobapply',
name='user_id',
),
migrations.RemoveField(
model_name='job',
name='location',
),
migrations.AlterField(
model_name='job',
name='skills',
field=models.ManyToManyField(null=True, related_name='reqskills', to='users.Skill'),
),
migrations.DeleteModel(
name='JobApply',
),
migrations.AddField(
model_name='jobapplication',
name='job',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='job.Job'),
),
migrations.AddField(
model_name='jobapplication',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Student'),
),
migrations.AddField(
model_name='job',
name='applications',
field=models.ManyToManyField(related_name='applications', through='job.JobApplication', to='students.Student'),
),
    ]
|
hortonworks/hortonworks-sandbox
|
apps/pig/src/pig/migrations/0006_auto__del_logs__add_field_job_status__add_field_job_email_notification.py
|
Python
|
apache-2.0
| 9,272
| 0.00701
|
# encoding: utf-8
# Licensed to Hortonworks, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Hortonworks, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Logs'
db.delete_table('pig_logs')
# Adding field 'Job.status'
db.add_column('pig_job', 'status', self.gf('django.db.models.fields.SmallIntegerField')(default=2), keep_default=False)
# Adding field 'Job.email_notification'
db.add_column('pig_job', 'email_notification', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True), keep_default=False)
# Deleting field 'PigScript.text'
db.delete_column('pig_pigscript', 'text')
# Deleting field 'PigScript.creater'
db.delete_column('pig_pigscript', 'creater_id')
# Adding field 'PigScript.pig_script'
db.add_column('pig_pigscript', 'pig_script', self.gf('django.db.models.fields.TextField')(default=' '), keep_default=False)
# Adding field 'PigScript.user'
db.add_column('pig_pigscript', 'user', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User']), keep_default=False)
# Adding field 'PigScript.saved'
db.add_column('pig_pigscript', 'saved', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
# Adding field 'PigScript.python_script'
db.add_column('pig_pigscript', 'python_script', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Changing field 'PigScript.date_created'
db.alter_column('pig_pigscript', 'date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True))
def backwards(self, orm):
# Adding model 'Logs'
db.create_table('pig_logs', (
('status', self.gf('django.db.models.fields.CharField')(max_length=1)),
('start_time', self.gf('django.db.models.fields.DateTimeField')()),
('end_time', self.gf('django.db.models.fields.DateTimeField')()),
('script_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('pig', ['Logs'])
# Deleting field 'Job.status'
db.delete_column('pig_job', 'status')
# Deleting field 'Job.email_notification'
db.delete_column('pig_job', 'email_notification')
# Adding field 'PigScript.text'
db.add_column('pig_pigscript', 'text', self.gf('django.db.models.fields.TextField')(default=' ', blank=True), keep_default=False)
# Adding field 'PigScript.creater'
db.add_column('pig_pigscript', 'creater', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User']), keep_default=False)
# Deleting field 'PigScript.pig_script'
db.delete_column('pig_pigscript', 'pig_script')
# Deleting field 'PigScript.user'
db.delete_column('pig_pigscript', 'user_id')
# Deleting field 'PigScript.saved'
db.delete_column('pig_pigscript', 'saved')
# Deleting field 'PigScript.python_script'
db.delete_column('pig_pigscript', 'python_script')
# Changing field 'PigScript.date_created'
db.alter_column('pig_pigscript', 'date_created', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pig.job': {
'Meta': {'object_name': 'Job'},
            'email_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'job_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pig.PigScript']"}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '2'}),
'statusdir': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pig.pigscript': {
'Meta': {'object_name': 'PigScript'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django
|
motobyus/moto
|
module_django/tokenauth/jwtTest/urls.py
|
Python
|
mit
| 277
| 0
|
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from rest_framework import routers
from jwtTest import views
router = routers.DefaultRouter()
router.register(r'management', views.ProductAViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
|
FedoraScientific/salome-paravis
|
test/VisuPrs/SWIG_scripts/B9.py
|
Python
|
lgpl-2.1
| 1,819
| 0.00055
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/SWIG_scripts/B9 case
from paravistest import datadir
from presentations import EntityType, ScalarMapOnField, CutPlanesOnField
import pvserver as paravis
import pvsimple
my_paravis = paravis.myParavis
# Get view
view1 = pvsimple.GetRenderView()
# Import MED file
med_file = datadir + "pointe.med"
field_name = "fieldnodedouble"
entity = EntityType.NODE
timestamp = 1
pvsimple.OpenDataFile(med_file)
med_reader = pvsimple.GetActiveSource()
# Create presentations
scalarmap = ScalarMapOnField(med_reader, entity, field_name, timestamp)
pvsimple.ResetCamera(view1)
view2 = pvsimple.CreateRenderView()
cutlines = CutPlanesOnField(med_reader, entity, field_name, timestamp)
pvsimple.ResetCamera(view2)
# Delete
source = cutlines.Input
pvsimple.Delete(source)
pvsimple.Delete(med_reader)
# Clear views from scalar bar and update views
for rview in pvsimple.GetRenderViews():
rview.Representations.Clear()
pvsimple.Render(rview)
|
GNS3/gns3-server
|
gns3server/controller/__init__.py
|
Python
|
gpl-3.0
| 21,528
| 0.002276
|
#!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import json
import uuid
import socket
import shutil
import aiohttp
from ..config import Config
from .project import Project
from .template import Template
from .appliance import Appliance
from .appliance_manager import ApplianceManager
from .template_manager import TemplateManager
from .compute import Compute, ComputeError
from .notification import Notification
from .symbols import Symbols
from ..version import __version__
from .topology import load_topology
from .gns3vm import GNS3VM
from ..utils.get_resource import get_resource
from .gns3vm.gns3_vm_error import GNS3VMError
import logging
log = logging.getLogger(__name__)
class Controller:
"""
    The controller is responsible for managing one or more computes.
"""
def __init__(self):
self._computes = {}
self._projects = {}
self._notification = Notification(self)
self.gns3vm = GNS3VM(self)
self.symbols = Symbols()
self._ssl_context = None
self._appliance_manager = ApplianceManager()
self._template_manager = TemplateManager()
self._iou_license_settings = {"iourc_content": "",
"license_check": True}
self._config_loaded = False
self._config_file = Config.instance().controller_config
log.info("Load controller configuration file {}".format(self._config_file))
async def start(self):
log.info("Controller is starting")
self.load_base_files()
server_config = Config.instance().get_section_config("Server")
Config.instance().listen_for_config_changes(self._update_config)
host = server_config.get("host", "localhost")
port = server_config.getint("port", 3080)
# clients will use the IP they use to connect to
# the controller if console_host is 0.0.0.0
console_host = host
if host == "0.0.0.0":
host = "127.0.0.1"
name = socket.gethostname()
if name == "gns3vm":
name = "Main server"
computes = self._load_controller_settings()
        from gns3server.web.web_server import WebServer
self._ssl_context = WebServer.instance(host=host, port=port).ssl_context()
protocol = server_config.get("protocol", "http")
if self._ssl_context and protocol != "https":
log.warning("Protocol changed to 'https' for local compute because SSL is enabled".format(port))
protocol = "https"
try:
self._local_server = await self.add_compute(compute_id="local",
|
name=name,
protocol=protocol,
host=host,
console_host=console_host,
port=port,
user=server_config.get("user", ""),
password=server_config.get("password", ""),
force=True,
ssl_context=self._ssl_context)
except aiohttp.web.HTTPConflict:
log.fatal("Cannot access to the local server, make sure something else is not running on the TCP port {}".format(port))
sys.exit(1)
for c in computes:
try:
await self.add_compute(**c)
except (aiohttp.web.HTTPError, KeyError):
pass # Skip not available servers at loading
try:
await self.gns3vm.auto_start_vm()
except GNS3VMError as e:
log.warning(str(e))
await self.load_projects()
await self._project_auto_open()
def ssl_context(self):
"""
Returns the SSL context for the server.
"""
return self._ssl_context
def _update_config(self):
"""
Call this when the server configuration file changes.
"""
if self._local_server:
server_config = Config.instance().get_section_config("Server")
self._local_server.user = server_config.get("user")
self._local_server.password = server_config.get("password")
async def stop(self):
log.info("Controller is stopping")
for project in self._projects.values():
await project.close()
for compute in self._computes.values():
try:
await compute.close()
# We don't care if a compute is down at this step
except (ComputeError, aiohttp.web.HTTPError, OSError):
pass
await self.gns3vm.exit_vm()
#self.save()
self._computes = {}
self._projects = {}
async def reload(self):
log.info("Controller is reloading")
self._load_controller_settings()
# remove all projects deleted from disk.
for project in self._projects.copy().values():
if not os.path.exists(project.path) or not os.listdir(project.path):
log.info(f"Project '{project.name}' doesn't exist on the disk anymore, closing...")
await project.close()
self.remove_project(project)
await self.load_projects()
await self._project_auto_open()
def check_can_write_config(self):
"""
Check if the controller configuration can be written on disk
:returns: boolean
"""
try:
os.makedirs(os.path.dirname(self._config_file), exist_ok=True)
if not os.access(self._config_file, os.W_OK):
raise aiohttp.web.HTTPConflict(text="Change rejected, cannot write to controller configuration file '{}'".format(self._config_file))
except OSError as e:
raise aiohttp.web.HTTPConflict(text="Change rejected: {}".format(e))
def save(self):
"""
Save the controller configuration on disk
"""
if self._config_loaded is False:
return
controller_settings = {"computes": [],
"templates": [],
"gns3vm": self.gns3vm.__json__(),
"iou_license": self._iou_license_settings,
"appliances_etag": self._appliance_manager.appliances_etag,
"version": __version__}
for template in self._template_manager.templates.values():
if not template.builtin:
controller_settings["templates"].append(template.__json__())
for compute in self._computes.values():
if compute.id != "local" and compute.id != "vm":
controller_settings["computes"].append({"host": compute.host,
"name": compute.name,
"port": compute.port,
"protocol": compute.protocol,
"user": compute.user,
"password": compute.password,
"compute_id": compute.id})
try:
os.makedirs(os.path.dirname(self
|
nddsg/SimpleDBMS
|
simple_dbms/comparison.py
|
Python
|
gpl-3.0
| 4,943
| 0.001618
|
import re
from conditional_expression import ConditionalExpression
from compare_term import CompareTerm
class Comparison(ConditionalExpression, object):
"""
A class that represents a comparison appearing in a WHERE clause.
"""
EQ = 0
NOTEQ = 1
LT = 2
GT = 3
LTEQ = 4
GTEQ = 5
LIKE = 6
CLIKE = 7
IS_NULL = 8
IS_NOT_NULL = 9
MINTYPE = 0
MAXTYPE = 9
def __init__(self, type, left, right):
"""
Constructs a Comparison object that represents a comparison involving
the specified operator and operands.
:param type: the type of comparison (i.e., the operator)
:param left: the left operand
        :param right: the right operand (may be null)
"""
super(Comparison, self).__init__(left, right)
self.type = type
self.left = left
self.right = right
# For LIKE and CLIKE, create a Pattern object for the regex.
if type == Comparison.LIKE or type == Comparison.CLIKE:
# Convert SQL regex to regex.
expr = self.right.get_value()
expr = expr.replace("%", ".*")
expr = expr.replace('_', '.')
if type == Comparison.LIKE:
self.regex = re.compile(expr)
else:
self.regex = re.compile(expr, re.IGNORECASE)
def get_left_term(self):
"""
Returns the left operand of the comparison represented by the called object
:return: the left operand
"""
return self.left
def get_right_term(self):
"""
Returns the right operand of the comparison represented by the called object
        :return: the right operand (None if it is an IS_NULL or IS_NOT_NULL comparison)
"""
return self.right
def get_type(self):
"""
Returns the type of the comparison represented by the called object
:return: the type of the comparison -- i.e., one of the constants
defined in this class (e.g., Comparison.EQ for equals)
"""
return self.type
def is_true(self):
"""
Evaluates the comparison represented by the called object, based on
the current values of the operands
:return: true if the comparison is true, and false if it is false
"""
left_val = self.left.get_value()
right_val = None
if self.right is not None:
right_val = self.right.get_value()
# To enable comparisons between integers and reals,
# we turn all Integers into Doubles.
if left_val is not None and isinstance(left_val, int):
left_val = float(left_val)
if right_val is not None and isinstance(right_val, int):
right_val = float(right_val)
# Comparisons with type mismatches are always false.
if left_val is not None and right_val is not None and \
left_val.__class__.__name__ != right_val.__class__.__name__:
return False
if self.left.get_val_type() == CompareTerm.INTEGER or self.left.get_val_type() == CompareTerm.REAL:
return self._compare_args(float(left_val), float(right_val))
elif self.left.get_val_type() == CompareTerm.STRING:
if right_val == None:
return self._compare_args(str(left_val), None)
else:
return self._compare_args(str(left_val), str(right_val))
else:
raise Exception("Unknown value type", self.left.get_val_type(), "for left side of comparison")
def _compare_args(self, left_arg, right_arg):
"""
A helper method that performs the actual comparison.
:param left_arg:
:param right_arg:
:return:
"""
if self.type is not Comparison.IS_NULL and self.type is not Comparison.IS_NOT_NULL and (
                left_arg is None or right_arg is None):
return False
if self.type == Comparison.EQ:
return left_arg == right_arg
        elif self.type == Comparison.NOTEQ:
return left_arg != right_arg
elif self.type == Comparison.LT:
return left_arg < right_arg
elif self.type == Comparison.GT:
return left_arg > right_arg
elif self.type == Comparison.LTEQ:
return left_arg <= right_arg
elif self.type == Comparison.GTEQ:
return left_arg >= right_arg
elif self.type == Comparison.LIKE or self.type == Comparison.CLIKE:
left_str = str(left_arg)
if self.regex.match(left_str):
return True
else:
return False
elif self.type == Comparison.IS_NULL:
return left_arg is None
elif self.type == Comparison.IS_NOT_NULL:
return left_arg is not None
else:
raise Exception("Unknown comparison type")
|
bitcraft/firmata_aio
|
firmata_aio/protocol/commands.py
|
Python
|
gpl-3.0
| 1,998
| 0.001001
|
"""
Define command names and provide command/code mappings
"""
from collections import ChainMap
__all__ = [
'nibble_commands',
'byte_commands',
'sysex_commands',
'command_lookup',
'command_names',
]
INPUT, OUTPUT, ANALOG, \
PWM, SERVO, I2C, ONEWIRE, \
STEPPER, ENCODER = range(0, 9)
# do not combine names and packet structure:
# packets sometimes share the same name and code, but have different
# structure depending on the origin (firmata or client)
# do not combine them: their membership in a particular
# group defines the packet structure that builds them
nibble_commands = {
0xE0: ('analog_io_message', ('pin', 'value')),
0x90: ('digital_io_message', ('port', 'value')),
0xC0: ('report_analog_pin', ('pin', 'value')),
0xD0: ('report_digital_port', ('port', 'value')),
}
byte_commands = {
0xF0: ('start_sysex', ()),
0xF4: ('set_pin_mode', ('pin', 'mode')),
0xF5: ('set_digital_pin_value', ('pin', 'value')),
0xF7: ('stop_sysex', ()),
0xF9: ('protocol_version', ('major', 'minor'))
}
sysex_commands = {
0x61: ('encoder_data', ()),
|
0x69: ('analog_mapping_query', ()),
0x6A: ('analog_mapping_response', ()),
    0x6B: ('capability_query', ()),
0x6C: ('capability_response', ()),
0x6D: ('pin_state_query', ()),
0x6E: ('pin_state_response', ()),
0x6F: ('extended_analog', ()),
0x70: ('servo_config', ()),
0x71: ('string_data', ()),
0x72: ('stepper_data', ()),
0x73: ('onewire_data', ()),
0x75: ('shift_data', ()),
0x76: ('i2c_request', ()),
0x77: ('i2c_reply', ()),
0x78: ('i2c_config', ()),
0x79: ('report_firmware', ()),
0x7A: ('sampling_interval', ()),
0x7B: ('scheduler_data', ()),
0x7E: ('sysex_non_realtime', ()),
0x7F: ('sysex_realtime', ()),
}
# Code => Name mapping for all types
command_names = ChainMap(nibble_commands, byte_commands, sysex_commands)
# Name => Code mapping for all types
command_lookup = {v[0]: k for k, v in command_names.items()}
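# Illustration (added; not part of the original module): the two mappings can
# be used in either direction -- code to (name, field names) via command_names,
# and name back to code via command_lookup.
if __name__ == "__main__":
    assert command_names[0xF9] == ('protocol_version', ('major', 'minor'))
    assert command_lookup['start_sysex'] == 0xF0
    assert command_lookup['analog_io_message'] == 0xE0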
|
pisskidney/leetcode
|
easy/27.py
|
Python
|
mit
| 449
| 0
|
#!/usr/bin/python
class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
        :type val: int
:rtype: int
"""
i = 0
j = 0
while i < len(nums):
if nums[i] != val:
nums[j] = nums[i]
j += 1
i += 1
|
return j
s = Solution()
x = [3, 3, 3, 3, 3, 3, 3]
print x
print s.removeElement(x, 3)
print x
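# Note (added): removeElement uses the classic two-pointer compaction -- `i`
# scans every element while `j` marks the next write slot, so values != val
# are moved to the front in place and `j` ends up as the new logical length.
# In the demo above every element equals 3, so removeElement returns 0 and the
# list is printed unchanged.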
|
RichardLMR/generic-qsar-py-utils
|
code/ml_input_utils.py
|
Python
|
gpl-2.0
| 21,062
| 0.033995
|
#########################################################################################################
# ml_input_utils.py
# One of the Python modules written as part of the genericQSARpyUtils project (see below).
#
# ################################################
# #ml_input_utils.py: Key documentation :Contents#
# ################################################
# #1. Overview of this project.
# #2. IMPORTANT LEGAL ISSUES
# #<N.B.: Check this section ("IMPORTANT LEGAL ISSUES") to see whether - and how - you ARE ALLOWED TO use this code!>
# #<N.B.: Includes contact details.>
# ##############################
# #1. Overview of this project.#
# ##############################
# #Project name: genericQSARpyUtils
# #Purpose of this project: To provide a set of Python functions
# #(or classes with associated methods) that can be used to perform a variety of tasks
# #which are relevant to generating input files, from cheminformatics datasets, which can be used to build and
# #validate QSAR models (generated using Machine Learning methods implemented in other software packages)
# #on such datasets.
# #To this end, two Python modules are currently provided.
# #(1) ml_input_utils.py
# #Defines the following class:
# #descriptorsFilesProcessor: This contains methods which can be used to prepare datasets in either CSV or svmlight format, including converting between these formats, based upon previously calculated fingerprints (expressed as a set of tab separated text strings for each instance) or numeric descriptors.
# #(2) ml_functions.py
# #Defines a set of functions which can be used to carry out univariate feature selection,cross-validation etc. for Machine Learning model input files in svmlight format.
# ###########################
# #2. IMPORTANT LEGAL ISSUES#
# ###########################
# Copyright Syngenta Limited 2013
#Copyright (c) 2013-2015 Liverpool John Moores University
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
# THIS PROGRAM IS MADE AVAILABLE FOR DISTRIBUTION WITHOUT ANY FORM OF WARRANTY TO THE
# EXTENT PERMITTED BY APPLICABLE LAW. THE COPYRIGHT HOLDER PROVIDES THE PROGRAM \"AS IS\"
# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM LIES
# WITH THE USER. SHOULD THE PROGRAM PROVE DEFECTIVE IN ANY WAY, THE USER ASSUMES THE
# COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. THE COPYRIGHT HOLDER IS NOT
# RESPONSIBLE FOR ANY AMENDMENT, MODIFICATION OR OTHER ENHANCEMENT MADE TO THE PROGRAM
# BY ANY USER WHO REDISTRIBUTES THE PROGRAM SO AMENDED, MODIFIED OR ENHANCED.
# IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL THE
# COPYRIGHT HOLDER BE LIABLE TO ANY USER FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
# INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
# PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
# OR LOSSES SUSTAINED BY THE USER OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO
# OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGES.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ####################
# See also: http://www.gnu.org/licenses/ (last accessed 14/01/2013)
# Contact:
# 1. richard.marchese_robinson@syngenta.com
# or if this fails
# 2. rmarcheserobinson@gmail.com
# #####################
#########################################################################################################
#<N.B.: All file name manipulation supposes this code is running under Windows!>
import re,os,itertools,sys,csv
from collections import defaultdict #Assumption: Python version >= 2.5
import functools
import pybel
class descriptorsFilesProcessor():
def __init__(self):
pass
def match_ids_to_string_fp_features(self,string_fp_file,jCompoundMapperStringFeatures=False):
id2string_fp_features = {} #N.B.: For now, we will only compute binary descriptors based upon feature occurrence => only the set of unique features per compound is required!
f_in = open(string_fp_file)
try:
lines = [LINE.replace('\n','') for LINE in f_in.readlines()]
assert not 0 == len(lines), " Fingerprints file is empty???"
del LINE
finally:
f_in.close()
del f_in
for LINE in lines:
if jCompoundMapperStringFeatures:
ID = re.sub('(_INDEX=[0-9]+)','',LINE.split('\t')[0])
features = list(set([re.sub('(\:1$)','',raw_feat) for raw_feat in LINE.split('\t')[1:]]))
else:
ID = LINE.split('\t')[0]
features = list(set([raw_feat for raw_feat in LINE.split('\t')[1:]]))
features.sort() #15/01/13:new line inserted
id2string_fp_features[ID] = features
del LINE
#assert len(id2string_fp_features) == len(lines), " Duplicate IDs???" #-Better handled within script body - can call utility function to identify which IDs are duplicated!
return id2string_fp_features
def match_all_unique_features_to_indices(self,id2features,feat2IndexFileName='feat2Index.csv'):
feat2Exists = defaultdict(bool) #is this a faster way to get all unique features than simply building up a list and then applying list(set(built_up_list))?
for id in id2features:
for FEATURE in id2features[id]:
feat2Exists[FEATURE] = True
del id
del FEATURE
feat2Index = defaultdict(int) #values should default to zero - a pre-requisite for this function and convert_id2features_to_svm_light_format_descriptors_file(...)!
#for FEATURE in feat2Exists.keys(): ###15/01/13: commented out
features = feat2Exists.keys() #15/01/13:new line inserted
features.sort() #15/01/13:new line inserted
feat_count = 0 #15/01/13:new line inserted
for FEATURE in features: #15/01/13:new line inserted
#feat2Index[FEATURE] += range(1,len(feat2Exists.keys())+1)[feat2Exists.keys().index(FEATURE)] ###15/01/13: commented out
feat_count += 1 #15/01/13:new line inserted
feat2Index[FEATURE] = feat_count #15/01/13:new line inserted
del FEATURE
del feat_count #15/01/13:new line inserted
#############################################################################################
#Record the correspondence between features and indices for subsequent model intepretation###
#############################################################################################
f_out = open(feat2IndexFileName,'w')
try:
f_out.write('Feature(Quoted),Index\n') #Quoting should make it possible to inspect this file in Excel...
for FEATURE in feat2Index:
f_out.write('"%s",%d\n' % (FEATURE,feat2Index[FEATURE]))
finally:
f_out.close()
del f_out
#############################################################################################
return feat2Index
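#Worked example (added for illustration; not in the original module):
#  id2features = {'m1': ['FeatB', 'FeatA'], 'm2': ['FeatC', 'FeatA']}
#  unique features, sorted: ['FeatA', 'FeatB', 'FeatC']
#  => feat2Index == {'FeatA': 1, 'FeatB': 2, 'FeatC': 3} (indices are 1-based)
#  and the same mapping is written, with quoted feature names, to
#  feat2IndexFileName.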
def generate_molId2DescId2DescValue_from_raw_fp_file(self,raw_fp_file,iSjCompoundMapperStringFeatures=False,unique_features_file=None):
'''
generate_molId2DescId2DescValue_from_raw_fp_file(raw_fp_file,iSjCompoundMapperStringFeatures=False,unique_features_file=None)
(1) raw_fp_file :
Must have the following structure for each line:
molId\tFeatureB\tFeatureC\tFeatureA\tFeatureX....
Must - for now! - have a .txt extension!
(2) unique_features_file :
Must have the same format as feat2IndexFileName (see contents of self.match_all_unique_features_to_indices(...).
'''
id2string_fp_features = self.match_ids_to_string_fp_features(raw_fp_file,iSjCompoundMapperStringFeatures)
if unique_features_file is None:
feat2IndexFileName = re.sub('(\.txt$)','_fpFeat2InitialIndex.csv',raw_fp_file)#17/03/13: actually, it is useful to write this to the same directory as the fingerprints file! => Hopefully any associated errors can be dealt with!#.split
|
jkbrzt/django-settings-export
|
tests/settings.py
|
Python
|
bsd-3-clause
| 688
| 0
|
SECRET_KEY = 'spam'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ROOT_URLCONF = 'tests.urls'
INSTALLED_APPS = ['tests']
DATABASES = {'default': {'NAME': 'db.sqlite',
'ENGINE': 'django.db.backends.sqlite3'}}
# Django < 1.8
TEMPLATE_CONTEXT_PROCESSORS = [
'django_settings_export.settings_export'
]
# Django 1.8+
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django_settings_export.settings_export',
],
},
},
]
FOO = 'foo'
BAR = 'bar'
SETTINGS_EXPORT = [
'FOO',
'BAR',
]
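# Illustration (added; not part of the original test settings): with the
# context processor and SETTINGS_EXPORT above, the exported values should be
# reachable from templates through a `settings` variable, e.g.
#
#   <p>{{ settings.FOO }} / {{ settings.BAR }}</p>
#
# (the `settings` variable name is assumed from django-settings-export's
# documented convention).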
|
ojarva/home-info-display
|
tea_reader_consumer/run.py
|
Python
|
bsd-3-clause
| 1,217
| 0.002465
|
from local_settings import BASE_URL
import datetime
import json
import redis
import requests
import requests.exceptions
class TeaReaderConsumer(object):
def __init__(self):
self.redis = redis.StrictRedis()
def run(self):
pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe("tea-reader-pubsub")
for message in pubsub.listen():
try:
data = json.loads(message["data"])
except (ValueError, TypeError) as err:
print "Failed to decode redis data: %s" % err
continue
resp = requests.get(BASE_URL + "tea/get/" + data["id"])
print resp.content
if resp.status_code != 200:
print "Getting details for %s failed: %s" % (data["id"], resp.status_code)
continue
tag_data = resp.json()
if tag_data["fields"]["boil
|
_water"]:
self.redis.publish("kettle-commands", json.dumps({"on": tag_data["fields"]["boil_water"]}))
|
requests.post(BASE_URL + "tea/get/" + data["id"])
def main():
runner = TeaReaderConsumer()
runner.run()
if __name__ == '__main__':
main()
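# Example (added; the tag id below is a made-up placeholder): the consumer can
# be exercised from another process by publishing a JSON payload with an "id"
# field on the channel it subscribes to, e.g.
#
#   redis.StrictRedis().publish("tea-reader-pubsub", json.dumps({"id": "1234"}))
#
# which makes run() fetch BASE_URL + "tea/get/1234" and, if boil_water is set,
# forward a command on the "kettle-commands" channel.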
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamtvsupertuga/lib/resources/lib/sources/en/glodls.py
|
Python
|
gpl-2.0
| 5,797
| 0.014145
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
import re, urllib, urlparse
from resources.lib.modules import debrid
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['glodls.to']
self.base_link = 'https://glodls.to/'
|
self.tvsearch = 'search_results.php?search={0}&cat=41&incldead=0&inclexternal=0&lang=1&sort=seeders&order=desc'
self.moviesearch = 'search_results.php?search={0}&cat=1&incldead=0&inclexternal=0&lang=1&sort=size&order=desc'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
        except BaseException:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except BaseException:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except BaseException:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url is None:
return sources
if debrid.status() is False:
raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (
data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
if 'tvshowtitle' in data:
url = self.tvsearch.format(urllib.quote_plus(query))
url = urlparse.urljoin(self.base_link, url)
else:
url = self.moviesearch.format(urllib.quote_plus(query))
url = urlparse.urljoin(self.base_link, url)
items = self._get_items(url)
hostDict = hostDict + hostprDict
for item in items:
try:
name = item[0]
quality, info = source_utils.get_release_quality(name, name)
info.append(item[2])
info = ' | '.join(info)
url = item[1]
url = url.split('&tr')[0]
sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
'direct': False, 'debridonly': True})
except BaseException:
pass
return sources
except BaseException:
return sources
def _get_items(self, url):
items = []
try:
headers = {'User-Agent': client.agent()}
r = client.request(url, headers=headers)
posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
posts = [i for i in posts if not 'racker:' in i]
for post in posts:
data = client.parseDOM(post, 'a', ret='href')
url = [i for i in data if 'magnet:' in i][0]
name = client.parseDOM(post, 'a', ret='title')[0]
t = name.split(self.hdlr)[0]
if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(self.title): continue
try:
y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
except BaseException:
y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
if not y == self.hdlr: continue
try:
size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
div = 1 if size.endswith('GB') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
size = '%.2f GB' % size
except BaseException:
size = '0'
items.append((name, url, size))
return items
except BaseException:
return items
def resolve(self, url):
return url
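# Added note: for episodes the search key self.hdlr is built with
# 'S%02dE%02d' % (season, episode), e.g. 'S%02dE%02d' % (1, 2) -> 'S01E02',
# and _get_items() only keeps torrents whose name carries that same tag (or
# the release year in the movie case).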
|
rdhyee/osf.io
|
website/addons/github/tests/test_models.py
|
Python
|
apache-2.0
| 12,663
| 0.001264
|
# -*- coding: utf-8 -*-
import mock
import unittest
from nose.tools import * # noqa
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase, get_default_metaschema
from tests.factories import ExternalAccountFactory, ProjectFactory, UserFactory
from framework.auth import Auth
from website.addons.github.exceptions import NotFoundError
from website.addons.github import settings as github_settings
from website.addons.github.model import GitHubUserSettings
from website.addons.github.model import GitHubNodeSettings
from website.addons.github.tests.factories import (
GitHubAccountFactory,
GitHubNodeSettingsFactory,
GitHubUserSettingsFactory
)
from website.addons.base.testing import models
from .utils import create_mock_github
mock_github = create_mock_github()
class TestNodeSettings(models.OAuthAddonNodeSettingsTestSuiteMixin, OsfTestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = GitHubAccountFactory
NodeSettingsFactory = GitHubNodeSettingsFactory
NodeSettingsClass = GitHubNodeSettings
UserSettingsFactory = GitHubUserSettingsFactory
## Mixin Overrides ##
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'repo': 'mock',
'user': 'abc',
'owner': self.node
}
def test_set_folder(self):
# GitHub doesn't use folderpicker, and the nodesettings model
# does not need a `set_repo` method
pass
def test_serialize_settings(self):
# GitHub's serialized_settings are a little different from
# common storage addons.
settings = self.node_settings.serialize_waterbutler_settings()
expected = {'owner': self.node_settings.user, 'repo': self.node_settings.repo}
assert_equal(settings, expected)
@mock.patch(
'website.addons.github.model.GitHubUserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_complete_has_auth_not_verified(self):
super(TestNodeSettings, self).test_complete_has_auth_not_verified()
@mock.patch('website.addons.github.api.GitHubClient.repos')
@mock.patch('website.addons.github.api.GitHubClient.my_org_repos')
def test_to_json(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
super(TestNodeSettings, self).test_to_json()
@mock.patch('website.addons.github.api.GitHubClient.repos')
@mock.patch('website.addons.github.api.GitHubClient.my_org_repos')
def test_to_json_user_is_owner(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
result = self.node_settings.to_json(self.user)
assert_true(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_true(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), [])
@mock.patch('website.addons.github.api.GitHubClient.repos')
@mock.patch('website.addons.github.api.GitHubClient.my_org_repos')
def test_to_json_user_is_not_owner(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
not_owner = UserFactory()
result = self.node_settings.to_json(not_owner)
assert_false(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_false(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), None)
class TestUserSettings(models.OAuthAddonUserSettingTestSuiteMixin, OsfTestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = GitHubAccountFactory
def test_public_id(self):
assert_equal(self.user.external_accounts[0].display_name, self.user_settings.public_id)
class TestCallbacks(OsfTestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('github', auth=self.consolidated_auth)
self.project.creator.add_addon('github')
self.external_account = GitHubAccountFactory()
self.project.creator.external_accounts.append(self.external_account)
self.project.creator.save()
        self.node_settings = self.project.get_addon('github')
self.user_settings = self.project.creator.get_addon('github')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
        self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.external_account = self.external_account
self.node_settings.save()
self.node_settings.set_auth
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_make_public(self, mock_repo):
mock_repo.side_effect = NotFoundError
result = self.node_settings.before_make_public(self.project)
assert_is(result, None)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_public(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_private(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_public(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('website.addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_private(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
def test_before_page_load_not_contributor(self):
message = self.node_settings.before_page_load(self.project, UserFactory())
assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove
|
vmprof/vmprof-server
|
webapp/wsgi.py
|
Python
|
mit
| 280
| 0
|
# import os
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# from vmprof import DjangoVMPROF
# vmprof = DjangoVMPROF("localhost", 8000, "token")
# app = vmprof(get_wsgi_application())
from django.core.wsgi import get_wsgi_application
app = get_wsgi_application()
|
dropbox/pep8squad
|
yapf/yapflib/subtype_assigner.py
|
Python
|
apache-2.0
| 10,019
| 0.007286
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subtype assigner for lib2to3 trees.
This module assigns extra type information to the lib2to3 trees. This
information is more specific than whether something is an operator or an
identifier. For instance, it can specify if a node in the tree is part of a
subscript.
AssignSubtypes(): the main function exported by this module.
Annotations:
subtype: The subtype of a pytree token. See 'format_token' module for a list
of subtypes.
"""
from lib2to3 import pytree
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
def AssignSubtypes(tree):
"""Run the subtype assigner visitor over the tree, modifying it in place.
Arguments:
tree: the top-level pytree node to annotate with subtypes.
"""
subtype_assigner = _SubtypeAssigner()
subtype_assigner.Visit(tree)
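# Rough usage sketch (added; assumes yapf's pytree_utils module exposes the
# ParseCodeToTree helper used elsewhere in yapf -- treat that name as an
# assumption):
#
#   tree = pytree_utils.ParseCodeToTree('x = a or not b\n')
#   AssignSubtypes(tree)
#
# After the call, leaf tokens carry a 'subtype' annotation: here the 'or' leaf
# is tagged as a binary operator and the 'not' leaf as a unary operator.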
# Map tokens in argument lists to their respective subtype.
_ARGLIST_TOKEN_TO_SUBTYPE = {
'=': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
'*': format_token.Subtype.VARARGS_STAR,
'**': format_token.Subtype.KWARGS_STAR_STAR,
}
class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
"""_SubtypeAssigner - see file-level docstring for detailed description.
The subtype is added as an annotation to the pytree token.
"""
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [',']))
dict_maker = (len(node.children) > 1 and isinstance(
node.children[1], pytree.Leaf) and node.children[1].value == ':')
last_was_comma = False
for child in node.children:
self.Visit(child)
if pytree_utils.NodeName(child) == 'comp_for':
self._SetFirstLeafTokenSubtype(child,
format_token.Subtype.DICT_SET_GENERATOR)
else:
if dict_maker and last_was_comma:
self._SetFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_KEY)
last_was_comma = isinstance(child, pytree.Leaf) and child.value == ','
def Visit_expr_stmt(self, node): # pylint: disable=invalid-name
# expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist)
# | ('=' (yield_expr|testlist_star_expr))*)
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '=':
self._SetTokenSubtype(child, format_token.Subtype.ASSIGN_OPERATOR)
def Visit_or_test(self, node): # pylint: disable=invalid-name
# or_test ::= and_test ('or' and_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'or':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_test(self, node): # pylint: disable=invalid-name
# and_test ::= not_test ('and' not_test)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'and':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == 'not':
self._SetTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
# comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not'
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and child.value in {
'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'not in', 'is', 'is not'
}):
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_star_expr(self, node): # pylint: disable=invalid-name
# star_expr ::= '*' expr
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '*':
self._SetTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_expr(self, node): # pylint: disable=invalid-name
# expr ::= xor_expr ('|' xor_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '|':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_xor_expr(self, node): # pylint: disable=invalid-name
# xor_expr ::= and_expr ('^' and_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '^':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_and_expr(self, node): # pylint: disable=invalid-name
# and_expr ::= shift_expr ('&' shift_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '&':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_shift_expr(self, node): # pylint: disable=invalid-name
# shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}:
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_term(self, node): # pylint: disable=invalid-name
# term ::= factor (('*'|'/'|'%'|'//') factor)*
for child in node.children:
self.Visit(child)
if (isinstance(child, pytree.Leaf) and
child.value in {'*', '/', '%', '//'}):
|
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_factor(self, node): # pylint: disable=invalid-name
# factor ::= ('+'|'-'|'~') factor | power
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value in '+-~':
|
self._SetTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
def Visit_power(self, node): # pylint: disable=invalid-name
# power ::= atom trailer* ['**' factor]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == '**':
self._SetTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
self._SetTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_sliceop(self, node): # pylint: disable=invalid-name
# sliceop ::= ':' [test]
for child in node.children:
self.Visit(child)
if isinstance(child, pytree.Leaf) and child.value == ':':
self._SetTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::=
# test [comp_for] | test '=' test
self._ProcessArgLists(node)
def Visit_arglist(self,
|
google/gazoo-device
|
gazoo_device/utility/retry.py
|
Python
|
apache-2.0
| 3,404
| 0.004994
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function call retry utility."""
import time
from typing import Any, Callable, Mapping, Optional, Sequence, Type, TypeVar
from gazoo_device import errors
def _default_is_successful(_: Any) -> bool:
return True
def not_func(val: Any) -> bool:
"""Returns True if bool(val) evaluates to False."""
return not bool(val)
def is_true(val: Any) -> bool:
"""Returns True if bool(val) evaluates to True."""
return bool(val)
_FuncReturnType = TypeVar("_FuncReturnType")
def retry(
func: Callable[..., _FuncReturnType],
func_args: Sequence[Any] = (),
func_kwargs: Optional[Mapping[str, Any]] = None,
is_successful: Callable[[_FuncReturnType], bool] = _default_is_successful,
timeout: float = 10,
interval: float = 1,
reraise: bool = True,
exc_type: Type[Exception] = errors.CommunicationTimeoutError
) -> _FuncReturnType:
"""Retries func() until it succeeds or timeout is reached.
  Success of execution of func() is determined by the is_successful() function,
  which should return True on successful execution of func().
Args:
func: Function to execute.
func_args: Positional arguments to the function.
    func_kwargs: Keyword arguments to the function.
is_successful: Function which takes in the result of func() and returns
whether function execution should be considered successful. To indicate
success, return True. Defaults to always returning True.
timeout: If no run of func() succeeds in this time period, raise an error.
interval: How long to wait between attempts of func().
reraise: Whether to re-raise exceptions in func() or not. If True, will
      re-raise any exceptions from func(). If False, considers execution of
func() a failure if an Exception is raised. is_successful() will NOT be
called if an Exception occurs.
exc_type: Type of exception to raise when timeout is reached. Note that the
class constructor will be called with just 1 argument.
Returns:
Return value of first successful func() call.
Raises:
Exception: if timeout is reached, or if an Exception occurs in func() with
reraise=True.
"""
if func_kwargs is None:
func_kwargs = {}
tried_times = 0
start_time = time.time()
end_time = start_time + timeout
while time.time() < end_time:
exception_occurred = False
tried_times += 1
try:
func_result = func(*func_args, **func_kwargs)
except Exception: # pylint: disable=broad-except
if reraise:
raise
else:
exception_occurred = True
if not exception_occurred and is_successful(func_result):
return func_result
time.sleep(interval)
time_elapsed = time.time() - start_time
raise exc_type("Timeout in {}s. Tried calling {} {} times.".format(
time_elapsed, func.__name__, tried_times))
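# Minimal usage sketch (added for illustration; nothing below comes from the
# original module). retry() keeps calling the function every `interval`
# seconds until is_successful() accepts the result or `timeout` expires.
if __name__ == "__main__":
  _attempts = {"count": 0}

  def _flaky() -> bool:
    _attempts["count"] += 1
    return _attempts["count"] >= 3  # pretend the call succeeds on the 3rd try

  assert retry(_flaky, is_successful=is_true, timeout=5, interval=0.1)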
|
resmo/cloudstack
|
test/integration/plugins/nuagevsp/test_nuage_sharednetwork_vpc_vm_monitor.py
|
Python
|
apache-2.0
| 30,062
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Component tests for Shared Network functionality with Nuage VSP SDN plugin:
VPC Virtual Machine Monitoring
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.utils import cleanup_resources, validateList
from marvin.lib.base import (VPC,
Account,
Domain,
User,
VirtualMachine,
Network,
NetworkOffering,
VpcOffering)
from marvin.lib.common import list_virtual_machines
from marvin.codes import PASS
# Import System modules
from nose.plugins.attrib import attr
class TestNuageSharedNetworkVpcVmMonitor(nuageTestCase):
@classmethod
def setUpClass(cls):
"""
Create the following domain tree and accounts that are required for
executing Nuage VSP SDN plugin test cases for shared networks:
Under ROOT - create domain D1
Under domain D1 - Create two subdomains D11 and D12
Under each of the domains - create one admin user and couple of
regular users.
Create shared network with the following scope:
1. Network with scope="all"
2. Network with scope="domain" with no subdomain access
3. Network with scope="domain" with subdomain access
4. Network with scope="account"
"""
super(TestNuageSharedNetworkVpcVmMonitor, cls).setUpClass()
cls.sharednetworkdata = cls.test_data["acl"]
cls.nuagenetworkdata = cls.test_data["nuagevsp"]
cls.domain_1 = None
cls.domain_2 = None
try:
# backup default apikey and secretkey
cls.default_apikey = cls.api_client.connection.apiKey
cls.default_secretkey = cls.api_client.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain1"]
)
cls.domain_11 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_12 = Domain.create(
cls.api_client,
cls.sharednetworkdata["domain12"],
parentdomainid=cls.domain_1.id
)
            # Create 1 admin account and 2 user accounts for domain_1
cls.account_d1 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD1B"],
|
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
            # Create 1 admin and 2 user accounts for domain_11
            cls.account_d11 = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
            # Create 2 user accounts for domain_12
cls.account_d12a = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.api_client,
cls.sharednetworkdata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.api_client, cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account and admin account in "ROOT" domain
cls.account_roota = Account.create(
cls.api_client,
cls.sharednetworkdata["accountROOTA"],
admin=False,
)
user = cls.generateKeysForUser(cls.api_client, cls.account_roota)
cls.user_roota_apikey = user.apikey
cls.user_roota_secretkey = user.secretkey
cls.account_root = Account.create(
cls.api_client,
cls.sharednetworkdata["accountROOTA"],
admin=True,
)
user = cls.generateKeysForUser(cls.api_client, cls.account_root)
cls.user_root_apikey = user.apikey
cls.user_root_secretkey = user.secretkey
# service offering is already created in Nuagetestcase
cls.sharednetworkdata['mode'] = cls.zone.networktype
# As admin user , create shared network with scope "all", "domain"
# with subdomain access , "domain" without subdomain access and
# "account"
cls.api_client.connection.apiKey = cls.default_apikey
cls.api_client.connection.securityKey = cls.default_secretkey
cls.shared_network_offering = NetworkOffering.create(
cls.api_client,
cls.test_data["nuagevsp"]["shared_nuage_network_offering"],
conservemode=False
)
# Enable Network offering
cls.shared_network_offering.update(cls.api_client, state='Enabled')
        cls.shared_network_offering_id = cls.shared_network_offering.id
|
tornadomeet/mxnet
|
python/mxnet/contrib/__init__.py
|
Python
|
apache-2.0
| 1,006
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Experimental contributions"""
from . import symbol
from . import ndarray
from . import symbol as sym
from . import ndarray as nd
from . import autograd
from . import tensorboard
from . import text
|
oblalex/django-workflow
|
src/workflow/__init__.py
|
Python
|
mit
| 58
| 0
|
"""
Transactional workflow control for Django models.
"""
|
|
globus/globus-cli
|
src/globus_cli/commands/_common.py
|
Python
|
apache-2.0
| 2,720
| 0.000735
|
import sys
import click
from globus_cli.termio import FORMAT_SILENT, formatted_print
from ..services.transfer import CustomTransferClient
def transfer_task_wait_with_io(
transfer_client: CustomTransferClient,
meow,
heartbeat,
polling_interval,
timeout,
task_id,
timeout_exit_code,
) -> None:
"""
Options are the core "task wait" options, including the `--meow` easter
egg.
This does the core "task wait" loop, including all of the IO.
It *does exit* on behalf of the caller. (We can enhance with a
`noabort=True` param or somesuch in the future if necessary.)
"""
def timed_out(waited_time):
if timeout is None:
return False
else:
return waited_time >= timeout
def check_completed():
completed = transfer_client.task_wait(
task_id, timeout=polling_interval, polling_interval=polling_interval
)
if completed:
if heartbeat:
click.echo("", err=True)
# meowing tasks wake up!
if meow:
click.echo(
r"""
_..
/}_{\ /.-'
( a a )-.___...-'/
==._.== ;
\ i _..._ /,
{_;/ {_//""",
err=True,
)
# TODO: possibly update TransferClient.task_wait so that we don't
        # need to do an extra fetch to get the task status after completion
res = transfer_client.get_task(task_id)
formatted_print(res, text_format=FORMAT_SILENT)
status = res["status"]
if status == "SUCCEEDED":
click.get_current_context().exit(0)
else:
click.get_current_context().exit(1)
return completed
# Tasks start out sleepy
if meow:
click.echo(
r"""
|\ _,,,---,,_
/,`.-'`' -. ;-;;,_
|,4- ) )-,_..;\ ( `'-'
'---''(_/--' `-'\_)""",
err=True,
)
waited_time = 0
while not timed_out(waited_time) and not check_completed():
if heartbeat:
click.echo(".", err=True, nl=False)
sys.stderr.flush()
waited_time += polling_interval
# add a trailing newline to heartbeats if we fail
if heartbeat:
click.echo("", err=True)
exit_code = 1
if timed_out(waited_time):
click.echo(f"Task has yet to complete after {timeout} seconds", err=True)
exit_code = timeout_exit_code
# output json if requested, but nothing for text mode
res = transfer_client.get_task(task_id)
formatted_print(res, text_format=FORMAT_SILENT)
click.get_current_context().exit(exit_code)
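# Hedged usage sketch (added for illustration; not part of the upstream module).
# How a CustomTransferClient is constructed is an assumption -- only the call
# shape below follows from the signature defined above.
#
#   client = CustomTransferClient(...)          # built elsewhere by the CLI
#   transfer_task_wait_with_io(
#       client, meow=False, heartbeat=True, polling_interval=1,
#       timeout=60, task_id="TASK-ID", timeout_exit_code=1,
#   )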
|
alphagov/digitalmarketplace-api
|
migrations/versions/900_add_brief_is_a_copy.py
|
Python
|
mit
| 461
| 0.006508
|
"""Add Brief.is_a_copy boolean, default False, nullable False
Revision ID: 900
Revises: 890
Create Date: 2017-06-01 11:24:53.346954
"""
# revision identifiers, used by Alembic.
revision = '900'
down_revision = '890'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('briefs', sa.Column('is_a_copy', sa.Boolean(), server_default=sa.text(u'false'), nullable=False))
def downgrade():
op.drop_column('briefs', 'is_a_copy')
|
cryptapus/electrum-uno
|
lib/pem.py
|
Python
|
mit
| 6,584
| 0.003493
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses code from TLSLlite
# TLSLite Author: Trevor Perrin)
import binascii
from x509 import ASN1_Node
def a2b_base64(s):
try:
b = bytearray(binascii.a2b_base64(s))
except Exception as e:
raise SyntaxError("base64 error: %s" % e)
return b
def b2a_base64(b):
return binascii.b2a_base64(b)
def dePem(s, name):
"""Decode a PEM string into a bytearray of its payload.
The input must contain an appropriate PEM prefix and postfix
based on the input name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
The first such PEM block in the input will be found, and its
payload will be base64 decoded and returned.
"""
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
start = s.find(prefix)
if start == -1:
raise SyntaxError("Missing PEM prefix")
    end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN %s-----" % name) : end]
retBytes = a2b_base64(s) # May raise SyntaxError
return retBytes
def dePemList(s, name):
"""Decode a sequence of PEM blocks into a list of bytearrays.
The input must contain any number of PEM blocks, each with the appropriate
    PEM prefix and postfix based on the input name string, e.g. for
name="TACK BREAK SIG". Arbitrary text can appear between and before and
after the PEM blocks. For example:
" Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:10Z -----BEGIN TACK
BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg= -----END TACK
BREAK SIG----- Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:11Z
-----BEGIN TACK BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
+kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY= -----END TACK
BREAK SIG----- "
All such PEM blocks will be found, decoded, and return in an ordered list
of bytearrays, which may have zero elements if not PEM blocks are found.
"""
bList = []
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
while 1:
start = s.find(prefix)
if start == -1:
return bList
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s2 = s[start+len(prefix) : end]
retBytes = a2b_base64(s2) # May raise SyntaxError
bList.append(retBytes)
s = s[end+len(postfix) : ]
def pem(b, name):
"""Encode a payload bytearray into a PEM string.
The input will be base64 encoded, then wrapped in a PEM prefix/postfix
based on the name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
"""
s1 = b2a_base64(b)[:-1] # remove terminating \n
s2 = ""
while s1:
s2 += s1[:64] + "\n"
s1 = s1[64:]
s = ("-----BEGIN %s-----\n" % name) + s2 + \
("-----END %s-----\n" % name)
return s
def pemSniff(inStr, name):
searchStr = "-----BEGIN %s-----" % name
return searchStr in inStr
def parse_private_key(s):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return _parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return _parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
def _parsePKCS8(bytes):
s = ASN1_Node(str(bytes))
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID_node = s.next_node(version_node)
ii = s.first_child(rsaOID_node)
rsaOID = decode_OID(s.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
if rsaOID != '1.2.840.113549.1.1.1':
raise SyntaxError("Unrecognized AlgorithmIdentifier")
privkey_node = s.next_node(rsaOID_node)
value = s.get_value_of_type(privkey_node, 'OCTET STRING')
return _parseASN1PrivateKey(value)
def _parseSSLeay(bytes):
return _parseASN1PrivateKey(ASN1_Node(str(bytes)))
def bytesToNumber(s):
return int(binascii.hexlify(s), 16)
def _parseASN1PrivateKey(s):
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = s.next_node(version_node)
e = s.next_node(n)
d = s.next_node(e)
p = s.next_node(d)
q = s.next_node(p)
dP = s.next_node(q)
dQ = s.next_node(dP)
qInv = s.next_node(dQ)
return map(lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER')), [n, e, d, p, q, dP, dQ, qInv])
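# Hedged usage sketch (added for illustration; not part of the upstream file):
# round-trip a payload through pem()/dePem() defined above, in the module's
# original Python 2 environment. The payload and block name are demo assumptions.
if __name__ == '__main__':
    payload = bytearray(b"hello world")
    armored = pem(payload, "CERTIFICATE")            # wrap in BEGIN/END CERTIFICATE lines
    assert dePem(armored, "CERTIFICATE") == payload  # decodes back to the same bytes
    print(armored)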
|
malept/js-sphinx-inventory
|
sphinx_inventory/js/mdn.py
|
Python
|
apache-2.0
| 1,705
| 0
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import json
import logging
from ._compat import ElementTree, urlopen
MDN_SITEMAP = 'https://developer.mozilla.org/sitemaps/en-US/sitemap.xml'
SITEMAP_NS = 'http://www.sitemaps.org/schemas/sitemap/0.9'
log = logging.getLogger(__name__)
def parse():
"""
Generate a cross-reference dictionary for the MDN JavaScript Reference.
:rtype: dict
"""
with urlopen(MDN_SITEMAP) as f:
xml = ElementTree.parse(f)
refs = defaultdict(dict)
for loc in xml.iterfind('{{{ns}}}url/{{{ns}}}loc'.format(ns=SITEMAP_NS)):
url = loc.text
if 'JavaScript/Reference/Global_Objects/' not in url:
continue
        url_suffix = url[81:]
parts = url_suffix.split('/')
if len(parts) == 1:
name = parts[0]
if name[0].isupper():
ref_type = 'class'
else:
ref_type = 'data'
elif len(parts) == 2:
cls, attr = parts
with urlopen('{url}$json'.format(url=url)) as f:
metadata = json.loads(f.read().decode('utf-8'))
name = '{0}.{1}'.format(cls, attr)
if 'Method' in metadata['tags']:
ref_type = 'function'
elif 'Property' in metadata['tags']:
ref_type = 'attribute'
else:
fmt = 'Unknown ref_type for {0}. Tags: {1}'
log.warning(fmt.format(url, ', '.join(metadata['tags'])))
continue
else:
log.warning('Skipping URL (too many parts): {0}'.format(url))
continue
refs[ref_type][name] = url_suffix
return dict(refs)
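# Hedged sketch (added for illustration; not part of the upstream module): parse()
# returns a mapping of Sphinx ref types to {name: URL suffix}, roughly of the form
#   {'class': {'Array': 'Array', ...},
#    'function': {'Array.map': 'Array/map', ...},
#    'attribute': {'Array.length': 'Array/length', ...}}
# The demo below only summarises the result and needs network access to MDN.
if __name__ == '__main__':
    for ref_type, entries in sorted(parse().items()):
        print('{0}: {1} entries'.format(ref_type, len(entries)))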
|
vintasoftware/tapioca-mandrill
|
testing.py
|
Python
|
bsd-3-clause
| 299
| 0
|
from decouple import config
from tapioca_mandrill import Mandrill
from tapioca.exceptions import TapiocaException
api = Mandrill(key=config('KEY'))
try:
r = api.users(method='ping').post()
except TapiocaException as e:
    print e.client().data()
    print e.client().response().status_code
|
sjohannes/exaile
|
plugins/amazoncovers/__init__.py
|
Python
|
gpl-2.0
| 2,845
| 0.000351
|
# Copyright (C) 2006 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 1, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import time
from xl import common, covers, event, providers, settings
import _ecs as ecs
import amazonprefs
logger = logging.getLogger(__name__)
AMAZON = None
USER_AGENT = None
def enable(exaile):
if exaile.loading:
event.add_callback(_enable, "exaile_loaded")
else:
_enable(None, exaile, None)
def _enable(eventname, exaile, nothing):
global AMAZON, USER_AGENT
USER_AGENT = exaile.get_user_agent_string('amazoncovers')
AMAZON = AmazonCoverSearch()
providers.register('covers', AMAZON)
def disable(exaile):
providers.unregister('covers', AMAZON)
def get_preferences_pane():
return amazonprefs
class AmazonCoverSearch(covers.CoverSearchMethod):
"""
Searches amazon for an album cover
"""
name = 'amazon'
    title = 'Amazon'
def __init__(self):
self.starttime = 0
def find_covers(self, track, limit=-1):
"""
Searches amazon for album covers
"""
try:
artist = track.get_tag_raw('artist')[0]
album = track.get_tag_raw('album')[0]
except (AttributeError, TypeError):
            return []
# get the settings for amazon key and secret key
api_key = settings.get_option('plugin/amazoncovers/api_key', '')
secret_key = settings.get_option('plugin/amazoncovers/secret_key', '')
if not api_key or not secret_key:
logger.warning(
'Please enter your Amazon API and secret '
'keys in the Amazon Covers preferences'
)
return []
# wait at least 1 second until the next attempt
waittime = 1 - (time.time() - self.starttime)
if waittime > 0:
time.sleep(waittime)
self.starttime = time.time()
search = "%s - %s" % (artist, album)
try:
albums = ecs.search_covers(search, api_key, secret_key, USER_AGENT)
except ecs.AmazonSearchError:
return []
return albums
def get_cover_data(self, url):
return common.get_url_contents(url, USER_AGENT)
|
braysia/CellTK
|
celltk/preprocess.py
|
Python
|
mit
| 1,780
| 0.001685
|
"""
Any operations to make img from img.
python celltk/preprocess.py -f gaussian_laplace -i c0/img_00000000*
"""
# from scipy.ndimage import imread
import argparse
from utils.file_io import make_dirs, imsave
from utils.util import imread
from utils.parser import ParamParser, parse_image_files
import logging
from utils.global_holder import holder
import preprocess_operation
logger = logging.getLogger(__name__)
def caller(inputs, output, functions, params):
holder.inputs = inputs
make_dirs(output)
logger.info("Functions {0} for {1} images.".format(functions, len(inputs)))
for holder.frame, holder.path in enumerate(inputs):
img = imread(holder.path)
        for function, param in zip(functions, params):
func = getattr(preprocess_operation, function)
img = func(img, **param)
imsave(img, output, holder.path)
logger.info("\tframe {0} done.".format(holder.frame))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="images", nargs="*")
parser.add_argument("-o", "--output", help="output directory", type=str, default='temp')
parser.add_argument("-f", "--functions", help="functions", nargs="*")
parser.add_argument('-p', '--param', nargs='+', help='parameters', action='append')
# parser.add_argument("-p", "--param", nargs="*", help="parameters", default=[])
args = parser.parse_args()
if args.functions is None:
print help(preprocess_operation)
return
params = ParamParser(args.param).run()
args.input = parse_image_files(args.input)
holder.args = args
caller(args.input, args.output, args.functions, params)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
Arceliar/bmwrapper
|
incoming.py
|
Python
|
mit
| 9,110
| 0.010538
|
import socket
import threading
import email.mime.text
import email.mime.image
import email.mime.multipart
import email.header
import bminterface
import re
import select
import logging
class ChatterboxConnection(object):
END = "\r\n"
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def sendall(self, data, END=END):
data += END
self.conn.sendall(data)
def recvall(self, END=END):
data = []
while True:
chunk = self.conn.recv(4096)
if END in chunk:
data.append(chunk[:chunk.index(END)])
break
data.append(chunk)
if len(data) > 1:
pair = data[-2] + data[-1]
if END in pair:
data[-2] = pair[:pair.index(END)]
data.pop()
break
return "".join(data)
def handleUser(data):
d = data.split()
logging.debug("data:%s" % d)
username = d[-1]
if username[:3] == 'BM-':
logging.debug("Only showing messages for %s" % username)
bminterface.registerAddress(username)
else:
logging.debug("Showing all messages in the inbox")
bminterface.registerAddress(None)
return "+OK user accepted"
def handlePass(data):
return "+OK pass accepted"
def _getMsgSizes():
msgCount = bminterface.listMsgs()
msgSizes = []
for msgID in range(msgCount):
logging.debug("Parsing msg %i of %i" % (msgID+1, msgCount))
dateTime, toAddress, fromAddress, subject, body = bminterface.get(msgID)
msgSizes.append(len(makeEmail(dateTime, toAddress, fromAddress, subject, body)))
return msgSizes
def handleStat(data):
msgSizes = _getMsgSizes()
msgCount = len(msgSizes)
msgSizeTotal = 0
for msgSize in msgSizes:
msgSizeTotal += msgSize
returnData = '+OK %i %i' % (msgCount, msgSizeTotal)
logging.debug("Answering STAT: %i %i" % (msgCount, msgSizeTotal))
return returnData
def handleList(data):
dataList = data.split()
cmd = dataList[0]
msgSizes = _getMsgSizes()
if len(dataList) > 1:
msgId = dataList[1]
# means the server wants a single message response
i = int(msgId) - 1
if i >= len(msgSizes):
return "-ERR no such message"
else:
msgSize = msgSizes[i]
return "+OK %s %s" % (msgId, msgSize)
msgCount = 0
returnDataPart2 = ''
msgSizeTotal = 0
for msgSize in msgSizes:
msgSizeTotal += msgSize
msgCount += 1
returnDataPart2 += '%i %i\r\n' % (msgCount, msgSize)
returnDataPart2 += '.'
returnDataPart1 = '+OK %i messages (%i octets)\r\n' % (msgCount, msgSizeTotal)
returnData = returnDataPart1 + returnDataPart2
logging.debug("Answering LIST: %i %i" % (msgCount, msgSizeTotal))
logging.debug(returnData)
return returnData
def handleTop(data):
msg = 'test'
logging.debug(data.split())
cmd, msgID, lines = data.split()
msgID = int(msgID)-1
lines = int(lines)
logging.debug(lines)
dateTime, toAddress, fromAddress, subject, body = bminterface.get(msgID)
logging.debug(subject)
msg = makeEmail(dateTime, toAddress, fromAddress, subject, body)
top, bot = msg.split("\n\n", 1)
#text = top + "\r\n\r\n" + "\r\n".join(bot[:lines])
return "+OK top of message follows\r\n%s\r\n." % top
def handleRetr(data):
logging.debug(data.split())
msgID = int(data.split()[1])-1
dateTime, toAddress, fromAddress, subject, body = bminterface.get(msgID)
msg = makeEmail(dateTime, toAddress, fromAddress, subject, body)
return "+OK %i octets\r\n%s\r\n." % (len(msg), msg)
def handleDele(data):
msgID = int(data.split()[1])-1
bminterface.markForDelete(msgID)
return "+OK I'll try..."
def handleNoop(data):
return "+OK"
def handleQuit(data):
bminterface.cleanup()
return "+OK just pretend I'm gone"
def handleCapa(data):
returnData = "+OK List of capabilities follows\r\n"
for k in dispatch:
returnData += "%s\r\n" % k
returnData += "."
return returnData
def handleUIDL(data):
data = data.split()
logging.debug(data)
if len(data) == 1:
refdata = bminterface.getUIDLforAll()
logging.debug(refdata)
returnData = '+OK\r\n'
for msgID, d in enumerate(refdata):
returnData += "%s %s\r\n" % (msgID+1, d)
returnData += '.'
else:
refdata = bminterface.getUIDLforSingle(int(data[1])-1)
logging.debug(refdata)
returnData = '+OK ' + data[0] + str(refdata[0])
return returnData
def makeEmail(dateTime, toAddress, fromAddress, subject, body):
body = parseBody(body)
msgType = len(body)
if msgType == 1:
msg = email.mime.text.MIMEText(body[0], 'plain', 'UTF-8')
else:
        msg = email.mime.multipart.MIMEMultipart('mixed')
bodyText = email.mime.text.MIMEText(body[0], 'plain', 'UTF-8')
body = body[1:]
msg.attach(bodyText)
for item in body:
img = 0
itemType, itemData = [0], [0]
try:
itemType, itemData = item.split(';', 1)
itemType = itemType.split('/', 1)
except:
logging.warning("Could not parse message type")
pass
if itemType[0] == 'image':
try:
itemDataFinal = itemData.lstrip('base64,').strip(' ').strip('\n').decode('base64')
img = email.mime.image.MIMEImage(itemDataFinal)
except:
#Some images don't auto-detect type correctly with email.mime.image
#Namely, jpegs with embeded color profiles look problematic
#Trying to set it manually...
try:
itemDataFinal = itemData.lstrip('base64,').strip(' ').strip('\n').decode('base64')
img = email.mime.image.MIMEImage(itemDataFinal, _subtype=itemType[1])
except:
logging.warning("Failed to parse image data. This could be an image.")
logging.warning("This could be from an image tag filled with junk data.")
logging.warning("It could also be a python email.mime.image problem.")
if img:
img.add_header('Content-Disposition', 'attachment')
msg.attach(img)
msg['To'] = toAddress
msg['From'] = fromAddress
msg['Subject'] = email.header.Header(subject, 'UTF-8')
msg['Date'] = dateTime
return msg.as_string()
def parseBody(body):
returnData = []
text = ''
searchString = '<img[^>]*'
attachment = re.search(searchString, body)
while attachment:
imageCode = body[attachment.start():attachment.end()]
imageDataRange = re.search('src=[\"\'][^\"\']*[\"\']', imageCode)
imageData=''
if imageDataRange:
try:
imageData = imageCode[imageDataRange.start()+5:imageDataRange.end()-1].lstrip('data:')
except:
pass
if imageData:
returnData.append(imageData)
body = body[:attachment.start()] + body[attachment.end()+1:]
attachment = re.search(searchString, body)
text = body
returnData = [text] + returnData
return returnData
dispatch = dict(
USER=handleUser,
PASS=handlePass,
STAT=handleStat,
LIST=handleList,
TOP=handleTop,
RETR=handleRetr,
DELE=handleDele,
NOOP=handleNoop,
QUIT=handleQuit,
CAPA=handleCapa,
UIDL=handleUIDL,
)
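# Hedged sketch (added for illustration; not part of the upstream file): one way a
# raw POP3 command line can be routed through the dispatch table above. The real
# server loop below (truncated in this dump) presumably does something similar.
def _dispatch_example(line):
    parts = line.split()
    handler = dispatch.get(parts[0].upper()) if parts else None
    return handler(line) if handler else "-ERR unknown command"
# e.g. _dispatch_example("NOOP") returns "+OK"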
def incomingServer(host, port, run_event):
popthread = threading.Thread(target=incomingServer_main, args=(host, port, run_event))
popthread.daemon = True
popthread.start()
return popthread
def incomingServer_main(host, port, run_event):
sock = None
try:
while run_event.is_set():
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(1)
ready = select.select([sock], [], [], .2)
if ready[0]:
conn, addr = sock.accept()
# stop listening, one client only
sock.close()
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/cross_decomposition/cca_.py
|
Python
|
mit
| 3,192
| 0
|
from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data?
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
super(CCA, self).__init__(n_components=n_components, scale=scale,
                                  deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
|
Rosebotics/pymata-aio
|
pymata_aio/private_constants.py
|
Python
|
gpl-3.0
| 4,660
| 0.000215
|
"""
Copyright (c) 2015 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
class PrivateConstants:
"""
This class contains a set of constants for PyMata internal use .
"""
# the following defines are from Firmata.h
# message command bytes (128-255/ 0x80- 0xFF)
# from this client to firmata
MSG_CMD_MIN = 0x80 # minimum value for a message from firmata
REPORT_ANALOG = 0xC0 # enable analog input by pin #
REPORT_DIGITAL = 0xD0 # enable digital input by port pair
SET_PIN_MODE = 0xF4 # set a pin to INPUT/OUTPUT/PWM/etc
START_SYSEX = 0xF0 # start a MIDI Sysex message
END_SYSEX = 0xF7 # end a MIDI Sysex message
SYSTEM_RESET = 0xFF # reset from MIDI
# messages from firmata
DIGITAL_MESSAGE = 0x90 # send or receive data for a digital pin
ANALOG_MESSAGE = 0xE0 # send or receive data for a PWM configured pin
REPORT_VERSION = 0xF9 # report protocol version
# start of FirmataPlus defined SYSEX commands
KEEP_ALIVE = 0x50 # keep alive message
TONE_DATA = 0x5F # play a tone at a specified frequency and duration
ENCODER_CONFIG = 0x60 # create and enable encoder object
ENCODER_DATA = 0x61 # current encoder position data
SONAR_CONFIG = 0x62 # configure pins to control a sonar distance device
SONAR_DATA = 0x63 # distance data returned
PIXY_CONFIG = 0x64 # configure the Pixy. Configure has 4 subcommands
PIXY_DATA = 0x65 # blocks data returned
# end of FirmataPlus defined SYSEX commands
SERVO_CONFIG = 0x70 # set servo pin and max and min angles
STRING_DATA = 0x71 # a string message with 14-bits per char
STEPPER_DATA = 0x72 # Stepper motor command
I2C_REQUEST = 0x76 # send an I2C read/write request
I2C_REPLY = 0x77 # a reply to an I2C read request
I2C_CONFIG = 0x78 # config I2C settings such as delay times and power pins
REPORT_FIRMWARE = 0x79 # report name and version of the firmware
SAMPLING_INTERVAL = 0x7A # modify the sampling interval
EXTENDED_ANALOG = 0x6F # analog write (PWM, Servo, etc) to any pin
PIN_STATE_QUERY = 0x6D # ask for a pin's current mode and value
PIN_STATE_RESPONSE = 0x6E # reply with pin's current mode and value
CAPABILITY_QUERY = 0x6B # ask for supported modes of all pins
CAPABILITY_RESPONSE = 0x6C # reply with supported modes and resolution
ANALOG_MAPPING_QUERY = 0x69 # ask for mapping of analog to pin numbers
ANALOG_MAPPING_RESPONSE = 0x6A # reply with analog mapping data
# reserved values
SYSEX_NON_REALTIME = 0x7E # MIDI Reserved for non-realtime messages
SYSEX_REALTIME = 0x7F # MIDI Reserved for realtime messages
# reserved for PyMata
PYMATA_VERSION = "2.7"
# each byte represents a digital port
# and its value contains the current port settings
DIGITAL_OUTPUT_PORT_PINS = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
# These values are the index into the data passed by _arduino and
# used to reassemble integer values
MSB = 2
LSB = 1
# enable reporting for REPORT_ANALOG or REPORT_DIGITAL message
# sent to firmata
REPORTING_ENABLE = 1
    # disable reporting for REPORT_ANALOG or REPORT_DIGITAL message
# sent to firmata
REPORTING_DISABLE = 0
# Stepper Motor Sub-commands
STEPPER_CONFIGURE = 0 # configure a stepper motor for operation
STEPPER_STEP = 1 # command a motor to move at the provided speed
STEPPER_LIBRARY_VERSION = 2 # used to get stepper library version number
# Pixy sub commands
    PIXY_INIT = 0  # Initialize the Pixy object and set the max number of blocks to report
PIXY_SET_SERVOS = 1 # directly control the pan and tilt servo motors
PIXY_SET_BRIGHTNESS = 2 # adjust the brightness of the Pixy exposure
PIXY_SET_LED = 3 # control the color of the Pixy LED
# Pin used to store Pixy data
PIN_PIXY_MOSI = 11
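# Hedged sketch (added for illustration; not part of the upstream file): in the
# Firmata protocol these command bytes are OR-ed with a pin or port number, e.g.
# enabling analog reporting for analog pin 2. The pin number is a demo assumption.
if __name__ == '__main__':
    pin = 2
    message = [PrivateConstants.REPORT_ANALOG | pin, PrivateConstants.REPORTING_ENABLE]
    print([hex(b) for b in message])  # ['0xc2', '0x1']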
|
davidam/python-examples
|
rdflib/rdflib-example.py
|
Python
|
gpl-3.0
| 1,020
| 0.004916
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import rdflib
g=rdflib.Graph()
g.load('http://dbpedia.org/resource/Semantic_Web')
for s,p,o in g:
print(s,p,o)
|
puttarajubr/commcare-hq
|
corehq/apps/hqpillow_retry/tasks.py
|
Python
|
bsd-3-clause
| 1,845
| 0.002168
|
from datetime import datetime, timedelta
from celery.schedules import crontab
from celery.task.base import periodic_task
from django.core.mail import mail_admins
from django.core.urlresolvers import reverse
from django.db.models.aggregates import Count
from django.template.loader import render_to_string
from dimagi.utils.web import get_url_base
from pillow_retry.models import PillowError
from django.conf import settings
@periodic_task(run_every=crontab(minute=0), queue='background_queue')
def pillow_retry_notifier():
enddate = datetime.utcnow()
startdate = enddate - timedelta(hours=1)
results = PillowError.objects \
.filter(date_last_attempt__gte=startdate) \
.filter(current_attempt__gte=settings.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS) \
.values_list('pillow', 'error_type') \
.annotate(Count('pillow')) \
.order_by('-pillow__count').all()
if results:
results = list(results)
text_rows = format_text_table([('Pillow', 'Error', 'Count')] + results)
context = {
'startdate': startdate,
'enddate': enddate,
'rows': text_rows,
'url': get_url_base() + reverse('admin_report_dispatcher', args=('pillow_errors',))
}
text_message = render_to_string('hqpillow_retry/email.txt', context)
context['rows'] = results
html_message = render_to_string('hqpillow_retry/email.html', context)
        mail_admins('PillowTop errors in the last hour', text_message, html_message=html_message)
def format_text_table(table):
col_width = [max(len(str(x)) for x in col) for col in zip(*table)]
output = []
for row in table:
inner = " | ".join("{0:{1}}".format(x, col_width[i]) for i, x in enumerate(row))
output.append("| {0} |".format(inner))
return output
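# Hedged sketch (added for illustration; not part of the upstream module):
# format_text_table() pads every column to its widest cell, so
#   format_text_table([('Pillow', 'Error', 'Count'), ('CasePillow', 'KeyError', 3)])
# returns lines like
#   | Pillow     | Error    | Count |
#   | CasePillow | KeyError |     3 |
# (integers are right-aligned by str.format's default numeric alignment).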
|
cloudnull/tribble-api
|
tribble/engine/constructor.py
|
Python
|
gpl-3.0
| 13,205
| 0
|
# =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
import httplib
import logging
import time
import traceback
from libcloud.compute import base
from libcloud.compute import deployment
from libcloud.compute import types
import tribble
from tribble.api import application
from tribble.common.db import db_proc
from tribble.common.db import zone_status
import tribble.engine as engine
from tribble.engine import config_manager
from tribble.engine import connection_engine
from tribble.engine import utils
LOG = logging.getLogger('tribble-engine')
DB = application.DB
class InstanceDeployment(object):
"""Perform actions based on a described application action.
:param packet: ``dict``
"""
def __init__(self, packet):
self.driver = None
self.user_data = None
self.deployment_methods = None
self.packet = packet
self.user_specs = {
'max_tries': 15,
'timeout': 3600
}
        self.zone_status = zone_status.ZoneState(cell=self.packet)
def engine_setup(self):
"""Load connection engine.
this will set the driver user_data and deployment_methods.
"""
_engine = connection_engine.ConnectionEngine(
packet=self.packet
)
self.driver, self.user_data, self.deployment_methods = _engine.run()
def api_setup(self):
"""Ensure that all parts of the Connection Driver is setup properly."""
if not self.driver:
msg = 'No Available Connection'
self.zone_status.error(error_msg=msg)
raise base.DeploymentError(msg)
image = self.user_specs['image'] = engine.ret_image(
conn=self.driver, specs=self.packet
)
if not image:
msg = 'No image_id found'
self.zone_status.error(error_msg=msg)
raise base.DeploymentError(msg)
size = self.user_specs['size'] = engine.ret_size(
conn=self.driver, specs=self.packet
)
if not size:
msg = 'No size_id Found'
self.zone_status.error(error_msg=msg)
raise base.DeploymentError(msg)
server_instances = int(self.packet.get('quantity', 1))
utils.worker_proc(
job_action=self._vm_constructor, num_jobs=server_instances
)
def _build_exception(self, node=None):
"""Log exception on build failure.
Cleanup the failed instance if provided.
:param node: ``object``
"""
tb = traceback.format_exc()
msg = '%s ==> %s' % ('Exception while Building Instance', tb)
if node is None:
_msg = ('Node not deleted because the system did\nnot know what'
' node to delete')
node_none_msg = '%s\n%s' % (_msg, msg)
self.check_for_dead_nodes()
self.zone_status.error(error_msg=node_none_msg)
else:
self.driver.destroy_node(node=node)
self.zone_status.error(error_msg=msg)
LOG.critical(msg)
def _vm_constructor(self):
"""Build a new Instance.
This method will build a new instance with a known provider. If the
provider from the application map has multiple deployment methods
this method will break on the first successful deployment method.
"""
name_convention = self.packet.get('name_convention', 'tribble_node')
node_name = '%s-%s'.lower() % (name_convention, utils.rand_string())
self.packet['node_name'] = self.user_specs['name'] = node_name
LOG.debug(self.user_specs)
LOG.info('Building Node Based on %s' % self.user_specs)
for dep in self.deployment_methods:
action = getattr(self, '_%s' % dep)
node = action(self.user_specs)
self._node_post(info=node)
def vm_constructor(self):
"""Build VMs."""
self.engine_setup()
self.api_setup()
def vm_destroyer(self):
"""Kill an instance from information in our DB.
When an instance is destroyed the instance will be removed from the
configuration management system set in the zones configuration
management table.
"""
self.engine_setup()
LOG.debug('Nodes to Delete %s' % self.packet['uuids'])
try:
node_list = self.driver.list_nodes()
except base.LibcloudError as exp:
self.zone_status.error(error_msg=exp)
LOG.warn('Error When getting Node list for Deleting ==> %s' % exp)
return False
else:
LOG.debug('API connected.')
for node in node_list:
if node.id in self.packet['uuids']:
LOG.info('DELETING %s' % node.id)
try:
time.sleep(utils.stupid_hack())
self.driver.destroy_node(node)
except Exception as exp:
self.zone_status.error(error_msg=exp)
LOG.info('Node %s NOT Deleted ==> %s' % (node.id, exp))
else:
self._node_remove(ids=self.packet['uuids'])
def _remove_user_data(self):
"""Return the user data.
:return: ``object``
"""
remove_packet = self.packet.copy()
remove_packet['job'] = 'instance_delete'
config = config_manager.ConfigManager(packet=remove_packet)
return config.check_configmanager()
def _get_user_data(self, use_ssh=False):
"""Return the user data.
:param use_ssh: ``bol``
:return: ``object``
"""
config = config_manager.ConfigManager(packet=self.packet, ssh=use_ssh)
return config.check_configmanager()
def ssh_deploy(self):
"""Return a Libcloud deployment.MultiStepDeployment object.
Prepare for an SSH deployment Method for any found config management
and or scripts.
:return: ``object``
"""
script = '/tmp/deployment_tribble_%s.sh'
dep_action = []
public_key = self.packet.get('ssh_key_pub')
if public_key:
ssh = deployment.SSHKeyDeployment(key=public_key)
dep_action.append(ssh)
conf_init = self._get_user_data(use_ssh=True)
if conf_init:
conf_init = str(conf_init)
con = deployment.ScriptDeployment(
name=script % utils.rand_string(), script=conf_init
)
dep_action.append(con)
if dep_action:
return deployment.MultiStepDeployment(dep_action)
def _ssh_deploy(self, user_specs):
"""Deploy an instance via SSH.
:param user_specs: ``dict``
:return: ``object``
"""
node = None
try:
user_specs['deploy'] = self.ssh_deploy()
LOG.debug('DEPLOYMENT ARGS: %s' % user_specs)
node = self.driver.deploy_node(**user_specs)
return self.state_wait(node=node)
except Exception:
self._build_exception(node)
def _cloud_init(self, user_specs):
"""Deploy an instance via Cloud Init.
:param user_specs: ``dict``
:return ``object``
"""
user_specs['ex_userdata'] = self._get_user_data()
LOG.debug('DEPLOYMENT ARGS: %s' % user_specs)
return self.driver.create_node(**user_specs)
def _list_instances(self):
"""Return a list of nodes.
:return: ``object``
"""
return self.driver.list_nodes()
def check_for_dead_nodes(self):
"""Look for any instances which may not be in a Running state.
If no nodes are dead return None and if any nodes are dead, delete
the in
|
jiadaizhao/LeetCode
|
1301-1400/1376-Time Needed to Inform All Employees/1376-Time Needed to Inform All Employees.py
|
Python
|
mit
| 524
| 0.005725
|
import collections
from typing import List
class Solution:
    def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:
table = collections.defaultdict(list)
for i, m in enumerate(manager):
table[m].append(i)
Q = collections.deque([(headID, 0)])
mins = 0
while Q:
curr, time = Q.popleft()
mins = max(mins, time)
for e in table[curr]:
Q.append((e, time + informTime[curr]))
        return mins
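# Hedged usage sketch (not part of the original solution file): the standard
# example where employee 2 is the head and informs everyone else directly.
if __name__ == '__main__':
    sol = Solution()
    print(sol.numOfMinutes(6, 2, [2, 2, -1, 2, 2, 2], [0, 0, 1, 0, 0, 0]))  # -> 1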
|
hartym/bonobo
|
tests/util/test_objects.py
|
Python
|
apache-2.0
| 4,935
| 0.001621
|
import operator
import pytest
from bonobo.util.objects import ValueHolder, Wrapper, get_attribute_or_create, get_name
from bonobo.util.testing import optional_contextmanager
class foo:
pass
class bar:
__name__ = "baz"
def test_get_name():
assert get_name(42) == "int"
assert get_name("eat at joe.") == "str"
assert get_name(str) == "str"
assert get_name(object) == "object"
assert get_name(get_name) == "get_name"
assert get_name(foo) == "foo"
assert get_name(foo()) == "foo"
assert get_name(bar) == "bar"
assert get_name(bar()) == "baz"
def test_wrapper_name():
assert get_name(Wrapper(42)) == "int"
    assert get_name(Wrapper("eat at joe.")) == "str"
assert get_name(Wrapper(str)) == "str"
assert get_name(Wrapper(object)) == "object"
assert get_name(Wrapper(foo)) == "foo"
assert get_name(Wrapper(foo())) == "foo"
assert get_name(Wrapper(bar)) == "bar"
assert get_name(Wrapper(bar())) == "baz"
    assert get_name(Wrapper(get_name)) == "get_name"
def test_valueholder():
x = ValueHolder(42)
assert x == 42
x += 1
assert x == 43
assert x + 1 == 44
assert x == 43
y = ValueHolder(44)
assert y == 44
y -= 1
assert y == 43
assert y - 1 == 42
assert y == 43
assert y == x
assert y is not x
assert repr(x) == repr(y) == repr(43)
def test_valueholder_notequal():
x = ValueHolder(42)
assert x != 41
assert not (x != 42)
@pytest.mark.parametrize("rlo,rhi", [(1, 2), ("a", "b")])
def test_valueholder_ordering(rlo, rhi):
vlo, vhi = ValueHolder(rlo), ValueHolder(rhi)
for lo in (rlo, vlo):
for hi in (rhi, vhi):
assert lo < hi
assert hi > lo
assert lo <= lo
assert not (lo < lo)
assert lo >= lo
def test_valueholder_negpos():
neg, zero, pos = ValueHolder(-1), ValueHolder(0), ValueHolder(1)
assert -neg == pos
assert -pos == neg
assert -zero == zero
assert +pos == pos
assert +neg == neg
def test_valueholders_containers():
x = ValueHolder({1, 2, 3, 5, 8, 13})
assert 5 in x
assert 42 not in x
y = ValueHolder({"foo": "bar", "corp": "acme"})
assert "foo" in y
assert y["foo"] == "bar"
with pytest.raises(KeyError):
y["no"]
y["no"] = "oh, wait"
assert "no" in y
assert "oh, wait" == y["no"]
def test_get_attribute_or_create():
class X:
pass
x = X()
with pytest.raises(AttributeError):
x.foo
foo = get_attribute_or_create(x, "foo", "bar")
assert foo == "bar"
assert x.foo == "bar"
foo = get_attribute_or_create(x, "foo", "baz")
assert foo == "bar"
assert x.foo == "bar"
unsupported_operations = {
int: {operator.matmul},
str: {
operator.sub,
operator.mul,
operator.matmul,
operator.floordiv,
operator.truediv,
operator.mod,
divmod,
operator.pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.xor,
operator.or_,
},
}
@pytest.mark.parametrize("x,y", [(5, 3), (0, 10), (0, 0), (1, 1), ("foo", "bar"), ("", "baz!")])
@pytest.mark.parametrize(
"operation,inplace_operation",
[
(operator.add, operator.iadd),
(operator.sub, operator.isub),
(operator.mul, operator.imul),
(operator.matmul, operator.imatmul),
(operator.truediv, operator.itruediv),
(operator.floordiv, operator.ifloordiv),
(operator.mod, operator.imod),
(divmod, None),
(operator.pow, operator.ipow),
(operator.lshift, operator.ilshift),
(operator.rshift, operator.irshift),
(operator.and_, operator.iand),
(operator.xor, operator.ixor),
(operator.or_, operator.ior),
],
)
def test_valueholder_integer_operations(x, y, operation, inplace_operation):
v = ValueHolder(x)
is_supported = operation not in unsupported_operations.get(type(x), set())
isdiv = ("div" in operation.__name__) or ("mod" in operation.__name__)
# forward...
with optional_contextmanager(pytest.raises(TypeError), ignore=is_supported):
with optional_contextmanager(pytest.raises(ZeroDivisionError), ignore=y or not isdiv):
assert operation(x, y) == operation(v, y)
# backward...
with optional_contextmanager(pytest.raises(TypeError), ignore=is_supported):
with optional_contextmanager(pytest.raises(ZeroDivisionError), ignore=x or not isdiv):
assert operation(y, x) == operation(y, v)
# in place...
if inplace_operation is not None:
with optional_contextmanager(pytest.raises(TypeError), ignore=is_supported):
with optional_contextmanager(pytest.raises(ZeroDivisionError), ignore=y or not isdiv):
inplace_operation(v, y)
assert v == operation(x, y)
|
ag-sc/QALD
|
4/scripts/XMLGenerator.py
|
Python
|
mit
| 22,039
| 0.021877
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.dom.minidom as dom
import xml.dom
import os
import socket
import re
import sys
import datetime
import codecs
from SPARQLWrapper import SPARQLWrapper, JSON
endpoint = "http://vtentacle.techfak.uni-bielefeld.de:443/sparql/"
sparql = SPARQLWrapper(endpoint)
# create the document
implement = xml.dom.getDOMImplementation()
#dbpedia_Server="http://vtentacle.techfak.uni-bielefeld.de:443/sparql/?default-graph-uri=&query="
#restdbpedia="&format=text%2Fhtml&debug=on&timeout="
filename_out_html = None
filename_out_xml = None
knoten_id = None
def set_filename_xml_out(time):
global filename_out_xml
filename_out_xml="upload/out"+str(time)+".xml"
def set_filename_out(time):
global filename_out_html
filename_out_html="upload/out"+str(time)+".html"
def _knoten_auslesen(knoten):
try:
string = knoten.firstChild.data.strip().encode("utf-8")
return string
except:
# print "Error in knoten auslesen"
# print "Unexpected error:", sys.exc_info()[0]
pass
def lade_baum(dateiname):
d = {}
c={}
# baum = None
# with codecs.open(dateiname, "r", "utf-8") as in_f:
# #doc.writexml(out)
# baum = dom.parse(in_f)
baum = dom.parse(dateiname.encode( "utf-8" ))
global knoten_id
try:
zaehler=1
for eintrag in baum.firstChild.childNodes:
if(zaehler==1):
knoten_id=((eintrag.parentNode).attributes["id"]).value
zaehler=2
id=""
answertype=""
fusion=""
aggregation=""
onlydbo=""
onlyesdbp = ""
strinbla=""
keywords = []
questions = []
try:
if eintrag.nodeName == "question":
#=eintrag.attributes["id"]
#id=a.value
id=(eintrag.attributes["id"]).value
#id=id
try:
answertype=(eintrag.attributes["answertype"]).value
except Exception:
answertype="ERROR"
try:
fusion=(eintrag.attributes["fusion"]).value
except Exception:
fusion="ERROR"
try:
aggregation=(eintrag.attributes["aggregation"]).value
except Exception:
aggregation="ERROR"
try:
onlydbo=(eintrag.attributes["onlydbo"]).value
except Exception:
onlydbo="ERROR"
try:
onlyesdbp=(eintrag.attributes["onlyesdbp"]).value
except Exception:
onlyesdbp="ERROR"
#print(id)
english_question_text = query = None
#print(eintrag.Attr)
for knoten in eintrag.childNodes:
if knoten.nodeName == "string" or knoten.nodeName == "text":
if (knoten.attributes["lang"]).value == "en":
english_question_text = _knoten_auslesen(knoten)
questions.append([english_question_text,"en"])
error_string = english_question_text
elif (knoten.attributes["lang"]).value == "de":
try:
questions.append([_knoten_auslesen(knoten),"de"])
except:
questions.append(["","de"])
elif (knoten.attributes["lang"]).value == "es":
try:
questions.append([_knoten_auslesen(knoten),"es"])
except:
questions.append(["","es"])
elif (knoten.attributes["lang"]).value == "it":
try:
questions.append([_knoten_auslesen(knoten),"it"])
except:
questions.append(["","it"])
elif (knoten.attributes["lang"]).value == "fr":
try:
questions.append([_knoten_auslesen(knoten),"fr"])
except:
questions.append(["","fr"])
elif (knoten.attributes["lang"]).value == "nl":
try:
questions.append([_knoten_auslesen(knoten),"nl"])
except:
questions.append(["","nl"])
elif knoten.nodeName == "keywords":
if (knoten.attributes["lang"]).value == "en":
try:
keywords.append([_knoten_auslesen(knoten),"en"])
except:
keywords.append(["","en"])
elif (knoten.attributes["lang"]).value == "de":
try:
keywords.append([_knoten_auslesen(knoten),"de"])
except:
keywords.append(["","de"])
elif (knoten.attributes["lang"]).value == "es":
try:
keywords.append([_knoten_auslesen(knoten),"es"])
except:
keywords.append(["","es"])
elif (knoten.attributes["lang"]).value == "it":
try:
keywords.append([_knoten_auslesen(knoten),"it"])
except:
keywords.append(["","it"])
elif (knoten.attributes["lang"]).value == "fr":
try:
keywords.append([_knoten_auslesen(knoten),"fr"])
except:
keywords.append(["","fr"])
elif (knoten.attributes["lang"]).value == "nl":
try:
keywords.append([_knoten_auslesen(knoten),"nl"])
except:
keywords.append(["","nl"])
elif knoten.nodeName == "query":
#query = _knoten_auslesen(knoten)
query=knoten.firstChild.data.strip()
#print "found query: "+str(query)
#add here at the
|
end array with keywords and all language questions
|
d[english_question_text] = [query,id,answertype,fusion,aggregation, onlydbo, questions, keywords, onlyesdbp]
except Exception as inst:
d[error_string] = ["error",id,answertype,fusion,aggregation, onlydbo, questions, keywords, onlyesdbp]
|
cisco-openstack/tempest
|
tempest/api/compute/admin/test_floating_ips_bulk.py
|
Python
|
apache-2.0
| 3,760
| 0
|
# Copyright 2014 NEC Technologies India Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.api.compute import base
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
# TODO(stephenfin): Remove this test class once the nova queens branch goes
# into extended maintenance mode.
class FloatingIPsBulkAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Floating IPs Bulk APIs that require admin privileges.
API documentation - http://docs.openstack.org/api/openstack-compute/2/
content/ext-os-floating-ips-bulk.html
"""
max_microversion = '2.35'
depends_on_nova_network = True
@classmethod
def setup_clients(cls):
super(FloatingIPsBulkAdminTestJSON, cls).setup_clients()
cls.client = cls.os_admin.floating_ips_bulk_client
@classmethod
def resource_setup(cls):
super(FloatingIPsBulkAdminTestJSON, cls).resource_setup()
cls.ip_range = CONF.validation.floating_ip_range
cls.verify_unallocated_floating_ip_range(cls.ip_range)
@classmethod
def verify_unallocated_floating_ip_range(cls, ip_range):
# Verify whether configure floating IP range is not already allocated.
body = cls.client.list_floating_ips_bulk()['floating_ip_info']
allocated_ips_list = map(lambda x: x['address'], body)
for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts():
if str(ip_addr) in allocated_ips_list:
msg = ("Configured unallocated floating IP range is already "
"allocated. Configure the correct unallocated range "
"as 'floating_ip_range'")
raise exceptions.InvalidConfiguration(msg)
return
@decorators.idempotent_id('2c8f145f-8012-4cb8-ac7e-95a587f0e4ab')
@utils.services('network')
def test_create_list_delete_floating_ips_bulk(self):
# Create, List and delete the Floating IPs Bulk
pool = 'test_pool'
# NOTE(GMann): Reserving the IP range but those are not attached
# anywhere. Using the below mentioned interface which is not ever
# expected to be used. Clean Up has been done for created IP range
interface = 'eth0'
body = (self.client.create_floating_ips_bulk(self.ip_range,
pool,
interface)
['floating_ips_bulk_create'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_floating_ips_bulk, self.ip_range)
self.assertEqual(self.ip_range, body['ip_range'])
ips_list = self.client.list_floating_ips_bulk()['floating_ip_info']
self.assertNotEmpty(ips_list)
for ip in netaddr.IPNetwork(self.ip_range).iter_hosts():
self.assertIn(str(ip), map(lambda x: x['address'], ips_list))
body = (self.client.delete_floating_ips_bulk(self.ip_range)
['floating_ips_bulk_delete'])
self.assertEqual(self.ip_range, body)
|
tobegit3hub/cinder_docker
|
cinder/zonemanager/fc_zone_manager.py
|
Python
|
apache-2.0
| 9,631
| 0
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
ZoneManager is responsible to manage access control using FC zoning
when zoning mode is set as 'fabric'.
ZoneManager provides interfaces to add connection and remove connection
for given initiator and target list associated with a FC volume attach and
detach operation.
**Related Flags**
:zone_driver: Used by:class:`ZoneManager`.
Defaults to
`cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver`
:zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none'
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LI
from cinder.volume import configuration as config
from cinder.zonemanager import fc_common
LOG = logging.getLogger(__name__)
zone_manager_opts = [
cfg.StrOpt('zone_driver',
default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver',
help='FC Zone Driver responsible for zone management'),
cfg.StrOpt('zoning_policy',
default='initiator-target',
help='Zoning policy configured by user; valid values include '
'"initiator-target" or "initiator"'),
cfg.StrOpt('fc_fabric_names',
help='Comma separated list of Fibre Channel fabric names.'
' This list of names is used to retrieve other SAN credentials'
' for connecting to each SAN fabric'),
cfg.StrOpt('fc_san_lookup_service',
default='cinder.zonemanager.drivers.brocade'
'.brcd_fc_san_lookup_service.BrcdFCSanLookupService',
help='FC SAN Lookup Service'),
]
CONF = cfg.CONF
CONF.register_opts(zone_manager_opts, group='fc-zone-manager')
class ZoneManager(fc_common.FCCommon):
"""Manages Connection control during attach/detach.
Version History:
1.0 - Initial version
1.0.1 - Added __new__ for singleton
"""
VERSION = "1.0.1"
driver = None
fabric_names = []
def __new__(class_, *args, **kwargs):
if not hasattr(class_, "_instance"):
class_._instance = object.__new__(class_, *args, **kwargs)
return class_._instance
def __init__(self, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
super(ZoneManager, self).__init__(**kwargs)
self.configuration = config.Configuration(zone_manager_opts,
'fc-zone-manager')
self._build_driver()
def _build_driver(self):
zone_driver = self.configuration.zone_driver
LOG.debug("Zone Driver from config: {%s}", zone_driver)
# Initialize vendor specific implementation of FCZoneDriver
self.driver = importutils.import_object(
zone_driver,
configuration=self.configuration)
def get_zoning_state_ref_count(self, initiator_wwn, target_wwn):
"""Zone management state check.
Performs state check for given I-T pair to return the current count of
active attach for the pair.
"""
# TODO(sk): ref count state management
count = 0
# check the state for I-T pair
return count
def add_connection(self, initiator_target_map):
"""Add connection control.
Adds connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.debug("Target List: %s", target_list)
                # get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Fabric Map after context lookup: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
i_t_map, True)
LOG.info(_LI("Final filtered map for fabric: %s"),
valid_i_t_map)
# Call driver to add connection control
self.driver.add_connection(fabric, valid_i_t_map)
LOG.info(_LI("Add Connection: Finished iterating "
"over all target list"))
except Exception as e:
msg = _("Failed adding connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def delete_connection(self, initiator_target_map):
"""Delete connection.
Updates/deletes connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.info(_LI("Delete connection Target List: %s"),
target_list)
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Delete connection Fabric Map from SAN "
"context: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
i_t_map, False)
LOG.info(_LI("Final filtered map for delete "
"connection: %s"), valid_i_t_map)
# Call driver to delete connection control
if len(valid_i_t_map) > 0:
self.driver.delete_connection(fabric, valid_i_t_map)
LOG.debug("Delete Connection - Finished iterating over all"
" target list")
except Exception as e:
msg = _("Failed removing connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def get_san_context(self, target_wwn_list):
"""SAN lookup for end devices.
Look up each SAN configured and return a map of SAN (fabric IP)
to list of
|
SNU-sunday/fisspy
|
fisspy/cm.py
|
Python
|
bsd-2-clause
| 19,011
| 0.011993
|
from __future__ import absolute_import, print_function, division
import numpy as np
from matplotlib.colors import LinearSegmentedColormap,ListedColormap
import sys
__author__ = "Juhyeong Kang "
__email__ = "jhkang@astro.snu.ac.kr"
def create_cdict(r, g, b):
i = np.linspace(0, 1, 256)
cdict = dict(
(name, list(zip(i, el / 255.0, el / 255.0)))
for el, name in [(r, 'red'), (g, 'green'), (b, 'blue')]
)
return cdict
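# Minimal usage sketch for create_cdict() (added for illustration; the arrays
# below are arbitrary 256-entry ramps, not values from this module). The
# returned dict plugs straight into LinearSegmentedColormap, exactly as the
# hac()/cac() helpers below do with their measured tables:
#
#   _r = np.linspace(0, 255, 256)
#   _g = np.zeros(256)
#   _b = np.linspace(255, 0, 256)
#   _demo_cmap = LinearSegmentedColormap('demo', create_cdict(_r, _g, _b))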
def hac(r=False):
hr=np.array([0, 0, 1, 2, 3, 4, 4, 6, 6, 7, 8, 9, 10, 10, 12, 12, 13, 14, 15,
16, 16, 18, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29,
30, 31, 31, 33, 33, 34, 35, 36, 37, 37, 39, 39, 40, 41, 42, 43,
43, 45, 45, 46, 47, 48, 49, 50, 51, 51, 52, 53, 54, 55, 56, 57,
58, 58, 59, 60, 61, 62, 63, 64, 64, 66, 66, 67, 68, 69, 70, 70,
72, 72, 73, 74, 75, 76, 76, 78, 78, 79, 80, 81, 82, 83, 84, 84,
86, 87, 88, 89, 91, 92, 93, 94, 96, 97, 98, 99, 100, 102, 102,
104, 105, 106, 107, 108, 110, 111, 112, 113, 115, 116, 117, 118,
120, 121, 121, 123, 124, 125, 126, 128, 129, 130, 131, 132, 134,
135, 136, 137, 139, 139, 141, 142, 143, 144, 145, 147, 148, 149,
150, 152, 153, 154, 155, 156, 158, 158, 160, 161, 162, 163, 165,
166, 167, 168, 169, 171, 172, 173, 174, 176, 176, 178, 178, 179,
179, 179, 180, 180, 180, 181, 181, 181, 182, 182, 182, 183, 183,
183, 184, 186, 187, 188, 189, 190, 191, 192, 193, 195, 196, 197,
198, 199, 200, 201, 202, 204, 205, 206, 207, 208, 209, 210, 212,
213, 214, 215, 216, 217, 218, 219, 221, 222, 223, 224, 225, 226,
227, 228, 230, 231, 232, 233, 234, 235, 237, 238, 239, 240, 241,
242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 255])
hg=np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8,
8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12, 12,
12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15, 15,
15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
18, 19, 19, 19, 20, 20, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26,
26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 33, 34, 36, 38, 39,
41, 43, 44, 46, 47, 49, 51, 53, 54, 56, 58, 59, 61, 62, 64, 66,
67, 69, 71, 73, 74, 76, 77, 79, 81, 82, 84, 86, 88, 89, 91, 92,
94, 96, 97, 99, 101, 102, 104, 106, 107, 109, 110, 112, 114, 116,
117, 119, 121, 122, 124, 125, 127, 129, 130, 132, 134, 136, 137,
138, 140, 142, 144, 145, 147, 149, 150, 152, 153, 155, 157, 158,
160, 162, 164, 165, 166, 168, 170, 172, 173, 175, 177, 179, 180,
181, 183, 185, 187, 188, 190, 192, 193, 195, 196, 198, 200, 201,
203, 205, 207, 208, 210, 211, 213, 215, 216, 218, 220, 221, 223,
225, 226, 228, 229, 231, 233, 235, 236, 238, 240, 241, 243, 244,
246, 248, 250, 251, 253, 255])
hb=np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5,
5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10,
11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15,
15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 19, 19, 19,
19, 20, 20, 20, 20, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28, 28,
28, 28, 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31, 32, 32, 32,
32, 33, 33, 33, 33, 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 36,
37, 37, 37, 38, 38, 38, 38, 39, 39, 39, 39, 40, 40, 40, 41, 41,
41, 41, 42, 42, 42, 42, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45,
45, 46, 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 49, 50, 51, 52,
53, 54, 55, 56, 57, 59, 62, 65, 68, 71, 74, 78, 81, 84, 87, 90,
93, 96, 99, 102, 105, 108, 111, 114, 117, 120, 123, 126, 130,
133, 136, 138, 141, 144, 148, 151, 154, 157, 160, 163, 166, 169,
172, 175, 178, 181, 184, 187, 190, 193, 196, 199, 203, 206, 209,
212, 215, 217, 221, 224, 227, 230, 233, 236, 239, 242, 245, 248,
251, 255])
hadic=create_cdict(hr,hg,hb)
hardic=create_cdict(hr[::-1],hg[::-1],hb[::-1])
if r:
return LinearSegmentedColormap('mytables',hardic)
else:
return LinearSegmentedColormap('mytables',hadic)
def cac(r=False):
cr=np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11,
11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15, 16,
16, 16, 17, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20,
21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 24, 24, 24, 24, 25, 25,
25, 26, 26, 26, 26, 27, 27, 27, 28, 28, 30, 31, 32, 34, 35, 36,
37, 39, 40, 41, 42, 43, 45, 46, 47, 49, 50, 51, 53, 53, 55, 56,
57, 59, 60, 61, 63, 64, 65, 67, 67, 69, 70, 71, 73, 74, 75, 76,
78, 79, 80, 81, 83, 84, 85, 86, 88, 89, 90, 92, 92, 94, 95, 96,
98, 99, 100, 102, 103, 104, 106, 106, 108, 109, 110, 112, 113,
114, 115, 117, 118, 119, 120, 122, 123, 124, 125, 127, 128, 129,
130, 130, 132, 133, 133, 135, 136, 136, 138, 138, 139, 141, 141,
142, 144, 146, 148, 149, 151, 153, 155, 157, 158, 160, 162, 164,
166, 167, 169, 171, 172, 174, 176, 178, 180, 181, 183, 185, 187,
189, 190, 192, 194, 196, 198, 199, 201, 203, 204, 206, 208, 210,
212, 213, 215, 217, 219, 221, 222, 224, 226, 228, 230, 232, 233,
235, 236, 238, 240, 242, 244, 245, 247, 249, 251, 253, 255])
cg=np.array([0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10, 11,
12, 12, 13, 13, 14, 15, 15, 16, 16, 17, 18, 18, 19, 19, 20, 21,
21, 22, 22, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 30, 30,
31, 31, 32, 33, 33, 34, 34, 35, 36, 36, 37, 37, 38, 39, 39, 40,
40, 41, 42, 42, 43, 43, 44, 45, 45, 46, 46, 47, 48, 48, 49, 49,
50, 51, 51, 52, 52, 53, 54, 54, 55, 55, 56, 57, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78, 78, 79, 81, 82, 83, 84, 86, 87, 88, 90, 91, 92, 94,
95, 96, 97, 99, 100, 101, 103, 104, 105, 107, 108, 109, 110, 112,
             113, 114, 116, 117, 118, 120, 121, 122, 124, 125, 126, 127, 129,
             130, 131, 133, 134, 135, 137, 138, 139, 140, 142, 143, 144, 146,
             147, 148, 150, 151, 152, 153, 155, 156, 157, 159, 160, 161, 162,
             164, 165, 166, 168, 169, 170, 172, 173, 174, 175, 177, 178, 179,
             181, 182, 183, 185, 186, 187, 188, 190, 191, 192, 194, 195, 196,
197, 199, 200, 201, 203, 204, 205, 207, 208, 209, 210, 212, 213,
214, 216, 217, 218, 220, 221, 222, 223, 225, 226, 227, 229, 230,
231, 232, 234, 235, 236, 238, 239, 240, 242, 243, 244, 245, 247,
248, 249, 251, 252, 253, 255])
cb=np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5,
5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10,
10, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14,
15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 19, 19,
19, 19, 20, 20, 20, 20, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23,
23, 24, 24, 24, 25, 25, 25, 25, 26,
|
s-maj/integrations-core
|
postgres/check.py
|
Python
|
bsd-3-clause
| 31,953
| 0.005164
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""PostgreSQL check
Collects database-wide metrics and, optionally, per-relation and custom metrics.
"""
# stdlib
import socket
# 3rd party
try:
import psycopg2
except ImportError:
psycopg2 = None
import pg8000
# project
from checks import AgentCheck, CheckException
from config import _is_affirmative
MAX_CUSTOM_RESULTS = 100
TABLE_COUNT_LIMIT = 200
def psycopg2_connect(*args, **kwargs):
if 'ssl' in kwargs:
del kwargs['ssl']
if 'unix_sock' in kwargs:
kwargs['host'] = kwargs['unix_sock']
del kwargs['unix_sock']
return psycopg2.connect(*args, **kwargs)
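# Illustrative call (hypothetical connection values): pg8000-style kwargs such
# as 'ssl' and 'unix_sock' are rewritten above so the same settings dict can
# drive psycopg2, e.g.
#
#   psycopg2_connect(database='dogs', user='datadog',
#                    unix_sock='/var/run/postgresql', ssl=False)
#   # behaves like
#   psycopg2.connect(database='dogs', user='datadog',
#                    host='/var/run/postgresql')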
class ShouldRestartException(Exception):
pass
class PostgreSql(AgentCheck):
"""Collects per-database, and optionally per-relation metrics, custom metrics
"""
SOURCE_TYPE_NAME = 'postgresql'
RATE = AgentCheck.rate
GAUGE = AgentCheck.gauge
MONOTONIC = AgentCheck.monotonic_count
SERVICE_CHECK_NAME = 'postgres.can_connect'
# turning columns into tags
DB_METRICS = {
'descriptors': [
('datname', 'db')
],
'metrics': {},
'query': """
SELECT datname,
%s
FROM pg_stat_database
WHERE datname not ilike 'template%%'
AND datname not ilike 'rdsadmin'
AND datname not ilike 'postgres'
""",
'relation': False,
}
# Copy of the previous DB_METRICS, _including_ the default 'postgres' database
    DB_METRICS_WITH_DEFAULT = {
'descriptors': [
('datname', 'db')
],
'metrics': {},
'query': """
SELECT datname,
%s
FROM pg_stat_database
WHERE datname not ilike 'template%%'
        AND datname not ilike 'rdsadmin'
""",
'relation': False,
}
COMMON_METRICS = {
'numbackends' : ('postgresql.connections', GAUGE),
'xact_commit' : ('postgresql.commits', RATE),
'xact_rollback' : ('postgresql.rollbacks', RATE),
'blks_read' : ('postgresql.disk_read', RATE),
'blks_hit' : ('postgresql.buffer_hit', RATE),
'tup_returned' : ('postgresql.rows_returned', RATE),
'tup_fetched' : ('postgresql.rows_fetched', RATE),
'tup_inserted' : ('postgresql.rows_inserted', RATE),
'tup_updated' : ('postgresql.rows_updated', RATE),
'tup_deleted' : ('postgresql.rows_deleted', RATE),
}
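    # Note added for clarity (hedged; based on how the query templates above
    # are written): the keys of a 'metrics' dict are column expressions that
    # get joined into the '%s' of the matching 'query', so DB_METRICS combined
    # with COMMON_METRICS yields SQL along the lines of
    #   SELECT datname, numbackends, xact_commit, ... FROM pg_stat_database ...
    # and each returned column is reported under the metric name/type it maps to.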
DATABASE_SIZE_METRICS = {
'pg_database_size(datname) as pg_database_size' : ('postgresql.database_size', GAUGE),
}
NEWER_92_METRICS = {
'deadlocks' : ('postgresql.deadlocks', RATE),
'temp_bytes' : ('postgresql.temp_bytes', RATE),
'temp_files' : ('postgresql.temp_files', RATE),
}
BGW_METRICS = {
'descriptors': [],
'metrics': {},
'query': "select %s FROM pg_stat_bgwriter",
'relation': False,
}
COMMON_BGW_METRICS = {
'checkpoints_timed' : ('postgresql.bgwriter.checkpoints_timed', MONOTONIC),
'checkpoints_req' : ('postgresql.bgwriter.checkpoints_requested', MONOTONIC),
'buffers_checkpoint' : ('postgresql.bgwriter.buffers_checkpoint', MONOTONIC),
'buffers_clean' : ('postgresql.bgwriter.buffers_clean', MONOTONIC),
'maxwritten_clean' : ('postgresql.bgwriter.maxwritten_clean', MONOTONIC),
'buffers_backend' : ('postgresql.bgwriter.buffers_backend', MONOTONIC),
'buffers_alloc' : ('postgresql.bgwriter.buffers_alloc', MONOTONIC),
}
NEWER_91_BGW_METRICS = {
'buffers_backend_fsync': ('postgresql.bgwriter.buffers_backend_fsync', MONOTONIC),
}
NEWER_92_BGW_METRICS = {
'checkpoint_write_time': ('postgresql.bgwriter.write_time', MONOTONIC),
'checkpoint_sync_time' : ('postgresql.bgwriter.sync_time', MONOTONIC),
}
ARCHIVER_METRICS = {
'descriptors': [],
'metrics': {},
'query': "select %s FROM pg_stat_archiver",
'relation': False,
}
COMMON_ARCHIVER_METRICS = {
'archived_count' : ('postgresql.archiver.archived_count', MONOTONIC),
'failed_count' : ('postgresql.archiver.failed_count', MONOTONIC),
}
LOCK_METRICS = {
'descriptors': [
('mode', 'lock_mode'),
('relname', 'table'),
],
'metrics': {
'lock_count' : ('postgresql.locks', GAUGE),
},
'query': """
SELECT mode,
pc.relname,
count(*) AS %s
FROM pg_locks l
JOIN pg_class pc ON (l.relation = pc.oid)
WHERE l.mode IS NOT NULL
AND pc.relname NOT LIKE 'pg_%%'
GROUP BY pc.relname, mode""",
'relation': False,
}
REL_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema'),
],
'metrics': {
'seq_scan' : ('postgresql.seq_scans', RATE),
'seq_tup_read' : ('postgresql.seq_rows_read', RATE),
'idx_scan' : ('postgresql.index_scans', RATE),
'idx_tup_fetch' : ('postgresql.index_rows_fetched', RATE),
'n_tup_ins' : ('postgresql.rows_inserted', RATE),
'n_tup_upd' : ('postgresql.rows_updated', RATE),
'n_tup_del' : ('postgresql.rows_deleted', RATE),
'n_tup_hot_upd' : ('postgresql.rows_hot_updated', RATE),
'n_live_tup' : ('postgresql.live_rows', GAUGE),
'n_dead_tup' : ('postgresql.dead_rows', GAUGE),
},
'query': """
SELECT relname,schemaname,%s
FROM pg_stat_user_tables
WHERE relname = ANY(array[%s])""",
'relation': True,
}
IDX_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema'),
('indexrelname', 'index')
],
'metrics': {
'idx_scan' : ('postgresql.index_scans', RATE),
'idx_tup_read' : ('postgresql.index_rows_read', RATE),
'idx_tup_fetch' : ('postgresql.index_rows_fetched', RATE),
},
'query': """
SELECT relname,
schemaname,
indexrelname,
%s
FROM pg_stat_user_indexes
WHERE relname = ANY(array[%s])""",
'relation': True,
}
SIZE_METRICS = {
'descriptors': [
('relname', 'table'),
],
'metrics': {
'pg_table_size(C.oid) as table_size' : ('postgresql.table_size', GAUGE),
'pg_indexes_size(C.oid) as index_size' : ('postgresql.index_size', GAUGE),
'pg_total_relation_size(C.oid) as total_size' : ('postgresql.total_size', GAUGE),
},
'relation': True,
'query': """
SELECT
relname,
%s
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema') AND
nspname !~ '^pg_toast' AND
relkind IN ('r') AND
relname = ANY(array[%s])"""
}
COUNT_METRICS = {
'descriptors': [
('schemaname', 'schema')
],
'metrics': {
'pg_stat_user_tables': ('postgresql.table.count', GAUGE),
},
'relation': False,
'query': """
SELECT schemaname, count(*) FROM
(
SELECT schemaname
FROM %s
ORDER BY schemaname, relname
LIMIT {table_count_limit}
) AS subquery GROUP BY schemaname
""".format(table_count_limit=TABLE_COUNT_LIMIT)
}
REPLICATION_METRICS_9_1 = {
'CASE WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0 ELSE GREATEST (0, EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())) END': ('postgresql.replication_delay', GAUGE),
}
REPLICATION_METRICS_9_2 = {
# postgres.replication_delay_bytes is deprecated and will be removed in a future version. Please use postgresql.replication_delay_bytes instead.
'abs(pg_xlog_location_diff(pg_last_xlog_receive_location(), pg_last_xlog_replay_location())) AS replication_delay_bytes_dup': ('postgres.replication_delay_bytes', GAUGE),
|
nataddrho/DigiCue-USB
|
Python3/src/bgapi.py
|
Python
|
mit
| 13,154
| 0.002585
|
#!/usr/bin/env python
# Nathan Rhoades 4/13/2021
import platform
import math
import bglib
import serial
import time
import datetime
import optparse
import signal
import sys
import struct
import importlib
class Bluegiga():
def __init__(self, dcb, ser, debugprint=False):
self.dcb = dcb
self.ser = ser
self.debugprint = debugprint
while True:
importlib.reload(bglib)
self.initialize()
self.main_loop()
def initialize(self):
self.ble = 0
self.peripheral_list = []
self.connection_handle = 0
self.att_handle_start = 0
self.att_handle_end = 0
self.att_handle_data = 0
self.att_handle_data_ccc = 0
self.crp_link_ready = False
self.pending_write = False
self.disconnected = 0
self.connection_type = None
self.connection_count = None
self.connection_count_last = None
self.mcu_data = None
self.init_sent = False
self.read_data = ""
self.remoteAddressString = ""
self.my_timeout = None
self.uuid_service = [0x28, 0x00] # 0x2800
self.uuid_client_characteristic_configuration = [0x29, 0x02] # 0x2902
self.uuid_crp_service = [
0x0b,
0xd5,
0x16,
0x66,
0xe7,
0xcb,
0x46,
0x9b,
0x8e,
0x4d,
0x27,
0x42,
0xf1,
0xba,
0x77,
0xcc]
self.uuid_crp_characteristic = [
0xe7,
0xad,
0xd7,
0x80,
0xb0,
0x42,
0x48,
0x76,
0xaa,
0xe1,
0x11,
0x28,
0x55,
0x35,
0x3c,
0xc1]
self.STATE_STANDBY = 0
self.STATE_CONNECTING = 1
self.STATE_FINDING_SERVICES = 2
self.STATE_FINDING_ATTRIBUTES = 3
self.STATE_LISTENING_DATA = 4
self.state = self.STATE_STANDBY
def dprint(self, prnt):
if self.debugprint:
print("%s: %s" % (datetime.datetime.now().time(), prnt))
# handler to notify of an API parser timeout condition
def my_timeout(self, sender, args):
# might want to try the following lines to reset, though it probably
# wouldn't work at this point if it's already timed out:
#ble.send_command(ser, self.ble.ble_cmd_system_reset(0))
#ble.check_activity(ser, 1)
self.dprint(
"BGAPI parser timed out. Make sure the BLE device is in a known/idle state.")
# gap_scan_response handler
def my_ble_evt_gap_scan_response(self, sender, args):
# pull all advertised service info from ad packet
ad_services = []
this_field = []
bytes_left = 0
for b in args['data']:
if bytes_left == 0:
bytes_left = b
this_field = []
else:
this_field.append(b)
bytes_left = bytes_left - 1
if bytes_left == 0:
# partial or complete list of 16-bit UUIDs
if this_field[0] == 0x02 or this_field[0] == 0x03:
for i in range(int((len(this_field) - 1) / 2)):
ad_services.append(
this_field[-1 - i * 2: -3 - i * 2: -1])
# partial or complete list of 32-bit UUIDs
if this_field[0] == 0x04 or this_field[0] == 0x05:
for i in range(int((len(this_field) - 1) / 4)):
ad_services.append(
this_field[-1 - i * 4: -5 - i * 4: -1])
# partial or complete list of 128-bit UUIDs
if this_field[0] == 0x06 or this_field[0] == 0x07:
for i in range(int((len(this_field) - 1) / 16)):
ad_services.append(
this_field[-1 - i * 16: -17 - i * 16: -1])
# check for packets
if self.uuid_crp_service in ad_services:
# Attempt to connect for configuration reception
if not args['sender'] in self.peripheral_list:
self.peripheral_list.append(args['sender'])
# connect to this device
self.ble.send_command(self.ser, self.ble.ble_cmd_gap_connect_direct(
args['sender'], args['address_type'], 0x06, 0x10, 0x100, 0))
self.ble.check_activity(self.ser, 1)
self.state = self.STATE_CONNECTING
else:
self.dcb.receive(args['sender'], args['data'])
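    # Worked example for the parser above (hypothetical advertisement bytes,
    # added for illustration): the loop walks length-prefixed AD structures,
    # so data == [0x03, 0x03, 0x0F, 0x18] is one field [0x03, 0x0F, 0x18],
    # i.e. a "complete list of 16-bit UUIDs", and yields
    # ad_services == [[0x18, 0x0F]] (bytes reversed by the slice).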
# connection_status handler
def my_ble_evt_connection_status(self, sender, args):
if (args['flags'] & 0x05) == 0x05:
# connected, now perform service discovery
self.remoteAddressString = ':'.join(
                ['%02X' % b for b in args['address'][::-1]])
self.dprint("Connected to %s" % self.remoteAddressString)
self.connection_handle = args['connection']
self.ble.send_command(self.ser, self.ble.ble_cmd_attclient_read_by_group_type(
args['connection'], 0x0001, 0xFFFF, list(reversed(self.uuid_service))))
self.ble.check_activity(self.ser, 1)
self.state = self.STATE_FINDING_SERVICES
# attclient_group_found handler
    def my_ble_evt_attclient_group_found(self, sender, args):
# found "service" attribute groups (UUID=0x2800), check for CRP service
#if args['uuid'] == list(reversed(self.uuid_crp_service)):
if args['uuid'] == bytearray(self.uuid_crp_service)[::-1]:
self.dprint(
"Found attribute group for CRP service: start=%d, end=%d" %
(args['start'], args['end']))
self.att_handle_start = args['start']
self.att_handle_end = args['end']
# attclient_find_information_found handler
def my_ble_evt_attclient_find_information_found(self, sender, args):
# check for CRP data characteristic
#if args['uuid'] == list(reversed(self.uuid_crp_characteristic)):
if args['uuid'] == bytearray(self.uuid_crp_characteristic)[::-1]:
self.dprint(
"Found CRP data attribute: handle=%d" %
args['chrhandle'])
self.att_handle_data = args['chrhandle']
# check for subsequent client characteristic configuration
#elif args['uuid'] == list(reversed(self.uuid_client_characteristic_configuration)) and self.att_handle_data > 0:
elif args['uuid'] == bytearray(self.uuid_client_characteristic_configuration)[::-1] and self.att_handle_data > 0:
self.dprint(
"Found CRP client characteristic config attribute w/UUID=0x2902: handle=%d" %
args['chrhandle'])
self.att_handle_data_ccc = args['chrhandle']
# attclient_procedure_completed handler
def my_ble_evt_attclient_procedure_completed(self, sender, args):
# check if we just finished searching for services
if self.state == self.STATE_FINDING_SERVICES:
if self.att_handle_end > 0:
self.dprint("Found CRP service")
# found the Cable Replacement service, so now search for the
# attributes inside
self.state = self.STATE_FINDING_ATTRIBUTES
self.ble.send_command(self.ser, self.ble.ble_cmd_attclient_find_information(
self.connection_handle, self.att_handle_start, self.att_handle_end))
self.ble.check_activity(self.ser, 1)
else:
self.dprint("Could not find CRP service")
# check if we just finished searching for attributes within the CRP
# service
elif self.state == self.STATE_FINDING_ATTRIBUTES:
if self.att_handle_data_ccc > 0:
self.dprint("
|
safwanrahman/linuxdesh
|
kitsune/users/migrations/0006_add_migration_user.py
|
Python
|
bsd-3-clause
| 7,501
| 0.007466
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth.hashers import make_password
class Migration(DataMigration):
def forwards(self, orm):
"""Adds a user to be used for migrations."""
# ``make_password(None)`` makes an unusable password.
orm['auth.User'].objects.create(
username='migrations',
password=make_password(None))
def backwards(self, orm):
"""Removes the user to be used for migrations."""
orm['auth.User'].objects.get(username='migrations').delete()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
|
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.deactivation': {
'Meta': {'object_name': 'Deactivation'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deactivations'", 'to': u"orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
},
u'users.emailchange': {
'Meta': {'object_name': 'EmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'irc_handle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'locale': ('kitsune.sumo.models.LocaleField', [], {'default': "'en-US'", 'max_length': '7'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'public_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'users.registrationprofile': {
'Meta': {'object_name': 'RegistrationProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'users.setting': {
'Meta': {'unique_together': "(('user', 'name'),)", 'object_name': 'Setting'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
}
}
complete_apps = ['users']
symmetrical = True
|
billyhunt/osf.io
|
website/addons/base/serializer.py
|
Python
|
apache-2.0
| 7,369
| 0.000679
|
import abc
from framework.auth.decorators import collect_auth
from website.util import api_url_for, web_url_for
class AddonSerializer(object):
__metaclass__ = abc.ABCMeta
# TODO take addon_node_settings, addon_user_settings
def __init__(self, node_settings=None, user_settings=None):
self.node_settings = node_settings
self.user_settings = user_settings
@abc.abstractproperty
def addon_short_name(self):
pass
@abc.abstractproperty
def addon_serialized_urls(self):
pass
@abc.abstractproperty
def serialized_urls(self):
pass
@abc.abstractproperty
def user_is_owner(self):
pass
@abc.abstractproperty
def credentials_owner(self):
pass
@property
def serialized_node_settings(self):
result = {
'nodeHasAuth': self.node_settings.has_auth,
'userIsOwner': self.user_is_owner,
'urls': self.serialized_urls,
}
if self.user_settings:
result['userHasAuth'] = self.user_settings.has_auth
else:
result['userHasAuth'] = False
if self.node_settings.has_auth:
owner = self.credentials_owner
if owner:
result['urls']['owner'] = web_url_for('profile_view_id',
uid=owner._primary_key)
result['ownerName'] = owner.fullname
return result
@property
def serialized_user_settings(self):
return {}
class OAuthAddonSerializer(AddonSerializer):
@property
def credentials_owner(self):
return self.user_settings.owner if self.user_settings else None
@property
def user_is_owner(self):
if self.user_settings is None or self.node_settings is None:
return False
user_accounts = self.user_settings.external_accounts
return bool(
(
self.node_settings.has_auth and
(self.node_settings.external_account in user_accounts)
) or len(user_accounts)
)
@property
def serialized_urls(self):
ret = self.addon_serialized_urls
# Make sure developer returns set of needed urls
for url in self.REQUIRED_URLS:
msg = "addon_serialized_urls must include key '{0}'".format(url)
assert url in ret, msg
ret.update({'settings': web_url_for('user_addons')})
return ret
    @property
def serialized_accounts(self):
return [
self.serialize_account(each)
for each in self.user_settings.external_accounts
]
@property
def serialized_user_settings(self):
        retval = super(OAuthAddonSerializer, self).serialized_user_settings
retval['accounts'] = []
if self.user_settings:
retval['accounts'] = self.serialized_accounts
return retval
def serialize_account(self, external_account):
if external_account is None:
return None
return {
'id': external_account._id,
'provider_id': external_account.provider_id,
'provider_name': external_account.provider_name,
'provider_short_name': external_account.provider,
'display_name': external_account.display_name,
'profile_url': external_account.profile_url,
'nodes': [
self.serialize_granted_node(node)
for node in self.user_settings.get_attached_nodes(
external_account=external_account
)
],
}
@collect_auth
def serialize_granted_node(self, node, auth):
node_settings = node.get_addon(
self.user_settings.oauth_provider.short_name
)
serializer = node_settings.serializer(node_settings=node_settings)
urls = serializer.addon_serialized_urls
urls['view'] = node.url
return {
'id': node._id,
'title': node.title if node.can_view(auth) else None,
'urls': urls,
}
class StorageAddonSerializer(OAuthAddonSerializer):
REQUIRED_URLS = ('auth', 'importAuth', 'folders', 'files', 'config', 'deauthorize', 'accounts')
@abc.abstractmethod
    def credentials_are_valid(self, user_settings, client=None):
pass
@abc.abstractmethod
def serialized_folder(self, node_settings):
pass
def serialize_settings(self, node_settings, current_user, client=None):
user_settings = node_settings.user_settings
self.node_settings = node_settings
current_user_settings = current_user.get_addon(self.addon_short_name)
user_is_owner = user_settings is not None and user_settings.owner == current_user
valid_credentials = self.credentials_are_valid(user_settings, client)
result = {
'userIsOwner': user_is_owner,
'nodeHasAuth': node_settings.has_auth,
'urls': self.serialized_urls,
'validCredentials': valid_credentials,
'userHasAuth': current_user_settings is not None and current_user_settings.has_auth,
}
if node_settings.has_auth:
# Add owner's profile URL
result['urls']['owner'] = web_url_for(
'profile_view_id',
uid=user_settings.owner._id
)
result['ownerName'] = user_settings.owner.fullname
# Show available folders
if node_settings.folder_id is None:
result['folder'] = {'name': None, 'path': None}
elif valid_credentials:
result['folder'] = self.serialized_folder(node_settings)
return result
class CitationsAddonSerializer(OAuthAddonSerializer):
REQUIRED_URLS = ('importAuth', 'folders', 'config', 'deauthorize', 'accounts')
@property
def serialized_urls(self):
external_account = self.node_settings.external_account
ret = {
'auth': api_url_for('oauth_connect',
service_name=self.node_settings.provider_name),
'files': self.node_settings.owner.url,
}
if external_account and external_account.profile_url:
ret['owner'] = external_account.profile_url
ret.update(super(CitationsAddonSerializer, self).serialized_urls)
return ret
@property
def serialized_node_settings(self):
result = super(CitationsAddonSerializer, self).serialized_node_settings
result['folder'] = {
'name': self.node_settings.selected_folder_name
}
return result
@property
def user_is_owner(self):
if self.user_settings is None:
return False
user_accounts = self.user_settings.external_accounts
return bool(
(
self.node_settings.has_auth and
(self.node_settings.external_account in user_accounts)
) or len(user_accounts)
)
@property
def credentials_owner(self):
return self.node_settings.user_settings.owner
@abc.abstractmethod
def serialize_folder(self, folder):
pass
def serialize_citation(self, citation):
return {
'csl': citation,
'kind': 'file',
'id': citation['id'],
}
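# Hypothetical sketch (not part of this module): a concrete citations-style
# serializer only has to supply the abstract pieces declared above; every name
# and URL below is made up for illustration.
#
# class FakeCitationsSerializer(CitationsAddonSerializer):
#     addon_short_name = 'fakecitations'
#
#     @property
#     def addon_serialized_urls(self):
#         return {
#             'importAuth': '/api/v1/fake/import-auth/',
#             'folders': '/api/v1/fake/folders/',
#             'config': '/api/v1/fake/config/',
#             'deauthorize': '/api/v1/fake/deauthorize/',
#             'accounts': '/api/v1/fake/accounts/',
#         }
#
#     def serialize_folder(self, folder):
#         return {'name': folder['name'], 'id': folder['id'], 'kind': 'folder'}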
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/mixture/gaussian_mixture.py
|
Python
|
mit
| 27,687
| 0
|
"""Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import BaseMixture, _check_shape
from ..externals.six.moves import zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
"""Check the user provided 'weights'.
Parameters
----------
weights : array-like, shape (n_components,)
The proportions of components of each mixture.
n_components : int
Number of components.
Returns
-------
weights : array, shape (n_components,)
"""
weights = check_array(weights, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(weights, (n_components,), 'weights')
# check range
if (any(np.less(weights, 0.)) or
any(np.greater(weights, 1.))):
raise ValueError("The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
                         % (np.max(weights), np.min(weights)))
# check normalization
if not np.allclose(np.abs(1. - np.sum(weights)), 0.):
raise ValueError("The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f" % np.sum(weights))
return weights
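# Minimal illustration of the checks above (example values, not library code):
# _check_weights(np.array([0.3, 0.7]), 2) passes, while np.array([0.5, 0.4])
# fails the normalization test and np.array([-0.1, 1.1]) fails the range test.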
def _check_means(means, n_components, n_features):
"""Validate the provided 'means'.
Parameters
----------
means : array-like, shape (n_components, n_features)
The centers of the current components.
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
means : array, (n_components, n_features)
"""
means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(means, (n_components, n_features), 'means')
return means
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be "
"positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T) and
np.all(linalg.eigvalsh(precision) > 0.)):
raise ValueError("'%s precision' should be symmetric, "
"positive-definite" % covariance_type)
def _check_precisions_full(precisions, covariance_type):
"""Check the precision matrices are symmetric and positive-definite."""
for prec in precisions:
_check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
"""Validate user provided precisions.
Parameters
----------
precisions : array-like,
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : string
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
precisions : array
"""
precisions = check_array(precisions, dtype=[np.float64, np.float32],
ensure_2d=False,
allow_nd=covariance_type == 'full')
precisions_shape = {'full': (n_components, n_features, n_features),
'tied': (n_features, n_features),
'diag': (n_components, n_features),
'spherical': (n_components,)}
_check_shape(precisions, precisions_shape[covariance_type],
'%s precision' % covariance_type)
_check_precisions = {'full': _check_precisions_full,
'tied': _check_precision_matrix,
'diag': _check_precision_positivity,
'spherical': _check_precision_positivity}
_check_precisions[covariance_type](precisions, covariance_type)
return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
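# The estimator above implements the standard EM M-step update (written out
# here only as a reading aid): with responsibilities r_ik and nk = sum_i r_ik,
#   Sigma_k = (1 / nk) * sum_i r_ik * (x_i - mu_k)(x_i - mu_k)^T + reg_covar * I
# which is the np.dot(resp[:, k] * diff.T, diff) / nk[k] line plus the
# regularized diagonal added via covariances[k].flat[::n_features + 1].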
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= nk.sum()
covariance.flat[::len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
    resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
    resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
The variance values of each components.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk,
means, reg_covar).mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
|
X : array-like, shape (n_samples, n_features)
The input data array.
    resp : array-like, shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 's
|
andrewjrobinson/SportsReview
|
sportsreview/support/qtlib/pyside.py
|
Python
|
lgpl-3.0
| 1,959
| 0.009188
|
# /*******************************************************************************
# * (c) Andrew Robinson (andrewjrobinson@gmail.com) 2014 *
# * *
# * This file is part of SportsReview. *
# * *
# * SportsReview is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License as published *
# * by the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * SportsReview is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public License *
# * along with SportsReview. If not, see <http://www.gnu.org/licenses/>. *
# *                                                                             *
# *******************************************************************************/
'''
Standardisation of PySide symbols
Created on 19/04/2014
@author: Andrew Robinson
'''
__all__ = ['QtCore', 'QtGui', 'Slot', 'Signal', '__implementation__', '__version__']
# from Py
import PySide
from PySide import QtGui
from PySide import QtCore
from PySide.QtCore import Slot
from PySide.QtCore import Signal
__implementation__ = 'PySide'
__version__ = PySide.__version__
|
initOS/odoo-addons
|
website_product_gross_net/__openerp__.py
|
Python
|
agpl-3.0
| 468
| 0
|
{
    'name': 'Website Gross/Net Price (B2B/B2C)',
'summary': 'Website Product Gross/Net Price (B2B/B2C)',
'category': 'Website',
'version': '1.0',
"sequence": 10,
'website': 'http://wt-io-it.at',
'author': 'WT-IO-IT GmbH',
'depends': ['website_sale_options', 'account_product_gross_net'],
'data': [
'views/templates.xml',
],
'demo': [
],
'license': 'AGPL-3',
    'application': True,
'installable': True,
}
|
fengxiaoiie/volatility
|
volatility/plugins/overlays/windows/win2003.py
|
Python
|
gpl-2.0
| 6,956
| 0.010926
|
# Volatility
# Copyright (c) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Jamie Levy (Gleeda)
@license: GNU General Public License 2.0
@contact: jamie@memoryanalysis.net
This file provides support for Windows 2003.
"""
#pylint: disable-msg=C0111
import volatility.plugins.overlays.windows.windows as windows
import volatility.debug as debug #pylint: disable-msg=W0611
import volatility.obj as obj
class Win2003x86Hiber(obj.ProfileModification):
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '32bit',
'major': lambda x: x == 5,
'minor': lambda x: x == 2}
def modification(self, profile):
overlay = {'VOLATILITY_MAGIC': [ None, {
'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x2)]],
'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0xff)]],
}]}
profile.merge_overlay(overlay)
class Win2003x64Hiber(obj.ProfileModification):
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '64bit',
'major': lambda x: x == 5,
'minor': lambda x: x == 2}
def modification(self, profile):
overlay = {'VOLATILITY_MAGIC': [ None, {
'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x2)]],
'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0x7f)]],
}]}
profile.merge_overlay(overlay)
class Win2003KDBG(windows.AbstractKDBGMod):
before = ['WindowsOverlay']
conditions = {'os': lambda x : x == 'windows',
'major': lambda x: x == 5,
'minor': lambda x: x >= 2}
kdbgsize = 0x318
class Win2003SP0x86DTB(obj.ProfileModification):
# Make sure we apply after the normal Win2003 DTB
before = ['WindowsOverlay', 'Win2003x86DTB']
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '32bit',
'major': lambda x: x == 5,
'minor': lambda x: x == 2,
'build': lambda x: x == 3789}
    def modification(self, profile):
overlay = {'VOLATILITY_MAGIC': [ None, {
'DTBSignature': [ None, ['VolatilityMagic', dict(value = "\x03\x00\x1b\x00")]]}
]}
profile.merge_overlay(overlay)
class Win2003x86DTB(obj.ProfileModification):
before = ['WindowsOverlay']
conditions = {'os': lambda x : x == 'windows',
'memory_model': lambda x: x == '32bit',
'major': lambda x: x == 5,
|
'minor': lambda x: x == 2}
def modification(self, profile):
overlay = {'VOLATILITY_MAGIC': [ None, {
'DTBSignature': [ None, ['VolatilityMagic', dict(value = "\x03\x00\x1e\x00")]]}
]}
profile.merge_overlay(overlay)
class Win2003x64DTB(obj.ProfileModification):
before = ['WindowsOverlay', 'Windows64Overlay']
conditions = {'os': lambda x : x == 'windows',
'memory_model': lambda x: x == '64bit',
'major': lambda x: x == 5,
'minor': lambda x: x == 2}
def modification(self, profile):
overlay = {'VOLATILITY_MAGIC': [ None, {
'KPCR' : [ None, ['VolatilityKPCR', dict(configname = "KPCR")]],
'DTBSignature': [ None, ['VolatilityMagic', dict(value = "\x03\x00\x2e\x00")]]}
]}
profile.merge_overlay(overlay)
class EThreadCreateTime(obj.ProfileModification):
before = ['WindowsOverlay']
def check(self, profile):
m = profile.metadata
return (m.get('os', None) == 'windows' and
((m.get('major', 0) == 5 and m.get('minor', 0) >= 2) or
m.get('major', 0) >= 6) and
profile.__class__.__name__ != 'Win2003SP0x86')
def modification(self, profile):
overlay = {'_ETHREAD': [ None, {
'CreateTime' : [ None, ['WinTimeStamp', {}]]}
]}
profile.merge_overlay(overlay)
class Win2003SP0x86(obj.Profile):
""" A Profile for Windows 2003 SP0 x86 """
_md_os = 'windows'
_md_major = 5
_md_minor = 2
# FIXME: 2003's build numbers didn't differentiate between SP0 and SP1/2
# despite there being a large change. As such we fake a special build number
# for 2003 SP0 to help us differentiate it
_md_build = 3789
_md_memory_model = '32bit'
_md_vtype_module = 'volatility.plugins.overlays.windows.win2003_sp0_x86_vtypes'
class Win2003SP1x86(obj.Profile):
""" A Profile for Windows 2003 SP1 x86 """
_md_os = 'windows'
_md_major = 5
_md_minor = 2
_md_build = 3790
_md_memory_model = '32bit'
_md_vtype_module = 'volatility.plugins.overlays.windows.win2003_sp1_x86_vtypes'
class Win2003SP2x86(obj.Profile):
""" A Profile for Windows 2003 SP2 x86 """
_md_os = 'windows'
_md_major = 5
_md_minor = 2
# This is a fake build number. See the comment in Win2003SP0x86
_md_build = 3791
_md_memory_model = '32bit'
_md_vtype_module = 'volatility.plugins.overlays.windows.win2003_sp2_x86_vtypes'
class Win2003SP1x64(obj.Profile):
""" A Profile for Windows 2003 SP1 x64 """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 5
_md_minor = 2
_md_build = 3790
_md_vtype_module = 'volatility.plugins.overlays.windows.win2003_sp1_x64_vtypes'
class Win2003SP2x64(obj.Profile):
""" A Profile for Windows 2003 SP2 x64 """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 5
_md_minor = 2
# This is a fake build number. See the comment in Win2003SP0x86
_md_build = 3791
_md_vtype_module = 'volatility.plugins.overlays.windows.win2003_sp2_x64_vtypes'
class WinXPSP1x64(Win2003SP1x64):
""" A Profile for Windows XP SP1 x64 """
class WinXPSP2x64(Win2003SP2x64):
""" A Profile for Windows XP SP2 x64 """
|
endlessm/chromium-browser
|
tools/origin_trials/check_token.py
|
Python
|
bsd-3-clause
| 8,056
| 0.01142
|
#!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for validating and inspecting origin trial token
|
s
usage: check_token.py [-h] [--use-chrome-key |
--use-test-key |
--private-key-file KEY_FILE]
"base64-encoded token"
Run "check_token.py -h" for more help on
|
usage.
"""
from __future__ import print_function
import argparse
import base64
from datetime import datetime
import json
import os
import struct
import sys
import time
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(script_dir, 'third_party', 'ed25519'))
import ed25519
# Version is a 1-byte field at offset 0.
# - To support version-dependent formats, the version number must be the
#   first part of the token.
VERSION_OFFSET = 0
VERSION_SIZE = 1
# These constants define the Version 2 field sizes and offsets.
# Contents are: version|signature|payload length|payload
SIGNATURE_OFFSET = VERSION_OFFSET + VERSION_SIZE
SIGNATURE_SIZE = 64
PAYLOAD_LENGTH_OFFSET = SIGNATURE_OFFSET + SIGNATURE_SIZE
PAYLOAD_LENGTH_SIZE = 4
PAYLOAD_OFFSET = PAYLOAD_LENGTH_OFFSET + PAYLOAD_LENGTH_SIZE
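# Byte layout implied by the offsets above (illustrative summary only):
#   byte 0      : version (0x02 or 0x03)
#   bytes 1-64  : Ed25519 signature over (version | payload length | payload)
#   bytes 65-68 : big-endian uint32 payload length
#   bytes 69..  : UTF-8 JSON payload, e.g. {"origin": ..., "feature": ..., "expiry": ...}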
# This script supports Version 2 and Version 3 tokens.
VERSION2 = "\x02"
VERSION3 = "\x03"
# Chrome public key, used by default to validate signatures
# - Copied from chrome/common/origin_trials/chrome_origin_trial_policy.cc
CHROME_PUBLIC_KEY = [
0x7c, 0xc4, 0xb8, 0x9a, 0x93, 0xba, 0x6e, 0xe2, 0xd0, 0xfd, 0x03,
0x1d, 0xfb, 0x32, 0x66, 0xc7, 0x3b, 0x72, 0xfd, 0x54, 0x3a, 0x07,
0x51, 0x14, 0x66, 0xaa, 0x02, 0x53, 0x4e, 0x33, 0xa1, 0x15,
]
# Default key file, relative to script_dir.
DEFAULT_KEY_FILE = 'eftest.key'
class OverrideKeyFileAction(argparse.Action):
def __init__(self, option_strings, dest, **kwargs):
super(OverrideKeyFileAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, "use_chrome_key", None)
setattr(namespace, self.dest, values)
def main():
parser = argparse.ArgumentParser(
description="Inspect origin trial tokens")
parser.add_argument("token",
help="Token to be checked (must be Base64 encoded)")
key_group = parser.add_mutually_exclusive_group()
key_group.add_argument("--use-chrome-key",
help="Validate token using the real Chrome public key",
dest="use_chrome_key",
action="store_true")
key_group.add_argument("--use-test-key",
help="Validate token using the eftest.key",
dest="use_chrome_key",
action="store_false")
key_group.add_argument("--key-file",
help="Ed25519 private key file to validate the token",
dest="key_file",
action=OverrideKeyFileAction)
parser.set_defaults(use_chrome_key=False)
args = parser.parse_args()
# Figure out which public key to use: Chrome, test key (default option), or
# key file provided on command line.
public_key = None
private_key_file = None
if (args.use_chrome_key is None):
private_key_file = args.key_file
else:
if (args.use_chrome_key):
public_key = "".join(chr(x) for x in CHROME_PUBLIC_KEY)
else:
# Use the test key, relative to this script.
private_key_file = os.path.join(script_dir, DEFAULT_KEY_FILE)
# If not using the Chrome public key, extract the public key from either the
# test key file, or the private key file provided on the command line.
if public_key is None:
try:
key_file = open(os.path.expanduser(private_key_file), mode="rb")
except IOError as exc:
print("Unable to open key file: %s" % private_key_file)
print("(%s)" % exc)
sys.exit(1)
private_key = key_file.read(64)
# Validate that the key file read was a proper Ed25519 key -- running the
# publickey method on the first half of the key should return the second
# half.
if (len(private_key) < 64 or
ed25519.publickey(private_key[:32]) != private_key[32:]):
print("Unable to use the specified private key file.")
sys.exit(1)
public_key = private_key[32:]
try:
token_contents = base64.b64decode(args.token)
except TypeError as exc:
print("Error decoding the token (%s)" % exc)
sys.exit(1)
# Only version 2 and version 3 currently supported.
if (len(token_contents) < (VERSION_OFFSET + VERSION_SIZE)):
print("Token is malformed - too short.")
sys.exit(1)
version = token_contents[VERSION_OFFSET:(VERSION_OFFSET + VERSION_SIZE)]
# Convert the version string to a number
version_number = 0
for x in version:
version_number <<= 8
version_number += ord(x)
if (version != VERSION2 and version != VERSION3):
print("Token has wrong version: %d" % version_number)
sys.exit(1)
# Token must be large enough to contain a version, signature, and payload
# length.
minimum_token_length = PAYLOAD_LENGTH_OFFSET + PAYLOAD_LENGTH_SIZE
if (len(token_contents) < minimum_token_length):
print("Token is malformed - too short: %d bytes, minimum is %d" % \
(len(token_contents), minimum_token_length))
sys.exit(1)
# Extract the length of the signed data (Big-endian).
# (unpack returns a tuple).
payload_length = struct.unpack_from(">I", token_contents,
PAYLOAD_LENGTH_OFFSET)[0]
# Validate that the stated length matches the actual payload length.
actual_payload_length = len(token_contents) - PAYLOAD_OFFSET
if (payload_length != actual_payload_length):
print("Token is %d bytes, expected %d" % (actual_payload_length,
payload_length))
sys.exit(1)
# Extract the version-specific contents of the token.
# Contents are: version|signature|payload length|payload
signature = token_contents[SIGNATURE_OFFSET:PAYLOAD_LENGTH_OFFSET]
# The data which is covered by the signature is (version + length + payload).
signed_data = version + token_contents[PAYLOAD_LENGTH_OFFSET:]
# Validate the signature on the data.
try:
ed25519.checkvalid(signature, signed_data, public_key)
except Exception as exc:
print("Signature invalid (%s)" % exc)
sys.exit(1)
try:
payload = token_contents[PAYLOAD_OFFSET:].decode('utf-8')
except UnicodeError as exc:
print("Unable to decode token contents (%s)" % exc)
sys.exit(1)
try:
token_data = json.loads(payload)
except Exception as exc:
print("Unable to parse payload (%s)" % exc)
print("Payload: %s" % payload)
sys.exit(1)
print()
print("Token data: %s" % token_data)
print()
# Extract the required fields
for field in ["origin", "feature", "expiry"]:
if field not in token_data:
print("Token is missing required field: %s" % field)
sys.exit(1)
origin = token_data["origin"]
trial_name = token_data["feature"]
expiry = token_data["expiry"]
# Extract the optional fields
is_subdomain = token_data.get("isSubdomain")
is_third_party = token_data.get("isThirdParty")
if (is_third_party is not None and version != VERSION3):
print("isThirdParty flag can only be be set in Version 3 token.")
sys.exit(1)
# Output the token details
print("Token details:")
print(" Version: %s" % version_number)
print(" Origin: %s" % origin)
print(" Is Subdomain: %s" % is_subdomain)
if (version == VERSION3):
print(" Is Third Party: %s" % is_third_party)
print(" Feature: %s" % trial_name)
print(" Expiry: %d (%s UTC)" % (expiry, datetime.utcfromtimestamp(expiry)))
print(" Signature: %s" % ", ".join('0x%02x' % ord(x) for x in signature))
print(" Signature (Base64): %s" % base64.b64encode(signature))
print()
if __name__ == "__main__":
main()
|
xzturn/tensorflow
|
tensorflow/python/framework/op_callbacks_test.py
|
Python
|
apache-2.0
| 33,617
| 0.004194
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for op_callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import compat
# Keep all the hard-coded op type strings in one place so they are easy to
# change all at once in the face of any possible future op type name changes.
_ADD_OP = b"AddV2"
_ASSIGN_ADD_VARIABLE_OP = b"AssignAddVariableOp"
_CONSTANT_OP = b"Const"
_COS_OP = b"Cos"
_ENTER_OP = b"Enter"
_EXIT_OP = b"Exit"
_GREATER_OP = b"Greater"
_IDENTITY_OP = b"Identity"
_IF_OP = b"If"
_LESS_OP = b"Less"
_LOG_OP = b"Log"
_MERGE_OP = b"Merge"
_MATMUL_OP = b"MatMul"
_MUL_OP = b"Mul"
_NEXT_ITERATION_OP = b"NextIteration"
_PLACEHOLDER_OP = b"Placeholder"
_POW_OP = b"Pow"
_READ_VARIABLE_OP = b"ReadVariableOp"
_SIN_OP = b"Sin"
_SPARSE_TENSOR_DENSE_MATMUL_OP = b"SparseTensorDenseMatMul"
_SQRT_OP = b"Sqrt"
_SQUARE_OP = b"Square"
_STATELESS_IF_OP = b"StatelessIf"
_SWITCH_OP = b"Switch"
_UNIQUE_OP = b"Unique"
_VAR_HANDLE_OP = b"VarHandleOp"
_WHILE_OP = b"While"
class _NumpyFunctionCallback(object):
def __init__(self, instrument_graph_ops=True, float_only=False):
self.instrument_graph_ops = instrument_graph_ops
self._float_only = float_only
self.reset()
def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None):
is_eager = not graph
if is_eager:
self.eager_op_types.append(
compat.as_bytes(op_type) if op_type else op_type)
self.eager_op_names.append(
compat.as_bytes(op_name) if op_name else op_name)
self.eager_attrs.append(attrs)
self.eager_graphs.append(graph)
self.eager_inputs.append(inputs)
else:
self.graph_op_types.append(
compat.as_bytes(op_type) if op_type else op_type)
self.graph_op_names.append(
compat.as_bytes(op_name) if op_name else op_name)
self.graph_attrs.append(attrs)
self.graph_graphs.append(graph)
self.graph_graph_versions.append(graph.version)
self.graph_inputs.append(inputs)
if not self.instrument_graph_ops:
return outputs
# Instrument the graph with numpy_function.
instrumented_outputs = []
for output in outputs:
if compat.as_bytes(op_type) in (_ENTER_OP, _EXIT_OP, _IF_OP, _MERGE_OP,
_NEXT_ITERATION_OP, _STATELESS_IF_OP,
_SWITCH_OP, _WHILE_OP, _IDENTITY_OP,
_VAR_HANDLE_OP, _PLACEHOLDER_OP):
# TODO(cais): Overriding the output of StatelessIf, If and While ops
# currently fails with error. Investigate (b/139668453).
# Avoid instrumenting Identity ops as well, as they are inserted
# by tf.function/AutoGraph for marshalling outputs.
instrumented_output = output
else:
def record(ndarray_value):
if compat.as_bytes(op_name) not in self.graph_internal_ndarrays:
self.graph_internal_ndarrays[compat.as_bytes(op_name)] = []
self.graph_internal_ndarrays[compat.as_bytes(op_name)].append(
ndarray_value)
return ndarray_value
if self._float_only and not output.dtype.is_floating:
instrumented_output = output
else:
instrumented_output = script_ops.numpy_function(
record, [output], output.dtype)
            instrumented_output.set_shape(output.shape)
instrumented_outputs.append(instrumented_output)
return instrumented_outputs
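  # Illustrative note (not part of the original file): the numpy_function
  # wrapping above is the usual trick for observing intermediate graph-mode
  # tensors. A standalone sketch of the same idea, with `some_tensor` standing
  # in for any hypothetical symbolic tensor, would be:
  #
  #   seen = []
  #   def record(x):
  #     seen.append(x)  # x arrives as a NumPy ndarray when the graph runs.
  #     return x        # Pass the value through unchanged.
  #   y = script_ops.numpy_function(record, [some_tensor], some_tensor.dtype)
  #   y.set_shape(some_tensor.shape)  # numpy_function drops static shape info.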
def reset(self):
self.eager_op_types = []
    self.eager_op_names = []
self.eager_attrs = []
self.eager_graphs = []
self.eager_inputs = []
self.graph_op_types = []
self.graph_op_names = []
self.graph_attrs = []
self.graph_graphs = []
self.graph_graph_versions = []
self.graph_inputs = []
    # A dict mapping tensor name (e.g., "MatMul_10") to a list of ndarrays.
# The list is the history of the tensor's computation result inside
# `tf.Graph`s (`FuncGraph`s).
# For an op with multiple output tensors, the outputs are interleaved in
# the list.
self.graph_internal_ndarrays = {}
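# Illustrative sketch (not part of the original test file): the minimal shape
# of an op callback as exercised by the tests below. Registration and removal
# go through op_callbacks.add_op_callback()/remove_op_callback(); the callback
# sees every eager op and every op added to a graph, and may return
# replacement outputs (returning None leaves the originals untouched).
#
#   def logging_callback(op_type, inputs, attrs, outputs, op_name=None,
#                        graph=None):
#     # graph is None for eagerly executed ops, a Graph/FuncGraph otherwise.
#     print(op_type, op_name, len(inputs), len(outputs))
#     return outputs
#
#   op_callbacks.add_op_callback(logging_callback)
#   # ... run eager ops or tf.functions; the callback observes each op ...
#   op_callbacks.remove_op_callback(logging_callback)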
class OpCallbacksTest(test_util.TensorFlowTestCase):
def tearDown(self):
op_callbacks.clear_op_callbacks()
super(OpCallbacksTest, self).tearDown()
def testSingleThreadedStack(self):
ctx = context.context()
instrument_0 = _NumpyFunctionCallback()
instrument_1 = _NumpyFunctionCallback()
op_callbacks.add_op_callback(instrument_0.callback)
self.assertEqual(1, len(ctx.op_callbacks))
self.assertIn(instrument_0.callback, ctx.op_callbacks)
op_callbacks.add_op_callback(instrument_1.callback)
self.assertEqual(2, len(ctx.op_callbacks))
self.assertIn(instrument_0.callback, ctx.op_callbacks)
self.assertIn(instrument_1.callback, ctx.op_callbacks)
op_callbacks.remove_op_callback(instrument_1.callback)
self.assertEqual(1, len(ctx.op_callbacks))
self.assertIn(instrument_0.callback, ctx.op_callbacks)
op_callbacks.remove_op_callback(instrument_0.callback)
self.assertEqual(0, len(ctx.op_callbacks))
def testMultiThreadedStacks(self):
# Instrument for the main thread.
instrument_0 = _NumpyFunctionCallback()
# Instrument for the to-be-created thread.
instrument_1 = _NumpyFunctionCallback()
def thread1_job():
op_callbacks.add_op_callback(instrument_1.callback)
@def_function.function
def func1(x):
return math_ops.sqrt(math_ops.log(x))
x = constant_op.constant(4.0)
self.assertAllClose(func1(x), np.sqrt(np.log(4.0)))
thread1 = threading.Thread(target=thread1_job)
# Start job on separate thread.
thread1.start()
# Run something on the main thread.
op_callbacks.add_op_callback(instrument_0.callback)
@def_function.function
def func0(x):
return math_ops.square(math_ops.sin(x))
x = constant_op.constant(4.0)
self.assertAllClose(func0(x), np.square(np.sin(4.0)))
thread1.join()
# Assert that there is no cross-talk between the main thread
# and the created thread.
self.assertIn(_PLACEHOLDER_OP, instrument_1.graph_op_types)
self.assertIn(_LOG_OP, instrument_1.graph_op_types)
self.assertIn(_SQRT_OP, instrument_1.graph_op_types)
self.assertNotIn(_SIN_OP, instrument_1.graph_op_types)
self.assertNotIn(_SQUARE_OP, instrument_1.graph_op_types)
self.assertNotIn(_LOG_OP, instrument_0.graph_op_types)
self.assertNotIn(_SQRT_OP, instrument_0.graph_op_types)
    self.assertIn(_SIN_OP, instrument_0.graph_op_types)
|