| repo_name (string, len 5–100) | path (string, len 4–231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, len 0–8.16k) | middle (string, len 3–512) | suffix (string, len 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
| kobejean/tensorflow | tensorflow/python/training/checkpoint_utils.py | Python | apache-2.0 | 15,013 | 0.005728 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return pywrap_tensorflow.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
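# A minimal usage sketch of the three helpers above; the checkpoint path
# and the variable name are hypothetical, not part of this module:
#
#   reader = load_checkpoint("/tmp/model.ckpt")
#   for name, shape in list_variables("/tmp/model.ckpt"):
#     print(name, shape)
#   value = load_variable("/tmp/model.ckpt", "dense/kernel")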
@tf_export("train.init_from_checkpoint")
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Initializes current variables with tensors loaded from given checkpoint.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.variable_scope('new_scope_1'):
var1 = tf.get_variable('var1', shape=[20, 2],
initializer=tf.zeros_initializer())
with tf.variable_scope('new_scope_2'):
var2 = tf.get_variable('var2', shape=[50, 4],
initializer=tf.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.get_variable(name='var3', shape=[100, 100],
initializer=tf.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
if distribution_strategy_context.get_cross_tower_context():
_init_from_checkpoint(None, ckpt_dir_or_file, assignment_map)
else:
distribution_strategy_context.get_tower_context().merge_call(
_init_from_checkpoint, ckpt_dir_or_file, assignment_map)
def _init_from_checkpoint(_, ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tenso
| tripzero/soletta | data/gdb/libsoletta.so-gdb.py | Python | apache-2.0 | 29,075 | 0.002511 |
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gdb
import re
## IMPORTANT NOTE:
#
# This file is a Python GDB script that is highly dependent on
# symbol names, even the internal functions and parameters.
#
# Whenever this script depends on a symbol, mark it in the source file
# so people know they have to adapt this file on changes.
## LOADING:
#
# This file should be auto-loaded by gdb if it is installed in GDB's
# auto-load directory and matches the installed libsoletta.so,
# including the final so-version.
#
# If soletta is installed to a custom directory, then make sure GDB knows
# about this location and that the directory is marked as safe-path:
#
# (gdb) add-auto-load-scripts-directory ${soletta_prefix}/share/gdb/auto-load
# (gdb) add-auto-load-safe-path ${soletta_prefix}/share/gdb/auto-load
#
# It may be included directly if not auto-loaded:
#
# (gdb) source ${soletta_prefix}/share/gdb/auto-load/libsoletta.so-gdb.py
#
## Usage:
# Commands start with the 'sol_' prefix; use 'apropos ^sol_' to filter
# commands in our namespace, or use tab-completion. Use GDB's
# "help command" to get more information.
defvalue_member_map = {
"string": "s",
"byte": "byte",
"boolean": "b",
"int": "i",
"float": "f",
"rgb": "rgb",
"direction_vector": "direction_vector",
}
def get_type_description(type):
try:
tdesc = type["description"]
if tdesc:
return tdesc.dereference()
except KeyError:
pass
return None
def get_node_type_description(node):
type = node["type"]
return get_type_description(type)
def _get_node_port_index_by_name(node, member, port_name):
tdesc = get_node_type_description(node)
if not tdesc:
return -1
array = tdesc[member]
if not array:
return -1
i = 0
while array[i]:
port = array[i]
if port["name"] and port["name"].string() == port_name:
return i
i += 1
return -1
def get_node_port_out_index_by_name(node, port_name):
return _get_node_port_index_by_name(node, "ports_out", port_name)
def get_node_port_in_index_by_name(node, port_name):
return _get_node_port_index_by_name(node, "ports_in", port_name)
def _get_node_port_name_by_index(node, member, port_index):
tdesc = get_node_type_description(node)
if not tdesc:
return None
array = tdesc[member]
if not array:
return None
i = 0
while array[i]:
if i == port_index:
port = array[i]
if port["name"]:
return port["name"].string()
return None
elif i > port_index:
break
i += 1
return None
def get_node_port_out_name_by_index(node, port_index):
return _get_node_port_name_by_index(node, "ports_out", port_index)
def get_node_port_in_name_by_index(node, port_index):
return _get_node_port_name_by_index(node, "ports_in", port_index)
class FlowTypePrinter(object):
"Print a 'struct sol_flow_node_type'"
def __init__(self, val):
self.val = val
self.port_in_type = gdb.lookup_type("struct sol_flow_port_type_in").const().pointer()
def display_hint(self):
return 'sol_flow_node_type'
def _port_description_to_string(self, index, port, port_type):
s = ("\n %d %s (%s)\n" \
" description: %s\n") % (
index,
port["name"].string(),
port["data_type"].string(),
port["description"].string())
if port_type["connect"]:
s += " connect(): %s\n" % (port_type["connect"],)
if port_type["disconnect"]:
s += " disconnect(): %s\n" % (port_type["disconnect"],)
if port_type.type == self.port_in_type and port_type["process"]:
s += " process(): %s\n" % (port_type["process"],)
return s
def _option_description_to_string(self, option):
data_type = option["data_type"].string()
defvalue_member = defvalue_member_map.get(data_type)
if not defvalue_member:
defvalue = ""
else:
defvalue = option["defvalue"][defvalue_member]
if data_type == "string":
if defvalue:
defvalue = defvalue.string()
else:
defvalue = "NULL"
defvalue = " (default=%s)" % (defvalue,)
return "\n %s(%s) \"%s\"%s," % (
option["name"].string(),
data_type,
option["description"].string(),
defvalue)
def _ports_description_to_string(self, array, get_port_type):
if not array:
return ""
i = 0
r = []
while array[i]:
port_type = get_port_type(i)
r.append(self._port_description_to_string(i, array[i], port_type))
i += 1
if i > 0:
r.append("\n ")
return "".join(r)
def _options_description_to_string(self, opts):
if not opts:
return ""
opts = opts.dereference()
array = opts["members"]
if not array:
return ""
i = 0
r = []
while array[i]["name"]:
r.append(self._option_description_to_string(array[i]))
i += 1
if i > 0:
r.append("\n ")
return "".join(r)
def to_string(self):
type = self.val
tdesc = get_type_description(type)
if tdesc:
get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
get_port_out = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
p_type = type.address
ports_in = self._ports_description_to_string(tdesc["ports_in"], lambda idx: get_port_in(p_type, idx))
ports_out = self._ports_description_to_string(tdesc["ports_out"], lambda idx: get_port_out(p_type, idx))
options = self._options_description_to_string(tdesc["options"])
return "%s=%s" \
"\n name=\"%s\"," \
"\n category=\"%s\"," \
"\n description=\"%s\"," \
"\n ports_in={%s}," \
"\n ports_out={%s}," \
"\n options={%s})" % (
tdesc["symbol"].string(),
type.address,
tdesc["name"].string(),
tdesc["category"].string(),
tdesc["description"].string(),
ports_in,
ports_out,
options)
return "(struct sol_flow_node_type)%s (no node type description)" % (type.address,)
class FlowPrinter(object):
"Print a 'struct sol_flow_node'"
def __init__(self, val):
self.val = val
def display_hint(self):
return 'sol_flow_node'
def to_string(self):
id = self.val["id"]
type = self.val["type"]
if not type:
return "sol_flow_node(%s) is under construction." % (
self.val.address,)
tname = "%#x (no node type description)" % (type.address,)
tdesc = get_type_description(type)
if tdesc:
tname = "%s(%s=%s)" % (
tdesc["name"].string(),
tdesc["symbol"].string(),
type.address)
return "sol_flow_node(%s, id=\"%s\", type=%s)" % (
self.val.address, id.string(), tname)
def sol_flow_pretty_printers(val):
lookup_tag = val.type.tag
if lookup_tag == "sol_flow_node":
| zwChan/VATEC | ~/eb-virt/Lib/site-packages/wheel/test/complex-dist/complexdist/__init__.py | Python | apache-2.0 | 23 | 0 |
def main():
    return
| fhirschmann/penchy | penchy/util.py | Python | mit | 6,303 | 0.000476 |
"""
This module provides miscellaneous utilities.
.. moduleauthor:: Fabian Hirschmann <fabian@hirschmann.email>
.. moduleauthor:: Michael Markert <markert.michael@googlemail.com>
:copyright: PenchY Developers 2011-2012, see AUTHORS
:license: MIT License, see LICENSE
"""
from __future__ import print_function
import hashlib
import imp
import logging
import os
import shutil
import sys
import tempfile
import inspect
from contextlib import contextmanager
from functools import wraps
from xml.etree.ElementTree import SubElement
from tempfile import NamedTemporaryFile
from penchy.compat import write
from penchy import bootstrap
log = logging.getLogger(__name__)
def memoized(f):
"""
Decorator that provides memoization, i.e. a cache that saves the result of
a function call and returns it when called again with the same arguments.
The function will not be evaluated if the arguments are present in the
cache.
"""
cache = {}
@wraps(f)
def _memoized(*args, **kwargs):
key = tuple(args) + tuple(kwargs.items())
try:
if key in cache:
return cache[key]
except TypeError: # if passed an unhashable type evaluate directly
return f(*args, **kwargs)
ret = f(*args, **kwargs)
cache[key] = ret
return ret
return _memoized
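# A short usage sketch of the decorator above (fib is a hypothetical
# example): repeated calls with the same argument are answered from the
# cache instead of being re-evaluated.
@memoized
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)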
# Copyright (c) 1995-2010 by Frederik Lundh
# <http://effbot.org/zone/element-lib.htm#prettyprint>
# Licensed under the terms of the Historical Permission Notice
# and Disclaimer, see <http://effbot.org/zone/copyright.htm>.
def tree_pp(elem, level=0):
"""
Pretty-prints an ElementTree.
:param elem: root node
:type elem: :class:`~xml.etree.ElementTree.Element`
:param level: current level in tree
:type level: int
"""
i = '\n' + level * ' '
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
tree_pp(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def dict2tree(elem, dict_):
"""
Transform the given dictionary to a ElementTree and
add it to the given element.
:param elem: parent element
:type elem: :class:`xml.etree.ElementTree.Element`
:param dict_: dict to add to ``elem``
:type dict_: dict
"""
for key in dict_:
if dict_[key]:
e = SubElement(elem, key)
if type(dict_[key]) == dict:
dict2tree(e, dict_[key])
else:
e.text = dict_[key]
def sha1sum(filename, blocksize=65536):
"""
Returns the sha1 hexdigest of a file.
"""
hasher = hashlib.sha1()
with open(filename, 'rb') as afile:
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
@contextmanager
def tempdir(prefix='penchy-invocation', delete=False):
"""
Contextmanager to execute in a newly created temporary directory.
:param prefix: prefix of the temporary directory
:type prefix: str
:param delete: delete the temporary directory afterwards
:type delete: bool
"""
fwd = os.getcwd()
cwd = tempfile.mkdtemp(prefix=prefix)
os.chdir(cwd)
yield
os.chdir(fwd)
if delete:
shutil.rmtree(cwd)
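# A usage sketch of the contextmanager above (the prefix and file name
# are illustrative): the block runs inside a fresh temporary directory,
# which is removed afterwards only when delete=True.
#
#   with tempdir(prefix='penchy-example', delete=True):
#       open('scratch.txt', 'w').close()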
def make_bootstrap_client():
"""
Returns the temporary filename of a file containing
the bootstrap client.
"""
tf = NamedTemporaryFile()
source = inspect.getsource(bootstrap)
write(tf, source)
tf.flush()
return tf
def load_job(filename):
"""
Loads a job.
:param filename: filename of the job
:type filename: str
"""
assert 'config' in sys.modules, 'You have to load the penchyrc before a job'
with disable_write_bytecode():
job = imp.load_source('job', filename)
log.info('Loaded job from %s' % filename)
return job
def load_config(filename):
"""
Loads the config module from filename.
:param filename: filename of the config file
:type filename: str
"""
with disable_write_bytecode():
config = imp.load_source('config', filename)
log.info('Loaded configuration from %s' % filename)
return config
def get_config_attribute(config, name, default_value):
"""
Returns an attribute of a config module or the
default value.
:param config: config module to use
:param name: attribute name
:type name: str
:param default: default value
"""
if hasattr(config, name):
return getattr(config, name)
else:
return default_value
@contextmanager
def disable_write_bytecode():
"""
Contextmanager to temporarily disable writing bytecode while executing.
"""
old_state = sys.dont_write_bytecode
sys.dont_write_bytecode = True
yield
sys.dont_write_bytecode = old_state
def default(value, replacement):
"""
Check if ``value`` is ``None`` and then return ``replacement`` or else
``value``.
:param value: value to check
:param replacement: default replacement for value
:returns: return the value or replacement if value is None
"""
return value if value is not None else replacement
def die(msg):
"""
Print msg to stderr and exit with exit code 1.
:param msg: msg to print
:type msg: str
"""
print(msg, file=sys.stderr)
sys.exit(1)
def depth(l):
"""
Computes the depth of a nested balanced list.
Raises ``ValueError`` if the lists are not
balanced.
:param l: the nested list
:rtype: int
:raises: ValueError
"""
if isinstance(l, list):
depths = map(depth, l)
if min(depths) != max(depths):
raise ValueError("Lists are not balanced.")
return max(depths) + 1
else:
return 0
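# For example: depth(5) == 0 and depth([[1, 2], [3, 4]]) == 2, while
# depth([[1], 2]) raises ValueError because its branches differ in depth.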
def unify(xs):
"""
Removes duplicates from xs while preserving the order.
:param xs: the list
:type xs: list object
"""
seen = set()
return [x for x in xs if x not in seen and not seen.add(x)]
| mptei/smarthome | lib/logic.py | Python | gpl-3.0 | 4,430 | 0.001806 |
#!/usr/bin/env python
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2011-2013 Marcus Popp marcus@popp.mx
#########################################################################
# This file is part of SmartHome.py. http://smarthome.sourceforge.net/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
import logging
import os
import configobj
logger = logging.getLogger('')
class Logics():
def __init__(self, smarthome, configfile):
logger.info('Start Logics')
self._sh = smarthome
self._workers = []
self._logics = {}
self._bytecode = {}
self.alive = True
logger.debug("reading logics from %s" % configfile)
try:
self._config = configobj.ConfigObj(configfile, file_error=True)
except Exception, e:
logger.critical(e)
return
for name in self._config:
logger.debug("Logic: %s" % name)
logic = Logic(self._sh, name, self._config[name])
if hasattr(logic, 'bytecode'):
self._logics[name] = logic
self._sh.scheduler.add(name, logic, logic.prio, logic.crontab, logic.cycle)
else:
continue
# plugin hook
for plugin in self._sh._plugins:
if hasattr(plugin, 'parse_logic'):
plugin.parse_logic(logic)
# item hook
if hasattr(logic, 'watch_item'):
if isinstance(logic.watch_item, str):
logic.watch_item = [logic.watch_item]
items = []
for entry in logic.watch_item:
items += self._sh.match_items(entry)
for item in items:
item.add_logic_trigger(logic)
def __iter__(self):
for logic in self._logics:
yield logic
def __getitem__(self, name):
if name in self._logics:
return self._logics[name]
class Logic():
def __init__(self, smarthome, name, attributes):
self._sh = smarthome
self.name = name
self.crontab = None
self.cycle = None
self.prio = 3
self.last = None
self.conf = attributes
for attribute in attributes:
vars(self)[attribute] = attributes[attribute]
self.generate_bytecode()
self.prio = int(self.prio)
if self.crontab is not None:
if isinstance(self.crontab, list):
self.crontab = ','.join(self.crontab) # rejoin crontab entry to a string
def id(self):
return self.name
def __call__(self, caller='Logic', source=None, value=None, dest=None, dt=None):
self._sh.scheduler.trigger(self.name, self, prio=self.prio, by=caller, source=source, dest=dest, value=value, dt=dt)
def trigger(self, by='Logic', source=None, value=None, dest=None, dt=None):
self._sh.scheduler.trigger(self.name, self, prio=self.prio, by=by, source=source, dest=dest, value=value, dt=dt)
def generate_bytecode(self):
if hasattr(self, 'filename'):
filename = self._sh.base_dir + '/logics/' + self.filename
if not os.access(filename, os.R_OK):
logger.warning("%s: Could not access logic file (%s) => ignoring." % (self.name, self.filename))
return
try:
self.bytecode = compile(open(filename).read(), self.filename, 'exec')
except Exception, e:
logger.warning("Exception: %s" % e)
else:
logger.warning("%s: No filename specified => ignoring." % self.name)
| apple/swift-lldb | packages/Python/lldbsuite/test/commands/breakpoint/basic/TestBreakpointCommand.py | Python | apache-2.0 | 12,042 | 0.002076 |
"""
Test lldb breakpoint command add/list/delete.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import side_effect
class BreakpointCommandTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24528")
def test_breakpoint_command_sequence(self):
"""Test a sequence of breakpoint command add, list, and delete."""
self.build()
self.breakpoint_command_sequence()
def test_script_parameters(self):
"""Test a sequence of breakpoint command add, list, and delete."""
self.build()
self.breakpoint_command_script_parameters()
def test_commands_on_creation(self):
self.build()
self.breakpoint_commands_on_creation()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.c', '// Set break point at this line.')
# disable "There is a running process, kill it and restart?" prompt
self.runCmd("settings set auto-confirm true")
self.addTearDownHook(
lambda: self.runCmd("settings clear auto-confirm"))
def test_delete_all_breakpoints(self):
"""Test that deleting all breakpoints works."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_symbol(self, "main")
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
self.runCmd("breakpoint delete")
self.runCmd("process continue")
self.expect("process status", PROCESS_STOPPED,
patterns=['Process .* exited with status = 0'])
def breakpoint_command_sequence(self):
"""Test a sequence of breakpoint command add, list, and delete."""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add three breakpoints on the same line. The first time we don't specify the file,
# since the default file is the one containing main:
lldbutil.run_break_set_by_file_and_line(
self, None, self.line, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
# Breakpoint 4 - set at the same location as breakpoint 1 to test
# setting breakpoint commands on two breakpoints at a time
lldbutil.run_break_set_by_file_and_line(
self, None, self.line, num_expected_locations=1, loc_exact=True)
# Make sure relative path source breakpoints work as expected. We test
# with partial paths with and without "./" prefixes.
lldbutil.run_break_set_by_file_and_line(
self, "./main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "breakpoint/basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./breakpoint/basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
# Test relative breakpoints with incorrect paths and make sure we get
# no breakpoint locations
lldbutil.run_break_set_by_file_and_line(
self, "invalid/main.c", self.line,
num_expected_locations=0, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./invalid/main.c", self.line,
num_expected_locations=0, loc_exact=True)
# Now add callbacks for the breakpoints just created.
self.runCmd(
"breakpoint command add -s command -o 'frame variable --show-types --scope' 1 4")
self.runCmd(
"breakpoint command add -s python -o 'import side_effect; side_effect.one_liner = \"one liner was here\"' 2")
self.runCmd(
"breakpoint command add --python-function bktptcmd.function 3")
# Check that the breakpoint commands are correctly set.
# The breakpoint list now only contains breakpoint 1.
self.expect(
"breakpoint list", "Breakpoints 1 & 2 created", substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1" %
self.line], patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" %
self.line])
self.expect(
"breakpoint list -f",
"Breakpoints 1 & 2 created",
substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1" %
self.line],
patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" %
self.line,
"1.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" %
self.line,
"2.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" %
self.line])
self.expect("breakpoint command list 1", "Breakpoint 1 command ok",
substrs=["Breakpoint commands:",
"frame variable --show-types --scope"])
self.expect("breakpoint command list 2", "Breakpoint 2 command ok",
substrs=["Breakpoint commands (Python):",
"import side_effect",
"side_effect.one_liner"])
self.expect("breakpoint command list 3", "Breakpoint 3 command ok",
substrs=["Breakpoint commands (Python):",
"bktptcmd.function(frame, bp_loc, internal_dict)"])
self.expect("breakpoint command list 4", "Breakpoint 4 command ok",
substrs=["Breakpoint commands:",
"frame variable --show-types --scope"])
self.runCmd("breakpoint delete 4")
self.runCmd("command script import --allow-reload ./bktptcmd.py")
# Next let's try some other breakpoint kinds. First break with a regular expression
# and then specify only one file. The first time we should get two locations,
# the second time only one:
lldbutil.run_break_set_by_regexp(
self, r"._MyFunction", num_expected_locations=2)
lldbutil.run_break_set_by_regexp(
self,
r"._MyFunction",
extra_options="-f a.c",
num_expected_locations=1)
lldbutil.run_break_set_by_regexp(
self,
r"._MyFunction",
extra_options="-f a.c -f b.c",
num_expected_locations=2)
# Now try a source regex breakpoint:
lldbutil.run_break_set_by_source_regexp(
self,
r"is about to return [12]0",
extra_options="-f a.c -f b.c",
num_expected_locations=2)
lldbutil.run_break_set_by_source_regexp(
self,
r"is about to return [12]0",
extra_options="-f a.c",
num_expected_locations=1)
# Reset our canary variables and run the program.
side_effect.one_liner = None
side_effect.bktptcmd = None
self.runCmd("run", RUN_SUCCEEDED)
# Check the value of canary variables.
self.assertEquals("one liner was here", side_effect.one_liner)
self.assertEqu
| ThiefMaster/indico | bin/maintenance/update_header.py | Python | mit | 8,843 | 0.003279 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import re
import subprocess
import sys
from datetime import date
import click
import yaml
from indico.util.console import cformat
# Dictionary listing the files for which to change the header.
# The key is the extension of the file (without the dot) and the value is another
# dictionary containing two keys:
# - 'regex' : A regular expression matching comments in the given file type
# - 'format': A dictionary with the comment characters to add to the header.
# There must be a `comment_start` inserted before the header,
# `comment_middle` inserted at the beginning of each line except the
# first and last one, and `comment_end` inserted at the end of the
# header. (See the `HEADER` above)
SUPPORTED_FILES = {
'py': {
'regex': re.compile(r'((^#|[\r\n]#).*)*'),
'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
'wsgi': {
'regex': re.compile(r'((^#|[\r\n]#).*)*'),
'format': {'comment_start': '#', 'comment_middle': '#', 'comment_end': ''}},
'js': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
'jsx': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
'css': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/'),
'format': {'comment_start': '/*', 'comment_middle': ' *', 'comment_end': ' */'}},
'scss': {
'regex': re.compile(r'/\*(.|[\r\n])*?\*/|((^//|[\r\n]//).*)*'),
'format': {'comment_start': '//', 'comment_middle': '//', 'comment_end': ''}},
}
# The substring which must be part of a comment block in order for the comment to be updated by the header.
SUBSTRING = 'This file is part of'
USAGE = '''
Updates all the headers in the supported files ({supported_files}).
By default, all the files tracked by git in the current repository are updated
to the current year.
You can specify a year to update to as well as a file or directory.
This will update all the supported files in the scope including those not tracked
by git. If the directory does not contain any supported files (or if the file
specified is not supported) nothing will be updated.
'''.format(supported_files=', '.join(SUPPORTED_FILES)).strip()
def _walk_to_root(path):
"""Yield directories starting from the given directory up to the root."""
# Based on code from python-dotenv (BSD-licensed):
# https://github.com/theskumar/python-dotenv/blob/e13d957b/src/dotenv/main.py#L245
if os.path.isfile(path):
path = os.path.dirname(path)
last_dir = None
current_dir = os.path.abspath(path)
while last_dir != current_dir:
yield current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
last_dir, current_dir = current_dir, parent_dir
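# For example (hypothetical path): _walk_to_root('/opt/indico/file.py')
# yields '/opt/indico', '/opt' and '/' in that order.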
def _get_config(path, end_year):
config = {}
for dirname in _walk_to_root(path):
check_path = os.path.join(dirname, 'headers.yml')
if os.path.isfile(check_path):
with open(check_path) as f:
config.update((k, v) for k, v in yaml.safe_load(f.read()).items() if k not in config)
if config.pop('root', False):
break
if 'start_year' not in config:
click.echo('no valid headers.yml files found: start_year missing')
sys.exit(1)
if 'name' not in config:
click.echo('no valid headers.yml files found: name missing')
sys.exit(1)
if 'header' not in config:
click.echo('no valid headers.yml files found: header missing')
sys.exit(1)
config['end_year'] = end_year
return config
def gen_header(data):
if data['start_year'] == data['end_year']:
data['dates'] = data['start_year']
else:
data['dates'] = '{} - {}'.format(data['start_year'], data['end_year'])
return '\n'.join(line.rstrip() for line in data['header'].format(**data).strip().splitlines())
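# For example, with start_year 2002 and end_year 2021 the 'dates' field
# expands to '2002 - 2021'; when both years are equal it collapses to the
# single year.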
def _update_header(file_path, config, substring, regex, data, ci):
found = False
with open(file_path) as file_read:
content = orig_content = file_read.read()
if not content.strip():
return False
shebang_line = None
if content.startswith('#!/'):
shebang_line, content = content.split('\n', 1)
for match in regex.finditer(content):
if substring in match.group():
found = True
content = content[:match.start()] + gen_header(data | config) + content[match.end():]
if shebang_line:
content = shebang_line + '\n' + content
if content != orig_content:
msg = 'Incorrect header in {}' if ci else cformat('%{green!}Updating header of %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
if not ci:
with open(file_path, 'w') as file_write:
file_write.write(content)
return True
elif not found:
msg = 'Missing header in {}' if ci else cformat('%{red!}Missing header%{reset} in %{blue!}{}')
print(msg.format(os.path.relpath(file_path)))
return True
def update_header(file_path, year, ci):
config = _get_config(file_path, year)
ext = file_path.rsplit('.', 1)[-1]
if ext not in SUPPORTED_FILES or not os.path.isfile(file_path):
return False
if os.path.basename(file_path)[0] == '.':
return False
return _update_header(file_path, config, SUBSTRING, SUPPORTED_FILES[ext]['regex'],
SUPPORTED_FILES[ext]['format'], ci)
def blacklisted(root, path, _cache={}):
orig_path = path
if path not in _cache:
_cache[orig_path] = False
while (path + os.path.sep).startswith(root):
if os.path.exists(os.path.join(path, '.no-headers')):
_cache[orig_path] = True
break
path = os.path.normpath(os.path.join(path, '..'))
return _cache[orig_path]
@click.command(help=USAGE)
@click.option('--ci', is_flag=True, help='Indicate that the script is running during CI and should use a non-zero '
'exit code unless all headers were already up to date. This also prevents '
'files from actually being updated.')
@click.option('--year', '-y', type=click.IntRange(min=1000), default=date.today().year, metavar='YEAR',
help='Indicate the target year')
@click.option('--path', '-p', type=click.Path(exists=True), help='Restrict updates to a specific file or directory')
@click.pass_context
def main(ctx, ci, year, path):
error = False
if path and os.path.isdir(path):
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all the files in '
'%{yellow!}{path}%{reset}...').format(year=year, path=path))
for root, _, filenames in os.walk(path):
for filename in filenames:
if not blacklisted(path, root):
if update_header(os.path.join(root, filename), year, ci):
error = True
elif path and os.path.isfile(path):
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for the file '
'%{yellow!}{file}%{reset}...').format(year=year, file=path))
if update_header(path, year, ci):
error = True
else:
if not ci:
print(cformat('Updating headers to the year %{yellow!}{year}%{reset} for all '
'git-tracked files...').format(year=year))
try:
for filepath in subprocess.check_output(['git', 'ls-files'], text=True).splitlines():
filepath = os.path.abspath(filepath)
| trevor/calendarserver | txdav/carddav/datastore/test/__init__.py | Python | apache-2.0 | 695 | 0 |
# -*- test-case-name: txdav.carddav.datastore.test -*-
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
AddressBook store tests.
"""
| wulczer/ansible | v2/ansible/executor/playbook_iterator.py | Python | gpl-3.0 | 4,905 | 0.018756 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class PlaybookState:
'''
A helper class, which keeps track of the task iteration
state for a given playbook. This is used in the PlaybookIterator
class on a per-host basis.
'''
def __init__(self, parent_iterator):
self._parent_iterator = parent_iterator
self._cur_play = 0
self._task_list = None
self._cur_task_pos = 0
self._done = False
def next(self, peek=False):
'''
Determines and returns the next available task from the playbook,
advancing through the list of plays as it goes.
'''
task = None
# we save these locally so that we can peek at the next task
# without updating the internal state of the iterator
cur_play = self._cur_play
task_list = self._task_list
cur_task_pos = self._cur_task_pos
while True:
# when we hit the end of the playbook entries list, we set a flag
# and return None to indicate we're there
# FIXME: accessing the entries and parent iterator playbook members
# should be done through accessor functions
if self._done or cur_play > len(self._parent_iterator._playbook._entries) - 1:
self._done = True
return None
# initialize the task list by calling the .compile() method
# on the play, which will call compile() for all child objects
if task_list is None:
task_list = self._parent_iterator._playbook._entries[cur_play].compile()
# if we've hit the end of this plays task list, move on to the next
# and reset the position values for the next iteration
if cur_task_pos > len(task_list) - 1:
cur_play += 1
task_list = None
cur_task_pos = 0
continue
else:
# FIXME: do tag/conditional evaluation here and advance
# the task position if it should be skipped without
# returning a task
task = task_list[cur_task_pos]
cur_task_pos += 1
# Skip the task if it is the member of a role which has already
# been run, unless the role allows multiple executions
if task._role:
# FIXME: this should all be done via member functions
# instead of direct access to internal variables
if task._role.has_run() and not task._role._metadata._allow_duplicates:
continue
# Break out of the while loop now that we have our task
break
# If we're not just peeking at the next task, save the internal state
if not peek:
self._cur_play = cur_play
self._task_list = task_list
self._cur_task_pos = cur_task_pos
return task
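# A note on the peek semantics above: next(peek=True) computes the
# upcoming task but skips saving cur_play/task_list/cur_task_pos back to
# the iterator, so a following next() call returns that same task again.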
class PlaybookIterator:
'''
The main iterator class, which keeps the state of the playbook
on a per-host basis using the above PlaybookState class.
'''
def __init__(self, inventory, log_manager, playbook):
self._playbook = playbook
self._log_manager = log_manager
self._host_entries = dict()
self._first_host = None
# build the per-host dictionary of playbook states
for host in inventory.get_hosts():
if self._first_host is None:
    self._first_host = host
self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self)
def get_next_task(self, peek=False):
''' returns the next task for host[0] '''
return self._host_entries[self._first_host.get_name()].next(peek=peek)
def get_next_task_for_host(self, host, peek=False):
''' fetch the next task for the given host '''
if host.get_name() not in self._host_entries:
raise AnsibleError("invalid host specified for playbook iteration")
return self._host_entries[host.get_name()].next(peek=peek)
| stelfrich/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/impexp.py | Python | gpl-2.0 | 1,024 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from webclient.controller import BaseController
class BaseImpexp(BaseController):
def __init__(self, conn, **kw):
BaseController.__init__(self, conn)
| rapidpro/ureport | ureport/polls/migrations/0070_install_triggers.py | Python | agpl-3.0 | 285 | 0 |
# Generated by Django 3.2.6 on 2021-10-19 09:14
from django.db import migrations
from ureport.sql import InstallSQL
class Migration(migrations.Migration):
dependencies = [
("polls", "0069_pollquestion_color_choice"),
]
operations = [InstallSQL("polls_0070")]
| Tojaj/librepo | examples/python/download_package.py | Python | lgpl-2.1 | 1,388 | 0.005764 |
#!/usr/bin/env python3
"""
librepo - download a package
"""
import os
import sys
import shutil
from pprint import pprint
import librepo
DESTDIR = "downloaded_metadata"
PROGRESSBAR_LEN = 40
finished = False
def callback(data, total_to_download, downloaded):
"""Progress callback"""
global finished
if total_to_download != downloaded:
finished = False
if total_to_download <= 0 or finished == True:
return
completed = int(downloaded / (total_to_download / PROGRESSBAR_LEN))
print("%30s: [%s%s] %8s/%8s\r" % (data, '#'*completed, '-'*(PROGRESSBAR_LEN-completed), int(downloaded), int(total_to_download)), )
sys.stdout.flush()
if total_to_download == downloaded and not finished:
print()
finished = True
return
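# A worked example of the arithmetic above: with PROGRESSBAR_LEN = 40,
# total_to_download = 1000 and downloaded = 500, completed is
# int(500 / (1000 / 40)) = 20, i.e. 20 '#' marks followed by 20 '-' marks.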
if __name__ == "__main__":
pkgs = [
("ImageMagick-djvu", "Packages/i/ImageMagick-djvu-6.7.5.6-3.fc17.i686.rpm"),
("i2c-tools-eepromer", "Packages/i/i2c-tools-eepromer-3.1.0-1.fc17.i686.rpm")
]
h = librepo.Handle()
h.setopt(librepo.LRO_URLS, ["http://ftp.linux.ncsu.edu/pub/fedora/linux/releases/17/Everything/i386/os/"])
h.setopt(librepo.LRO_REPOTYPE, librepo.LR_YUMREPO)
h.setopt(librepo.LRO_PROGRESSCB, callback)
h.setopt(librepo.LRO_PROGRESSDATA, "")
for pkg_name, pkg_url in pkgs:
h.progressdata = pkg_name
h.download(pkg_url)
| gmimano/commcaretest | corehq/apps/users/management/commands/ptop_fast_reindex_users.py | Python | bsd-3-clause | 433 | 0.004619 |
from corehq.apps.users.models import CommCareUser
from corehq.apps.hqcase.management.commands.ptop_fast_reindexer import PtopReindexer
from corehq.pillows.user import UserPillow
CHUNK_SIZE = 500
POOL_SIZE = 15
class Command(PtopReindexer):
help = "Fast r
|
einde
|
x of user elastic index by using the domain view and reindexing users"
doc_class = CommCareUser
view_name = 'users/by_username'
pillow_class = UserPillow
| Samweli/inasafe | safe/gui/tools/wizard/step_kw25_classification.py | Python | gpl-3.0 | 6,796 | 0 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **InaSAFE Wizard**
This module provides: Keyword Wizard Step: Classification Selector
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'qgis@borysjurgiel.pl'
__revision__ = '$Format:%H$'
__date__ = '16/03/2016'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
# noinspection PyPackageRequirements
from PyQt4 import QtCore
# noinspection PyPackageRequirements
from PyQt4.QtGui import QListWidgetItem
from safe.definitions import layer_purpose_hazard
from safe.utilities.gis import is_raster_layer
from safe.utilities.keyword_io import definition
from safe.gui.tools.wizard.wizard_strings import classification_question
from safe.gui.tools.wizard.wizard_step import get_wizard_step_ui_class
from safe.gui.tools.wizard.wizard_step import WizardStep
FORM_CLASS = get_wizard_step_ui_class(__file__)
class StepKwClassification(WizardStep, FORM_CLASS):
"""Keyword Wizard Step: Classification Selector"""
def is_ready_to_next_step(self):
"""Check if the step is complete. If so, there is
no reason to block the Next button.
:returns: True if new step may be enabled.
:rtype: bool
"""
return bool(self.selected_classification())
def get_previous_step(self):
"""Find the proper step when user clicks the Previous button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
new_step = self.parent.step_kw_layermode
return new_step
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
if is_raster_layer(self.parent.layer):
new_step = self.parent.step_kw_classify
else:
new_step = self.parent.step_kw_field
return new_step
def classifications_for_layer(self):
"""Return a list of valid classifications for a layer.
:returns: A list where each value represents a valid classification.
:rtype: list
"""
layer_geometry_id = self.parent.get_layer_geometry_id()
layer_mode_id = self.parent.step_kw_layermode.\
selected_layermode()['key']
subcategory_id = self.parent.step_kw_subcategory.\
selected_subcategory()['key']
if self.parent.step_kw_purpose.\
selected_purpose() == layer_purpose_hazard:
hazard_category_id = self.parent.step_kw_hazard_category.\
selected_hazard_category()['key']
if is_raster_layer(self.parent.layer):
return self.impact_function_manager.\
raster_hazards_classifications_for_layer(
subcategory_id,
layer_geometry_id,
layer_mode_id,
hazard_category_id)
else:
return self.impact_function_manager\
.vector_hazards_classifications_for_layer(
subcategory_id,
layer_geometry_id,
layer_mode_id,
hazard_category_id)
else:
# There are no classifications for exposures defined yet, apart
# from postprocessor_classification, which is processed in parallel
return []
def on_lstClassifications_itemSelectionChanged(self):
"""Update classification description label and unlock the Next button.
.. note:: This is an automatic Qt slot
executed when the field selection changes.
"""
self.clear_further_steps()
classification = self.selected_classification()
# Exit if no selection
if not classification:
return
# Set description label
self.lblDescribeClassification.setText(classification["description"])
# Enable the next button
self.parent.pbnNext.setEnabled(True)
def selected_classification(self):
"""Obtain the classification selected by user.
:returns: Metadata of the selected classification.
:rtype: dict, None
"""
item = self.lstClassifications.currentItem()
try:
return definition(item.data(QtCore.Qt.UserRole))
except (AttributeError, NameError):
return None
def clear_further_steps(self):
""" Clear all further steps
in order to properly calculate the prev step
"""
self.parent.step_kw_field.lstFields.clear()
self.parent.step_kw_classify.treeClasses.clear()
def set_widgets(self):
"""Set widgets on the Classification tab."""
self.clear_further_steps()
purpose = self.parent.step_kw_purpose.selected_purpose()['name']
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()['name']
self.lstClassifications.clear()
self.lblDescribeClassification.setText('')
self.lblSelectClassification.setText(
classification_question % (subcategory, purpose))
classifications = self.classifications_for_layer()
for classification in classifications:
if not isinstance(classification, dict):
classification = definition(classification)
item = QListWidgetItem(
classification['name'],
self.lstClassifications)
item.setData(QtCore.Qt.UserRole, classification['key'])
self.lstClassifications.addItem(item)
# Set values based on existing keywords (if already assigned)
geom = 'raster' if is_raster_layer(self.parent.layer) else 'vector'
key = '%s_%s_classification' % (
geom, self.parent.step_kw_purpose.selected_purpose()['key'])
classification_keyword = self.parent.get_existing_keyword(key)
if classification_keyword:
classifications = []
for index in xrange(self.lstClassifications.count()):
item = self.lstClassifications.item(index)
classifications.append(item.data(QtCore.Qt.UserRole))
if classification_keyword in classifications:
self.lstClassifications.setCurrentRow(
classifications.index(classification_keyword))
self.auto_select_one_item(self.lstClassifications)
| davidgardenier/frbpoppy | tests/monte_carlo/goodness_of_fit.py | Python | mit | 11,050 | 0 |
from weighted_quantiles import median
from scipy.stats import ks_2samp
import numpy as np
import os
import matplotlib.pyplot as plt
from frbpoppy import unpickle, TNS, poisson_interval, pprint
from tests.rates.alpha_real import EXPECTED
from tests.convenience import plot_aa_style, rel_path
from simulations import SimulationOverview, POP_SIZE
NORM_SURV = 'parkes-htru'
class GoodnessOfFit:
def __init__(self):
self.run_pars = {1: ['alpha', 'si', 'li'],
2: ['li', 'lum_min', 'lum_max'],
3: ['w_mean', 'w_std'],
4: ['dm_igm_slope', 'dm_host']}
self.norm_surv = NORM_SURV
self.so = SimulationOverview()
self.tns = self.get_tns()
def get_tns(self):
# Only get one-offs
return TNS(repeaters=False, mute=True, update=False).df
def dm(self, pop, survey_name):
"""Calculate GoodnessOfFit for DM distributions."""
mask = ((self.tns.survey == survey_name) & (self.tns.dm <= 950))
try:
ks_dm = ks_2samp(pop.frbs.dm, self.tns[mask].dm)[1]
except ValueError:
ks_dm = np.nan
return ks_dm
def snr(self, pop, survey_name):
mask = ((self.tns.survey == survey_name) & (self.tns.dm <= 950))
try:
ks_snr = ks_2samp(pop.frbs.snr, self.tns[mask].snr)[1]
except ValueError:
ks_snr = np.nan
return ks_snr
def rate(self, pop, survey_name, norm_uuid, run, errs=False):
# Add rate details
sr = pop.source_rate
surv_sim_rate = sr.det / sr.days
# Perhaps use at some stage
if errs:
p_int = poisson_interval(sr.det, sigma=1)
surv_sim_rate_errs = [p/sr.days for p in p_int]
# Determine ratio of detection rates
if survey_name in EXPECTED:
n_frbs, n_days = EXPECTED[survey_name]
else:
n_frbs, n_days = [np.nan, np.nan]
surv_real_rate = n_frbs/n_days
# Get normalisation properties
norm_real_n_frbs, norm_real_n_days = EXPECTED[self.norm_surv]
norm_pop = unpickle(f'mc/run_{run}/{norm_uuid}')
norm_sim_n_frbs = norm_pop.source_rate.det
norm_sim_n_days = norm_pop.source_rate.days
norm_sim_rate = norm_sim_n_frbs / norm_sim_n_days
norm_real_rate = norm_real_n_frbs / norm_real_n_days
if norm_sim_rate == 0:
norm_sim_rate = POP_SIZE / norm_sim_n_days
sim_ratio = surv_sim_rate / norm_sim_rate
real_ratio = surv_real_rate / norm_real_rate
diff = np.abs(sim_ratio - real_ratio)
if diff == 0:
rate_diff = 1e-3
else:
rate_diff = 1 / diff
return rate_diff, pop.n_sources()
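# A worked example of the weighting above (numbers are illustrative):
# with sim_ratio = 0.5 and real_ratio = 0.25, diff = 0.25 and
# rate_diff = 1 / 0.25 = 4; when diff == 0 the code assigns 1e-3 instead.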
def calc_gofs(self, run):
# For each requested run
self.so = SimulationOverview()
par_set = self.so.df[self.so.df.run == run].par_set.iloc[0]
pprint(f'Calculating goodness of fit for run {run}, par set {par_set}')
pars = self.run_pars[par_set]
values = []
# Loop through all combination of parameters
for values, group in self.so.df[self.so.df.run == run].groupby(pars):
pprint(f' - {list(zip(pars, values))}')
# Calculate goodness of fit values for each simulation
for row_ix, row in group.iterrows():
survey_name = row.survey
uuid = row.uuid
pop = unpickle(f'mc/run_{run}/{uuid}')
# Apply a DM cutoff
mask = (pop.frbs.dm <= 950)
pop.frbs.apply(mask)
pop.source_rate.det = pop.n_sources() * pop.source_rate.f_area
dm_gof = self.dm(pop, survey_name)
snr_gof = self.snr(pop, survey_name)
self.so.df.at[row_ix, 'dm_gof'] = dm_gof
self.so.df.at[row_ix, 'snr_gof'] = snr_gof
if pop.n_sources() == 0:
self.so.df.at[row_ix, 'weight'] = 0
self.so.df.at[row_ix, 'n_det'] = pop.n_sources()
pprint(f' - No sources in {survey_name}')
continue
# Find corresponding rate normalisation population uuid
norm_mask = dict(zip(pars, values))
norm_mask['survey'] = self.norm_surv
norm_mask['run'] = run
k = norm_mask.keys()
v = norm_mask.values()
norm_uuid = group.loc[group[k].isin(v).all(axis=1), :].uuid
norm_uuid = norm_uuid.values[0]
rate_diff, n_det = self.rate(pop, survey_name, norm_uuid, run)
# Get rate weighting
self.so.df.at[row_ix, 'weight'] = rate_diff
self.so.df.at[row_ix, 'n_det'] = n_det
pprint(f'Saving the results for run {run}')
# Best matching in terms of rates
max_w = np.nanmax(self.so.df.weight)
self.so.df.loc[self.so.df.weight == 1e3]['weight'] = max_w
self.so.save()
def plot(self, run):
# Get data
# For each requested run
df = self.so.df
par_set = df[df.run == run].par_set.iloc[0]
# For each parameter
for main_par in self.run_pars[par_set]:
pprint(f'Plotting {main_par}')
other_pars = [e for e in self.run_pars[par_set] if e != main_par]
for compare_par in ['dm', 'snr']:
compare_col = f'{compare_par}_gof'
pprint(f' - {compare_col}')
for survey, group_surv in df[df.run == run].groupby('survey'):
pprint(f' - {survey}')
# Set up plot
plot_aa_style()
plt.rcParams["figure.figsize"] = (5.75373*3, 5.75373*3)
plt.rcParams['figure.max_open_warning'] = 125
n_x = group_surv[other_pars[0]].nunique()
if len(other_pars) > 1:
n_y = group_surv[other_pars[1]].nunique()
else:
n_y = 1
fig, ax = plt.subplots(n_x, n_y,
sharex='col', sharey='row')
groups = group_surv.groupby(other_pars)
x = -1
for i, (other_pars_vals, group) in enumerate(groups):
bins = group[main_par].values
values = group[compare_col].values
bins, values = self.add_edges_to_hist(bins, values)
if n_y > 1:
y = i % n_y
if y == 0:
x += 1
a = ax[y, x]
else:
y = i
a = ax[y]
a.step(bins, values, where='mid')
a.set_title = str(other_pars_vals)
diff = np.diff(bins)
if diff[1] != diff[0]:
a.set_xscale('log')
# Set axis label
if y == n_y - 1:
p = other_pars[0]
if isinstance(other_pars_vals, float):
val = other_pars_vals
else:
val = other_pars_vals[0]
p = p.replace('_', ' ')
a.set_xlabel(f'{p} = {val:.2}')
if x == 0:
p = other_pars[1]
val = other_pars_vals[1]
p = p.replace('_', ' ')
a.set_ylabel(f'{p} = {val:.2}')
# Set axis limits
subset = df[df.run == run][main_par]
y_subset = group_surv[compare_col].copy()
try:
low = np.nanmin(y_subset)
high = np.nanmax(y_subset)
|
CERNDocumentServer/invenio
|
modules/websubmit/lib/functions/Print_Success_SRV.py
|
Python
|
gpl-2.0
| 1,933
| 0.008795
|
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
from invenio.config import CFG_SITE_URL, CFG_SITE_RECORD
from invenio.websubmit_functions.Shared_Functions import ParamFromFile
## Description: function Print_Success_SRV
## This function displays a message telling the user the
## revised files have been correctly received
## Author: T.Baron
## PARAMETERS: -
def Print_Success_SRV(parameters, curdir, form, user_info=None):
"""
This function simply displays a text on the screen, telling the
user the revision went fine. To be used in the Submit New File
(SRV) action.
"""
global rn
sysno = ParamFromFile("%s/%s" % (curdir, 'SN')).strip()
t = "<b>Modification completed!</b><br /><br />"
if sysno:
# If we know the URL of the document, we display it for user's convenience (RQF0800417)
url = '%s/%s/%s' % (CFG_SITE_URL, CFG_SITE_RECORD, sysno)
t = "<br /><br /><b>Document %s (<b><a href='%s'>%s</a></b>) was successfully revised.</b>" % (rn, url, url)
else:
t = "<br /><br /><b>Document %s was successfully revised.</b>" % rn
return t
|
wxgeo/geophar
|
wxgeometrie/sympy/physics/quantum/tests/test_innerproduct.py
|
Python
|
gpl-2.0
| 1,468
| 0
|
from sympy import I, Integer
from sympy.physics.quantum.innerproduct import InnerProduct
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.state import Bra, Ket, StateBase
def test_innerproduct():
k = Ket('k')
b = Bra('b')
ip = InnerProduct(b, k)
assert isinstance(ip, InnerProduct)
assert ip.bra == b
assert ip.ket == k
assert b*k == InnerProduct(b, k)
assert k*(b*k)*b == k*InnerProduct(b, k)*b
assert InnerProduct(b, k).subs(b, Dagger(k)) == Dagger(k)*k
def test_innerproduct_dagger():
k = Ket('k')
b = Bra('b')
ip = b*k
assert Dagger(ip) == Dagger(k)*Dagger(b)
class FooState(StateBase):
pass
class FooKet(Ket, FooState):
@classmethod
def dual_class(self):
return FooBra
def _eval_innerproduct_FooBra(self, bra):
return Integer(1)
def _eval_innerproduct_BarBra(self, bra):
return I
class FooBra(Bra, FooState):
@classmethod
def dual_class(self):
return FooKet
class BarState(StateBase):
pass
class BarKet(Ket, BarState):
@classmethod
def dual_class(self):
return BarBra
class BarBra(Bra, BarState):
@classmethod
def dual_class(self):
return BarKet
def test_doit():
f = FooKet('foo')
b = BarBra('bar')
assert InnerProduct(b, f).doit() == I
assert InnerProduct(Dagger(f), Dagger(b)).doit() == -I
    assert InnerProduct(Dagger(f), f).doit() == Integer(1)
|
almey/policycompass-services
|
apps/datasetmanager/__init__.py
|
Python
|
agpl-3.0
| 69
| 0
|
default_app_config = 'apps.datasetmanager.apps.datasetmanagerConfig'
|
locationlabs/confab
|
confab/jinja_filters.py
|
Python
|
apache-2.0
| 2,518
| 0.001191
|
"""
Allows custom jinja filters.
"""
### Built-in filters ###
def select(value, key):
"""
    Select a key from a dictionary.
If ``value`` is not a dictionary or ``key`` does not exist in it,
the ``value`` is returned as is.
"""
return value.get(key, value) if isinstance(value, dict) else value
def rotate(list_, pivot):
"""
Rotate a list around a pivot.
"""
try:
pos = list_.index(pivot)
except ValueError:
# pivot not in list
        return list_
else:
return list_[pos:] + list_[:pos]
def map_format(sequence, format):
"""
Apply format string on elements in sequence.
:param format: format string. can use one positional format argument, i.e. '{}' or '{0}',
which will map to elements in the sequence.
"""
return [format.format(item) for item in sequence]
def built_in_filters():
"""
Confab built-in Jinja filters.
"""
return [
select,
rotate,
map_format,
]
### End built-in filters ###
class JinjaFiltersRegistry(object):
"""
Registry of custom Jinja filters that are applied on Jinja environments
when Confab generates templates.
"""
def __init__(self):
self._filters = set(built_in_filters())
def add_filter(self, filter):
self._filters.add(filter)
def remove_filter(self, filter):
try:
self._filters.remove(filter)
except KeyError:
return False
return True
@property
def filters(self):
return {filter.__name__: filter for filter in self._filters}
def register(self, environment):
"""
Register filters on a Jinja environment object.
"""
for name, filter in self.filters.iteritems():
environment.filters[name] = filter
class JinjaFilters(object):
"""
Context manager for Jinja filters.
"""
def __init__(self, *filters):
self._filters = filters
def __enter__(self):
for filter in self._filters:
add_jinja_filter(filter)
def __exit__(self, type, value, traceback):
for filter in self._filters:
remove_jinja_filter(filter)
def add_jinja_filter(filter):
"""
Add a custom jinja filter.
"""
jinja_filters.add_filter(filter)
def remove_jinja_filter(filter):
"""
Remove a custom jinja filter.
"""
return jinja_filters.remove_filter(filter)
jinja_filters = JinjaFiltersRegistry()
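# Illustrative usage sketch (not part of upstream confab; values are made up):
if __name__ == '__main__':
    assert select({'env': 'prod'}, 'env') == 'prod'
    assert select('plain-value', 'env') == 'plain-value'
    assert rotate(['a', 'b', 'c'], 'b') == ['b', 'c', 'a']
    assert map_format(['web1', 'web2'], '{}.example.com') == \
        ['web1.example.com', 'web2.example.com']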
|
wood-galaxy/FreeCAD
|
src/Mod/Path/PathScripts/PathProfileEdges.py
|
Python
|
lgpl-2.1
| 32,805
| 0.003475
|
# -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2016 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import Path
from FreeCAD import Vector
from PathScripts import PathUtils
from PathScripts.PathUtils import depth_params
from DraftGeomUtils import findWires
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
# Qt translation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
else:
def translate(ctxt, txt):
return txt
__title__ = "Path Profile Edges Operation"
__author__ = "sliptonic (Brad Collette)"
__url__ = "http://www.freecadweb.org"
"""Path Profile object and FreeCAD command for operating on sets of edges"""
class ObjectProfile:
def __init__(self, obj):
obj.addProperty("App::PropertyLinkSubList", "Base", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","The base geometry of this toolpath"))
obj.addProperty("App::PropertyBool", "Active", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","Make False, to prevent operation from generating code"))
obj.addProperty("App::PropertyString", "Comment", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","An optional comment for this profile"))
obj.addProperty("App::PropertyString", "UserLabel", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","User Assigned Label"))
# obj.addProperty("App::PropertyEnumeration", "Algorithm", "Algorithm", "The library or algorithm used to generate the path")
# obj.Algorithm = ['OCC Native', 'libarea']
obj.addProperty("App::PropertyIntegerConstraint", "ToolNumber", "Tool", QtCore.QT_TRANSLATE_NOOP("App::Property","The tool number in use"))
obj.ToolNumber = (0, 0, 1000, 1)
obj.setEditorMode('ToolNumber', 1) # make this read only
obj.addProperty("App::PropertyString", "ToolDescription", "Tool", QtCore.QT_TRANSLATE_NOOP("App::Property","The description of the tool"))
        obj.setEditorMode('ToolDescription', 1) # make this read only
# Depth Properties
obj.addProperty("App::PropertyDistance", "ClearanceHeight", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","The height needed to clear clamps and obstructions"))
obj.addProperty("App::PropertyDistance", "SafeHeight", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Rapid Safety Height between locations"))
obj.addProperty("App::PropertyFloatConstraint", "StepDown", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Incremental Step Down of Tool"))
obj.StepDown = (1, 0.01, 1000, 0.5)
obj.addProperty("App::PropertyDistance", "StartDepth", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Starting Depth of Tool- first cut depth in Z"))
obj.addProperty("App::PropertyDistance", "FinalDepth", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Final Depth of Tool- lowest value in Z"))
# Start Point Properties
obj.addProperty("App::PropertyVector", "StartPoint", "Start Point", QtCore.QT_TRANSLATE_NOOP("App::Property","The start point of this path"))
obj.addProperty("App::PropertyBool", "UseStartPoint", "Start Point", QtCore.QT_TRANSLATE_NOOP("App::Property","make True, if specifying a Start Point"))
obj.addProperty("App::PropertyLength", "ExtendAtStart", "Start Point", QtCore.QT_TRANSLATE_NOOP("App::Property","extra length of tool path before start of part edge"))
obj.addProperty("App::PropertyLength", "LeadInLineLen", "Start Point", QtCore.QT_TRANSLATE_NOOP("App::Property","length of straight segment of toolpath that comes in at angle to first part edge"))
# End Point Properties
obj.addProperty("App::PropertyBool", "UseEndPoint", "End Point", QtCore.QT_TRANSLATE_NOOP("App::Property","make True, if specifying an End Point"))
obj.addProperty("App::PropertyLength", "ExtendAtEnd", "End Point", QtCore.QT_TRANSLATE_NOOP("App::Property","extra length of tool path after end of part edge"))
obj.addProperty("App::PropertyLength", "LeadOutLineLen", "End Point", QtCore.QT_TRANSLATE_NOOP("App::Property","length of straight segment of toolpath that comes in at angle to last part edge"))
obj.addProperty("App::PropertyVector", "EndPoint", "End Point", QtCore.QT_TRANSLATE_NOOP("App::Property","The end
|
point of this path"))
# Profile Properties
obj.addProperty("App::PropertyEnumeration", "Side", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","Side of edge that tool should cut"))
obj.Side = ['Left', 'Right', 'On'] # side of profile that cutter is on in relation to direction of profile
obj.addProperty("App::PropertyEnumeration", "Direction", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","The direction that the toolpath should go around the part ClockWise CW or CounterClockWise CCW"))
obj.Direction = ['CW', 'CCW'] # this is the direction that the profile runs
obj.addProperty("App::PropertyBool", "UseComp", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","make True, if using Cutter Radius Compensation"))
obj.addProperty("App::PropertyDistance", "RollRadius", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","Radius at start and end"))
obj.addProperty("App::PropertyDistance", "OffsetExtra", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","Extra value to stay away from final profile- good for roughing toolpath"))
obj.addProperty("App::PropertyLength", "SegLen", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","Tesselation value for tool paths made from beziers, bsplines, and ellipses"))
obj.addProperty("App::PropertyAngle", "PlungeAngle", "Profile", QtCore.QT_TRANSLATE_NOOP("App::Property","Plunge angle with which the tool enters the work piece. Straight down is 90 degrees, if set small enough or zero the tool will descent exactly one layer depth down per turn"))
obj.addProperty("App::PropertyVectorList", "locs", "Tags", QtCore.QT_TRANSLATE_NOOP("App::Property","List of holding tag locations"))
obj.addProperty("App::PropertyFloatList", "angles", "Tags", QtCore.QT_TRANSLATE_NOOP("App::Property","List of angles for the holding tags"))
obj.addProperty("App::PropertyFloatList", "heights", "Tags", QtCore.QT_TRANSLATE_NOOP("App::Property","List of angles for the holding tags"))
obj.a
|
fmance/deep-medical-ir
|
ranking/unjudged.py
|
Python
|
gpl-3.0
| 594
| 0.023569
|
import sys
sys.path.insert(0, "../utils/")
import utils
def unjudged(qrelsFile, resultsFile):
qrels = utils.readQrels(qrelsFile)
results = utils.readResults(resultsFile)
unjudged = {}
for qid in results.keys():
        qrelIds = set([did for (did, _) in qrels[qid]])
resIds = set([did for (did, _, _) in results[qid][:10]])
unjudged[qid] = len(resIds - qrelIds)
print "%d -> %d" % (qid, unjudged[qid])
print "--------------------"
totalUnjudged = sum(unjudged.values())
    return float(totalUnjudged)/(len(results.keys()) * 10)
print "unjudged=%.2f" % unjudged(sys.argv[1], sys.argv[2])
|
hazelcast/hazelcast-python-client
|
hazelcast/protocol/codec/multi_map_lock_codec.py
|
Python
|
apache-2.0
| 1,157
| 0.002593
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x021000
_REQUEST_MESSAGE_TYPE = 135168
# hex: 0x021001
_RESPONSE_MESSAGE_TYPE = 135169
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_TTL_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_TTL_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, ttl, reference_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_TTL_OFFSET, ttl)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)
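# Frame layout sketch (illustrative, derived from the offsets above): with
# LONG_SIZE_IN_BYTES == 8 the three fixed-size fields sit back to back after
# the request header:
#   REQUEST_HEADER_SIZE + 0  -> thread_id
#   REQUEST_HEADER_SIZE + 8  -> ttl
#   REQUEST_HEADER_SIZE + 16 -> reference_id
# so the initial frame is REQUEST_HEADER_SIZE + 24 bytes, and the
# variable-length name and key frames follow it.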
|
ahmad88me/PyGithub
|
github/Project.py
|
Python
|
lgpl-3.0
| 10,167
| 0.003836
|
############################ Copyrights and license ############################
# #
# Copyright 2018 bbi-yggy <yossarian@blackbirdinteractive.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.ProjectColumn
from . import Consts
class Project(github.GithubObject.CompletableGithubObject):
"""
This class represents Projects. The reference can be found here http://developer.github.com/v3/projects
"""
def __repr__(self):
return self.get__repr__({"name": self._name.value})
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def columns_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._columns_url)
return self._columns_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def creator(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def node_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def number(self):
"""
:type: integer
"""
self._completeIfNotSet(self._number)
return self._number.value
@property
def owner_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._owner_url)
return self._owner_url.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /projects/:project_id <https://developer.github.com/v3/projects/#delete-a-project>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url, headers={"Accept": Consts.mediaTypeProjectsPreview}
)
def edit(
self,
name=github.GithubObject.NotSet,
body=github.GithubObject.NotSet,
state=github.GithubObject.NotSet,
organization_permission=github.GithubObject.NotSet,
private=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /projects/:project_id <https://developer.github.com/v3/projects/#update-a-project>`_
:param name: string
:param body: string
:param state: string
:param organization_permission: string
:param private: bool
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, str), name
        assert body is github.GithubObject.NotSet or isinstance(body, str), body
        assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert organization_permission is github.GithubObject.NotSet or isinstance(
organization_permission, str
), organization_permission
assert private is github.GithubObject.NotSet or isinstance(
private, bool
), private
patch_parameters = dict()
if name is not github.GithubObject.NotSet:
patch_parameters["name"] = name
if body is not github.GithubObject.NotSet:
patch_parameters["body"] = body
if state is not github.GithubObject.NotSet:
patch_parameters["state"] = state
if organization_permission is not github.GithubObject.NotSet:
patch_parameters["organization_permission"] = organization_permission
if private is not github.GithubObject.NotSet:
patch_parameters["private"] = private
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=patch_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
self._useAttributes(data)
def get_columns(self):
"""
:calls: `GET /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#list-project-columns>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.ProjectColumn.ProjectColumn`
"""
return github.PaginatedList.PaginatedList(
            github.ProjectColumn.ProjectColumn,
            self._requester,
self.columns_url,
None,
{"Accept": Consts.mediaTypeProjectsPreview},
)
def create_column(self, name):
"""
        :calls: `POST /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string
"""
assert isinstance(name, str), name
post_parameters = {"name": name}
import_header = {"Accept": Consts.mediaTypeProjectsPreview}
headers, data = self._requester.requestJsonAndCheck(
"POST", self.url + "/columns", headers=import_header, input=post_parameters
)
return github.ProjectColumn.ProjectColumn(
self._requester, headers, data, completed=True
)
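    # Illustrative usage sketch (assumes an authenticated Github instance g):
    #
    #   project = g.get_repo("octocat/Hello-World").get_projects()[0]
    #   column = project.create_column("To do")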
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._columns_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._node_id = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
        self._owner_url = github.GithubObject.NotSet
|
razzius/PyClassLessons
|
instructors/course-2015/functions_gens_and_ducks/examples/in_class/warm_up/calc_tax.py
|
Python
|
mit
| 300
| 0.023333
|
def print_15perc_total_tax(bill):
'''return the amount of money to pay in tax in US dollars
'''
bill = float(bill)
total_tax = str( bill * .15 )
return "please graciously pay at least the amount of {} in total tax ".format(total_tax)
print print_15perc_total_tax(79)
| |
Noeud/KirkByers_Course
|
Week2/IPaddValidity_hex_bin.py
|
Python
|
unlicense
| 1,174
| 0.007666
|
#!/usr/bin/env python
import fileinput
class NotValidIP(Exception):
pass
class NotValidIPLength(Exception):
pass
while True:
try:
ip_addr = input("Enter a network IP address: ")
ip_addr_split = ip_addr.split('.')
len1 = len(ip_addr_split)
ip_addr_split = ip_addr_split[:3]
ip_addr_split.append('0')
i=0
for element in ip_addr_split:
ip_addr_split[i] = int(element)
i = i+1
i = 0
for element in ip_addr_split:
            if (element > 255 or element < 0):
raise NotValidIP
if (len1!=3 and len1!=4):
raise NotValidIPLength
print("The network IP address now is: %s" % ip_addr_split)
break
except ValueError:
print('Not a good value')
except NotValidIP:
print('this is not a valid IP address')
except NotValidIPLength:
print('this is not an IP address size')
print('%20s %20s %20s' % ('NETWORK_NUMBER', 'FIRST_OCTET_BINARY', 'FIRST_OCTET_HEX') )
a = '.'.join(str(q) for q in ip_addr_split)
b = bin(ip_addr_split[0])
c = hex(ip_addr_split[0])
print('%20s %20s %20s' % (a, b, c))
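# Worked example (illustrative): entering 192.168.1.5 keeps the first three
# octets, zeroes the fourth, and prints network 192.168.1.0 with first-octet
# binary 0b11000000 and hex 0xc0.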
|
uclouvain/osis
|
infrastructure/shared_kernel/entite/dtos.py
|
Python
|
agpl-3.0
| 1,467
| 0.000682
|
# ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
# ##############################################################################
import attr
from osis_common.ddd import interface
@attr.s(frozen=True, slots=True)
class EntiteUclDTO(interface.DTO):
sigle = attr.ib(type=str)
intitule = attr.ib(type=str)
|
nert-gu/Xposition
|
src/wiki/plugins/redlinks/mdx/redlinks.py
|
Python
|
gpl-3.0
| 3,427
| 0.000876
|
import html
from urllib.parse import urljoin
from urllib.parse import urlparse
import wiki
from django.urls import resolve
from django.urls.exceptions import Resolver404
from markdown.extensions import Extension
from markdown.postprocessors import AndSubstitutePostprocessor
from markdown.treeprocessors import Treeprocessor
from wiki.decorators import which_article
from wiki.models import Article
from wiki.models import URLPath
class LinkTreeprocessor(Treeprocessor):
def __init__(self, md, config):
super().__init__(md)
self.md = md
self.broken_class = config["broken"]
self.internal_class = config["internal"]
self.external_class = config["external"]
@property
def my_urlpath(self):
try:
return self._my_urlpath
except AttributeError:
self._my_urlpath = self.md.article.get_absolute_url()
return self._my_urlpath
    def get_class(self, el): # noqa: max-complexity 11
href = el.get("href")
if not href:
return
# The autolinker turns email links into links with many HTML entities.
# These entities are further escaped using markdown-specific codes.
# First unescape the markdown-specific, then use html.unescape.
href = AndSubstitutePostprocessor().run(href)
href = html.unescape(href)
try:
url = urlparse(href)
except ValueError:
return
if url.scheme == "mailto":
return
if url.scheme or url.netloc:
# Contains a hostname or url schema ⇒ External link
return self.external_class
# Ensure that path ends with a slash
relpath = url.path.rstrip("/") + "/"
try:
target = resolve(urljoin(self.my_urlpath, relpath))
except Resolver404:
# Broken absolute link
return self.external_class
if target.app_names != ["wiki"]:
# Links outside wiki
return self.external_class
try:
article, urlpath = which_article(**target.kwargs)
except (
wiki.core.exceptions.NoRootURL,
URLPath.DoesNotExist,
Article.DoesNotExist,
):
return self.broken_class
return self.internal_class
def run(self, doc):
for el in doc.iter():
if el.tag != "a":
continue
class_ = self.get_class(el)
if class_:
# Append class
classes = (el.get("class", "") + " " + class_).strip()
el.set("class", classes)
class LinkExtension(Extension):
TreeProcessorClass = LinkTreeprocessor
def __init__(self, *args, **kwargs):
self.config = {
"broken": ["wiki-broken", "CSS class to use for broken internal links"],
"internal": ["wiki-internal", "CSS class to use for internal links"],
"external": ["wiki-external", "CSS class to use for external links"],
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md):
md.registerExtension(self)
self.md = md
ext = self.TreeProcessorClass(md, self.getConfigs())
md.treeprocessors.add("redlinks", ext, ">inline")
def makeExtension(*args, **kwargs):
"""Return an instance of the extension."""
return LinkExtension(*args, **kwargs)
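# Illustrative usage sketch (assumed setup, not from upstream): the extension
# registers like any other Python-Markdown extension, but resolving real wiki
# targets also needs the surrounding django-wiki machinery, since
# LinkTreeprocessor reads md.article.
#
#   import markdown
#   md = markdown.Markdown(extensions=[makeExtension(internal='wiki-internal')])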
|
chrisfranklin/badasschat
|
badasschat/socketio/transports.py
|
Python
|
bsd-3-clause
| 11,277
| 0.000709
|
import gevent
import urllib
import urlparse
from geventwebsocket import WebSocketError
from gevent.queue import Empty
class BaseTransport(object):
"""Base class for all transports. Mostly wraps handler class functions."""
def __init__(self, handler, config, **kwargs):
"""Base transport class.
:param config: dict Should contain the config keys, like
``heartbeat_interval``, ``heartbeat_timeout`` and
``close_timeout``.
"""
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers = [
("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
("Access-Control-Max-Age", 3600),
]
self.handler = handler
self.config = config
def write(self, data=""):
# Gevent v 0.13
if hasattr(self.handler, 'response_headers_list'):
if 'Content-Length' not in self.handler.response_headers_list:
self.handler.response_headers.append(('Content-Length', len(data)))
self.handler.response_headers_list.append('Content-Length')
elif not hasattr(self.handler, 'provided_content_length'):
# Gevent 1.0bX
l = len(data)
self.handler.provided_content_length = l
self.handler.response_headers.append(('Content-Length', l))
self.handler.write(data)
def start_response(self, status, headers, **kwargs):
if "Content-Type" not in [x[0] for x in headers]:
headers.append(self.content_type)
headers.extend(self.headers)
self.handler.start_response(status, headers, **kwargs)
class XHRPollingTransport(BaseTransport):
def __init__(self, *args, **kwargs):
super(XHRPollingTransport, self).__init__(*args, **kwargs)
def options(self):
self.start_response("200 OK", ())
self.write()
return []
def get(self, socket):
socket.heartbeat()
heartbeat_interval = self.config['heartbeat_interval']
payload = self.get_messages_payload(socket, timeout=heartbeat_interval)
if not payload:
payload = "8::" # NOOP
self.start_response("200 OK", [])
self.write(payload)
def _request_body(self):
return self.handler.wsgi_input.readline()
def post(self, socket):
for message in self.decode_payload(self._request_body()):
socket.put_server_msg(message)
self.start_response("200 OK", [
("Connection", "close"),
("Content-Type", "text/plain")
])
self.write("1")
def get_messages_payload(self, socket, timeout=None):
"""This will fetch the messages from the Socket's queue, and if
        there are many messages, pack multiple messages in one payload and return
"""
try:
msgs = socket.get_multiple_client_msgs(timeout=timeout)
data = self.encode_payload(msgs)
except Empty:
data = ""
return data
def encode_payload(self, messages):
"""Encode list of messages. Expects messages to be unicode.
``messages`` - List of raw messages to encode, if necessary
"""
if not messages or messages[0] is None:
return ''
if len(messages) == 1:
return messages[0].encode('utf-8')
payload = u''.join([(u'\ufffd%d\ufffd%s' % (len(p), p))
for p in messages if p is not None])
# FIXME: why is it so that we must filter None from here ? How
# is it even possible that a None gets in there ?
return payload.encode('utf-8')
def decode_payload(self, payload):
"""This function can extract multiple messages from one HTTP payload.
        Sometimes, the XHR/JSONP/... transports can pack more than one message
        into a single packet. They are encoded following the WebSocket
        semantics, which need to be reproduced here to unwrap the messages.
The semantics are:
\ufffd + [length as a string] + \ufffd + [payload as a unicode string]
This function returns a list of messages, even though there is only
one.
Inspired by socket.io/lib/transports/http.js
"""
payload = payload.decode('utf-8')
if payload[0] == u"\ufffd":
ret = []
while len(payload) != 0:
len_end = payload.find(u"\ufffd", 1)
length = int(payload[1:len_end])
msg_start = len_end + 1
msg_end = length + msg_start
message = payload[msg_start:msg_end]
ret.append(message)
payload = payload[msg_end:]
return ret
return [payload]
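    # Worked example (illustrative): encode_payload([u'hello', u'abc']) yields
    # u'\ufffd5\ufffdhello\ufffd3\ufffdabc' encoded as UTF-8, and
    # decode_payload() walks those length-prefixed frames back out as
    # [u'hello', u'abc']; a payload without a leading \ufffd comes back as a
    # single message.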
def do_exchange(self, socket, request_method):
if not socket.connection_established:
# Runs only the first time we get a Socket opening
self.start_response("200 OK", [
("Connection", "close"),
])
self.write("1::") # 'connect' packet
return
elif request_method in ("GET", "POST", "OPTIONS"):
return getattr(self, request_method.lower())(socket)
else:
raise Exception("No support for the method: " + request_method)
class JSONPolling(XHRPollingTransport):
def __init__(self, handler, config):
super(JSONPolling, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/javascript; charset=UTF-8")
def _request_body(self):
data = super(JSONPolling, self)._request_body()
# resolve %20%3F's, take out wrapping d="...", etc..
data = urllib.unquote_plus(data)[3:-1] \
.replace(r'\"', '"') \
.replace(r"\\", "\\")
# For some reason, in case of multiple messages passed in one
# query, IE7 sends it escaped, not utf-8 encoded. This dirty
        # hack handles it
if data[0] == "\\":
data = data.decode("unicode_escape").encode("utf-8")
return data
def write(self, data):
"""Just quote out stuff before sending it out"""
args = urlparse.parse_qs(self.handler.environ.get("QUERY_STRING"))
if "i" in args:
i = args["i"]
else:
i = "0"
# TODO: don't we need to quote this data in here ?
super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
class XHRMultipartTransport(XHRPollingTransport):
def __init__(self, handler):
        super(XHRMultipartTransport, self).__init__(handler)
self.content_type = (
"Content-Type",
"multipart/x-mixed-replace;boundary=\"socketio\""
)
def do_exchange(self, socket, request_method):
if request_method == "GET":
return self.get(socket)
elif request_method == "POST":
return self.post(socket)
else:
raise Exception("No support for such method: " + request_method)
def get(self, socket):
header = "Content-Type: text/plain; charset=UTF-8\r\n\r\n"
self.start_response("200 OK", [("Connection", "keep-alive")])
self.write_multipart("--socketio\r\n")
self.write_multipart(header)
self.write_multipart(str(socket.sessid) + "\r\n")
self.write_multipart("--socketio\r\n")
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
# See below
else:
try:
self.write_multipart(header)
self.write_multipart(payload)
self.write_multipart("--socketio\r\n")
except socket.error:
# The client might try to reconnect,
|
gileno/curso-citi
|
djangoecommerce/settings.py
|
Python
|
cc0-1.0
| 4,345
| 0.001151
|
"""
Django settings for djangoecommerce project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qg(sw)grt&2v+++odrz%zac+h*2f@gyd*szcov1u2x$=ul%svz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# libs
'widget_tweaks',
# apps
'core',
'accounts',
'catalog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'core.middleware.LogMiddleware',
]
ROOT_URLCONF = 'djangoecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# apps
'catalog.context_processors.categories',
],
},
},
]
WSGI_APPLICATION = 'djangoecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# auth
LOGIN_REDIRECT_URL = 'accounts:index'
LOGIN_URL = 'login'
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.ModelBackend',
]
# Messages
from django.contrib.messages import constants as message_constants
MESSAGE_TAGS = {
message_constants.DEBUG: 'debug',
message_constants.INFO: 'info',
message_constants.SUCCESS: 'success',
message_constants.WARNING: 'warning',
message_constants.ERROR: 'danger',
}
try:
from .local_settings import *
except ImportError:
pass
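# Illustrative sketch (assumed file, not part of this project): a
# local_settings.py next to this module can override anything above for one
# machine, e.g.
#
#   DEBUG = False
#   ALLOWED_HOSTS = ['www.example.com']
#   SECRET_KEY = os.environ['SECRET_KEY']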
|
1905410/Misago
|
misago/threads/signals.py
|
Python
|
gpl-2.0
| 4,750
| 0.000632
|
from django.contrib.auth import get_user_model
from django.db import transaction
from django.db.models.signals import pre_delete
from django.dispatch import Signal, receiver
from misago.categories.models import Category
from misago.categories.signals import delete_category_content, move_category_content
from misago.core.pgutils import batch_delete, batch_update
from misago.users.signals import delete_user_content, username_changed
from .models import Attachment, Post, PostEdit, PostLike, Thread, Poll, PollVote
delete_post = Signal()
delete_thread = Signal()
merge_post = Signal(providing_args=["other_post"])
merge_thread = Signal(providing_args=["other_thread"])
move_post = Signal()
move_thread = Signal()
remove_thread_participant = Signal(providing_args=["user"])
"""
Signal handlers
"""
@receiver(merge_thread)
def merge_threads_posts(sender, **kwargs):
other_thread = kwargs['other_thread']
other_thread.post_set.update(category=sender.category, thread=sender)
@receiver(merge_post)
def merge_posts(sender, **kwargs):
other_post = kwargs['other_post']
for user in sender.mentions.iterator():
other_post.mentions.add(user)
@receiver(move_thread)
def move_thread_content(sender, **kwargs):
Post.objects.filter(thread=sender).update(category=sender.category)
PostEdit.objects.filter(thread=sender).update(category=sender.category)
PostLike.objects.filter(thread=sender).update(category=sender.category)
Poll.objects.filter(thread=sender).update(category=sender.category)
PollVote.objects.filter(thread=sender).update(category=sender.category)
@receiver(delete_category_content)
def delete_category_threads(sender, **kwargs):
sender.thread_set.all().delete()
sender.post_set.all().delete()
@receiver(move_category_content)
def move_category_threads(sender, **kwargs):
new_category = kwargs['new_category']
Thread.objects.filter(category=sender).update(category=new_category)
Post.objects.filter(category=sender).update(category=new_category)
PostEdit.objects.filter(category=sender).update(category=new_category)
PostLike.objects.filter(category=sender).update(category=new_category)
Poll.objects.filter(category=sender).update(category=new_category)
PollVote.objects.filter(category=sender).update(category=new_category)
@receiver(delete_user_content)
def delete_user_threads(sender, **kwargs):
recount_categories = set()
recount_threads = set()
for thread in batch_delete(sender.thread_set.all(), 50):
recount_categories.add(thread.category_id)
with transaction.atomic():
thread.delete()
for post in batch_delete(sender.post_set.all(), 50):
recount_categories.add(post.category_id)
recount_threads.add(post.thread_id)
with transaction.atomic():
post.delete()
if recount_threads:
changed_threads_qs = Thread.objects.filter(id__in=recount_threads)
for thread in batch_update(changed_threads_qs, 50):
thread.synchronize()
thread.save()
if recount_categories:
for category in Category.objects.filter(id__in=recount_categories):
category.synchronize()
category.save()
@receiver(username_changed)
def update_usernames(sender, **kwargs):
Thread.objects.filter(starter=sender).update(
starter_name=sender.username,
starter_slug=sender.slug
)
Thread.objects.filter(last_poster=sender).update(
last_poster_name=sender.username,
last_poster_slug=sender.slug
)
Post.objects.filter(poster=sender).update(poster_name=sender.username)
Post.objects.filter(last_editor=sender).update(
last_editor_name=sender.username,
last_editor_slug=sender.slug
)
PostEdit.objects.filter(editor=sender).update(
editor_name=sender.username,
editor_slug=sender.slug
)
PostLike.objects.filter(user=sender).update(
user_name=sender.username,
user_slug=sender.slug
)
Attachment.objects.filter(uploader=sender).update(
uploader_name=sender.username,
uploader_slug=sender.slug
)
Poll.objects.filter(poster=sender).update(
poster_name=sender.username,
poster_slug=sender.slug
)
PollVote.objects.filter(voter=sender).update(
voter_name=sender.username,
voter_slug=sender.slug
)
@receiver(pre_delete, sender=get_user_model())
def remove_unparticipated_private_threads(sender, **kwargs):
threads_qs = kwargs['instance'].private_thread_set.all()
for thread in batch_update(threads_qs, 50):
if thread.participants.count() == 1:
with transaction.atomic():
thread.delete()
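# Illustrative sketch (made-up objects): code that merges two threads fires
# the signals above so the handlers move posts and mentions, e.g.
#
#   merge_thread.send(sender=target_thread, other_thread=source_thread)
#   merge_post.send(sender=target_post, other_post=source_post)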
|
chriscauley/django-devserver
|
devserver/models.py
|
Python
|
bsd-3-clause
| 1,050
| 0.004762
|
from django.core import exceptions
from devserver.logger import GenericLogger
MODULES = []
def load_modules():
global MODULES
MODULES = []
from devserver import settings
for path in settings.DEVSERVER_MODULES:
try:
name, class_name = path.rsplit('.', 1)
except ValueError:
            raise exceptions.ImproperlyConfigured, '%s isn\'t a devserver module' % path
try:
module = __import__(name, {}, {}, [''])
except ImportError, e:
raise exceptions.ImproperlyConfigured, 'Error importing devserver module %s: "%s"' % (name, e)
try:
            cls = getattr(module, class_name)
except AttributeError:
raise exceptions.ImproperlyConfigured, 'Error importing devserver module "%s" does not define a "%s" class' % (name, class_name)
try:
instance = cls(GenericLogger(cls))
except:
raise # Bubble up problem loading panel
MODULES.append(instance)
if not MODULES:
load_modules()
|
Phil-LiDAR2-Geonode/pl2-geonode
|
geonode/catalogue/backends/geonetwork.py
|
Python
|
gpl-3.0
| 1,161
| 0
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.catalogue.backends.generic import CatalogueBackend \
as GenericCatalogueBackend
class CatalogueBackend(GenericCatalogueBackend):
"""GeoNetwork CSW Backend"""
def __init__(self, *args, **kwargs):
super(CatalogueBackend, self).__init__(*args, **kwargs)
self.catalogue.formats = ['Dublin Core', 'ISO']
|
shopkick/flawless
|
flawless/client/client.py
|
Python
|
mpl-2.0
| 9,588
| 0.002607
|
#!/usr/bin/env python
#
# Copyright (c) 2011-2013, Shopkick Inc.
# All rights reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ---
# Author: John Egan <jwegan@gmail.com>
import functools
import hashlib
import linecache
import logging
import math
import os.path
import random
import re
import socket
import sys
import time
import traceback
import warnings
from future.utils import raise_
from thrift.Thrift import TException
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
import flawless.lib.config
from flawless.lib.data_structures.lru_cache import ExpiringLRUCache
import flawless.server.api.ttypes as api_ttypes
from flawless.server.api import Flawless
log = logging.getLogger(__name__)
config = flawless.lib.config.get()
MAX_VARIABLE_REPR = 250
MAX_LOCALS = 100
NUM_FRAMES_TO_SAVE = 20
HOSTPORT_INFO = list()
SCRUBBED_VARIABLES_REGEX = None
CACHE_ERRORS_AFTER_N_OCCURRENCES = 10
REPORT_AFTER_N_MILLIS = 10 * 60 * 1000 # 10 minutes
LRU_CACHE_SIZE = 200
ERROR_CACHE = ExpiringLRUCache(size=LRU_CACHE_SIZE)
class HostportInfo(object):
def __init__(self, hostport):
host, port = hostport.split(":")
self.host = host
self.port = int(port)
self.backoff_ms = 0
self.consecutive_connection_errors = 0
def increment_backoff(self):
        self.consecutive_connection_errors = min(12, self.consecutive_connection_errors + 1)
backoff = 1000 * random.randint(1, 2 ** self.consecutive_connection_errors)
self.backoff_ms = _get_epoch_ms() + backoff
def decrement_backoff(self):
self.consecutive_connection_errors = int(self.consecutive_connection_errors / 2)
if self.consecutive_connection_errors > 0:
backoff = 1000 * random.randint(1, 2 ** self.consecutive_connection_errors)
self.backoff_ms = _get_epoch_ms() + backoff
class CachedErrorInfo(object):
def __init__(self):
self.last_report_ts = _get_epoch_ms()
self.last_occur_ts = _get_epoch_ms()
self.curr_count = 0
self.last_report_count = 0
@classmethod
def get_hash_key(cls, stack_lines):
m = hashlib.md5()
for line in stack_lines:
m.update(line.filename.encode('utf8'))
m.update(str(line.line_number).encode('utf8'))
return m.digest()
def increment(self):
self.last_occur_ts = _get_epoch_ms()
self.curr_count += 1
def mark_reported(self):
self.last_report_ts = _get_epoch_ms()
diff = self.curr_count - self.last_report_count
self.last_report_count = self.curr_count
return diff
def should_report(self):
report_conditions = list()
report_conditions.append(self.curr_count <= CACHE_ERRORS_AFTER_N_OCCURRENCES)
report_conditions.append(self.last_report_ts < (_get_epoch_ms() - REPORT_AFTER_N_MILLIS))
log_count = math.log(self.curr_count, 2)
report_conditions.append(int(log_count) == log_count)
return any(report_conditions)
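    # Worked example (illustrative): taken together, the conditions report an
    # error on each of its first CACHE_ERRORS_AFTER_N_OCCURRENCES occurrences,
    # then only on power-of-two occurrence counts (16, 32, 64, ...) or once
    # REPORT_AFTER_N_MILLIS has passed since the last report.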
def _get_epoch_ms():
return int(time.time() * 1000)
def set_hostports(hostports):
if type(hostports) not in [tuple, list]:
raise ValueError("hostports must be a list or tuple")
global HOSTPORT_INFO
HOSTPORT_INFO = [HostportInfo(hp) for hp in hostports]
def install_scrubbers(variables_regex):
global SCRUBBED_VARIABLES_REGEX
SCRUBBED_VARIABLES_REGEX = re.compile(variables_regex)
def _get_backend_host():
if config.flawless_hostports and not HOSTPORT_INFO:
set_hostports(config.flawless_hostports)
return random.choice(HOSTPORT_INFO) if HOSTPORT_INFO else None
def _get_service():
hostport_info = _get_backend_host()
if not hostport_info:
warnings.warn("Unable to record error: flawless server hostport not set", RuntimeWarning)
return None, None, None
tsocket = TSocket.TSocket(hostport_info.host, hostport_info.port)
tsocket.setTimeout(2000) # 2 second timeout
transport = TTransport.TFramedTransport(tsocket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Flawless.Client(protocol)
return client, transport, hostport_info
def _send_request(req):
    # Try to send the request. If there are too many connection errors, back off
client, transport, hostport_info = _get_service()
try:
if all([client, transport, hostport_info]) and _get_epoch_ms() >= hostport_info.backoff_ms:
transport.open()
client.record_error(req)
hostport_info.decrement_backoff()
except TException as e:
hostport_info.increment_backoff()
log.exception(e)
finally:
if transport and transport.isOpen():
transport.close()
def _myrepr(var_name, value):
try:
if SCRUBBED_VARIABLES_REGEX and SCRUBBED_VARIABLES_REGEX.match(var_name):
return '**scrubbed**'
repr_str = repr(value)
return repr_str[:MAX_VARIABLE_REPR] + "..." * int(len(repr_str) > MAX_VARIABLE_REPR)
except:
return "Exception executing repr for this field"
def record_error(hostname, exc_info, preceding_stack=None, error_threshold=None, additional_info=None):
''' Helper function to record errors to the flawless backend '''
stack = []
exc_type, exc_value, sys_traceback = exc_info
while sys_traceback is not None:
stack.append(sys_traceback)
sys_traceback = sys_traceback.tb_next
stack_lines = []
for row in preceding_stack or []:
stack_lines.append(
api_ttypes.StackLine(filename=os.path.abspath(row[0]), line_number=row[1],
function_name=row[2], text=row[3])
)
for index, tb in enumerate(stack):
filename = tb.tb_frame.f_code.co_filename
func_name = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno
line = linecache.getline(filename, lineno, tb.tb_frame.f_globals)
frame_locals = None
if index >= (len(stack) - NUM_FRAMES_TO_SAVE):
            # Include some limits on max string length & number of variables to keep things from getting
            # out of hand
            frame_locals = dict((k, _myrepr(k, v)) for k, v in
list(tb.tb_frame.f_locals.items())[:MAX_LOCALS] if k != "self")
if "self" in tb.tb_frame.f_locals and hasattr(tb.tb_frame.f_locals["self"], "__dict__"):
frame_locals.update(dict(("self." + k, _myrepr(k, v)) for k, v in
list(tb.tb_frame.f_locals["self"].__dict__.items())[:MAX_LOCALS]
if k != "self"))
stack_lines.append(
api_ttypes.StackLine(filename=os.path.abspath(filename), line_number=lineno,
function_name=func_name, text=line, frame_locals=frame_locals)
)
# Check LRU cache & potentially do not send error report if this client has already reported this error
# several times.
key = CachedErrorInfo.get_hash_key(stack_lines)
info = ERROR_CACHE.get(key) or CachedErrorInfo()
info.increment()
ERROR_CACHE[key] = info
if info.should_report():
error_count = info.mark_reported()
_send_request(
api_ttypes.RecordErrorRequest(
traceback=stack_lines,
exception_message=repr(exc_value),
exception_type=exc_type.__module__ + "." + exc_type.__name__,
hostname=hostname,
error_threshold=error_threshold,
additional_info=additional_info,
error_count=error_count,
)
)
def _safe_wrap(func):
safe_attrs = [attr for attr in functools.WRAPPER_ASSIGNMENTS if hasattr(func, attr)]
return functools.wraps(func, safe_attrs)
def _wrap_function_with_error_decorator(func,
save_current_stack_trace=True,
|
michaelflowersky/pressmess
|
blog/processors.py
|
Python
|
bsd-3-clause
| 1,181
| 0.005085
|
# -*- coding: utf-8 -*-
#
# PressMess processors.py.
# This file contains template processor functions.
# Copyright (C) 2013 Michał Kwiatkowski <michaelflowersky at gmail dot com>
# This file is released under the BSD license, see the COPYING file
from django.conf import settings
def disqus(request):
"""
Hands over DISQUS shortname to template context.
"""
return {'disqus_shortname': settings.DISQUS_SHORTNAME}
def pressmess(request):
"""
Hands over PRESSMESS_TITLE, PRESSMESS_URL
and PRESSMESS_HEADER to template context.
"""
return {
'pressmess_title': settings.PRESSMESS_TITLE,
'pressmess_header': settings.PRESSMESS_HEADER,
'pressmess_url': settings.PRESSMESS_URL,
}
def meta(request):
"""
Hands over:
    META_AUTHOR, META_DESCRIPTION_INDEX, META_DESCRIPTION_TAGS,
    META_LANGUAGE and META_KEYWORDS
to template context.
"""
return {
'meta_author': settings.META_AUTHOR,
'meta_description_index': settings.META_DESCRIPTION_INDEX,
        'meta_description_tags': settings.META_DESCRIPTION_TAGS,
'meta_language': settings.META_LANGUAGE,
'meta_keywords': settings.META_KEYWORDS,
}
|
zwChan/VATEC
|
~/eb-virt/Lib/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
|
Python
|
apache-2.0
| 3,764
| 0.000531
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
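# Illustrative usage sketch (assumed input bytes): probers are normally driven
# by chardet's UniversalDetector, but can be fed directly:
#
#   prober = SJISProber()
#   prober.feed(some_shift_jis_bytes)
#   if prober.get_state() == constants.eFoundIt:
#       print prober.get_charset_name(), prober.get_confidence()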
|
OCA/stock-logistics-workflow
|
delivery_total_weight_from_packaging/wizard/__init__.py
|
Python
|
agpl-3.0
| 38
| 0
|
from . import choose_delivery_package
|
JohnGarbutt/TaskFlow
|
setup.py
|
Python
|
apache-2.0
| 1,330
| 0
|
#!/usr/bin/env python
import os
import setuptools
def read_requires(base):
path = os.path.join('tools', base)
requires = []
if not os.path.isfile(path):
return requires
with open(path, 'rb') as h:
        for line in h.read().splitlines():
line = line.strip()
if len(line) == 0 or line.startswith("#"):
continue
requires.append(line)
return requires
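# Worked example (illustrative, made-up file contents): a tools/pip-requires
# such as
#
#   # core dependencies
#   anyjson>=0.3.3
#   six
#
# yields ['anyjson>=0.3.3', 'six']; blank lines and comment lines are skipped.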
setuptools.setup(
name='taskflow',
version='0.0.1',
author='OpenStack',
license='Apache Software License',
description='Taskflow structured state management library.',
long_description='The taskflow library provides core functionality that '
'can be used to build [resumable, reliable, '
'easily understandable, ...] highly available '
'systems which process workflows in a structured manner.',
author_email='openstack-dev@lists.openstack.org',
url='http://www.openstack.org/',
tests_require=read_requires('test-requires'),
install_requires=read_requires('pip-requires'),
classifiers=[
        'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6', ],
)
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/conf/locale/sq/formats.py
|
Python
|
mit
| 688
| 0
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g.i.A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
pschanely/gunicorn
|
gunicorn/app/pasterapp.py
|
Python
|
mit
| 5,258
| 0.004755
|
# -*- coding: utf-8 -*-
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import logging
import os
import pkg_resources
import sys
import ConfigParser
from paste.deploy import loadapp, loadwsgi
SERVER = loadwsgi.SERVER
from gunicorn.app.base import Application
from gunicorn.config import Config
class PasterBaseApplication(Application):
def app_config(self):
        cx = loadwsgi.loadcontext(SERVER, self.cfgurl, relative_to=self.relpath)
gc, lc = cx.global_conf.copy(), cx.local_conf.copy()
cfg = {}
host, port = lc.pop('host', ''), lc.pop('port', '')
if host and port:
            cfg['bind'] = '%s:%s' % (host, port)
elif host:
cfg['bind'] = host
cfg['workers'] = int(lc.get('workers', 1))
cfg['umask'] = int(lc.get('umask', 0))
cfg['default_proc_name'] = gc.get('__file__')
for k, v in gc.items():
if k not in self.cfg.settings:
continue
cfg[k] = v
for k, v in lc.items():
if k not in self.cfg.settings:
continue
cfg[k] = v
return cfg
def configure_logging(self):
if hasattr(self, "cfgfname"):
self.logger = logging.getLogger('gunicorn')
# from paste.script.command
parser = ConfigParser.ConfigParser()
parser.read([self.cfgfname])
if parser.has_section('loggers'):
if sys.version_info >= (2, 6):
from logging.config import fileConfig
else:
# Use our custom fileConfig -- 2.5.1's with a custom Formatter class
# and less strict whitespace (which were incorporated into 2.6's)
from gunicorn.logging_config import fileConfig
config_file = os.path.abspath(self.cfgfname)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
return
super(PasterBaseApplication, self).configure_logging()
class PasterApplication(PasterBaseApplication):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application name specified.")
cfgfname = os.path.normpath(os.path.join(os.getcwd(), args[0]))
cfgfname = os.path.abspath(cfgfname)
if not os.path.exists(cfgfname):
parser.error("Config file not found: %s" % cfgfname)
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
sys.path.insert(0, self.relpath)
pkg_resources.working_set.add_entry(self.relpath)
return self.app_config()
def load(self):
return loadapp(self.cfgurl, relative_to=self.relpath)
class PasterServerApplication(PasterBaseApplication):
def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
self.cfg = Config()
self.app = app
self.callable = None
gcfg = gcfg or {}
cfgfname = gcfg.get("__file__")
if cfgfname is not None:
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
cfg = kwargs.copy()
if port and not host.startswith("unix:"):
bind = "%s:%s" % (host, port)
else:
bind = host
cfg["bind"] = bind
if gcfg:
for k, v in gcfg.items():
cfg[k] = v
cfg["default_proc_name"] = cfg['__file__']
try:
for k, v in cfg.items():
if k.lower() in self.cfg.settings and v is not None:
self.cfg.set(k.lower(), v)
except Exception, e:
sys.stderr.write("\nConfig error: %s\n" % str(e))
sys.stderr.flush()
sys.exit(1)
self.configure_logging()
def load_config(self):
if not hasattr(self, "cfgfname"):
return
cfg = self.app_config()
for k,v in cfg.items():
try:
self.cfg.set(k.lower(), v)
except:
sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
raise
def load(self):
if hasattr(self, "cfgfname"):
return loadapp(self.cfgurl, relative_to=self.relpath)
return self.app
def run():
"""\
    The ``gunicorn_paster`` command for launching Paster compatible
    applications like Pylons or Turbogears2
"""
from gunicorn.app.pasterapp import PasterApplication
PasterApplication("%prog [OPTIONS] pasteconfig.ini").run()
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
"""\
A paster server.
    The entry point in your paster ini file should look like this:
[server:main]
use = egg:gunicorn#main
host = 127.0.0.1
port = 5000
"""
from gunicorn.app.pasterapp import PasterServerApplication
PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
|
ahmadiga/min_edx
|
cms/djangoapps/contentstore/management/commands/tests/test_reindex_library.py
|
Python
|
agpl-3.0
| 6,713
| 0.004171
|
""" Tests for library reindex command """
import ddt
from django.core.management import call_command, CommandError
import mock
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from common.test.utils import nostderr
from xmodule.modulestore.tests.factories import CourseFactory, LibraryFactory
from opaque_keys import InvalidKeyError
from contentstore.management.commands.reindex_library import Command as ReindexCommand
from contentstore.courseware_index import SearchIndexingError
@ddt.ddt
class TestReindexLibrary(ModuleStoreTestCase):
""" Tests for library reindex command """
def setUp(self):
""" Setup method - create libraries and courses """
super(TestReindexLibrary, self).setUp()
self.store = modulestore()
self.first_lib = LibraryFactory.create(
org="test", library="lib1", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
self.second_lib = LibraryFactory.create(
org="test", library="lib2", display_name="run2", default_store=ModuleStoreEnum.Type.split
)
self.first_course = CourseFactory.create(
org="test", course="course1", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
self.second_course = CourseFactory.create(
org="test", course="course2", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
REINDEX_PATH_LOCATION = 'contentstore.management.commands.reindex_library.LibrarySearchIndexer.do_library_reindex'
MODULESTORE_PATCH_LOCATION = 'contentstore.management.commands.reindex_library.modulestore'
YESNO_PATCH_LOCATION = 'contentstore.management.commands.reindex_library.query_yes_no'
def _get_lib_key(self, library):
""" Get's library key as it is passed to indexer """
return library.location.library_key
def _build_calls(self, *libraries):
""" BUilds a list of mock.call instances representing calls to reindexing method """
return [mock.call(self.store, self._get_lib_key(lib)) for lib in libraries]
def test_given_no_arguments_raises_command_error(self):
""" Test that raises CommandError for incorrect arguments """
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* requires one or more arguments .*"):
call_command('reindex_library')
@ddt.data('qwerty', 'invalid_key', 'xblock-v1:qwe+rty')
def test_given_invalid_lib_key_raises_not_found(self, invalid_key):
""" Test that raises InvalidKeyError for invalid keys """
with self.assertRaises(InvalidKeyError):
call_command('reindex_library', invalid_key)
def test_given_course_key_raises_command_error(self):
""" Test that raises CommandError if course key is passed """
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
call_command('reindex_library', unicode(self.first_course.id))
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
call_command('reindex_library', unicode(self.second_course.id))
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* is not a library key"):
call_command(
'reindex_library',
unicode(self.second_course.id),
unicode(self._get_lib_key(self.first_lib))
)
def test_given_id_list_indexes_libraries(self):
""" Test that reindexes libraries when given single library key or a list of library keys """
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_library', unicode(self._get_lib_key(self.first_lib)))
self.assertEqual(patched_index.mock_calls, self._build_calls(self.first_lib))
patched_index.reset_mock()
call_command('reindex_library', unicode(self._get_lib_key(self.second_lib)))
self.assertEqual(patched_index.mock_calls, self._build_calls(self.second_lib))
patched_index.reset_mock()
call_command(
'reindex_library',
unicode(self._get_lib_key(self.first_lib)),
unicode(self._get_lib_key(self.second_lib))
)
expected_calls = self._build_calls(self.first_lib, self.second_lib)
self.assertEqual(patched_index.mock_calls, expected_calls)
def test_given_all_key_prompts_and_reindexes_all_libraries(self):
""" Test that reindexes all libraries when --all key is given and confirmed """
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = True
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_library', all=True)
patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
expected_calls = self._build_calls(self.first_lib, self.second_lib)
self.assertItemsEqual(patched_index.mock_calls, expected_calls)
def test_given_all_key_prompts_and_reindexes_all_libraries_cancelled(self):
""" Test that does not reindex anything when --all key is given and cancelled """
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = False
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_library', all=True)
patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
patched_index.assert_not_called()
def test_fail_fast_if_reindex_fails(self):
""" Test that fails on first reindexing exception """
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index:
patched_index.side_effect = SearchIndexingError("message", [])
with self.assertRaises(SearchIndexingError):
call_command('reindex_library', unicode(self._get_lib_key(self.second_lib)))
|
magnastrazh/NEUCOGAR
|
nest/serotonin/research/C/nest-2.10.0/examples/nest/gap_junction/two_neurons.py
|
Python
|
gpl-2.0
| 2,414
| 0.008285
|
# -*- coding: utf-8 -*-
#
# two_neurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
This is a simple example of two hh_psc_alpha_gap neurons connected
by a gap-junction. Please note that gap junctions are two-way connections:
In order to create an accurate gap-junction connection between two
neurons i and j, two connections are required.
"""
import nest
import pylab
import numpy
nest.ResetKernel()
nest.SetKernelStatus({'resolution': 0.05})
nest.SetKernelStatus({'max_num_prelim_iterations': 15, 'prelim_interpolation_order': 3, 'prelim_tol': 0.0001})
neuron = nest.Create('hh_psc_alpha_gap',2)
vm = nest.Create('voltmeter', params={ "to_file": False, 'withgid': True, 'withtime': True, 'interval': 0.1})
nest.SetStatus(neuron, {'I_e': 100.})
nest.SetStatus([neuron[0]], {'V_m': -10.})
nest.Connect(vm, neuron, 'all_to_all')
"""
Use 'all_to_all' to connect neurons.
This is equivalent to:
nest.Connect([neuron[0]],[neuron[1]], 'one_to_one', syn_spec={'model': 'gap_junction', 'weight': 0.5})
nest.Connect([neuron[1]],[neuron[0]], 'one_to_one', syn_spec={'model': 'gap_junction', 'weight': 0.5})
"""
nest.Connect(neuron,neuron, 'all_to_all', syn_spec={'model': 'gap_junction', 'weight': 0.5})
nest.Simulate(351.)
senders_vm = nest.GetStatus(vm, 'events')[0]['senders']
times_vm = nest.GetStatus(vm, 'events')[0]['times']
V_vm = nest.GetStatus(vm, 'events')[0]['V_m']
V = [[] for i in range(2)]
times = [[] for i in range(2)]
for i in range(len(senders_vm)):
V[senders_vm[i]-1].append(V_vm[i])
times[senders_vm[i]-1].append(times_vm[i])
V = numpy.array(V)
times = numpy.array(times)
pylab.figure(1)
pylab.plot(times[0,:],V[0,:],'r-')
pylab.plot(times[0,:],V[1,:],'g-')
pylab.xlabel('time (ms)')
pylab.ylabel('membrane potential (mV)')
pylab.show()
|
KrozekGimVic/2013-Fraktali
|
main.py
|
Python
|
mit
| 2,613
| 0.001914
|
from tkinter import *
from PIL import Image, ImageTk
from mandelbrot import *
from julia_set import *
class App(object):
def __init__(self, master):
# CANVAS
self.ulx, self.uly, self.drx, self.dry, self.def_width = default_settings()[
:5]
self.image = ImageTk.PhotoImage(make_fractal(*default_settings()))
self.canvas = Canvas(master, width=self.image.width(),
height=self.image.height())
self.canvas.grid(column=2, row=1)
self.canvas.create_image(0, 0, image=self.image, anchor=NW)
self.canvas.bind('<ButtonPress-1>', self.press)
self.canvas.bind('<ButtonRelease-1>', self.release)
self.canvas.bind('<B1-Motion>', self.motion)
# ITERATIONS
        self.iterval = IntVar(value=50)
        self.iterslider = Scale(master, from_=0, to=2000, variable=self.iterval,
orient=HORIZONTAL, length=250)
self.iterslider.grid(row=1, column=1)
self.iterslider.bind('<ButtonRelease-1>', self.update_image)
def press(self, event):
self.sx, self.sy = event.x, event.y
def release(self, event):
self.ex, self.ey = event.x, event.y
if self.ex == self.sx or self.ey == self.sy:
return
self.sx, self.ex = sorted([self.ex, self.sx])
self.sy, self.ey = sorted([self.ey, self.sy])
sysw = self.drx - self.ulx
sysh = self.uly - self.dry
imw, imh = self.image.width(), self.image.height()
oldx, oldy = self.ulx, self.dry
self.ulx = oldx + self.sx/imw*sysw
self.uly = oldy + self.ey/imh*sysh
self.drx = oldx + self.ex/imw*sysw
self.dry = oldy + self.sy/imh*sysh
self.update_image()
def motion(self, event):
if self.sx == -1:
return
ex, ey = event.x, event.y
try:
self.canvas.delete(self.rect)
except:
pass
finally:
self.rect = self.canvas.create_rectangle((self.sx, self.sy, ex, ey), fill='',
outline='white')
def update_image(self, *args):
img = make_fractal(self.ulx, self.uly, self.drx, self.dry, self.def_width,
self.iterval.get())
self.image = ImageTk.PhotoImage(img)
self.canvas.config(width=self.image.width(),
height=self.image.height())
self.canvas.create_image(0, 0, image=self.image, anchor=NW)
root = Tk()
root.wm_title("Fractal Explorer")
app = App(root)
root.mainloop()
|
jabbalaci/jabbapylib
|
tests/distance/test_dist.py
|
Python
|
gpl-3.0
| 358
| 0.00838
|
from jabbapylib.distance.dist import lev_dist, ham_dist, similarity
def test_lev_dist():
assert lev_dist('ag-tcc', 'cgctca') == 3
assert lev_dist('GUMBO', 'GAMBOL') == 2
assert lev_dist('Google', 'Yahoo!') == 6
def test_ham_dist():
assert ham_dist('toned', 'roses') == 3
def test_similarity():
assert similarity('toned', 'roses') == 2
|
uny11/SE-Bigdata
|
bigdata.py
|
Python
|
gpl-3.0
| 43,107
| 0.008287
|
# Copyright (C) 2017, Isaac Porta "uny11"
#
# This file is part of SE-Bigdata.
#
# SE-Bigdata is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SE-Bigdata is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SE-Bigdata. If not, see <http://www.gnu.org/licenses/>.
from chpp import CHPPhelp
import bbdd
import send
from datetime import datetime, timedelta
import xml.etree.ElementTree as ET
import sqlite3
import os
from colorama import init, Fore, Back, Style
# Print welcome banner
init(autoreset=True)
print('\n')
print(Fore.GREEN + Back.BLACK + '''SE-BIGDATA v1.0.1''')
print('Copyright (C) 2017, "uny11"\nEste sencillo programa es software libre bajo la licencia GPL-v3')
print('\n')
print(Fore.GREEN + 'Bienvenido y Gracias por participar en este estudio!')
print('y no dudes en reportar algun fallo o duda (uny11)\n')
# Initialize the SE-Bigdata database
bbddauth = 'auth.sqlite'
basedatos = 'bigdata.sqlite'
bbdd.init_base(basedatos, bbddauth)
# Initialize keys and helper functions to access the CHPP resources of the Hattrick API
helper = CHPPhelp()
# Check whether the app already has the user's CHPP authorization
conn = sqlite3.connect(bbddauth)
cur = conn.cursor()
try:
cur.execute('SELECT key FROM keys WHERE id = 1')
test = cur.fetchone()[0]
# Test OK: nothing to do
except:
# Test failed -> launch the authorization process
print('\n')
print('Antes de nada, es necesario tu autorizacion-CHPP para recoger datos de www.hattrick.org')
print('Sigue las instruciones: \n')
helper.get_auth(bbddauth)
# Retrieve tokens, user, and the user's teams
cur.execute('SELECT key FROM keys WHERE id = 1 LIMIT 1')
user_key = cur.fetchone()[0]
cur.execute('SELECT key FROM keys WHERE id = 2 LIMIT 1')
user_secret = cur.fetchone()[0]
cur.execute('SELECT descripcion FROM info WHERE id = 1 LIMIT 1')
user = cur.fetchone()[0]
cur.execute('SELECT idHT,descripcion FROM info WHERE id > 1 LIMIT 3')
listaEquiposID = []
listaEquiposNombre = []
for row in cur:
listaEquiposID.append(row[0])
listaEquiposNombre.append(row[1])
cur.close()
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute( 'SELECT max(MatchDate) FROM partidos')
fechamax = cur.fetchone()[0]
cur.close()
# Launch the application MENU
while True:
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat('bigdata.sqlite')
size = size/1024
print('\n')
print(Fore.GREEN + 'Que quieres hacer', Fore.YELLOW + Style.BRIGHT+str(user),Fore.GREEN + '?\n')
if fechamax == None: fechamax = 'Ningun partido en la base'
print(' 1.- Recuperar datos de Hattrick - Ultimo partido recuperado: '+Fore.GREEN+fechamax)
print(' 2.- Enviar datos al servidor para enriquecer el estudio')
print(' 3.- Ver tus estadisticas generales')
print(' 4.- Salir\n')
opcion = input('(por defecto 4) >> ')
if opcion == '1':
# Step 0 - Check whether the database holds any matches and, if so, get the date of the latest one
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
if fechamax == 'Ningun partido en la base': fechamax = datetime(2017,12,20,0,0,0)
# fechamax = datetime.today() - timedelta(days=90)
cur.close()
# Step 1 - Retrieve the list of new matches
print('\n')
print('Buscando partidos en www.hattrick.org... ')
print('Paciencia, puede tardar un poco (sobretodo la primera vez)..\n')
num = 0
for team in listaEquiposID:
print('Para tu equipo <',Fore.YELLOW + Style.BRIGHT + str(listaEquiposNombre[num]),'>')
listaPartidos = bbdd.new_partidos(helper, basedatos, user_key, user_secret, fechamax, team)
# Step 1.2 - Retrieve details of the new matches for each team
if len(listaPartidos) > 0:
print('Recuperando datos de los ',Back.WHITE + Fore.BLACK + Style.BRIGHT + str(len(listaPartidos)), Style.RESET_ALL + ' partidos nuevos de www.hattrick.org...')
for partido in listaPartidos:
# match details, lineup and substitutions
bbdd.get_partido(helper, basedatos, user_key, user_secret, partido)
else:
None
# Step 1.3 - Retrieve the skills of players involved in events
if len(listaPartidos) > 0:
print('Recuperando habilidades de tus jugadores implicados en eventos de los ',Back.WHITE + Fore.BLACK + Style.BRIGHT + str(len(listaPartidos)), Style.RESET_ALL + ' partidos nuevos encontrados.. \n')
for partido in listaPartidos:
# player skills
bbdd.get_habilidades(helper, basedatos, user_key, user_secret, partido)
else:
None
num = num + 1
# Step 2: Retrieve goalkeepers
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute('SELECT DISTINCT count(a.MatchID) FROM partidos as a LEFT JOIN porteros as b ON a.MatchID=b.MatchID WHERE b.MatchID is null')
try:
PartidosSinPorteros = cur.fetchone()[0]
print(str(PartidosSinPorteros) + ' partidos sin las habilidades de los porteros recuperadas\n')
except:
None
cur.close()
cur = conn.cursor()
ListaPartidosPorterosFaltantes = []
cur.execute('SELECT DISTINCT a.MatchID FROM partidos as a LEFT JOIN porteros as b ON a.MatchID=b.MatchID WHERE b.MatchID is null')
for row in cur:
ListaPartidosPorterosFaltantes.append(row[0])
cur.close()
porterosfaltantes = int(PartidosSinPorteros)*2
print('Recuperando las habilidades de los '+Back.WHITE + Fore.BLACK + Style.BRIGHT + str(porterosfaltantes), Style.RESET_ALL+' porteros que faltan en la base de datos.\n')
for partido in ListaPartidosPorterosFaltantes:
bbdd.get_porteros(helper, basedatos, user_key, user_secret, partido)
print(Fore.GREEN + 'SE-Bigdata está ahora actualizada!!')
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute( 'SELECT max(MatchDate) FROM partidos')
fechamax = cur.fetchone()[0]
cur.close()
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute('UPDATE eventos SET SubSpecialty=(SELECT Specialty FROM jugadores WHERE PlayerID=eventos.SubjectPlayerID) WHERE SubSpecialty=-99')
conn.commit()
cur.close()
elif opcion == '2':
# Retrieve some data from the database
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat('bigdata.sqlite')
size = size/1024
conn = sqlite3.connect(basedatos)
cur = conn.cursor()
cur.execute('SELECT count(MatchID) FROM partidos')
numpartidos = cur.fetchone()[0]
cur.execute('SELECT count(SubPorteria) FROM eventos')
numeventos = cur.fetchone()[0]
cur.execute('SELECT count(PlayerID) FROM jugadores')
numjugadores = cur.fetchone()[0]
cur.execute('SELECT count(MatchID) FROM lesiones')
numlesiones = cur.fetchone()[0]
cur.execute('SELECT count(MatchID) FROM sustituciones')
numsus = cur.fetchone()[0]
cur.execute('SELECT count(MatchID) FROM tarjetas')
numtarjetas = cur.fetchone()[0]
cur.execute( 'SELECT max(MatchDate) FROM partidos')
fechamax = cur.fetchone()[0]
cur.close()
if numpartidos == 0:
fechamax = 'Ningun partido en la base'
print('\n')
print('Vamos a enviar el archivo '+Back.BLACK+Fore.GREEN+'"bigdata.sqlite"'+Style.RESET_ALL+' al servidor.')
print('Este archivo es
|
smithfarm/ceph-auto-aws
|
susecon2015/init_lib.py
|
Python
|
bsd-3-clause
| 9,277
| 0.013905
|
#
# init_lib.py
#
# functions for initialization
#
from aws_lib import SpinupError
import base64
from boto import vpc, ec2
from os import environ
from pprint import pprint
import re
import sys
import time
from yaml_lib import yaml_attr
def read_user_data( fn ):
"""
Given a filename, returns the file's contents in a string.
"""
r = ''
with open( fn ) as fh:
r = fh.read()
fh.close()
return r
def get_tags( ec, r_id ):
"""
Takes EC2Connection object and resource ID. Returns tags associated
with that resource.
"""
return ec.get_all_tags(filters={ "resource-id": r_id })
def get_tag( ec, obj, tag ):
"""
Get the value of a tag associated with the given resource object.
Returns None if the tag is not set. Warning: EC2 tags are case-sensitive.
"""
tags = get_tags( ec, obj.id )
found = 0
for t in tags:
if t.name == tag:
found = 1
break
if found:
return t
else:
return None
def update_tag( obj, tag, val ):
"""
Given an EC2 resource object, a tag and a value, updates the given tag
to val.
"""
for x in range(0, 5):
error = False
try:
obj.add_tag( tag, val )
except:
error = True
e = sys.exc_info()[0]
print "Huh, trying again ({})".format(e)
time.sleep(5)
if not error:
print "Object {} successfully tagged.".format(obj)
break
return None
def init_region( r ):
"""
Takes a region string. Connects to that region. Returns EC2Connection
and VPCConnection objects in a tuple.
"""
# connect to region
c = vpc.connect_to_region( r )
ec = ec2.connect_to_region( r )
return ( c, ec )
def init_vpc( c, cidr ):
"""
Takes VPCConnection object (which is actually a connection to a
particular region) and a CIDR block string. Looks for our VPC in that
region. Returns the boto.vpc.vpc.VPC object corresponding to our VPC.
See:
http://boto.readthedocs.org/en/latest/ref/vpc.html#boto.vpc.vpc.VPC
"""
# look for our VPC
all_vpcs = c.get_all_vpcs()
found = 0
our_vpc = None
for v in all_vpcs:
if v.cidr_block == cidr:
our_vpc = v
found = 1
break
if not found:
raise SpinupError( "VPC {} not found".format(cidr) )
return our_vpc
def init_subnet( c, vpc_id, cidr ):
"""
Takes VPCConnection object, which is actually a connection to a
region, and a CIDR block string. Looks for our subnet in that region.
If subnet does not exist, creates it. Returns the subnet resource
object on success, raises exception on failure.
"""
# look for our VPC
all_subnets = c.get_all_subnets()
found = False
our_subnet = None
for s in all_subnets:
if s.cidr_block == cidr:
#print "Found subnet {}".format(cidr)
our_subnet = s
found = True
break
if not found:
our_subnet = c.create_subnet( vpc_id, cidr )
return our_subnet
def set_subnet_map_public_ip( ec, subnet_id ):
"""
    Takes EC2Connection object and SubnetId string. Attempts to set the
MapPublicIpOnLaunch attribute to True.
FIXME: give credit to source
"""
orig_api_version = ec.APIVersion
ec.APIVersion = '2014-06-15'
ec.get_status(
'ModifySubnetAttribute',
{'SubnetId': subnet_id, 'MapPublicIpOnLaunch.Value': 'true'},
verb='POST'
)
ec.APIVersion = orig_api_version
return None
def derive_ip_address( cidr_block, delegate, final8 ):
"""
Given a CIDR block string, a delegate number, and an integer
representing the final 8 bits of the IP address, construct and return
    the IP address derived from these values. For example, if cidr_block is
10.0.0.0/16, the delegate number is 10, and the final8 is 8, the
derived IP address will be 10.0.10.8.
"""
result = ''
match = re.match( r'\d+\.\d+', cidr_block )
if match:
result = '{}.{}.{}'.format( match.group(0), delegate, final8 )
else:
raise SpinupError( "{} passed to derive_ip_address() is not a CIDR block!".format(cidr_block) )
return result
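# Worked example (editorial note): re.match keeps the first two octets of the
# CIDR block, so derive_ip_address('10.0.0.0/16', 10, 8) returns '10.0.10.8'.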
def get_master_instance( ec2_conn, subnet_id ):
"""
Given EC2Connection object and Master Subnet id, check that there is
just one instance running in that subnet - this is the Master. Raise
    exception if the number of instances is != 1.
Return the Master instance object.
"""
instances = ec2_conn.get_only_instances( filters={ "subnet-id": subnet_id } )
if 1 > len(instances):
raise SpinupError( "There are no instances in the master subnet" )
if 1 < len(instances):
raise SpinupError( "There are too many instances in the master subnet" )
return instances[0]
def template_token_subst( buf, key, val ):
"""
Given a string (buf), a key (e.g. '@@MASTER_IP@@') and val, replace all
occurrences of key in buf with val. Return the new string.
"""
targetre = re.compile( re.escape( key ) )
return re.sub( targetre, str(val), buf )
def process_user_data( fn, vars = [] ):
"""
Given filename of user-data file and a list of environment
variable names, replaces @@...@@ tokens with the values of the
    environment variables. Returns the user-data string on success,
    raises an exception on failure.
"""
# Get user_data string.
buf = read_user_data( fn )
for e in vars:
if not e in environ:
raise SpinupError( "Missing environment variable {}!".format( e ) )
buf = template_token_subst( buf, '@@'+e+'@@', environ[e] )
return buf
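# Illustrative sketch (editorial note; the file name is hypothetical): with
# user-data.txt containing "master: @@MASTER_IP@@" and environ['MASTER_IP']
# set to '10.0.10.8', process_user_data('user-data.txt', ['MASTER_IP'])
# returns 'master: 10.0.10.8'.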
def count_instances_in_subnet( ec, subnet_id ):
"""
Given EC2Connection object and subnet ID, count number of instances
in that subnet and return it.
"""
instance_list = ec.get_only_instances(
filters={ "subnet-id": subnet_id }
)
return len(instance_list)
def make_reservation( ec, ami_id, **kwargs ):
"""
Given EC2Connection object, delegate number, AMI ID, as well as
all the kwargs referred to below, make a reservation for an instance
    and return the reservation object.
"""
# extract arguments to be passed to ec.run_instances()
our_kwargs = {
"key_name": kwargs['key_name'],
"subnet_id": kwargs['subnet_id'],
"instance_type": kwargs['instance_type'],
"private_ip_address": kwargs['private_ip_address']
}
# Master or minion?
if kwargs['master']:
our_kwargs['user_data'] = kwargs['user_data']
else:
# perform token substitution in user-data string
u = kwargs['user_data']
u = template_token_subst( u, '@@MASTER_IP@@', kwargs['master_ip'] )
        u = template_token_subst( u, '@@DELEGATE@@', kwargs['delegate_no'] )
u = template_token_subst( u, '@@ROLE@@', kwargs['role'] )
u = template_token_subst( u, '@@NODE_NO@@', kwargs['node_no'] )
our_kwargs['user_data'] = u
# Make the reservation.
reservation = ec.run_instances( ami_id, **our_kwargs )
# Return the reservation object.
return reservation
def wait_for_running( ec2_conn, instance_id ):
"""
Given an instance id, wait for its state to change to "running".
"""
print "Waiting for {} running state".format( instance_id )
while True:
instances = ec2_conn.get_only_instances( instance_ids=[ instance_id ] )
print "Current state is {}".format( instances[0].state )
if instances[0].state != 'running':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
print "Waiting another 5 seconds for good measure"
time.sleep(5)
break
def wait_for_available( ec2_conn, volume_id ):
"""
Given a volume id, wait for its state to change to "available"
|
TeachForAustria/Naschmarkt
|
migrationscript/config.py
|
Python
|
gpl-3.0
| 1,043
| 0
|
"""
This is the config file for the Migration
There are 3 things to configure.
- the old Database to migrate from
- the new Database to save the migration
- FTP connection to save the files
"""
# Old Database.
# This is where the Data is taken from
dbOld = {
'host': "", # host ip
'port': 0, # port
'user': "", # username
'password': "", # password
'database': "" # name of the database
}
# New Database.
# This is where the Data will be stored
dbNew = {
'host': "", # host ip
'port': 0, # port
'user': "", # username
'password': "", # password
'database': "" # name of the database
}
# FTP connection to save the files
ftpConnection = {
'host': "", # host ip
'user': "", # username
'password': "", # password
    'directory': ""  # directory where to save the files to
}
# Every post with these tags will not be migrated.
# e.g. ['TFAktuell', 'AMS']
remove_tags = []
|
addisclinic/mobile-dispatch-server
|
sana/mrs/views.py
|
Python
|
bsd-3-clause
| 4,834
| 0.004758
|
import urllib
import telnetlib
import logging
import sys
import traceback
import cjson
from models import BinaryResource
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.contrib.auth import authenticate, login
from sana.mrs.openmrs import sendToOpenMRS
from sana.mrs.util import enable_logging
from sana.mrs.models import Notification
def chunk( seq, size, pad=None ):
"""Slice a list into consecutive disjoint 'chunks' of
length equal to size. The last chunk is padded if necessary.
Example: ::
>>> list(chunk(range(1,10),3))
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
>>> list(chunk(range(1,9),3))
[[1, 2, 3], [4, 5, 6], [7, 8, None]]
>>> list(chunk(range(1,8),3))
[[1, 2, 3], [4, 5, 6], [7, None, None]]
>>> list(chunk(range(1,10),1))
[[1], [2], [3], [4], [5], [6], [7], [8], [9]]
>>> list(chunk(range(1,10),9))
[[1, 2, 3, 4, 5, 6, 7, 8, 9]]
>>> for X in chunk([],3): print X
>>>
    Parameters:
seq
The sequence to slice
size
The size of each chunk
pad
The size to pad each chunk to.
"""
n = len(seq)
mod = n % size
for i in xrange(0, n-mod, size):
yield seq[i:i+size]
if mod:
padding = [pad] * (size-mod)
yield seq[-mod:] + padding
class FakeProcedureSubmitForm(forms.Form):
"""Encounter form for testing"""
responses = forms.CharField(required=True,
help_text='question,answer,question,answer,..')
procedure_id = forms.IntegerField(required=True, help_text="integers only")
phone_id = forms.CharField(max_length=255)
patient_id = forms.CharField(max_length=255)
#data = forms.FileField(required=True)
def procedure_submit(request):
"""For testing encounter submission"""
upload = request.FILES.get('data', None)
print upload
if request.method == 'POST' and upload is not None:
form = FakeProcedureSubmitForm(request.POST)
else:
form = FakeProcedureSubmitForm()
if form.is_valid():
print "valid"
print form.cleaned_data
phoneId = form.cleaned_data['phone_id']
patientId = form.cleaned_data['patient_id']
procedureId = form.cleaned_data['procedure_id']
responses = form.cleaned_data['responses']
binary = BinaryResource(element_id='test',
content_type='',
procedure=procedureId)
binary.data.save(upload.name, upload)
binary.save()
qas = {}
for q,a in chunk(responses.split(','),2, pad=''):
qas[q] = a
if procedureId == 1:
procedureId = "Diagnose Cervical Cancer"
sendToOpenMRS(patientId, phoneId, procedureId, str(binary.data.path), qas)
return render_to_response("procedure_submit.html",
{'form': form})
def notification_submit(request):
return render_to_response("notification_submit.html")
@enable_logging
def list_notifications(request):
"""For synching notifications with mobile clients.
Request Params
username
A valid username.
password
A valid password.
Parameters:
request
A client request for patient list
"""
logging.info("entering notification list proc")
username = request.REQUEST.get('username',None)
password = request.REQUEST.get('password',None)
user = authenticate(username=username, password=password)
if user is not None:
try:
data = Notification.objects.all()
logging.info("we finished getting the notification list")
response = {'status': 'SUCCESS',
'data': [cjson.decode(d.to_json()) for d in data],
}
except Exception, e:
et, val, tb = sys.exc_info()
trace = traceback.format_tb(tb)
error = "Exception : %s %s %s" % (et, val, trace[0])
for tbm in trace:
logging.error(tbm)
logging.error("Got exception while fetching notification list: %
|
s" % e)
response = {
'status': 'FAILURE',
'data': "Problem while getting notification list: %s" % e,
}
else:
logging.error('User not authenticated')
response = {
'status': 'FAILURE',
'data': 'User not authenticated',
}
return HttpResponse(cjson.encode(response), content_type=("application/json; charset=utf-8"))
def home(request):
"""Top level url
Displays ::
Sanamobile MDS : Online
"""
return HttpResponse('Sanamobile MDS : Online')
|
yuxng/DA-RNN
|
lib/networks/gru2d.py
|
Python
|
mit
| 2,570
| 0.003502
|
import tensorflow as tf
class GRU2DCell(tf.contrib.rnn.RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, channels):
self._num_units = num_units
self._channels = channels
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def make_var(self, name, shape, initializer=None, trainable=True):
return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)
# inputs: [batch_size, height, width, channels]
# state: [batch_size, height, width, num_units]
def __call__(self, inputs, state, weights, scope=None):
with tf.variable_scope(scope or type(self).__name__): # "GRUCell"
inputs_shape = tf.shape(inputs)
inputs = tf.reshape(inputs, [inputs_shape[0], inputs_shape[1], inputs_shape[2], self._channels])
with tf.variable_scope("Gates"): # Reset gate and update gate.
# concat inputs and state
inputs_state = tf.concat(axis=3, values=[inputs, state])
# define the variables
init_kernel = tf.constant_initializer(0.0)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [1, 1, self._num_units + self._channels, self._num_units], init_kernel)
biases = self.make_var('biases', [self._num_units], init_biases)
# 2D convolution
conv = tf.nn.conv2d(inputs_state, kernel, [1, 1, 1, 1], padding='SAME')
u = tf.nn.sigmoid(tf.nn.bias_add(conv, biases))
# ru = tf.nn.sigmoid(ru)
# r, u = tf.split(3, 2, ru)
'''
with tf.variable_scope("Candidate"):
                inputs_rstate = tf.concat(3, [inputs, tf.mul(r, state)])
# define the variables
init_biases_1 = tf.constant_initializer(0.0)
kernel_1 = self.make_var('weights', [3, 3, self._num_units + self._channels, self._num_units])
biases_1 = self.make_var('biases', [self._num_units], init_biases_1)
# 2D convolution
conv_1 = tf.nn.conv2d(inputs_rstate, kernel_1, [1, 1, 1, 1], padding='SAME')
c = tf.nn.tanh(tf.nn.bias_add(conv_1, biases_1))
'''
new_w = weights + u
new_h = tf.nn.relu(tf.div(weights * state + u * inputs, new_w))
return new_h, new_h, new_w
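
# --- Illustrative usage sketch (editorial addition; shapes are assumptions) ---
# The cell keeps a per-pixel running weight and returns the new hidden state
# twice (as output and as state) plus the updated weights:
#
#   cell = GRU2DCell(num_units=64, channels=32)
#   # inputs: [batch, height, width, 32]; state, weights: [batch, height, width, 64]
#   output, new_state, new_weights = cell(inputs, state, weights)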
|
sikegame/udacity-project-4
|
main.py
|
Python
|
apache-2.0
| 1,600
| 0.000625
|
#!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 2
|
4
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),        # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class SetFeaturedSpeakerHandler(webapp2.RequestHandler):
def post(self):
wsck = self.request.get('websafeConferenceKey')
ConferenceApi._cacheFeaturedSpeaker(wsck)
self.response.set_status(204)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/set_featured_speaker', SetFeaturedSpeakerHandler),
], debug=True)
|
chrishan/twitter-bot
|
twitter-bot/twitter-bot.py
|
Python
|
bsd-3-clause
| 2,563
| 0.003512
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import simplejson as json
import tweepy
import bitly
import urllib2
import sqlite3
from local_settings import TwitterKey, BitlyKey
logging.basicConfig(filename='log.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
def run():
conn = sqlite3.connect('tweets.db')
    # if the table does not exist, create it
cur = conn.cursor()
query = cur.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND name='tweet_table'")
if query.fetchone()[0] <= 0:
cur.execute("CREATE TABLE tweet_table(Id INTEGER PRIMARY KEY, reddit_id TEXT, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)")
consumer_key = TwitterKey['consumer_key']
consumer_secret = TwitterKey['consumer_secret']
access_token = TwitterKey['access_token']
access_token_secret = TwitterKey['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
bot = tweepy.API(auth)
shortapi = bitly.Api(login=BitlyKey['login'], apikey=BitlyKey['apikey'])
url = 'http://www.reddit.com/r/programming/.json'
jsondata = json.loads(urllib2.urlopen(url).read())
if 'data' in jsondata and 'children' in jsondata['data']:
posts = jsondata['data']['children']
posts.reverse()
for ind, post in enumerate(posts):
entry = post['data']
# logging.debug(entry['permalink'] + ' ' +entry['url'])
postid = entry['id']
num_comments = entry['num_comments']
            query = cur.execute("SELECT * FROM tweet_table WHERE reddit_id = '%s'" % postid)
if len(query.fetchall()) == 0 and num_comments > 5:
title = entry['title']
score = entry['score']
downs = entry['downs']
ups = entry['ups']
permalink = shortapi.shorten('http://www.reddit.com' + entry['permalink'])
url = shortapi.shorten(entry['url'])
author = entry['author']
                status = ' %s [%s by:%s comments:%d score:%d]' % (url, permalink, author, num_comments, score)
status = title[:(135 - len(status))] + status
status = status.encode('utf-8')
logging.debug(status)
bot.update_status(status)
cur.execute("INSERT INTO tweet_table VALUES (?, ?, ?)", [None, postid, None])
conn.commit()
conn.close()
if __name__ == '__main__':
run()
|
Kami/sgrstats.com
|
sgrstats/stats/templatetags/tags.py
|
Python
|
apache-2.0
| 1,515
| 0.009901
|
from django.template import Library, Node, TemplateSyntaxError
from stats.views import get_next_rank_title_and_exp_points
register = Library()
class SetVariable(Node):
def __init__(self, varname, nodelist):
self.varname = varname
self.nodelist = nodelist
def render(self,context):
context[self.varname] = self.nodelist.render(context)
return ''
@register.tag(name = 'setvar')
def setvar(parser, token):
"""
    Set a variable to the content of a rendered block.
{% setvar var_name %}
....
    {% endsetvar %}
"""
try:
# split_contents() knows not to split quoted strings.
tag_name, varname = token.split_contents()
except ValueError:
raise TemplateSyntaxError, "%r tag requires a single argument for variable name" % token.contents.split()[0]
nodelist = parser.parse(('endsetvar',))
parser.delete_first_token()
return SetVariable(varname, nodelist)
@register.simple_tag
def active(request, pattern):
if request.path.startswith(pattern):
return 'active'
return ''
@register.simple_tag
def next_rank(category, exp_current):
next_rank = get_next_rank_title_and_exp_points(category, exp_current)
if not next_rank:
return 'Next rank is unknown'
(title, exp_needed, exp_total) = next_rank
return '<strong>%d EXP</strong> needed to reach the rank <strong>%s</strong> (<strong>%d EXP</strong>)' % (exp_needed, title, exp_total)
|
Etxea/gestioneide
|
cambridge/urls.py
|
Python
|
gpl-3.0
| 7,898
| 0.018232
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from django.conf.urls import url
from django.contrib.auth.decorators import login_required, permission_required
from django.views.generic import TemplateView, ListView, CreateView, UpdateView, DeleteView
from cambridge.views import *
from cambridge.forms import *
from cambridge.models import *
urlpatterns = [
url(r'^list/$',login_required(RegistrationListView.as_view()), name="cambridge_list"),
    url(r'^list/all$',login_required(RegistrationListViewAll.as_view()), name="cambridge_list_all"),
url(r'^list/exam/(?P<exam_id>\d+)/$',login_required(RegistrationListViewExam.as_view()), name="cambridge_list_exam"),
url(r'^excel/$',login_required(RegistrationExcelView.as_view()), name="cambridge_excel"),
url(r'^excel/exam/(?P<exam_id>\d+)/$',login_required(RegistrationExcelView.as_view()), name="cambridge_excel_exam"),
url(r'^pay/(?P<pk>\d+)/$',RegistrationPayment,name="cambridge_pay"),
url(r'^edit/(?P<pk>\d+)/$',
login_required(UpdateView.as_view(
model=Registration,
success_url = '/cambridge/list',
form_class = RegistrationEditForm,
template_name='cambridge/registration_edit.html')), name="cambridge_edit"),
url(r'^delete/(?P<pk>\d+)/$',
login_required(DeleteView.as_view(
model=Registration,
success_url="/cambridge/list/")), name="cambridge_delete"),
url(r'^view/(?P<pk>\d+)/$', ver, name="cambridge_view"),
url(r'^print/(?P<pk>\d+)/$', imprimir_cambridge, name="cambridge_imprimir"),
url(r'^new/(?P<exam_id>\d+)/$',RegistrationExamCreateView.as_view(), name="cambridge_nueva_examen"),
url(r'^new/$',RegistrationCreateView.as_view(), name="cambridge_nueva"),
#Colegios
url(r'schools/exam/list/$', login_required(SchoolExamList.as_view()),name="cambridge_schools_exam_list"),
url(r'schools/exam/(?P<school_name>\w+)/new/$', login_required(SchoolExamCreate.as_view()),name="cambridge_schools_exam_new"),
url(r'schools/list/$', login_required(SchoolListView.as_view()),name="cambridge_schools_list"),
url(r'schools/registrations/list/$', login_required(SchoolRegistrationListView.as_view()),name="cambridge_schools_registration_list"),
url(r'schools/new/(?P<school_name>\w+)/(?P<school_password>\w+)/$', SchoolRegistrationCreateView.as_view(),name="cambridge_schools_new_registration"),
url(r'schools/new/$', SchoolCreateView.as_view(),name="cambridge_schools_new"),
url(r'berriotxoa/$', TemplateView.as_view( template_name = 'cambridge/berriotxoa.html' ),name="cambridge_berriotxoa"),
url(r'schools/fuentefresnedo/$', TemplateView.as_view( template_name = 'cambridge/fuentefresnedo.html' ),name="cambridge_fuentefresnedo"),
#Venues
url(r'venue/exam/list/$', login_required(VenueExamList.as_view()),name="cambridge_venues_exam_list"),
url(r'venue/exam/new/$', login_required(VenueExamCreate.as_view()),name="cambridge_venues_exam_new"),
url(r'venue/list/$', login_required(VenueListView.as_view()),name="cambridge_venues_list"),
url(r'venue/registrations/list/$', login_required(VenueRegistrationListView.as_view()),name="cambridge_venues_registration_list"),
url(r'venue/new/(?P<venue_name>\w+)/$', VenueRegistrationCreateView.as_view(),name="cambridge_venues_new_registration"),
#Linguaskill
url(r'linguaskill/new/$', LinguaskillRegistrationCreateView.as_view(),name="cambridge_linguaskill_new_registration"),
url(r'linguaskill/list/$', LinguaskillRegistrationListView.as_view(),name="cambridge_linguaskill_registration_list"),
## Prep Center
url(r'prepcenter/$', PrepCenterHomeView.as_view(),name="cambridge_prepcenter_home"),
url(r'prepcenter/pay/registrations/$', PrepCenterRegistrationsPayView.as_view(),name="cambridge_prepcenters_registrations_pays"),
url(r'prepcenter/new/center/$', PrepCenterCreateView.as_view(),name="cambridge_prepcenters_new"),
url(r'prepcenter/update/center/(?P<pk>\d+)/$', PrepCenterUpdateView.as_view(),name="cambridge_prepcenters_update"),
url(r'prepcenter/list/$', login_required(PrepCenterListView.as_view()),name="cambridge_prepcenters_list"),
#url(r'prepcenter/exam/list/$', login_required(PrepCenterExamList.as_view()),name="cambridge_prepcenters_exam_list"),
# url(r'prepcenter/exam/new/$', PrepCenterExamCreate.as_view(),name="cambridge_prepcenters_exam_new"),
url(r'prepcenter/registrations/new/$', PrepCenterRegistrationCreateView.as_view(),name="cambridge_prepcenters_registration_new"),
url(r'prepcenter/registrations/new/exam/(?P<exam_id>\d+)/(?P<form_num>\d+)/$',PrepCenterRegistrationExamCreateView.as_view(),name="cambridge_prepcenters_registration_exam_new"),
url(r'prepcenter/registrations/new/exam/(?P<exam_id>\d+)/$',PrepCenterRegistrationExamCreateView.as_view(),name="cambridge_prepcenters_registration_exam_new"),
url(r'prepcenter/registrations/delete/(?P<pk>\d+)/$',PrepCenterRegistrationDeleteView.as_view(), name="prepcenter_registration_delete"),
#url(r'prepcenter/registrations/list/$', login_required(PrepCenterRegistrationListView.as_view()),name="cambridge_prepcenters_registration_list"),
url(r'prepcenter/passwordreset/(?P<pk>\d+)/$',PrepCenterPasswordResetView.as_view(), name="prepcenter_passwordreset"),
url(r'prepcenter/createuser/(?P<pk>\d+)/$',PrepCenterCreateUserView.as_view(), name="prepcenter_createuser"),
url(r'prepcenter/disableuser/(?P<pk>\d+)/$',PrepCenterDisableUserView.as_view(), name="prepcenter_disableuser"),
url(r'prepcenter/enableuser/(?P<pk>\d+)/$',PrepCenterEnableUserView.as_view(), name="prepcenter_enableuser"),
url(r'prepcenter/detalle/(?P<pk>\d+)/$',PrepCenterDetailView.as_view(), name="prepcenter_detalle"),
url(r'prepcenter/delete/(?P<pk>\d+)/$',PrepCenterDeleteView.as_view(), name="prepcenter_delete"),
url(r'prepcenter/registrations/pay/(?P<pk>\d+)/$',PrepCenterPayRegistrations.as_view(), name="prepcenter_registrations_admin_pay"),
## Genericas
url(r'thanks/$', TemplateView.as_view( template_name = 'cambridge/gracias.html' ),name="cambridge_gracias"),
url(r'error/$', TemplateView.as_view( template_name = 'cambridge/error.html' ),name="cambridge_error"),
##For the exams
url(r'^exam/list/$',login_required(
ListView.as_view(model=Exam,template_name='cambridge/exam_list.html')
), name="cambridge_exam_list"),
url(r'^exam/delete/(?P<pk>\d+)/$',
login_required(DeleteView.as_view(
model=Exam,
success_url="/cambridge/exam/list/")), name="cambridge_exam_delete"),
url(r'^exam/new/$', login_required(
CreateView.as_view(
model=Exam,
form_class = ExamForm,
success_url = '/cambridge/exam/list',
template_name='cambridge/exam_form.html')), name="cambridge_exam_new"),
url(r'^exam/edit/(?P<pk>\d+)/$',
login_required(UpdateView.as_view(
model=Exam,
fields = '__all__',
success_url = '/cambridge/exam/list',
template_name='cambridge/exam_edit.html')), name="cambridge_exam_edit"),
url(r'^$', IndexExamList.as_view(),name="cambridge"),
]
|
dterei/Scraps
|
perf/syscall-latency.py
|
Python
|
bsd-3-clause
| 1,275
| 0.017255
|
# perf script event handlers, generated by perf script -g python
# Licensed under the terms of the GNU GPL License version 2
# The common_* event handler fields are the most useful fields common to
# all events. They don't necessarily correspond to the 'common_*' fields
# in the format files. Those fields not available as handler params can
# be retrieved using Python functions of the form common_*(context).
# See the perf-trace-python Documentation for the list of available functions.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
syscalls = autodict()
def trace_begin():
pass
def trace_end():
pass
def raw_syscalls__sys_exit(event_name, context, cpu,
    s, ns, pid, comm, callchain, syscall_id, ret):
if pid not in syscalls or syscall_id not in syscalls[pid]:
return
latency = nsecs(s, ns) - syscalls[pid][syscall_id]
print "[%04d] %04d => %9uns" % (pid, syscall_id, latency)
def raw_syscalls__sys_enter(event_name, context, cpu,
    s, ns, pid, comm, callchain, syscall_id, args):
syscalls[pid][syscall_id] = nsecs(s, ns)
def trace_unhandled(event_name, context, event_fields_dict):
pass
|
LarryHillyer/PoolHost
|
PoolHost/nfl/division/forms.py
|
Python
|
gpl-3.0
| 1,479
| 0.025693
|
from django import forms
from django.forms import ModelForm
from django.db import models
from app.models import NFL_Division
from app.models import NFL_Conference_Choices
class NFL_DivisionForm_Create(ModelForm):
name = forms.CharField(max_length = 100,
widget = forms.TextInput({
'class':'form-control',
'placeholder': 'Enter Division Name'}))
conference_id = forms.ChoiceField(choices = NFL_Conference_Choices.make_conference_choices,
widget = forms.Select({'class':'form-control'}))
    filter = forms.IntegerField(widget = forms.HiddenInput())
class Meta:
model = NFL_Division
fields = ['name', 'conference_id']
class NFL_DivisionForm_Edit(ModelForm):
id = forms.IntegerField(widget = forms.HiddenInput())
name = forms.CharField(max_length = 100,
widget = forms.TextInput({
'class':'form-control',
'placeholder': 'Enter Division Name'}))
conference_id = forms.ChoiceField(choices = NFL_Conference_Choices.make_conference_choices,
widget = forms.Select({'class':'form-control'}))
filter = forms.IntegerField(widget = forms.HiddenInput())
class Meta:
model = NFL_Division
fields = ['id', 'name', 'conference_id']
|
MiracleWong/PythonBasic
|
PyH/demo.py
|
Python
|
mit
| 635
| 0.031496
|
from pyh import *
list=[[1,'Lucy',25],[2,'Tom',30],[3,'Lily',20]]
page = PyH('Test')
page<<div(style="text-align:center")<<h4('Test table')
mytab = page << table(border="1",cellpadding="3",cellspacing="0",style="margin:auto")
tr1 = mytab << tr(bgcolor="lightgrey")
tr1 << th('id') + th('name')+th('age')
for i in range(len(list)):
tr2 = mytab << tr()
for j in range(3):
tr2 << td(list[i][j])
if list[i][j]=='Tom':
tr2.attributes['bgcolor']='yellow'
if list[i][j]=='Lily':
tr2[1].attributes['style']='color:red'
page.printOut('/Users/miraclewong/github/PythonBasic/PyH/demo.html')
|
esikachev/scenario
|
sahara/utils/openstack/heat.py
|
Python
|
apache-2.0
| 2,155
| 0
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import client as heat_client
from oslo_config import cfg
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.utils.openstack import base
opts = [
cfg.BoolOpt('api_insecure',
default=False,
help='Allow to perform insecure SSL requests to heat.'),
cfg.StrOpt('ca_file',
help='Location of ca certificates file to use for heat '
'client requests.')
]
heat_group = cfg.OptGroup(name='heat',
title='Heat client options')
CONF = cfg.CONF
CONF.register_group(heat_group)
CONF.register_opts(opts, group=heat_group)
def client():
ctx = context.current()
heat_url = base.url_for(ctx.service_catalog, 'orchestration')
return heat_client.Client('1', heat_url, token=ctx.auth_token,
                              cert_file=CONF.heat.ca_file,
insecure=CONF.heat.api_insecure)
def get_stack(stack_name):
heat = client()
for stack in heat.stacks.list():
if stack.stack_name == stack_name:
return stack
raise ex.NotFoundException(_('Failed to find stack %(stack)s')
% {'stack': stack_name})
def wait_stack_completion(stack):
    # NOTE: an empty status is expected because the stack status
    # may not be set yet in the heat database
while stack.status in ['IN_PROGRESS', '']:
context.sleep(1)
stack.get()
if stack.status != 'COMPLETE':
raise ex.HeatStackException(stack.stack_status)
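
# Illustrative usage sketch (editorial note; the stack name is hypothetical):
#   stack = get_stack('cluster-1')
#   wait_stack_completion(stack)  # polls until the status leaves IN_PROGRESS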
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/pango/WrapMode.py
|
Python
|
gpl-2.0
| 659
| 0.009105
|
# encoding: utf-8
# module pango
# from /usr/lib/python2.7/dist-packages/gtk-2.0/pango.so
# by generator 1.135
# no doc
# imports
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class WrapMode(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
    __dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
}
__gtype__ = None # (!) real value is ''
|
sputnick-dev/weboob
|
modules/entreparticuliers/module.py
|
Python
|
agpl-3.0
| 2,203
| 0
|
# -*- coding: utf-8 -*-
# Copyright(C) 2015 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.backend import Module
from weboob.capabilities.housing import CapHousing, Housing, HousingPhoto
from .browser import EntreparticuliersBrowser
__all__ = ['EntreparticuliersModule']
class EntreparticuliersModule(Module, CapHousing):
NAME = 'entreparticuliers'
DESCRIPTION = u'entreparticuliers.com website'
MAINTAINER = u'Bezleputh'
EMAIL = 'carton_ben@yahoo.fr'
    LICENSE = 'AGPLv3+'
VERSION = '1.1'
BROWSER = EntreparticuliersBrowser
def search_city(self, pattern):
return self.browser.search_city(pattern)
def search_housings(self, query):
        cities = [c.id for c in query.cities if c.backend == self.name]
if len(cities) == 0:
return list([])
return self.browser.search_housings(query.type, cities, query.nb_rooms,
query.area_min, query.area_max,
query.cost_min, query.cost_max,
query.house_types)
def get_housing(self, _id):
return self.browser.get_housing(_id)
def fill_housing(self, housing, fields):
return self.browser.get_housing(housing.id, housing)
def fill_photo(self, photo, fields):
if 'data' in fields and photo.url and not photo.data:
photo.data = self.browser.open(photo.url).content
return photo
OBJECTS = {Housing: fill_housing, HousingPhoto: fill_photo}
pzia/keepmydatas | misc/testmagic.py | Python | mit | 344 | 0.014535
#!/usr/bin/python
|
import magic
import sys
m = magic.open(magic.MIME_TYPE)
m.load()
for f in sys.argv[1:]:
try :
print(f, m.file(f))
except :
print("Except with %s" % f)
mablae/weblate | weblate/trans/tests/test_machine.py | Python | gpl-3.0 | 10,441 | 0
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import httpretty
from django.test import TestCase
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.models.unit import Unit
from weblate.trans.machine.base import MachineTranslationError
from weblate.trans.machine.dummy import DummyTranslation
from weblate.trans.machine.glosbe import GlosbeTranslation
from weblate.trans.machine.mymemory import MyMemoryTranslation
from weblate.trans.machine.opentran import OpenTranTranslation
from weblate.trans.machine.apertium import ApertiumTranslation
from weblate.trans.machine.tmserver import AmagamaTranslation
from weblate.trans.machine.microsoft import MicrosoftTranslation
from weblate.trans.machine.google import (
GoogleWebTranslation, GoogleTranslation
)
from weblate.trans.machine.weblatetm import (
WeblateSimilarTranslation, WeblateTranslation
)
GLOSBE_JSON = u'''
{
"result":"ok",
"authors":{
"1":{"U":"http://en.wiktionary.org","id":1,"N":"en.wiktionary.org"}
},
"dest":"ces",
"phrase":"world",
"tuc":[
{
"authors":[1],
"meaningId":-311020347498476098,
"meanings":[
{
"text":"geographic terms (above country level)",
"language":"eng"
}
],
"phrase":{"text":"svět","language":"ces"}}],
"from":"eng"
}
'''.encode('utf-8')
MYMEMORY_JSON = u'''
{"responseData":{"translatedText":"svět"},"responseDetails":"",
"responseStatus":200,
"matches":[
{"id":"428492143","segment":"world","translation":"svět","quality":"",
"reference":"http://aims.fao.org/standards/agrovoc",
"usage-count":15,"subject":"Agriculture_and_Farming",
"created-by":"MyMemoryLoader",
"last-updated-by":"MyMemoryLoader","create-date":"2013-06-12 17:02:07",
"last-update-date":"2013-06-12 17:02:07","match":1},
{"id":"424273685","segment":"World view","translation":"Světový názor",
"quality":"80",
"reference":"//cs.wikipedia.org/wiki/Sv%C4%9Btov%C3%BD_n%C3%A1zor",
"usage-count":1,"subject":"All","created-by":"","last-updated-by":"Wikipedia",
"create-date":"2012-02-22 13:23:31","last-update-date":"2012-02-22 13:23:31",
"match":0.85},
{"id":"428493395","segment":"World Bank","translation":"IBRD","quality":"",
"reference":"http://aims.fao.org/standards/agrovoc",
"usage-count":1,"subject":"Agriculture_and_Farming",
"created-by":"MyMemoryLoader","last-updated-by":"MyMemoryLoader",
"create-date":"2013-06-12 17:02:07",
"last-update-date":"2013-06-12 17:02:07","match":0.84}
]}
'''.encode('utf-8')
AMAGAMA_JSON = u'''
[{"source": "World", "quality": 80.0, "target": "Svět", "rank": 100.0}]
'''.encode('utf-8')
GOOGLE_JSON = u'''
[
[["svět","world","",""]],
[[
"noun",["svět","země","společnost","lidstvo"],
[
["svět",["world","earth"],null,0.465043187],
["země",["country","land","ground","nation","soil","world"]
,null,0.000656803953],
["lidstvo",["humanity","mankind","humankind","people","world"]
,null,0.000148860636]
],
"world",1
]],
"en",null,
[["svět",[4],1,0,1000,0,1,0]],
[[
"world",4,[["svět",1000,1,0],
["World",0,1,0],
["Světová",0,1,0],
["světě",0,1,0],
["světa",0,1,0]],
[[0,5]],"world"]],
null,null,[],2
]
'''.encode('utf-8')
OPENTRAN_JSON = u'''
[{
"count":4,
"projects":[{
"count":4,"flags":0,"name":"KDE","orig_phrase":" World",
"path":"K/step_qt"
}],
"text":"Svět","value":1
}]
'''.encode('utf-8')
class MachineTranslationTest(TestCase):
'''
Testing of machine translation core.
'''
def test_support(self):
machine_translation = DummyTranslation()
self.assertTrue(machine_translation.is_supported('cs'))
self.assertFalse(machine_translation.is_supported('de'))
def test_translate(self):
machine_translation = DummyTranslation()
self.assertEqual(
machine_translation.translate('cs', 'Hello', None, None),
[]
)
self.assertEqual(
len(
machine_translation.translate(
'cs', 'Hello, world!', None, None
)
),
2
)
    def assertTranslate(self, machine, lang='cs', word='world', empty=False):
translation = machine.translate(lang, word, None, None)
self.assertIsInstance(translation, list)
if not empty:
self.assertTrue(len(translation) > 0)
@httpretty.activate
def test_glosbe(self):
httpretty.register_uri(
httpretty.GET,
'http://glosbe.com/gapi/translate',
body=GLOSBE_JSON
)
machine = GlosbeTranslation()
self.assertTranslate(machine)
@httpretty.activate
def test_mymemory(self):
httpretty.register_uri(
httpretty.GET,
'http://mymemory.translated.net/api/get',
body=MYMEMORY_JSON
)
machine = MyMemoryTranslation()
self.assertTranslate(machine)
@httpretty.activate
def test_opentran(self):
httpretty.register_uri(
httpretty.GET,
'http://open-tran.eu/json/supported',
body='["en","cs"]'
)
httpretty.register_uri(
httpretty.GET,
'http://en.cs.open-tran.eu/json/suggest/world',
body=OPENTRAN_JSON
)
machine = OpenTranTranslation()
self.assertTranslate(machine)
@httpretty.activate
def test_opentran_wrong_lang(self):
httpretty.register_uri(
httpretty.GET,
'http://open-tran.eu/json/supported',
body='["en","cs"'
)
machine = OpenTranTranslation()
# Prevent cache issues
machine.mtid += 'wrong_lang'
self.assertTranslate(machine, empty=True)
@httpretty.activate
def test_opentran_wrong(self):
httpretty.register_uri(
httpretty.GET,
'http://open-tran.eu/json/supported',
body='["en","cs"]'
)
httpretty.register_uri(
httpretty.GET,
'http://en.cs.open-tran.eu/json/suggest/world',
body='['
)
machine = OpenTranTranslation()
# Prevent cache issues
machine.mtid += 'wrong'
self.assertRaises(
MachineTranslationError,
self.assertTranslate,
machine
)
@httpretty.activate
def test_apertium(self):
httpretty.register_uri(
httpretty.GET,
'http://api.apertium.org/json/listPairs',
body='{"responseStatus": 200, "responseData":'
'[{"sourceLanguage": "en","targetLanguage": "es"}]}'
)
httpretty.register_uri(
httpretty.GET,
'http://api.apertium.org/json/translate',
body='{"responseData":{"translatedText":"Mundial"},'
'"responseDetails":null,"responseStatus":200}'
)
machine = ApertiumTranslation()
self.assertTranslate(machine, 'es')
@httpretty.activate
def test_microsoft(self):
httpretty.register_uri(
httpretty.POST,
'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13',
body='{"access_token":"TOKEN"}'
)
httpretty.register_uri(
ht
hanks/Second_Hackson_Demo | BeaconCharityServer/app-server/models.py | Python | mit | 1,796 | 0.002227
# coding: utf-8
from __future__ import division
class CharityItem(object):
def __init__(self, name, short_desc, long_desc, image_name, detail_image_name, rating, major, minor, objective_money, actual_money):
self.name = name
self.short_desc = short_desc
self.long_desc = long_desc
self.image_name = image_name
self.detail_image_name = detail_image_name
        self.rating = rating
self.minor = minor
self.major = major
self.objective_money = objective_money
self.actual_money = actual_money
def to_dict(self):
return {
"name": self.name,
"short_desc": self.short_desc,
"long_desc": self.long_desc,
"image_name": self.image_name,
"detail_image_name": self.detail_image_name,
"minor": self.minor,
"major": self.major,
"rating": self.rating,
"objective_money": self.objective_money,
"actual_money": self.actual_money,
}
@property
def accomplishment_rate(self):
return self.actual_money / self.objective_money
@classmethod
def from_dict(cls, json_data):
name = json_data["name"]
short_desc = int(json_data["short_desc"])
long_desc = json_data["long_desc"]
image_name = json_data["image_name"]
detail_image_name = json_data["detail_image_name"]
minor = int(json_data["minor"])
major = int(json_data["major"])
rating = int(json_data["rating"])
objective_money = int(json_data["objective_money"])
actual_money = int(json_data["actual_money"])
return cls(name, short_desc, long_desc, image_name, detail_image_name, rating, major, minor, objective_money, actual_money)
rht/zulip | zerver/webhooks/opsgenie/tests.py | Python | apache-2.0 | 7,350 | 0.002041
from zerver.lib.test_classes import WebhookTestCase
class OpsGenieHookTests(WebhookTestCase):
STREAM_NAME = "opsgenie"
URL_TEMPLATE = "/api/v1/external/opsgenie?&api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "opsgenie"
def test_acknowledge_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: Acknowledge
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"acknowledge",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_addnote_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddNote
* **Note**: note to test alert
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"addnote",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_addrecipient_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddRecipient
* **Recipient**: team2_escalation
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"addrecipient",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_addtags_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddTags
* **Tags added**: tag1,tag2,tag3
* **Message**: test alert
* **Tags**: `tag1`, `tag2`, `tag3`
""".strip()
self.check_webhook(
"addtags",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_addteam_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AddTeam
* **Team added**: team2
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"addteam",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_assignownership_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: AssignOwnership
* **Assigned owner**: user2@ifountain.com
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"assignownership",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_close_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: Close
* **Message**: test alert
""".strip()
self.check_webhook(
"close",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_create_alert(self) -> None:
expected_topic = "Webhook"
expected_message = """
[OpsGenie alert for Webhook](https://app.opsgenie.com/alert/V2#/show/ec03dad6-62c8-4c94-b38b-d88f398e900f):
* **Type**: Create
* **Message**: another alert
* **Tags**: `vip`
""".strip()
self.check_webhook(
"create",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_customaction_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: TestAction
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"customaction",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_delete_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: Delete
* **Message**: test alert
""".strip()
self.check_webhook(
"delete",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_escalate_alert(self) -> None:
expected_topic = "Webhook_Test"
expected_message = """
[OpsGenie alert for Webhook_Test](https://app.opsgenie.com/alert/V2#/show/7ba97e3a-d328-4b5e-8f9a-39e945a3869a):
* **Type**: Escalate
* **Escalation**: test_esc
""".strip()
self.check_webhook(
"escalate",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_removetags_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: RemoveTags
* **Tags removed**: tag3
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"removetags",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_takeownership_alert(self) -> None:
expected_topic = "Webhook"
expected_message = """
[OpsGenie alert for Webhook](https://app.opsgenie.com/alert/V2#/show/8a745a79-3ed3-4044-8427-98e067c0623c):
* **Type**: TakeOwnership
* **Message**: message test
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"takeownership",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_unacknowledge_alert(self) -> None:
expected_topic = "Integration1"
expected_message = """
[OpsGenie alert for Integration1](https://app.opsgenie.com/alert/V2#/show/052652ac-5d1c-464a-812a-7dd18bbfba8c):
* **Type**: UnAcknowledge
* **Message**: test alert
* **Tags**: `tag1`, `tag2`
""".strip()
self.check_webhook(
"unacknowledge",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
dibaunaumh/fcs-skateboard | fcs_aux/mies/__init__.py | Python | agpl-3.0 | 47 | 0
# TODO separate into different package modules
markgw/jazzparser | lib/nltk/cluster/__init__.py | Python | gpl-3.0 | 4,217 | 0.000474
# Natural Language Toolkit: Clusterers
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
This module contains a number of basic clustering algorithms. Clustering
describes the task of discovering groups of similar items within a large
collection. It is also described as unsupervised machine learning, as the data
from which it learns is unannotated with class information, unlike the case
for supervised learning. Annotated data is difficult and expensive to obtain in
the quantities required for the majority of supervised learning algorithms.
This problem, the knowledge acquisition bottleneck, is common to most natural
language processing tasks, thus fueling the need for quality unsupervised
approaches.
This module contains a k-means clusterer, E-M clusterer and a group average
agglomerative clusterer (GAAC). All these clusterers involve finding good
cluster groupings for a set of vectors in multi-dimensional space.
The K-means clusterer starts with k arbitrarily chosen means, then allocates
each vector to the cluster with the closest mean. It then recalculates the
means of each cluster as the centroid of the vectors in the cluster. This
process repeats until the cluster memberships stabilise. This is a
hill-climbing algorithm which may converge to a local maximum, so the
clustering is often repeated with random initial means and the most commonly
occurring output means are chosen.
The GAAC clusterer starts with each of the M{N} vectors as singleton clusters.
It then iteratively merges pairs of clusters which have the closest centroids.
This continues until there is only one cluster. The order of merges gives rise
to a dendrogram - a tree with the earlier merges lower than later merges. The
membership of a given number of clusters M{c}, M{1 <= c <= N}, can be found by
cutting the dendrogram at depth M{c}.
The Gaussian EM clusterer models the vectors as being produced by a mixture
of k Gaussian sources. The parameters of these sources (prior probability,
mean and covariance matrix) are then found to maximise the likelihood of the
given data. This is done with the expectation maximisation algorithm. It
starts with k arbitrarily chosen means, priors and covariance matrices. It
then calculates the membership probabilities for each vector in each of the
clusters - this is the 'E' step. The cluster parameters are then updated in
the 'M' step using the maximum likelihood estimate from the cluster membership
probabilities. This process continues until the likelihood of the data does
not significantly increase.
They all extend the ClusterI interface which defines common operations
available with each clusterer. These operations include:
- cluster: clusters a sequence of vectors
- classify: assign a vector to a cluster
- classification_probdist: give the probability distribution over cluster memberships
The existing clusterers also extend cluster.VectorSpace, an
abstract class which allows for singular value decomposition (SVD) and vector
normalisation. SVD is used to reduce the dimensionality of the vector space in
such a manner as to preserve as much of the variation as possible, by
reparameterising the axes in order of variability and discarding all bar the
first d dimensions. Normalisation ensures that vectors fall in the unit
hypersphere.
Usage example (see also demo())::
from nltk import cluster
from nltk.cluster import euclidean_distance
from numpy import array
vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
# initialise the clusterer (will also assign the vectors to clusters)
clusterer = cluster.KMeansClusterer(2, euclidean_distance)
clusterer.cluster(vectors, True)
# classify a new vector
print clusterer.classify(array([3, 3]))
Note that the vectors must use numpy array-like
objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
efficiency when required.
"""
from util import *
from kmeans import *
from gaac import *
from em import *
__all__ = ['KMeansClusterer', 'GAAClusterer', 'EMClusterer',
'VectorSpaceClusterer', 'Dendrogram']
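# A minimal GAAC usage sketch (an illustrative addition, assuming the same
# vector-based API as the KMeansClusterer example in the docstring above):
#     from numpy import array
#     vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
#     clusterer = GAAClusterer(num_clusters=2)
#     clusterer.cluster(vectors, True)
#     print clusterer.classify(array([3, 3]))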
JohnyEngine/CNC | heekscnc/nc/cad_iso_read.py | Python | apache-2.0 | 8,450 | 0.011006
################################################################################
# iso_read.py
#
# Simple ISO NC code parsing
#
# Hirutso Enni, 2009-01-13
""" use this script to backplot nc files to *.scr file for autocad,bricscad,
draftsight,progecad,ares commander, etc....
usage: python cad_iso_read.py temp.nc temp.scr
"""
import cad_nc_read as nc
import re
import sys
################################################################################
class Parser(nc.Parser):
def __init__(self, writer):
nc.Parser.__init__(self, writer)
self.pattern_main = re.compile('([(!;].*|\s+|[a-zA-Z0-9_:](?:[+-])?\d*(?:\.\d*)?|\w\#\d+|\(.*?\)|\#\d+\=(?:[+-])?\d*(?:\.\d*)?)')
        # matches: a comment introduced by (, ! or ; (the rest of the line), or a
        # run of whitespace, or a letter/digit/underscore/colon optionally followed
        # by a signed decimal number, or a word character followed by #<digits>,
        # or a parenthesised (...) comment, or a #<n>=<signed decimal> assignment
        # add your character here > [(!;] for comments char
        # then look for the 'comment' function towards the end of the file and add another elif
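        # Illustrative tokenization (added example, not in the original file):
        # on a block like "G1 X10.5 Y-2. (tool change)" the pattern yields
        # ['G1', ' ', 'X10.5', ' ', 'Y-2.', ' ', '(tool change)'].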
def ParseWord(self, word):
if (word[0] == 'A' or word[0] == 'a'):
self.col = "axis"
self.a = eval(word[1:])
self.move = True
elif (word[0] == 'B' or word[0] == 'b'):
self.col = "axis"
self.b = eval(word[1:])
self.move = True
elif (word[0] == 'C' or word[0] == 'c'):
self.col = "axis"
self.c = eval(word[1:])
self.move = True
elif (word[0] == 'F' or word[0] == 'f'):
self.col = "axis"
self.f = eval(word[1:])
self.move = True
elif (word == 'G0' or word == 'G00' or word == 'g0' or word == 'g00'):
self.path_col = "rapid"
self.col = "rapid"
self.arc = 0
elif (word == 'G1' or word == 'G01' or word == 'g1' or word == 'g01'):
self.path_col = "feed"
self.col = "feed"
self.arc = 0
elif (word == 'G2' or word == 'G02' or word == 'g2' or word == 'g02' or word == 'G12' or word == 'g12'):
self.path_col = "feed"
self.col = "feed"
self.arc = -1
elif (word == 'G3' or word == 'G03' or word == 'g3' or word == 'g03' or word == 'G13' or word == 'g13'):
self.path_col = "feed"
self.col = "feed"
self.arc = +1
elif (word == 'G10' or word == 'g10'):
self.no_move = True
elif (word == 'L1' or word == 'l1'):
self.no_move = True
elif (word == 'G61.1' or word == 'g61.1' or word == 'G61' or word == 'g61' or word == 'G64' or word == 'g64'):
self.no_move = True
elif (word == 'G20' or word == 'G70'):
self.col = "prep"
self.set_mode(units=25.4)
elif (word == 'G21' or word == 'G71'):
self.col = "prep"
            self.set_mode(units=1.0)
        elif (word == 'G81' or word == 'g81'):
self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G82' or word == 'g82'):
            self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G83' or word == 'g83'):
self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G90' or word == 'g90'):
self.absolute()
elif (word == 'G91' or word == 'g91'):
self.incremental()
elif (word[0] == 'G') : col = "prep"
elif (word[0] == 'I' or word[0] == 'i'):
self.col = "axis"
self.i = eval(word[1:])
self.move = True
elif (word[0] == 'J' or word[0] == 'j'):
self.col = "axis"
self.j = eval(word[1:])
self.move = True
elif (word[0] == 'K' or word[0] == 'k'):
self.col = "axis"
self.k = eval(word[1:])
self.move = True
elif (word[0] == 'M') : self.col = "misc"
elif (word[0] == 'N') : self.col = "blocknum"
elif (word[0] == 'O') : self.col = "program"
elif (word[0] == 'P' or word[0] == 'p'):
if (self.no_move != True):
self.col = "axis"
self.p = eval(word[1:])
self.move = True
elif (word[0] == 'Q' or word[0] == 'q'):
if (self.no_move != True):
self.col = "axis"
self.q = eval(word[1:])
self.move = True
elif (word[0] == 'R' or word[0] == 'r'):
self.col = "axis"
self.r = eval(word[1:])
self.move = True
elif (word[0] == 'S' or word[0] == 's'):
self.col = "axis"
self.s = eval(word[1:])
self.move = True
elif (word[0] == 'T') :
self.col = "tool"
self.set_tool( eval(word[1:]) )
elif (word[0] == 'X' or word[0] == 'x'):
self.col = "axis"
self.x = eval(word[1:])
self.move = True
elif (word[0] == 'Y' or word[0] == 'y'):
self.col = "axis"
self.y = eval(word[1:])
self.move = True
elif (word[0] == 'Z' or word[0] == 'z'):
self.col = "axis"
self.z = eval(word[1:])
self.move = True
elif (word[0] == '(') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == '!') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == ';') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == '#') : self.col = "variable"
elif (word[0] == ':') : self.col = "blocknum"
elif (ord(word[0]) <= 32) : self.cdata = True
def Parse(self, name, oname=None):
self.files_open(name,oname)
#self.begin_ncblock()
#self.begin_path(None)
#self.add_line(z=500)
#self.end_path()
#self.end_ncblock()
self.path_col = None
self.f = None
self.arc = 0
while (self.readline()):
self.a = None
self.b = None
self.c = None
self.i = None
self.j = None
self.k = None
self.p = None
self.q = None
self.r = None
self.s = None
self.x = None
self.y = None
self.z = None
#self.begin_ncblock()
self.move = False
self.drill = False
self.no_move = False
words = self.pattern_main.findall(self.line)
for word in words:
self.col = None
self.cdata = False
self.ParseWord(word)
self.add_text(word, self.col, self.cdata)
if (self.drill):
self.begin_path("rapid")
self.add_line(self.x, self.y, self.r)
self.end_path()
self.begin_path("feed")
self.add_line(self.x, self.y, self.z)
self.end_path()
self.begin_path("feed")
self.add_line(self.x, self.y, self.r)
self.end_path()
else:
if (self.move and not self.no_move):
self.begin_path(self.path_col)
if (self.arc==-1):
self.add_arc(self.x, self.y, self.z, self.i, self.j, self.k, self.r, self.arc)
elif (self.arc==1):
#self.add_arc(x, y, z, i, j, k, -r, arc) #if you want to use arcs with R values uncomment the first part of this line and comment the next one
self.add_arc(self.x, self.y, self.z, self.i, self.j, self.k, self.r, self.arc)
else : self.add_line(self.x, self.y, self.z, self.a, self.b, self.c)
self.end_path()
self.end_ncblock()
kane-chen/headFirstPython | src/com/lasho/headfirst/chap4/module_import_test.py | Python | gpl-3.0 | 231 | 0.012987
'''
Created on 2014-1-21
@author: Administrator
'''
#import class/method
from athelets import get_data_filelist, get_data_in_file
james = get_data_in_file('james2.txt')
print(james.name);
print(get_data_filelist(['james2.txt']));
andela-ggikera/regex | findall.py | Python | mit | 700 | 0.001453
"""Findall regex operations in python.
findall(string[, pos[, endpos]])
Returns a list of all matches, unlike search and match,
which return match objects.
If there are no matches, it returns an empty list.
"""
import re
# look for every word in a string
pattern = re.compile(r"\w+")
result = pattern.findall("hey bro")
print result
patt = re.compile(r"a*b")
# returns ['ab', 'ab', 'ab', 'b']
res = patt.findall("abababb")
print res
# match a group of words onto a tuple
p = re.compile(r"(\w
|
+) (\w+)")
rv = p.findall("Hello world, i lived")
print rv
# Using unicode characters
print re.findall(ur"\w+", u"这是一个例子", re.UNICODE)
# using named groups inside pattern itself
patt = re.compile(r"(?P<word>\w+) (?P=word)")
COIN-L4D/L4D-intranet | server/intranet/views.py | Python | gpl-3.0 | 1,951 | 0.003588
from django.shortcuts import redirect, get_object_or_404, render
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_exempt
import json
from .models import Page, CurrentGame, VisiblePage
from .game import Manager
class ClosedView(TemplateView):
template_name = 'intranet/closed.html'
def dispatch(self, *args, **kwargs):
if Manager().is_started():
return redirect('home')
return super(ClosedView, self).dispatch(*args, **kwargs)
class IntranetBaseView(View):
""" View accesible only if a game is running """
def dispatch(self, *args, **kwargs):
if not Manager().is_started():
return redirect('closed')
return super(IntranetBaseView, self).dispatch(*args, **kwargs)
class HomeView(IntranetBaseView, TemplateView):
template_name = 'intranet/home.html'
class DeniedView(IntranetBaseView, TemplateView):
    template_name = 'intranet/denied.html'
class PageView(IntranetBaseView):
""" Base view for intranet page (those used in iframe) """
def fetch_url_name(self, **kwargs):
self.url_name = kwargs['url_name']
return self.url_name
def fetch_page(self):
self.page = get_object_or_404(Page, url_name=self.url_name)
return self.page
def page_is_visible(self):
try:
VisiblePage.objects.get(page=self.page)
return True
except VisiblePage.DoesNotExist:
return False
@xframe_options_exempt
def dispatch(self, request, *args, **kwargs):
return super(PageView, self).dispatch(request,*args,**kwargs)
def get(self, request, *args, **kwargs):
self.fetch_url_name(**kwargs)
self.fetch_page()
if self.page_is_visible():
return render(request, self.page.template_file)
else:
return redirect('denied')
beeva-fernandocerezal/rasa_nlu | rasa_nlu/utils/__init__.py | Python | apache-2.0 | 3,329 | 0.003004
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import errno
from typing import List
from typing import Optional
from typing import Text
def relative_normpath(f, path):
# type: (Optional[Text], Text) -> Optional[Text]
"""Return the path of file relative to `path`."""
if f is not None:
return os.path.normpath(os.path.relpath(f, path))
else:
return None
def create_dir(dir_path):
# type: (Text) -> None
"""Creates a directory and its super paths. Succeeds even if the path already exists."""
try:
os.makedirs(dir_path)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
def create_dir_for_file(file_path):
# type: (Text) -> None
"""Creates any missing parent directories of this files path."""
try:
        os.makedirs(os.path.dirname(file_path))
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
def recursively_find_files(resource_name):
# type: (Optional[Text]) -> List[Text]
"""Traverse directory hierarchy to find files.
`resource_name` can be a folder or a file. In both cases we will return a list of files."""
if not resource_name:
raise ValueError("Resource name '{}' must be an existing directory or file.".format(resource_name))
elif os.path.isfile(resource_name):
return [resource_name]
elif os.path.isdir(resource_name):
resources = [] # type: List[Text]
# walk the fs tree and return a list of files
nodes_to_visit = [resource_name]
while len(nodes_to_visit) > 0:
# skip hidden files
nodes_to_visit = [f for f in nodes_to_visit if not f.split("/")[-1].startswith('.')]
current_node = nodes_to_visit[0]
# if current node is a folder, schedule its children for a visit. Else add them to the resources.
if os.path.isdir(current_node):
nodes_to_visit += [os.path.join(current_node, f) for f in os.listdir(current_node)]
else:
resources += [current_node]
nodes_to_visit = nodes_to_visit[1:]
return resources
else:
raise ValueError("Could not locate the resource '{}'.".format(os.path.abspath(resource_name)))
def lazyproperty(fn):
"""Allows to avoid recomputing a property over and over. Instead the result gets stored in a local var.
Computation of the property will happen once, on the first call of the property. All succeeding calls will use
the value stored in the private property."""
attr_name = '_lazy_' + fn.__name__
@property
def _lazyprop(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazyprop
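# Usage sketch for lazyproperty (an illustrative addition, not part of the
# original module; Dataset is a hypothetical class):
#
#     class Dataset(object):
#         def __init__(self, path):
#             self.path = path
#
#         @lazyproperty
#         def examples(self):
#             # the expensive read runs only on first access
#             return open(self.path).readlines()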
def list_to_str(l, delim=", ", quote="'"):
return delim.join([quote + e + quote for e in l])
def ordered(obj):
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
xuy/readinglists | md_gen/parse.py | Python | mit | 1,942 | 0.009269
import re
import string
import sys
sys.path.append('/Users/exu/PlayGround/readinglists/')
from key.keys import *
from amazon.api import AmazonAPI
from html2text import html2text
pattern = re.compile("https?://.*amazon.com/gp/product/([0-9]+)/.*")
amazon = AmazonAPI(AMAZON_ACCESS_KEY_ID, AMAZON_SECRET_ACCESS_KEY, AMAZON_ASSOC_TAG, MaxQPS=0.9)
def uprint(s):
print s.encode('utf-8')
def get_asin(url):
global pattern
m = pattern.match(url)
if m and len(m.groups()) > 0:
return m.groups()[0]
def read_file():
    if (len(sys.argv) < 2):  # argv[0] is the script name; a file argument is required
print "Please provide a file that includes a list of Amazon links."
sys.exit(-1)
fname = sys.argv[1]
f = open(fname, 'r')
products = []
for l in f.readlines():
product = amazon.lookup(ItemId=get_asin(l))
products.append([product.title, product.editorial_review, product.large_image_url, product.offer_url])
print "Got product", product.title
return products
rtitle = re.compile('(.*)(\(.*\))')
def normalize_title(title):
""" Book titles are long. We crop out the last part that is in (part)"""
splits = re.findall(rtitle, title)
if splits:
new_title = splits[0][0]
else:
        new_title = title
return new_title
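# Example (illustrative, not in the original file):
#   normalize_title("Dune (Dune Chronicles, Book 1)")  ->  "Dune "
# (the regex crops the trailing "(...)" part but keeps the space before it)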
def sanitize_text(t):
s = html2text(t)
s = string.replace(s, "'", "’")
s = string.replace(s, "**", "*")
return s
if __name__ == '__main__':
import os.path
import cPickle
pickle_file = 'products.pickle'
products = None
if os.path.isfile(pickle_file):
products = cPickle.load(open(pickle_file, 'r'))
else:
products = read_file()
f = open(pickle_file, "wb")
cPickle.dump(products, f)
for product in products:
title = normalize_title(product[0])
uprint(title)
print '=' * len(title)
review = sanitize_text(product[1])
uprint(review)
print
uclouvain/osis_louvain | base/forms/education_group_admission.py | Python | agpl-3.0 | 2,239 | 0.000894
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from ckeditor.fields import RichTextFormField
from django import forms
from base.models.admission_condition import CONDITION_ADMISSION_ACCESSES
class UpdateLineForm(forms.Form):
admission_condition_line = forms.IntegerField(widget=forms.HiddenInput())
section = forms.CharField(widget=forms.HiddenInput())
language = forms.CharField(widget=forms.HiddenInput())
diploma = forms.CharField(widget=forms.Textarea, required=False)
conditions = forms.CharField(widget=forms.Textarea, required=False)
access = forms.ChoiceField(choices=CONDITION_ADMISSION_ACCESSES, required=False)
remarks = forms.CharField(widget=forms.Textarea, required=False)
class UpdateTextForm(forms.Form):
PARAMETERS_FOR_RICH_TEXT = dict(required=False, config_name='minimal')
text_fr = RichTextFormField(**PARAMETERS_FOR_RICH_TEXT)
text_en = RichTextFormField(**PARAMETERS_FOR_RICH_TEXT)
section = forms.CharField(widget=forms.HiddenInput())
anryko/ansible | lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_info.py | Python | gpl-3.0 | 4,728 | 0.00423
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group_info
short_description: Get information about log_group in CloudWatchLogs
description:
- Lists the specified log groups. You can list all your log groups or filter the results by prefix.
- This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
author:
- Willian Ricardo (@willricardo) <willricardo@gmail.com>
requirements: [ botocore, boto3 ]
options:
log_group_name:
description:
- The name or prefix of the log group to filter by.
type: str
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group_info:
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
description: Return the list of complex objects representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: str
creation_time:
description: The creation time of the log group.
returned: always
type: int
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: int
metric_filter_count:
description: The number of metric filters.
returned: always
type: int
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: str
stored_bytes:
description: The number of bytes stored.
returned: always
type: str
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def describe_log_group(client, log_group_name, module):
params = {}
if log_group_name:
params['logGroupNamePrefix'] = log_group_name
try:
paginator = client.get_paginator('describe_log_groups')
desc_log_group = paginator.paginate(**params).build_full_result()
return desc_log_group
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc())
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
log_group_name=dict(),
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'cloudwatchlogs_log_group_facts':
module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
desc_log_group = describe_log_group(client=logs,
log_group_name=module.params['log_group_name'],
module=module)
final_log_group_snake = []
for log_group in desc_log_group['logGroups']:
final_log_group_snake.append(camel_dict_to_snake_dict(log_group))
desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
module.exit_json(**desc_log_group_result)
if __name__ == '__main__':
main()
Nowis75/crazyflie-pc-client-leapmotion | lib/leapmotion/__init__.py | Python | gpl-2.0 | 1,076 | 0.000929
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
rjleveque/tsunami_benchmarks | nthmp_currents_2015/problem2/maketopo.py | Python | bsd-3-clause | 2,646 | 0.027967
"""
Module to create topo and qinit data files for this example.
"""
from clawpack.geoclaw import topotools
from pylab import *
def maketopo_hilo():
x = loadtxt('x.txt')
y = loadtxt('y.txt')
z = loadtxt('z.txt')
# modify x and y so that cell size is truly uniform:
dx = 1. / (3.*3600.) # 1/3"
xx = linspace(x[0], x[-1], len(x))
yy = linspace(y[-1], y[0], len(y))
zz = flipud(z)
topo = topotools.Topography()
topo.x = xx
topo.y = yy
topo.Z = zz
topo.write('hilo_flattened.tt2',topo_type=2)
def maketopo_flat():
"""
Output topography file for the entire domain
"""
nxpoints = 201
nypoints = 301
xlower = 204.812
xupper = 205.012
ylower = 19.7
yupper = 20.0
outfile= "flat.tt2"
topotools.topo2writer(outfile,topo_flat,xlower,xupper,ylower,yupper,nxpoints,nypoints)
def topo_flat(x,y):
"""
flat
"""
z = where(x < 204.91213, 30., -30.)
return z
def plot_topo_big():
figure(figsize=(8,12))
topo1 = topotools.Topography()
topo1.read('flat.tt2',2)
contourf(topo1.x,topo1.y,topo1.Z,linspace(-30,20,51), extend='both')
topo2 = topotools.Topography()
topo2.read('hilo_flattened.tt2',2)
contourf(topo2.x,topo2.y,topo2.Z,linspace(-30,20,51), extend='both')
x1 = 204.90028
x2 = 204.96509
y1 = 19.71
y2 = 19.95
plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1],'w')
axis('scaled')
colorbar()
def plot_topo():
    figure(figsize=(12,8))
topo1 = topotools.Topography()
topo1.read('flat.tt2',2)
contourf(topo1.x,topo1.y,topo1.Z,linspace(-30,20,51), extend='both')
topo2 = topotools.Topography()
    topo2.read('hilo_flattened.tt2',2)
contourf(topo2.x,topo2.y,topo2.Z,linspace(-30,20,51), extend='both')
colorbar()
x1 = 204.9
x2 = 204.955
y1 = 19.715
y2 = 19.755
axis([x1,x2,y1,y2])
gca().set_aspect(1./cos(y1*pi/180.))
ticklabel_format(format='plain',useOffset=False)
contour(topo2.x,topo2.y,topo2.Z,[0.],colors='k')
plot([204.9447],[19.7308], 'ko') # from BM description
plot([204.9437],[19.7307], 'ro') # closer to pier
# from <http://tidesandcurrents.noaa.gov/stationhome.html?id=1617760>
# location is listed as: 19 degrees 43.8' N, 155 degrees, 3.3' W
xg = 360 - (155 + 3.3/60.)
yg = 19 + 43.8/60.
plot([xg],[yg], 'bo')
#gauges.append([1125, 204.91802, 19.74517, 0., 1.e9]) #Hilo
#gauges.append([1126, 204.93003, 19.74167, 0., 1.e9]) #Hilo
#gauges.append([3333, 204.93, 19.7576, 0., 1.e9])
if __name__=='__main__':
maketopo_hilo()
maketopo_flat()
dlazz/ansible | lib/ansible/module_utils/network/aci/msc.py | Python | gpl-3.0 | 13,894 | 0.002159
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
# Copyright: (c) 2018, Dag Wieers <dag@wieers.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule, json
from ansible.module_utils.six.moves.urllib.parse import urlencode, urljoin
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native, to_bytes
def issubset(subset, superset):
    ''' Recurse through nested dictionary and compare entries '''
# Both objects are the same object
if subset is superset:
return True
# Both objects are identical
if subset == superset:
return True
# Both objects have a different type
if type(subset) != type(superset):
return False
for key, value in subset.items():
# Item from subset is missing from superset
if key not in superset:
return False
# Item has different types in subset and superset
if type(superset[key]) != type(value):
return False
# Compare if item values are subset
if isinstance(value, dict):
if not issubset(superset[key], value):
return False
elif isinstance(value, list):
if not set(value) <= set(superset[key]):
return False
elif isinstance(value, set):
if not value <= superset[key]:
return False
else:
if not value == superset[key]:
return False
return True
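# Illustrative calls (added example, not part of the original module):
#   issubset({'a': 1}, {'a': 1, 'b': 2})  -> True
#   issubset({'a': 2}, {'a': 1, 'b': 2})  -> False  (value differs)
#   issubset({'c': 3}, {'a': 1, 'b': 2})  -> False  (key missing)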
def update_qs(params):
    ''' Build a '?'-prefixed query string from the non-None parameters '''
accepted_params = dict((k, v) for (k, v) in params.items() if v is not None)
return '?' + urlencode(accepted_params)
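# e.g. (illustrative): update_qs({'name': 'tenant1', 'id': None}) -> '?name=tenant1'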
def msc_argument_spec():
return dict(
host=dict(type='str', required=True, aliases=['hostname']),
port=dict(type='int', required=False),
username=dict(type='str', default='admin'),
password=dict(type='str', required=True, no_log=True),
output_level=dict(type='str', default='normal', choices=['normal', 'info', 'debug']),
timeout=dict(type='int', default=30),
use_proxy=dict(type='bool', default=True),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
)
class MSCModule(object):
def __init__(self, module):
self.module = module
self.params = module.params
self.result = dict(changed=False)
self.headers = {'Content-Type': 'text/json'}
# normal output
self.existing = dict()
# info output
self.previous = dict()
self.proposed = dict()
self.sent = dict()
# debug output
self.filter_string = ''
self.method = None
self.path = None
self.response = None
self.status = None
self.url = None
# Ensure protocol is set
self.params['protocol'] = 'https' if self.params.get('use_ssl', True) else 'http'
# Set base_uri
if 'port' in self.params and self.params['port'] is not None:
self.baseuri = '{protocol}://{host}:{port}/api/v1/'.format(**self.params)
else:
self.baseuri = '{protocol}://{host}/api/v1/'.format(**self.params)
if self.module._debug:
self.module.warn('Enable debug output because ANSIBLE_DEBUG was set.')
self.params['output_level'] = 'debug'
if self.params['password']:
# Perform password-based authentication, log on using password
self.login()
else:
self.module.fail_json(msg="Parameter 'password' is required for authentication")
def login(self):
''' Log in to MSC '''
# Perform login request
self.url = urljoin(self.baseuri, 'auth/login')
payload = {'username': self.params['username'], 'password': self.params['password']}
resp, auth = fetch_url(self.module,
self.url,
data=json.dumps(payload),
method='POST',
headers=self.headers,
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
# Handle MSC response
if auth['status'] != 201:
self.response = auth['msg']
self.status = auth['status']
self.fail_json(msg='Authentication failed: {msg}'.format(**auth))
payload = json.loads(resp.read())
self.headers['Authorization'] = 'Bearer {token}'.format(**payload)
def request(self, path, method=None, data=None, qs=None):
''' Generic HTTP method for MSC requests. '''
self.path = path
if method is not None:
self.method = method
self.url = urljoin(self.baseuri, path)
if qs is not None:
self.url = self.url + update_qs(qs)
resp, info = fetch_url(self.module,
self.url,
headers=self.headers,
data=json.dumps(data),
method=self.method,
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'],
)
self.response = info['msg']
self.status = info['status']
# 200: OK, 201: Created, 202: Accepted, 204: No Content
if self.status in (200, 201, 202, 204):
output = resp.read()
# if self.method in ('DELETE', 'PATCH', 'POST', 'PUT') and self.status in (200, 201, 204):
# self.result['changed'] = True
if output:
return json.loads(output)
# 404: Not Found
elif self.method == 'DELETE' and self.status == 404:
return {}
# 400: Bad Request, 401: Unauthorized, 403: Forbidden,
# 405: Method Not Allowed, 406: Not Acceptable
# 500: Internal Server Error, 501: Not Implemented
elif self.status >= 400:
try:
payload = json.loads(resp.read())
except Exception:
payload = json.loads(info['body'])
if 'code' in payload:
self.fail_json(msg='MSC Error {code}: {message}'.format(**payload), data=data, info=info, payload=payload)
else:
                self.fail_json(msg='MSC Error: {0}'.format(payload), data=data, info=info, payload=payload)
return {}
d
relic7/prodimages | python/regex_matcherator_naturectr.py | Python | mit | 1,999 | 0.010505
#!/usr/bin/env python
# -*- coding: utf-8 -*-
patterns = [r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bc_jpg_makerDrop/(crop_fullsize_pad_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_center)/?.*?/(\d{9}(.*?))\.(.*?)$',
r'^.*?/bfly_jpg_makerDrop/(crop_fullsize_anchor)/?.*?/(\d{9}(.*?))\.(.*?)$']*10
strings = ["/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470409.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470408_1.jpg",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_anchor/346470407_alt01.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bc_jpg_makerDrop/crop_fullsize_pad_center/346470406_1.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/346880405.png",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470404_1.jpg",
"/mnt/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_center/346470403.png",
"/mn
|
t/Post_Complete/Complete_to_Load/nature_center/bfly_jpg_makerDrop/crop_fullsize_anchor/336470402.jpg"]*10
def matches_pattern(str, patterns):
for pattern in patterns:
if pattern.match(str):
return pattern.match(str), pattern
return False
def regex_matcherator(strings,patterns):
import re
    compiled_patterns = list(map(re.compile, patterns))
for s in strings:
if matches_pattern(s, compiled_patterns):
print matches_pattern(s, compiled_patterns)[1].pattern
print '--'.join(s.split('/')[-2:])
print matches_pattern(s, compiled_patterns)[0].groups()
print '\n'
r = regex_matcherator(strings,patterns)
#print r.next()
miyakogi/m2r | dodo.py | Python | mit | 448 | 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Doit task definitions."""
DOIT_CONFIG = {
'default_tasks': [
'flake8',
'docs',
],
'continue': True,
'verbosity': 1,
    'num_process': 2,
'par_type': 'thread',
}
def task_flake8():
return {
'actions': ['flake8 m2r tests'],
}
def task_docs():
return {
'actions': ['sphinx-build -q -W -E -n -b html docs docs/_build/html'],
}
ZettaIO/pswingw2py | pswingw2/__init__.py | Python | mit | 334 | 0
"""Convenient imports"""
from pswingw2.client import send_simple_message # noqa
from pswingw2.client import send # noqa
from pswingw2.client import send_single # noqa
from pswingw2.client import send_batch # noqa
from pswingw2.client import Client # noqa
from pswingw2.config_defaults import get_simple_config as config # noqa
muneebalam/scrapenhl2 | docs/source/conf.py | Python | mit | 4,961 | 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# scrapenhl2 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 1 17:47:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys
sys.path.insert(0, '../../')
sys.path.insert(0, '../')
sys.path.insert(0, './')
sys.path.insert(0, '../../scrapenhl2/')
sys.path.insert(0, '../../scrapenhl2/scrape/')
sys.path.insert(0, '../../scrapenhl2/manipulate/')
sys.path.insert(0, '../../scrapenhl2/plot/')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'scrapenhl2'
copyright = '2017, Muneeb Alam'
author = 'Muneeb Alam'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scrapenhl2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'scrapenhl2.tex', 'scrapenhl2 Documentation',
'Muneeb Alam', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scrapenhl2', 'scrapenhl2 Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'scrapenhl2', 'scrapenhl2 Documentation',
author, 'scrapenhl2', 'One line description of project.',
'Miscellaneous'),
]
YannChemin/wxGIPE | proc_modis_qc.py | Python | unlicense | 23,032 | 0.039293
###############################################################################
# $Id$
#
# Project: Sub1 project of IRRI
# Purpose: Quality Assessment extraction from MODIS
# Author: Yann Chemin, <yann.chemin@gmail.com>
#
###############################################################################
# Copyright (c) 2008, Yann Chemin <yann.chemin@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
#!/usr/bin/python
import wx
import wx.lib.filebrowsebutton as filebrowse
import os
# For Image Processing
import numpy as N
from osgeo import gdalnumeric
from osgeo import gdal
from osgeo import gdal_array
from osgeo.gdalconst import *
# For icons, pngs, etc coming from images.py
from wx import ImageFromStream, BitmapFromImage, EmptyIcon
import cStringIO
import images
# Define satellite bands
# Based on Landsat channels
qc = ''
# Define output file name
output = ''
# Define list of MODIS types
NameMOD = ['250','500']
# Define list of QA types
NameQC = ['modland_qa_bits','cloud','data_quality','atcorr','adjcorr','diff_orbit_from_500m']
# Define band number
bandno = ['1','2','3','4','5','6','7']
# Define Info Message
overview = """MODIS Quality Assessment Extractor
Makes Human-readable images of Quality Assessment binary bits from MOD09 products.
500m does not have "cloud" and "diff_orbit_from_500m" options.
# MODLAND QA Bits 250m Unsigned Int bits[0-1]
#00 -> class 0: Corrected product produced at ideal quality -- all bands
#01 -> class 1: Corrected product produced at less than ideal quality -- some or all bands
#10 -> class 2: Corrected product NOT produced due to cloud effect -- all bands
#11 -> class 3: Corrected product NOT produced due to other reasons -- some or all bands maybe fill value (Note that a value of [11] overrides a value of [01])
# Cloud State 250m Unsigned Int bits[2-3]
#00 -> class 0: Clear -- No clouds
#01 -> class 1: Cloudy
#10 -> class 2: Mixed
#11 -> class 3: Not Set ; Assumed Clear
# Band-wise Data Quality 250m Unsigned Int bits[4-7][8-11]
# Band-wise Data Quality 500m long Int bits[2-5][6-9][10-13][14-17][18-21][22-25][26-29]
#0000 -> class 0: highest quality
#0111 -> class 1: noisy detector
#1000 -> class 2: dead detector; data interpolated in L1B
#1001 -> class 3: solar zenith >= 86 degrees
#1010 -> class 4: solar zenith >= 85 and < 86 degrees
#1011 -> class 5: missing input
#1100 -> class 6: internal constant used in place of climatological data for at least one atmospheric constant
#1101 -> class 7: correction out of bounds, pixel constrained to extreme allowable value
#1110 -> class 8: L1B data faulty
#1111 -> class 9: not processed due to deep ocean or cloud
#Class 10-15: Combination of bits unused
# Atmospheric correction 250m Unsigned Int bit[12]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
# Adjacency correction 250m Unsigned Int bit[13]
#0 -> class 0: Not Corrected product
#1 -> class 1: Corrected product
# Different orbit from 500m product, 250m Unsigned Int bit[14]
#0 -> class 0: same orbit as 500m
#1 -> class 1: different orbit from 500m
"""
class MyFrame(wx.Frame):
def __init__(self,parent, id=-1, title='MODIS Quality Bits Extractor',
pos=(0,0),
size=(400,650),
style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, id, title, pos, size, style)
ico = images.getPngGipeIcon()
self.SetIcon(ico)
self.lognull = wx.LogNull()
# Input Filenames
self.qc = qc
self.qc_type = 'modland_qa_bits'
self.pixelres = '250'
self.band_no = '1'
self.NameMOD = NameMOD
self.NameQC = NameQC
self.bandno = bandno
self.output = output
# Construct Interface
self.make_text()
self.make_buttons()
self.make_radiobuttons1()
self.make_radiobuttons2()
self.make_radiobuttons3()
self.make_fb()
self.mbox = wx.BoxSizer(wx.VERTICAL)
self.mbox.Add((10,10))
self.mbox.Add(self.text, 1, wx.EXPAND|wx.CENTER, 10)
self.mbox.Add(self.cc2, 1, wx.EXPAND, 0)
self.mbox.Add(self.cc6, 1, wx.EXPAND, 0)
self.mbox.Add(self.rbox1, 1, wx.CENTER, 0)
self.mbox.Add(self.rbox2, 1, wx.CENTER, 0)
self.mbox.Add(self.rbox3, 1, wx.CENTER, 0)
self.mbox.Add((10,10))
self.mbox.Add((50,10))
self.mbox.Add(self.bbox, 1, wx.CENTER, 10)
self.mbox.Add((10,10))
self.SetSizer(self.mbox)
self.bindEvents()
# Process Equations, Handling and saving of output
def OnOK(self,event):
#print "qc: ", self.qc
#print "out:", self.output
if(self.qc==''):
self.OnFileInError()
else:
self.qcF = gdal.Open(self.qc)
self.bqc = self.qcF.GetRasterBand(1)
self.test = gdal.Open(self.qc)
self.CrAr( self.qc, self.output, 'GTiff' )
self.result = gdal.Open(self.output, GA_Update)
for self.y in range(self.bqc.YSize - 1, -1, -1):
print self.y
self.scanline1=self.bqc.ReadAsArray(0, self.y, self.bqc.XSize, 1, self.bqc.XSize, 1)
for self.x in range(0, self.bqc.XSize - 1, 1):
self.pix1 = self.scanline1[0][self.x]
self.scanline1[0][self.x]=self.qcbits(self.pix1,self.qc_type,int(self.pixelres),int(self.band_no))
self.result.GetRasterBand(1).WriteArray(N.reshape(self.scanline1,(1,self.bqc.XSize)), 0, self.y)
self.Destroy()
#def bin(self,i):
#"""
#Convert Binary to Integer Bit Field
#Manish Jethani (manish.j at gmx.net)
#http://bytes.com/forum/thread20381.html
#"""
#b = ''
#while i > 0:
#j = i & 1
#b = str(j) + b
#i >>= 1
#return b
def qcbits(self,qcbit,qcflag,pixres,bandno):
outclas = 0
#calculate modland QA bits extraction
if (qcflag=="modland_qa_bits"):
if (pixres==500):
# 500m product
outclas = self.qc500a(qcbit)
else:
# 250m product
outclas = self.qc250a(qcbit)
#calculate cloud state
elif (qcflag=="cloud"):
if (pixres==500):
# 500m product
# Signal user that the flag name is badly written
# therefore not understood by the application
print "flag name unavailable for 500m, please restart"
self.OnQCInError()
else:
# ONLY 250m product!
outclas = self.qc250b(qcbit)
#calculate modland QA bits extraction
elif (qcflag=="data_quality"):
if (pixres==500):
# 500m product
outclas = self.qc500c(qcbit, bandno)
else:
# 250m product
outclas = self.qc250c(qcbit, bandno)
#calculate atmospheric correction flag
elif (qcflag=="atcorr"):
if (pixres==500):
# 500m product
outclas = self.qc500d(qcbit)
else:
# 250m product
outclas = self.qc250d(qcbit)
#calculate adjacency correction flag
elif (qcflag=="adjcorr"):
if (pixres==500):
# 500m product
outclas = self.qc500e(qcbit)
else:
# 250m product
outclas = self.qc250e(qcbit)
#calculate different orbit from 500m flag
elif (qcflag=="diff_orbit_from_500m"):
if (pixres==500):
# 500m product
# Signal user that the flag name is badly written
# therefore not understood by the application
print "flag name unavailable for 500m, please restart"
self.OnQCInError()
else:
# ONLY 250m product!
outclas = self.qc250f(qcbit)
else:
# Signal user that the flag name is badly written
uclouvain/OSIS-Louvain | program_management/forms/custom_xls.py | Python | agpl-3.0 | 3,162 | 0.002847
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class CustomXlsForm(forms.Form):
required_entity = forms.BooleanField(required=False, label=_('Requirement entity'))
allocation_entity = forms.BooleanField(required=False, label=_('Attribution entity'))
credits = forms.BooleanField(required=False, label=_('Credits'))
periodicity = forms.BooleanField(required=False, label=_('Periodicity'))
    active = forms.BooleanField(required=False, label=_('Active'))
quadrimester = forms.BooleanField(required=False, label=_('Quadrimester'))
session_derogation = forms.BooleanField(required=False, label=_('Session derogation'))
    volume = forms.BooleanField(required=False, label=_('Volume'))
teacher_list = forms.BooleanField(required=False, label=_('Tutors (scores responsibles included)'))
proposition = forms.BooleanField(required=False, label=_('Proposals'))
english_title = forms.BooleanField(required=False, label=_('Title in English'))
language = forms.BooleanField(required=False, label=_('Language'))
specifications = forms.BooleanField(required=False, label=_('Specifications'))
description_fiche = forms.BooleanField(required=False, label=_('Description fiche'))
force_majeure = forms.BooleanField(required=False, label=_('Description fiche (including force majeure)'))
def __init__(self, *args, year: int = None, code: str = None, **kwargs):
super().__init__(*args, **kwargs)
self.url_action = reverse('education_group_learning_units_contains',
kwargs={'year': year, 'code': code})
def get_optional_data(self):
data = []
if self.is_valid():
for field in self.fields:
if self.cleaned_data[field]:
data.append(field)
return data
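# Usage sketch (hypothetical values, not part of the original file):
#   form = CustomXlsForm({'credits': 'on', 'language': 'on'}, year=2021, code='XYZ1001')
#   form.get_optional_data()  # -> ['credits', 'language']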
global-humanitarians-unite/ghu | ghu_web/ghu_main/models.py | Python | apache-2.0 | 3,814 | 0.00236
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from ordered_model.models import OrderedModel
from django.contrib.auth.models import User, Group
# Useful for attempting full-text search on fields
class SearchManager(models.Manager):
def __init__(self, search_fields):
super().__init__()
self.search_fields = search_fields
def search(self, terms):
# Return everything for an empty search
if not terms.strip():
return self.all()
# Currently, field__search='foo' (full text search) is supported
# only on postgres, but fake it on other backends
if settings.HAS_FULL_TEXT_SEARCH:
suffix = '__search'
else:
suffix = '__icontains'
query = None
for search_field in self.search_fields:
q = Q(**{search_field + suffix: terms})
if query is None:
query = q
else:
query = query | q
return self.filter(query)
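# Usage sketch (not part of the original file): OrgProfile below declares
#   objects = SearchManager(('name', 'summary', 'description'))
# so OrgProfile.objects.search('health clinic') ORs an __icontains (or, on
# postgres, __search) filter across those three fields.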
class NavbarEntry(OrderedModel):
URL_CHOICES = (('ghu_main:toolkits', 'Toolkits listing'),
('ghu_main:organizations', 'Organizations'))
label = models.CharField(max_length=256)
page = models.ForeignKey('Page', on_delete=models.CASCADE, null=True,
blank=True)
url = models.CharField(max_length=256, verbose_name='Special page',
choices=URL_CHOICES, blank=True)
class Meta(OrderedModel.Meta):
verbose_name = 'Navigation bar entry'
verbose_name_plural = 'Navigation bar entries'
def __str__(self):
return '{}, {}, {}'.format(self.label, self.order, self.page)
def clean(self):
if (not self.page and not self.url) or (self.page and self.url):
raise ValidationError('Must specify either a Page or Special '
'page, but not both')
class Page(models.Model):
slug = models.SlugField(blank=True, unique=True)
title = models.CharField(max_length=256)
contents = models.TextField()
template = models.ForeignKey('PageTemplate', null=True, blank=True)
def __str__(self):
        return 'Page "{}": /{}/'.format(self.title, self.slug)
class PageTemplate(models.Model):
name = models.CharField(max_length=256, verbose_name='User-friendly title')
template = models.CharField(max_length=256, verbose_name='Template to execute')
def __str__(self):
return '{} ({})'.format(self.name, self.template)
class Toolkit(models.Model):
slug = models.SlugField(unique=True)
title = models.CharField(max_length=256)
summary = models.TextField()
def __str__(self):
return 'Toolkit: {}'.format(self.title)
class ToolkitPage(OrderedModel):
toolkit = models.ForeignKey(Toolkit, related_name='pages')
slug = models.SlugField()
title = models.CharField(max_length=256)
contents = models.TextField()
order_with_respect_to = 'toolkit'
class Meta(OrderedModel.Meta):
unique_together = (('toolkit', 'slug'),)
def __str__(self):
return '{}. Order: {}'.format(self.toolkit, self.order)
class OrgProfile(models.Model):
slug = models.SlugField(blank=True, unique=True)
name = models.CharField(max_length=256)
email = models.EmailField(max_length=254)
location = models.CharField(max_length=256, null=True)
phone = models.CharField(max_length=256)
summary = models.CharField(max_length=256, null=True)
description = models.TextField()
objects = SearchManager(('name', 'summary', 'description'))
def __str__(self):
return 'OrgProfile: {}, slug: {}'.format(self.name, self.slug)
Motwani/firefox-ui-tests | .travis/create_venv.py | Python | mpl-2.0 | 3,764 | 0.000797
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
The script can be used to setup a virtual environment for running Firefox UI Tests.
It will automatically install the firefox ui test package, all its dependencies,
and optional packages if specified.
"""
import argparse
import os
import shutil
import subprocess
import sys
import urllib2
import zipfile
# Link to the folder, which contains the zip archives of virtualenv
VIRTUALENV_URL = 'https://github.com/pypa/virtualenv/archive/%(VERSION)s.zip'
VIRTUALENV_VERSION = '12.1.1'
here = os.path.dirname(os.path.abspath(__file__))
venv_script_path = 'Scripts' if sys.platform == 'win32' else 'bin'
venv_activate = os.path.join(venv_script_path, 'activate')
venv_activate_this = os.path.join(venv_script_path, 'activate_this.py')
venv_python_bin = os.path.join(venv_script_path, 'python')
usage_message = """
***********************************************************************
To run the Firefox UI Tests, activate the virtual environment:
{}{}
See firefox-ui-tests --help for all options
***********************************************************************
"""
def download(url, target):
"""Downloads the specified url to the given target."""
response = urllib2.urlopen(url)
with open(target, 'wb') as f:
f.write(response.read())
return target
def create_virtualenv(target, python_bin=None):
script_path = os.path.join(here, 'virtualenv-%s' % VIRTUALENV_VERSION,
'virtualenv.py')
print 'Downloading virtualenv %s' % VIRTUALENV_VERSION
zip_path = download(VIRTUALENV_URL % {'VERSION': VIRTUALENV_VERSION},
os.path.join(here, 'virtualenv.zip'))
try:
with zipfile.ZipFile(zip_path, 'r') as f:
f.extractall(here)
print 'Creating new virtual environment'
cmd_args = [sys.executable, script_path, target]
if python_bin:
cmd_args.extend(['-p', python_bin])
subprocess.check_call(cmd_args)
finally:
try:
            os.remove(zip_path)
except OSError:
pass
shutil.rmtree(os.path.dirname(script_path), ignore_errors=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python',
dest='python',
metavar='BINARY',
help='The Python interpreter to use.')
parser.add_argument('venv',
metavar='PATH',
help='Path to the environment to be created.')
args = parser.parse_args()
# Remove an already existent virtual environment
if os.path.exists(args.venv):
print 'Removing already existent virtual environment at: %s' % args.venv
shutil.rmtree(args.venv, True)
create_virtualenv(args.venv, python_bin=args.python)
# Activate the environment
venv = os.path.join(args.venv, venv_activate_this)
execfile(venv, dict(__file__=venv))
# Install Firefox UI tests, dependencies and optional packages
command = ['pip', 'install',
'-r', 'requirements.txt',
'-r', 'requirements_optional.txt',
]
print 'Installing Firefox UI Tests and dependencies...'
print 'Command: %s' % command
subprocess.check_call(command, cwd=os.path.dirname(here))
# Print the user instructions
print usage_message.format('' if sys.platform == 'win32' else 'source ',
os.path.join(args.venv, venv_activate))
if __name__ == "__main__":
main()
GreedyOsori/Chat | jaeeun/server.py | Python | mit | 3,583 | 0.00653
# -*- coding:utf-8 -*-
from socket import socket
import threading
import json
# id : [user name]
# action : [create | join | send_msg | broadcast | out ]
# action_value : [value used to perform the action]
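# Example message (illustrative): {"id": "alice", "action": "join", "action_value": "room1"}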
class Server :
def __init__(self):
self.server_sock = socket()
self.clients = []
self.rooms = {} #{ room : [clients] }
def __client_th__(self, client_sock):
while True :
            data = client_sock.recv(1024)
protocol = json.loads(data)
            # the JSON should probably be validated here
id = protocol['id']
action = protocol['action']
value = protocol['action_value']
response = {'id': id,
'action': '',
'action_value': ''}
if action == 'create':
response['action'] = 'resp'
if value not in self.rooms:
self.rooms[value] = [client_sock]
client_sock.room = value
response['action_value'] = 'OK'
else:
response['action_value'] = 'ERR'
client_sock.send(json.dumps(response))
elif action == 'join':
response['action'] = 'resp'
if value in self.rooms:
self.rooms[value].append(client_sock)
client_sock.room = value
response['action_value'] = 'OK'
else:
response['action_value'] = 'ERR'
client_sock.send(json.dumps(response))
elif action == 'send_msg':
response['action'] = action
response['action_value'] = value
msg = json.dumps(response)
                if hasattr(client_sock, 'room'):
                    # self.rooms maps room name -> list of client sockets,
                    # so relay only to sockets in the sender's room
                    for client in self.rooms[client_sock.room]:
                        if client != client_sock:
                            client.send(msg)
                else:  # cannot happen if the client only sends send_msg after join|create
                    pass  # is a "bad protocol" response needed? only a programming error could cause this, not normal user actions
elif action == 'broadcast':
response['action'] = action
                response['action_value'] = value
msg = json.dumps(response)
for client in self.clients:
if client != client_sock :
client.send(msg)
elif action == 'exit':
if hasattr(client_sock, 'room'):
                    self.rooms[client_sock.room].remove(client_sock)
client_sock.close()
            elif action == 'out':  # if the room owner leaves, ownership delegation becomes an issue too
pass
else :
                pass  # invalid protocol
def run(self, ip, port, backlog=10):
self.server_sock.bind((ip, port))
self.server_sock.listen(backlog)
while True:
client = self.server_sock.accept()
            self.clients.append(client[0])
            threading.Thread(target=self.__client_th__, args=(client[0],)).start()
HOST = ''
PORT = 8000
if __name__ == '__main__':
    Server().run(HOST, PORT)
kotejante/light-distribution-platform | libs/couchdb/tools/replication_helper_test.py | Python | lgpl-2.1 | 1,676 | 0.00179
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Jan lehnardt <jan@apache.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Simple functional test for the replication notification trigger"""
import time
from couchdb import client
def set_up_database(server, database):
"""Deletes and creates a `database` on a `server`"""
if database in server:
del server[database]
return server.create(database)
def run_tests():
"""Inserts a doc into database a, waits and tries to read it back from
database b
"""
# set things up
database = 'replication_notification_test'
server_a = client.Server('http://localhost:5984')
server_b = client.Server('http://localhost:5985')
# server_c = client.Server('http://localhost:5986')
db_a = set_up_database(server_a, database)
db_b = set_up_database(server_b, database)
# db_c = set_up_database(server_c, database)
doc = {'jan':'cool'}
docId = 'testdoc'
    # add doc to node a
print 'Inserting document in to database "a"'
db_a[docId] = doc
# wait a bit. Adjust depending on your --wait-threshold setting
time.sleep(5)
# read doc from node b and compare to a
try:
        assert db_b[docId] == db_a[docId] # == db_c[docId]
print 'SUCCESS at reading it back from database "b"'
except client.ResourceNotFound:
print 'FAILURE at reading it back from database "b"'
def main():
print 'Running functional replication test...'
run_tests()
print 'Done.'
if __name__ == '__main__':
main()
strummerTFIU/TFG-IsometricMaps | LAStools/ArcGIS_toolbox/scripts_production/lastilePro.py | Python | mit | 4,971 | 0.009254
#
# lastilePro.py
#
# (c) 2013, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# uses lastile.exe to compute a tiling for a folder
# worth of LiDAR files with a user-specified tile
# size (and an optional buffer)
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# LiDAR output: LAS/LAZ/BIN/TXT
#
# for licensing see http://lastools.org/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lastile production ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to LAStools
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work:
|
" + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lastile executable
lastile_path = lastools_path+"\\lastile.exe"
### check if executable exists
if os.path.exists(lastile_path) == False:
gp.AddMessage("Cannot find lastile.exe at " + lastile_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastile_path + " ...")
### create the command string for lastile.exe
command = ['"'+lastile_path+'"']
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### counting up the arguments
c = 1
### add input LiDAR
wildcards = sys.argv[c+1].split()
for wildcard in wildcards:
command.append("-i")
command.append('"' + sys.argv[c] + "\\" + wildcard + '"')
c = c + 2
### maybe the input files are flightlines
if sys.argv[c] == "true":
command.append("-files_are_flightlines")
c = c + 1
### maybe use a user-defined tile size
if sys.argv[c] != "1000":
command.append("-tile_size")
command.append(sys.argv[c].replace(",","."))
c = c + 1
### maybe create a buffer around the tiles
if sys.argv[c] != "0":
command.append("-buffer")
command.append(sys.argv[c].replace(",","."))
c = c + 1
### maybe the output will be over 2000 tiles
if sys.argv[c] == "true":
command.append("-extra_pass")
c = c + 1
### maybe an output format was selected
if sys.argv[c] != "#":
if sys.argv[c] == "las":
command.append("-olas")
elif sys.argv[c] == "laz":
command.append("-olaz")
elif sys.argv[c] == "bin":
command.append("-obin")
elif sys.argv[c] == "txt":
command.append("-otxt")
elif sys.argv[c] == "xyzi":
command.append("-otxt")
command.append("-oparse")
command.append("xyzi")
elif sys.argv[c] == "txyzi":
command.append("-otxt")
command.append("-oparse")
command.append("txyzi")
c = c + 1
### maybe an output file name was selected
if sys.argv[c] != "#":
command.append("-o")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output directory was selected
if sys.argv[c] != "#":
command.append("-odir")
command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe there are additional input options
if sys.argv[c] != "#":
additional_options = sys.argv[c].split()
for option in additional_options:
command.append(option)
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lastile
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lastile failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lastile done.")
birdchan/project_euler | problems/022/run.py | Python | mit | 762 | 0.023622
import csv
def get_name_score(name):
score = 0
for ch in name:
score += ord(ch.lower()) - ord('a') + 1
return score
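# Example from the problem statement: get_name_score('COLIN') == 53, i.e. 3+15+12+9+14.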
def find_total_name_scores_from_file(filename):
# read/parse from file
names = []
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
names = row # just one row in this file
names.sort()
# calc/add name scores
total_score = 0
for i, name in enumerate(names):
position = i+1
name_score = get_name_score(name)
        total_score += position * name_score
return total_score
###################################################
if __name__ == '__main__':
    filename = "p022_names.txt"
print find_total_name_scores_from_file(filename)
HeinerTholen/Varial | varial/test/test_histotoolsbase.py | Python | gpl-3.0 | 2,422 | 0.012386
from ROOT import TH1I, gROOT, kRed, kBlue
import unittest
import tempfile
import shutil
import os
from varial.extensions.cmsrun import Sample
from varial.wrappers import HistoWrapper
from varial.history import History
from varial import analysis
from varial import settings
from varial import diskio
class TestHistoToolsBase(unittest.TestCase):
def setUp(self):
super(TestHistoToolsBase, self).setUp()
test_fs = "fileservice/"
if not os.path.exists(test_fs):
test_fs = "varial/test/" + test_fs
settings.DIR_FILESERVICE = test_fs
if (not os.path.exists(test_fs + "tt.root")) \
or (not os.path.exists(test_fs + "ttgamma.root")) \
or (not os.path.exists(test_fs + "zjets.root")):
self.fail("Fileservice testfiles not present!")
# create samples
analysis.all_samples["tt"] = Sample(
name = "tt",
is_data = True,
lumi = 3.,
legend = "pseudo data",
input_files = ["none"],
)
analysis.all_samples["ttgamma"] = Sample(
name = "ttgamma",
lumi = 4.,
legend = "tt gamma",
input_files = ["none"],
)
analysis.all_samples["zjets"] = Sample(
name = "zjets",
lumi = 0.1,
legend = "z jets",
input_files = ["none"],
)
analysis.colors = {
"tt gamma": kRed,
"z jets": kBlue
}
settings.stacking_order = [
"tt gamma",
"z jets"
]
analysis.active_samples = analysis.all_samples.keys()
# create a test wrapper
h1 = TH1I("h1", "H1", 2, .5, 4.5)
h1.Fill(1)
h1.Fill(3,2)
hist = History("test_op") # create some fake history
hist.add_args([History("fake_input_A"), History("fake_input_B")])
hist.add_kws({"john": "cleese"})
self.test_wrp = HistoWrapper(
h1,
name="Nam3",
title="T1tl3",
history=hist
)
self.test_dir = tempfile.mkdtemp()
analysis.cwd = self.test_dir
def tearDown(self):
super(TestHistoToolsBase, self).tearDown()
del self.test_wrp
diskio.close_open_root_files()
gROOT.Reset()
if os.path.exists(self.test_dir):
os.system('rm -r %s' % self.test_dir)
willseward/django-dynamic-preferences | dynamic_preferences/managers.py | Python | bsd-3-clause | 4,981 | 0.001004
import collections
from .settings import preferences_settings
from .exceptions import CachedValueNotFound, DoesNotExist
class PreferencesManager(collections.Mapping):
"""Handle retrieving / caching of preferences"""
def __init__(self, model, registry, **kwargs):
self.model = model
self.registry = registry
self.queryset = self.model.objects.all()
self.instance = kwargs.get('instance')
if self.instance:
self.queryset = self.queryset.filter(instance=self.instance)
@property
def cache(self):
from django.core.cache import caches
return caches['default']
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
section, name = self.parse_lookup(key)
self.update_db_pref(section=section, name=name, value=value)
def __repr__(self):
return repr(self.all())
def __iter__(self):
return self.all().__iter__()
def __len__(self):
return len(self.all())
def get_cache_key(self, section, name):
"""Return the cache key corresponding to a given preference"""
if not self.instance:
return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name)
return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, section, name, self.instance.pk)
def from_cache(self, section, name):
"""Return a preference raw_value from cache"""
cached_value = self.cache.get(
self.get_cache_key(section, name), CachedValueNotFound)
if cached_value is CachedValueNotFound:
raise CachedValueNotFound
return self.registry.get(section=section, name=name).serializer.deserialize(cached_value)
def to_cache(self, pref):
"""Update/create the cache value for the given preference model instance"""
self.cache.set(
self.get_cache_key(pref.section, pref.name), pref.raw_value, None)
def pref_obj(self, section, name):
return self.registry.get(section=section, name=name)
def parse_lookup(self, lookup):
try:
section, name = lookup.split(
preferences_settings.SECTION_KEY_SEPARATOR)
except ValueError:
name = lookup
section = None
return section, name
def get(self, key, model=False):
"""Return the value of a single preference using a dotted path key"""
section, name = self.parse_lookup(key)
if model:
            return self.get_db_pref(section=section, name=name)
try:
return self.from_cache(section, name)
except CachedValueNotFound:
pass
db_pref = self.get_db_pref(section=section, name=name)
self.to_cache(db_pref)
return db_pref.value
def get_db_pref(self, section, name):
try:
pref = self.queryset.get(section=section, name=name)
except self.model.DoesNotExist:
pref_obj = self.pref_obj(section=section, name=name)
            pref = self.create_db_pref(
section=section, name=name, value=pref_obj.default)
return pref
    def update_db_pref(self, section, name, value):
try:
db_pref = self.queryset.get(section=section, name=name)
db_pref.value = value
db_pref.save()
except self.model.DoesNotExist:
return self.create_db_pref(section, name, value)
return db_pref
def create_db_pref(self, section, name, value):
if self.instance:
db_pref = self.model(
section=section, name=name, instance=self.instance)
else:
db_pref = self.model(section=section, name=name)
db_pref.value = value
db_pref.save()
return db_pref
def all(self):
"""Return a dictionnary containing all preferences by section
Loaded from cache or from db in case of cold cache
"""
a = {}
try:
for preference in self.registry.preferences():
a[preference.identifier()] = self.from_cache(
preference.section, preference.name)
except CachedValueNotFound:
return self.load_from_db()
return a
def load_from_db(self):
"""Return a dictionnary of preferences by section directly from DB"""
a = {}
db_prefs = {p.preference.identifier(): p for p in self.queryset}
for preference in self.registry.preferences():
try:
db_pref = db_prefs[preference.identifier()]
except KeyError:
db_pref = self.create_db_pref(
section=preference.section, name=preference.name, value=preference.default)
self.to_cache(db_pref)
a[preference.identifier()] = self.from_cache(
preference.section, preference.name)
return a
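# Usage sketch (illustrative; model/registry names are hypothetical and the
# dotted-path separator comes from preferences_settings.SECTION_KEY_SEPARATOR,
# assumed '__' here):
#   manager = PreferencesManager(PreferenceModel, registry)
#   manager['general__theme']           # cache first, DB on a cold cache
#   manager['general__theme'] = 'dark'  # writes through update_db_pref()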
pavlenk0/my-catalog | catalog/migrations/0007_auto_20170316_1730.py | Python | bsd-3-clause | 649 | 0.001541
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-16 15:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0006_auto_20170316_1444'),
]
operations = [
migrations.AlterField(
model_name='book',
name='isbn',
field=models.CharField(help_text='''13 Character\n
<a href="https
|
://www.isbn-international.org/content/what-isbn">ISBN number</a>''',
max_length=13, verbose_name='ISBN'),
),
]
PyCon/pc-bot | pycon_bot/modes/base.py | Python | bsd-3-clause | 12,485 | 0.004806
from __future__ import division
import importlib
import re
import time
class SkeletonMode(object):
"""Skeleton (base) mode.
This mode can take two commands:
- change to another mode
- print help
It is also able to send messages to the channel.
This mode must superclass all other modes, or you
will likely get undesired behavior."""
def __init__(self, bot):
self.bot = bot
# information about where we are in the meeting
self._in_meeting = False
def msg(self, channel, msg, *args):
"""Send a message to the given channel."""
# Unicode makes Twisted (or SOMETHING) sad. ASCII.
self.bot.msg(channel, (msg % args).encode('ascii', 'ignore'))
def exec_command(self, command, command_type, user, channel, *args):
"""Execute an arbitrary command, provided it is found on the mode."""
# if this is a command beginning with a comma,
# then inform the user that the comma is superfluous
if command.startswith(','):
self.msg(user, 'A leading comma is only necessary for chair '
'commands.')
return
# find the correct command and execute it
method = '%s_%s' % (command_type, command)
if hasattr(self, method):
if command_type == 'chair':
return getattr(self, method)(user, channel, *args)
else:
return getattr(self, method)(user, *args)
# whups, we clearly weren't able to find the command...bork out
help_command = 'help'
if command_type == 'chair':
help_command = ',' + help_command
self.msg(channel, "Sorry, I don't recognize that command. Issue `%s` for a command list." % help_command)
else:
self.msg(user, "Sorry, I don't recognize that command. Issue `%s` for a command list." % help_command)
def chair_mode(self, user, channel, new_mode=None, _silent=False):
"""Set the channel's mode. If no mode is provided,
print out the mode we're in now.
If the requested mode is "none", then set us into
the base mode."""
# if no argument is given, print out the mode that
# we are in now
if not new_mode:
            # __module__ is already a string (e.g. 'pycon_bot.modes.review'),
            # so take its final component rather than a nonexistent __name__.
            mode_name = self.bot.mode.__class__.__module__.split('.')[-1].lower()
            if mode_name == 'base':
                mode_name = '(none)'
            self.msg(channel, "Current mode: %s" % mode_name)
return
# okay, we were asked to *set* the mode -- do that now
# sanity check: however, if we were given "none", that just
# means set in base mode
if new_mode.lower() == 'none':
self.bot.mode = SkeletonMode(self.bot)
if not _silent:
self.msg(channel, 'Mode deactivated.')
return
try:
mod = importlib.import_module('pycon_bot.modes.%s' % new_mode)
self.bot.mode = mod.Mode(self.bot)
self.msg(channel, 'Activated %s mode.' % new_mode)
except (ImportError, AttributeError) as e:
self.msg(channel, 'Unable to load mode `%s`: %s' % (new_mode, e))
def chair_help(self, user, channel, command=None):
"""Return a list of chair commands that we currently understand.
If a specific command is given, print its docstring."""
return self._help(user, channel, 'chair', command=command)
def private_help(self, user, command=None):
"""Return a list of private message commands that we currently understand.
If a specific command is specified, print its docstring."""
return self._help(user, user, 'private', command=command)
def _help(self, user, channel, command_type, command=None):
# if an argument is given, print help about that specific command
if command:
command = command.replace(',', '')
method = getattr(self, '%s_%s' % (command_type, command), None)
# sanity check: does this method actually exist?
if not method:
help_command = 'help'
if command_type == 'chair':
help_command = ',%s' % help_command
self.msg(channel, 'This command does not exist. Issue `%s` by itself for a command list.' % help_command)
return
# okay, now take the docstring and present it as help; however
# we need to reformat my docstrings to be more IRC friendly -- specifically:
# - change single `\n` to just spaces
# - change double `\n` to single `\n`
help_text = method.__doc__
help_text = re.sub(r'\\n[ ]+\\n', '|---|', help_text)
help_text = re.sub(r'\s+', ' ', help_text)
help_text = help_text.replace('|---|', '\n')
self.msg(channel, help_text)
return
# okay, give a list of the commands available
commands = []
for attr in dir(self):
if callable(getattr(self, attr)) and attr.startswith('%s_' % command_type):
if command_type == 'chair':
command_name = ',%s' % attr[len(command_type) + 1:]
else:
command_name = attr[len(command_type) + 1:]
commands.append(command_name)
commands.sort()
# now print out the list of commands to the channel
self.msg(channel, 'I recognize the following %s commands:' % command_type)
msg_queue = ' '
for i in range(0, len(commands)):
command = commands[i]
msg_queue += command
if i % 3 != 2 and i != len(commands) - 1:
msg_queue += (' ' * (20 - (len(command) * 2)))
else:
self.msg(channel, msg_queue)
msg_queue = ' '
class BaseMode(SkeletonMode):
"""Base class for all modes, handling all the base commands."""
def __init__(self, bot):
super(BaseMode, self).__init__(bot)
self.reported_in = set()
self.nonvoters = set()
@property
def nonvoter_list(self):
return ', '.join(self.nonvoters) if self.nonvoters else 'none'
def names(self, channel):
"""Prompt everyone in the channel to write their names.
Note who has done so in order to easily compile a non-voter list."""
self.msg(channel, 'Please write your full name in the channel, for the meeting records.')
self.bot.state_handler = self.handler_user_names
def chair_nonvoter(self, user, channel, *users):
"""Set the given use
|
r to a non-voter. If no user is specified,
then print the list of all non-voters.
|
Exception: If we're just starting the meeting, then set anyone
who has not reported in to be a non-voter."""
# this is a special command if we're in the "reporting in" phase;
# set as a non-voter everyone who hasn't reported in yet
# note: also adds as a non-voter the person who ran the command
if self.bot.state_handler == self.handler_user_names and not users:
def _(names):
laggards = set(names) - self.reported_in - self.nonvoters
laggards.remove(self.bot.nickname)
laggards.add(user)
if laggards:
self.nonvoters.update(laggards)
self.msg(channel, 'Will no longer pester %s.' % ', '.join(laggards))
self.bot.names(channel).addCallback(_)
return
# run normally
users = set(users)
users.discard(self.bot.nickname)
if not users:
self.msg(channel, "Nonvoters: %s.", self.nonvoter_list)
return
self.nonvoters.update(users)
self.msg(channel, "Will no longer pester %s.", ', '.join(users
hickerson/bbn | fable/fable_sources/libtbx/command_line/find_files.py | Python | mit | 3,114 | 0.019268
from __future__ import division
from libtbx.path import walk_source_tree
from libtbx.str_utils import show_string
from libtbx.utils import Sorry
from libtbx.option_parser import option_parser
from fnmatch import fnmatch
import re
import sys, os
def read_lines_if_possible(file_path):
try: f = open(file_path, "r")
except IOError: return []
return f.read().splitlines()
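# Example invocation (illustrative):
#   libtbx.find_files --top=modules --grep=TODO --ignore_case "*.py"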
def run(args, command_name="libtbx.find_files"):
if (len(args) == 0): args = ["--help"]
command_line = (option_parser(
usage="%s [options] pattern ..." % command_name,
description="Recursively finds all files matching patterns,\n"
"excluding CVS and .svn directories and .pyc files.")
.option("-t", "--top",
action="append",
type="string",
metavar="PATH",
help="top-level directory where search starts"
" (default is current working directory)")
.option("-g", "--grep",
action="append",
type="string",
metavar="PATTERN",
help="find regular expression pattern in each file (multiple"
" -g/--grep options can be given)")
.option("-i", "--ignore_case",
action="store_true",
default=False,
help="with -g/--grep: case-insensitive match")
    .option("-f", "--file_names_only",
action="store_true",
default=False,
help="with -g/--grep: show file names only, not the matching lines")
.option("-q", "--quote",
action="store_true",
default=False,
help="quote file names")
).process(args=args)
fn_patterns = command_line.args
co = command_line.options
grep_flags = 0
if (co.ignore_case):
grep_flags |= re.IGNORECASE
if (len(fn_patterns) == 0):
    fn_patterns = ["*"]
tops = co.top
if (tops is None):
tops = ["."]
for top in tops:
if (not os.path.isdir(top)):
raise Sorry("Not a directory: %s" % show_string(top))
for file_path in walk_source_tree(top=top):
file_name = os.path.basename(file_path)
for fn_pattern in fn_patterns:
if (fnmatch(file_name, fn_pattern)):
if (co.quote): fp = show_string(file_path)
else: fp = file_path
if (co.grep is None):
print fp
else:
is_binary_file = co.file_names_only
for line in read_lines_if_possible(file_path=file_path):
if (not is_binary_file):
is_binary_file = "\0" in line
def line_matches_all_grep_patterns():
for grep_pattern in co.grep:
if (re.search(
pattern=grep_pattern,
string=line,
flags=grep_flags) is None):
return False
return True
if (line_matches_all_grep_patterns()):
if (co.file_names_only):
print fp
break
elif (is_binary_file):
print "%s: match in binary file" % fp
break
else:
print "%s: %s" % (fp, line)
if (__name__ == "__main__"):
run(sys.argv[1:])
meidli/yabgp | yabgp/message/attribute/atomicaggregate.py | Python | apache-2.0 | 1,948 | 0
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
from yabgp.message.attribute import Attribute
from yabgp.message.attribute import AttributeID
from yabgp.message.attribute import AttributeFlag
from yabgp.common import constants as bgp_cons
from yabgp.common import exception as excep
class AtomicAggregate(Attribute):
"""
ATOMIC_AGGREGATE is a well-known discretionary attribute of length 0.
"""
ID = AttributeID.ATOMIC_AGGREGATE
FLAG = AttributeFlag.TRANSITIVE
@classmethod
def parse(cls, value):
"""
parse bgp ATOMIC_AGGREGATE attribute
:param value:
"""
if not value:
# return value
# return str(value, encoding="utf-8")
return bytes.decode(value)
else:
raise excep.UpdateMessageError(
sub_error=bgp_cons.ERR_MSG_UPDATE_OPTIONAL_ATTR,
data=value)
@classmethod
def construct(cls, value):
"""construct a ATOMIC_AGGREGATE path attribute
:param value:
"""
if value:
raise excep.UpdateMessageError(
sub_error=bgp_cons.ERR_MSG_UPDATE_OPTIONAL_ATTR,
data='')
else:
            value = 0
            return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
                + struct.pack('!B', value)
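# Illustrative wire format (per RFC 4271, assuming TRANSITIVE flag 0x40 and
# ATOMIC_AGGREGATE type code 6): construct(value=None) -> '\x40\x06\x00',
# i.e. flags byte, type byte, and a zero attribute length.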
operepo/ope | laptop_credential/winsys/tests/test_fs/test_fs.py | Python | mit | 1,100 | 0.030909
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import tempfile
from winsys._compat import unittest
import uuid
import win32file
from winsys.tests.test_fs import utils
from winsys import fs
class TestFS (unittest.TestCase):
filenames = ["%d" % i for i in range (5)]
def setUp (self):
utils.mktemp ()
for filename in self.filenames:
with open (os.path.join (utils.TEST_ROOT, filename), "w"):
pass
def tearDown (self):
utils.rmtemp ()
def test_glob (self):
import glob
pattern = os.path.join (utils.TEST_ROOT, "*")
self.assertEquals (list (fs.glob (pattern)), glob.glob (pattern))
def test_listdir (self):
import os
fs_version = list (fs.listdir (utils.TEST_ROOT))
    os_version = os.listdir (utils.TEST_ROOT)
self.assertEquals (fs_version, os_version, "%s differs from %s" % (fs_version, os_version))
#
# All the other module-level functions are hand-offs
# to the corresponding Entry methods.
#
if __name__ == "__main__":
unittest.main ()
if sys.stdout.isatty (): raw_input ("Press enter...")
PeterLauris/aifh | vol1/python-examples/lib/aifh/train.py | Python | apache-2.0 | 13,698 | 0.003066
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import sys
import numpy as np
class Train(object):
""" Basic training class. Allows for either minimization or maximization, though all implementations may not
support both.
"""
def __init__(self, goal_minimize=True):
self.max_iterations = 100000
self.position = []
self.best_score = 0
self.goal_minimize = goal_minimize
self.display_final = True
self.display_iteration = False
self.stop_score = None
def better_than(self, is_this, than_that):
"""Determine if one score is better than the other, based on minimization settings.
@param is_this: The first score to compare.
@param than_that: The second score to compare.
@return: True, if the first score is better than the second.
"""
if self.goal_minimize:
return is_this < than_that
else:
return is_this > than_that
def should_stop(self, iteration, best_score):
""" Determine if we should stop.
@param iteration: The current iteration.
@param best_score: The current best score.
@return: True, if we should stop.
"""
if iteration > self.max_iterations:
return True
if self.stop_score is not None:
if self.better_than(best_score, self.stop_score):
return True
return False
class TrainGreedRandom(Train):
"""
The Greedy Random learning algorithm is a very primitive random-walk algorithm that only takes steps that serve
to move the Machine Learning algorithm to a more optimal position. This learning algorithm essentially chooses
random locations for the long term memory until a better set is found.
http://en.wikipedia.org/wiki/Random_walk
"""
def __init__(self, low, high, goal_minimize=True):
"""
Construct a greedy random trainer.
@param low: The low end of random numbers to generate.
@param high: The high end of random numbers to generate.
@param goal_minimize: Is the goal to minimize?
"""
        self.low = low
        self.high = high
Train.__init__(self, goal_minimize)
def train(self, x0, funct):
"""
Train with the specified score function.
@param x0: The initial vector for long-term memory.
@param funct: The score function. We attempt to minimize or maximize this.
@return: The trained long-term memory vector.
"""
iteration_number = 1
self.position = list(x0)
self.best_score = funct(self.position)
while not self.should_stop(iteration_number, self.best_score):
# Clone current position, create a new array of same size.
trial_position = list(self.position)
# Randomize trial position.
self.perform_randomization(trial_position)
# Obtain new trial score.
trial_score = funct(trial_position)
if self.better_than(trial_score, self.best_score):
self.best_score = trial_score
self.position = trial_position
current = funct(self.position)
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
iteration_number += 1
if self.display_final:
print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
return self.position
def perform_randomization(self, vec):
for i in xrange(0, len(vec)):
vec[i] = np.random.uniform(self.low, self.high)
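# Usage sketch (illustrative): minimize a simple quadratic with greedy random search.
#   trainer = TrainGreedRandom(low=-1.0, high=1.0)
#   best = trainer.train([0.5, -0.5], lambda v: sum(x * x for x in v))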
class TrainHillClimb(Train):
"""
Train using hill climbing. Hill climbing can be used to optimize the long term memory of a Machine Learning
Algorithm. This is done by moving the current long term memory values to a new location if that new location
gives a better score from the scoring function.
http://en.wikipedia.org/wiki/Hill_climbing
"""
def __init__(self, goal_minimize=True):
Train.__init__(self, goal_minimize)
def train(self, x0, funct, acceleration=1.2, step_size=1.0):
"""
Train up to the specified maximum number of iterations using hill climbing.
@param x0: The initial vector for long-term memory.
@param funct: The score function. We attempt to minimize or maximize this.
@param acceleration: The acceleration (default=1.2)
@param step_size: The step size (default=1.0)
@return: The trained long-term memory vector.
"""
iteration_number = 1
self.position = list(x0)
self.best_score = funct(self.position)
step_size = [step_size] * len(x0)
candidate = [0] * 5
candidate[0] = -acceleration
candidate[1] = -1 / acceleration
candidate[2] = 0
candidate[3] = 1 / acceleration
candidate[4] = acceleration
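        # The five multipliers probe each dimension with a larger step
        # (+/- acceleration), a smaller step (+/- 1/acceleration), and no move;
        # the best of the five adjusts that dimension's step size below.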
while not self.should_stop(iteration_number, self.best_score):
if self.goal_minimize:
best_step_score = sys.float_info.max
else:
best_step_score = sys.float_info.min
for dimension in xrange(0, len(self.position)):
best = -1
for i in xrange(0, len(candidate)):
# Take a step
self.position[dimension] += candidate[i] * step_size[dimension]
# Obtain new trial score.
trial_score = funct(self.position)
# Step back, we only want to try movement in one dimension.
self.position[dimension] -= candidate[i] * step_size[dimension]
# Record best step taken
if self.better_than(trial_score, best_step_score):
best_step_score = trial_score
best = i
if best != -1:
self.best_score = best_step_score
self.position[dimension] += candidate[best] * step_size[dimension]
step_size[dimension] += candidate[best]
if self.display_iteration:
print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
iteration_number += 1
if self.display_final:
print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
return self.position
class TrainAnneal(Train):
"""
Train a Machine Learning Algorithm using Simulated Annealing. Simulated Annealing is a Monte Carlo algorithm
that is based on annealing in metallurgy, a technique involving heating and controlled cooling of a
material to increase the size of its crystals and reduce their defects, both are attributes of the material
that depend on its thermodynamic free energy.
    The Simulated Annealing algorithm works by randomly changing a vector of doubles. This is the long term memory
    of the Machine Learning algorithm. While this happens a temperature is slowly decreased. When this
    t
bewiwi/puppetboard | puppetboard/default_settings.py | Python | apache-2.0 | 1,121 | 0.005352
import os
PUPPETDB_HOST = 'localhost'
PUPPETDB_PORT = 8080
PUPPETDB_SSL_VERIFY = True
PUPPETDB_KEY = None
PUPPETDB_CERT = None
PUPPETDB_TIMEOUT = 20
SECRET_KEY = os.urandom(24)
DEV_LISTEN_HOST = '127.0.0.1'
DEV_LISTEN_PORT = 5000
DEV_COFFEE_LOCATION = 'coffee'
UNRESPONSIVE_HOURS = 2
ENABLE_QUERY = True
LOCALISE_TIMESTAMP = True
LOGLEVEL = 'info'
REPORTS_COUNT = 10
OFFLINE_MODE = False
ENABLE_CATALOG = False
GRAPH_FACTS = ['architecture',
'domain',
'lsbcodename',
'lsbdistcodename',
'lsbdistid',
'lsbdistrelease',
'lsbmajdistrelease',
'netmask',
'osfamily',
'puppetversion',
'processorcount']
INVENTORY_FACTS = [ ('Hostname', 'fqdn' ),
('IP Address', 'ipaddress' ),
('OS', 'lsbdistdescription'),
('Architecture', 'hardwaremodel' ),
('Kernel Version', 'kernelrelease' ),
('Puppet Version', 'puppetversion' ), ]
n00bsys0p/altcoin-abe | test/test_btc200.py | Python | agpl-3.0 | 98,158 | 0.00272
# Copyright(C) 2014 by Abe developers.
# test_btc200.py: test Abe loading through Bitcoin Block 200.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
import pytest
from db import testdb
import Abe.util
import Abe.Chain
@pytest.fixture(scope="module")
def btc200(testdb):
btc_chain = Abe.Chain.create('Bitcoin')
blocks = []
for hex in _blocks():
ds = Abe.util.str_to_ds(hex.decode('hex'))
hash = btc_chain.ds_block_header_hash(ds)
b = btc_chain.ds_parse_block(ds)
b['hash'] = hash
blocks.append(b)
store = testdb.store
btc_chain = store.get_chain_by_name(btc_chain.name)
for b in blocks:
store.import_block(b, chain = btc_chain)
return store
def test_block_number(btc200):
assert btc200.get_block_number(1) == 200
@pytest.fixture(scope="module")
def coinbase_200(btc200):
return btc200.export_tx(tx_hash = '2b1f06c2401d3b49a33c3f5ad5864c0bc70044c4068f9174546f3cfc1887d5ba')
def test_coinbase_hash(coinbase_200):
assert coinbase_200['hash'] == '2b1f06c2401d3b49a33c3f5ad5864c0bc70044c4068f9174546f3cfc1887d5ba'
def test_coinbase_in(coinbase_200):
assert len(coinbase_200['in']) == 1
assert coinbase_200['vin_sz'] == 1
def test_coinbase_lock_time(coinbase_200):
assert coinbase_200['lock_time'] == 0
def test_coinbase_prev_out(coinbase_200):
assert coinbase_200['in'][0]['prev_out'] == {
"hash": "0000000000000000000000000000000000000000000000000000000000000000",
"n": 4294967295
}
def test_coinbase_raw_scriptSig(coinbase_200):
assert coinbase_200['in'][0]['raw_scriptSig'] == "04ffff001d0138"
def test_coinbase_out(coinbase_200):
assert len(coinbase_200['out']) == 1
assert coinbase_200['vout_sz'] == 1
def test_coinbase_raw_scriptPubKey(coinbase_200):
assert coinbase_200['out'][0]['raw_scriptPubKey'] == \
"41045e071dedd1ed03721c6e9bba28fc276795421a378637fb41090192bb9f208630dcbac5862a3baeb9df3ca6e4e256b7fd2404824c20198ca1b004ee2197866433ac"
def test_coinbase_value(coinbase_200):
assert coinbase_200['out'][0]['value'] == "50.00000000"
def test_coinbase_size(coinbase_200):
assert coinbase_200['size'] == 134
def test_coinbase_ver(coinbase_200):
assert coinbase_200['ver'] == 1
@pytest.fixture(scope="module")
def b182t1(btc200):
return btc200.export_tx(
tx_hash = '591e91f809d716912ca1d4a9295e70c3e78bab077683f79350f101da64588073',
format = 'browser')
def test_tx_hash(b182t1):
assert b182t1['hash'] == '591e91f809d716912ca1d4a9295e70c3e78bab077683f79350f101da64588073'
def test_tx_version(b182t1):
assert b182t1['version'] == 1
def test_tx_lockTime(b182t1):
assert b182t1['lockTime'] == 0
def test_tx_size(b182t1):
assert b182t1['size'] == 275
def test_tx_cc(b182t1):
assert len(b182t1['chain_candidates']) == 1
def test_tx_chain_name(b182t1):
assert b182t1['chain_candidates'][0]['chain'].name == 'Bitcoin'
def test_tx_in_longest(b182t1):
assert b182t1['chain_candidates'][0]['in_longest']
def test_tx_block_nTime(b182t1):
assert b182t1['chain_candidates'][0]['block_nTime'] == 1231740736
def test_tx_block_height(b182t1):
assert b182t1['chain_candidates'][0]['block_height'] == 182
def test_tx_block_hash(b182t1):
assert b182t1['chain_candidates'][0]['block_hash'] == \
'0000000054487811fc4ff7a95be738aa5ad9320c394c482b27c0da28b227ad5d'
def test_tx_tx_pos(b182t1):
assert b182t1['chain_candidates'][0]['tx_pos'] == 1
def test_tx_in(b182t1):
assert len(b182t1['in']) == 1
def test_tx_in_pos(b182t1):
assert b182t1['in'][0]['pos'] == 0
def test_tx_in_binscript(b182t1):
    assert b182t1['in'][0]['binscript'] == \
        '47304402201f27e51caeb9a0988a1e50799ff0af94a3902403c3ad4068b063e7b4d1b0a76702206713f69bd344058b0dee55a9798759092d0916dbbc3e592fee43060005ddc17401'.decode('hex')
def test_tx_in_value(b182t1):
assert b182t1['in'][0]['value'] == 3000000000
def test_tx_in_prev_out(b182t1):
assert b182t1['in'][0]['o_hash'] == 'a16f3ce4dd5deb92d98ef5cf8afeaf0775ebca408f708b2146c4fb42b41e14be'
assert b182t1['in'][0]['o_pos'] == 1
def test_tx_in_script_type(b182t1):
assert b182t1['in'][0]['script_type'] == Abe.Chain.SCRIPT_TYPE_PUBKEY
def test_tx_in_binaddr(b182t1):
assert b182t1['in'][0]['binaddr'] == '11b366edfc0a8b66feebae5c2e25a7b6a5d1cf31'.decode('hex')
def test_tx_out(b182t1):
assert len(b182t1['out']) == 2
def test_tx_out_pos(b182t1):
assert b182t1['out'][0]['pos'] == 0
assert b182t1['out'][1]['pos'] == 1
def test_tx_out_binscript(b182t1):
assert b182t1['out'][0]['binscript'] == '410401518fa1d1e1e3e162852d68d9be1c0abad5e3d6297ec95f1f91b909dc1afe616d6876f92918451ca387c4387609ae1a895007096195a824baf9c38ea98c09c3ac'.decode('hex')
assert b182t1['out'][1]['binscript'] == '410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac'.decode('hex')
def test_tx_out_value(b182t1):
assert b182t1['out'][0]['value'] == 100000000
assert b182t1['out'][1]['value'] == 2900000000
def test_tx_out_redeemed(b182t1):
assert b182t1['out'][0]['o_hash'] is None
assert b182t1['out'][0]['o_pos'] is None
assert b182t1['out'][1]['o_hash'] == '12b5633bad1f9c167d523ad1aa1947b2732a865bf5414eab2f9e5ae5d5c191ba'
assert b182t1['out'][1]['o_pos'] == 0
def test_tx_out_binaddr(b182t1):
assert b182t1['out'][0]['binaddr'] == 'db3b465a2b678e0bdc3e4944bb41abb5a795ae04'.decode('hex')
assert b182t1['out'][1]['binaddr'] == '11b366edfc0a8b66feebae5c2e25a7b6a5d1cf31'.decode('hex')
def test_tx_value_in(b182t1):
assert b182t1['value_in'] == 3000000000
def test_tx_value_out(b182t1):
assert b182t1['value_out'] == 3000000000
def _blocks():
"""Bitcoin Blocks 0-200 as an array of hex strings."""
return [
'0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c0101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff0100f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000',
'010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e362990101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000',
'010000004860eb18bf1b1620e37e9490fc8a427514416fd75159ab86688e9a8300000000d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9bb0bc6649ffff001d08d2bd610101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff001d010bffffffff0100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac00000000',
'01000000bddd99ccfda39da1b108ce1a5d70038d0a967bacb68b6b63065f626a0000000044f672226090d85db9a9f2fbfe5f0f9609b387af7be5b7fbb7a1767c831c9e995dbe6649ffff001d05e0ed6d0101000000010000000000000000000000000000000000000000000000000000000000000000fffffff
|
openstax/openstax-cms
|
pages/tests.py
|
Python
|
agpl-3.0
| 9,623
| 0.002286
|
from django.test import TestCase
from django.core.management import call_command
from wagtail.tests.utils import WagtailTestUtils, WagtailPageTests
from wagtail.core.models import Page
from pages.models import (HomePage,
HigherEducation,
ContactUs,
AboutUsPage,
GeneralPage,
Supporters,
MapPage,
Give,
TermsOfService,
AP,
FAQ,
Support,
GiveForm,
Accessibility,
Licensing,
CompCopy,
AdoptForm,
InterestForm,
Technology,
ErrataList,
PrivacyPolicy,
PrintOrder,
ResearchPage,
TeamPage,
Careers,
Impact,
InstitutionalPartnership,
HeroJourneyPage,
InstitutionalPartnerProgramPage,
CreatorFestPage,
PartnersPage,
WebinarPage,
MathQuizPage,
LLPHPage,
TutorMarketing,
TutorLanding,
Subjects,
Subject)
from news.models import NewsIndex, PressIndex
from books.models import BookIndex
from shared.test_utilities import assertPathDoesNotRedirectToTrailingSlash
class HomePageTests(WagtailPageTests):
def test_cant_create_homepage_under_homepage(self):
self.assertCanNotCreateAt(HomePage, HomePage)
def test_homepage_return_correct_page(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_can_create_homepage(self):
root_page = Page.objects.get(title="Root")
homepage = HomePage(title="Hello World",
slug="hello-world",
)
root_page.add_child(instance=homepage)
        retrieved_page = Page.objects.get(id=homepage.id)
self.assertEqual(retrieved_page.title, "Hello World")
def test_allowed_subpages(self):
self.assertAllowedSubpageTypes(HomePage, {
HigherEducation,
ContactUs,
AboutUsPage,
GeneralPage,
NewsIndex,
            PressIndex,
BookIndex,
Supporters,
MapPage,
Give,
TermsOfService,
AP,
FAQ,
Support,
GiveForm,
Accessibility,
Licensing,
CompCopy,
AdoptForm,
InterestForm,
Technology,
ErrataList,
PrivacyPolicy,
PrintOrder,
ResearchPage,
TeamPage,
Careers,
Impact,
InstitutionalPartnership,
HeroJourneyPage,
InstitutionalPartnerProgramPage,
CreatorFestPage,
PartnersPage,
WebinarPage,
MathQuizPage,
LLPHPage,
TutorMarketing,
TutorLanding,
Subjects
})
class PageTests(WagtailPageTests):
def setUp(self):
root_page = Page.objects.get(title="Root")
self.homepage = HomePage(title="Hello World",
slug="hello-world",
)
root_page.add_child(instance=self.homepage)
def test_can_create_ipp_page(self):
self.assertCanCreateAt(HomePage, InstitutionalPartnerProgramPage)
def test_can_create_llph_page(self):
llph_page = LLPHPage(title="LLPH",
heading="Heading",
subheading="Subheading",
signup_link_href="http://rice.edu",
signup_link_text="Click me",
info_link_slug="/llph-slug",
info_link_text="Click me",
book_heading="Book heading",
book_description="I should accept <b>HTML</b>.")
self.homepage.add_child(instance=llph_page)
self.assertCanCreateAt(HomePage, LLPHPage)
retrieved_page = Page.objects.get(id=llph_page.id)
self.assertEqual(retrieved_page.title, "LLPH")
def test_can_create_team_page(self):
team_page = TeamPage(title="Team Page",
header="Heading",
subheader="Subheading",
team_header="Our Team")
self.homepage.add_child(instance=team_page)
self.assertCanCreateAt(HomePage, TeamPage)
revision = team_page.save_revision()
revision.publish()
team_page.save()
retrieved_page = Page.objects.get(id=team_page.id)
self.assertEqual(retrieved_page.title, "Team Page")
class ErrataListTest(WagtailPageTests):
def test_can_create_errata_list_page(self):
root_page = Page.objects.get(title="Root")
homepage = HomePage(title="Hello World",
slug="hello-world",
)
root_page.add_child(instance=homepage)
errata_list_page = ErrataList(title="Errata List Template",
correction_schedule="Some sample correction schedule text.",
new_edition_errata_message="New edition correction text.",
deprecated_errata_message="Deprecated errata message.",
about_header="About our correction schedule.",
about_text="Errata receieved from March through...",
about_popup="Instructor and student resources..."
)
homepage.add_child(instance=errata_list_page)
retrieved_page = Page.objects.get(id=errata_list_page.id)
self.assertEqual(retrieved_page.title, "Errata List Template")
class SubjectsPageTest(WagtailPageTests):
def test_can_create_subjects_page(self):
root_page = Page.objects.get(title="Root")
homepage = HomePage(title="Hello World",
slug="hello-world",
)
root_page.add_child(instance=homepage)
subjects_page = Subjects(title="Subjects",
heading="Testing Subjects Page",
description="This is a Subjects page test",
philanthropic_support="Please support us",
)
homepage.add_child(instance=subjects_page)
retrieved_page = Page.objects.get(id=subjects_page.id)
self.assertEqual(retrieved_page.title, "Subjects")
class SubjectPageTest(WagtailPageTests):
def test_can_create_subject_page(self):
root_page = Page.objects.get(title="Root")
homepage = HomePage(title="Hello World",
slug="hello-world",
)
root_page.add_child(instance=homepage)
subjects_page = Subjects(title="Subjects",
heading="Testing Subjects Page",
description="This is a Subjects page test",
philanthropic_support="Please support us",
)
homepage.add_child(instance=subjects_page)
subject_page = Subject(title="Business",
page_description="Business page",
os_textbook_heading="OpenStax Business Textbooks",
|
jorik041/weevely3
|
core/sessions.py
|
Python
|
gpl-3.0
| 7,336
| 0.004635
|
from core import messages
from core.weexceptions import FatalException
from mako import template
from core.config import sessions_path, sessions_ext
from core.loggers import log, stream_handler
from core.module import Status
import os
import yaml
import glob
import logging
import urlparse
import atexit
import ast
print_filters = [
'debug',
'channel'
]
set_filters = [
'debug',
'channel'
]
class Session(dict):
def _session_save_atexit(self):
yaml.dump(
dict(self),
open(self['path'], 'w'),
default_flow_style = False
)
def print_to_user(self, module_filter = ''):
for mod_name, mod_value in self.items():
if isinstance(mod_value, dict):
mod_args = mod_value.get('stored_args')
                # This is a module; print all of its stored arguments
for argument, arg_value in mod_args.items():
if not module_filter or ("%s.%s" % (mod_name, argument)).startswith(module_filter):
log.info("%s.%s = '%s'" % (mod_name, argument, arg_value))
else:
                # Not a module; print it only if its name matches print_filters
if any(f for f in print_filters if f == mod_name):
log.info("%s = '%s'" % (mod_name, mod_value))
def get_connection_info(self):
return template.Template(messages.sessions.connection_info).render(
url = self['url'],
user = self['system_info']['results'].get('whoami', ''),
host = self['system_info']['results'].get('hostname', ''),
path = self['file_cd']['results'].get('cwd', '.')
)
def action_debug(self, module_argument, value):
if value:
stream_handler.setLevel(logging.DEBUG)
else:
stream_handler.setLevel(logging.INFO)
def set(self, module_argument, value):
"""Called by user to set or show the session variables"""
        # Safely evaluate the value so that typed values (bools, ints,
        # lists) are not stored as plain strings. Dirty but effective.
        # TODO: the actual type of the argument could be acquired
        # from modules[module].argparser.
try:
value = ast.literal_eval(value)
except Exception as e:
            # If it cannot be evaluated, keep it as a string
pass
# If action_<module_argument> function exists, trigger the action
action_name = 'action_%s' % (module_argument.replace('.','_'))
if hasattr(self, action_name):
action_func = getattr(self, action_name)
if hasattr(action_func, '__call__'):
action_func(module_argument, value)
if module_argument.count('.') == 1:
module_name, arg_name = module_argument.split('.')
if arg_name not in self[module_name]['stored_args']:
log.warn(messages.sessions.error_storing_s_not_found % ( '%s.%s' % (module_name, arg_name) ))
else:
self[module_name]['stored_args'][arg_name] = value
log.info("%s.%s = '%s'" % (module_name, arg_name, value))
else:
module_name = module_argument
if module_name not in self or module_name not in set_filters:
log.warn(messages.sessions.error_storing_s_not_found % (module_name))
else:
self[module_name] = value
log.info("%s = %s" % (module_name, value))
# If the channel is changed, the basic shell_php is moved
# to IDLE and must be setup again.
if module_name == 'channel':
self['shell_php']['status'] = Status.IDLE
class SessionFile(Session):
def __init__(self, dbpath, volatile = False):
try:
sessiondb = yaml.load(open(dbpath, 'r').read())
except Exception as e:
log.warn(
messages.generic.error_loading_file_s_s %
(dbpath, str(e)))
raise FatalException(messages.sessions.error_loading_sessions)
saved_url = sessiondb.get('url')
saved_password = sessiondb.get('password')
if saved_url and saved_password:
            if not volatile:
# Register dump at exit and return
atexit.register(self._session_save_atexit)
self.update(sessiondb)
return
log.warn(
            messages.generic.error_loading_file_s_s %
(dbpath, 'no url or password'))
raise FatalException(messages.sessions.error_loading_sessions)
class SessionURL(Session):
def __init__(self, url, password, volatile = False):
if not os.path.isdir(sessions_path):
os.makedirs(sessions_path)
# Guess a generic hostfolder/dbname
hostname = urlparse.urlparse(url).hostname
if not hostname:
raise FatalException(messages.generic.error_url_format)
hostfolder = os.path.join(sessions_path, hostname)
dbname = os.path.splitext(os.path.basename(urlparse.urlsplit(url).path))[0]
# Check if session already exists
sessions_available = glob.glob(
os.path.join(
hostfolder,
'*%s' %
sessions_ext))
for dbpath in sessions_available:
try:
sessiondb = yaml.load(open(dbpath, 'r').read())
except Exception as e:
log.warn(
messages.generic.error_loading_file_s_s %
(dbpath, str(e)))
else:
saved_url = sessiondb.get('url')
saved_password = sessiondb.get('password')
if not saved_url or not saved_password:
log.warn(
messages.generic.error_loading_file_s_s %
(dbpath, 'no url or password'))
if saved_url == url and saved_password == password:
# Found correspondent session file.
# Register dump at exit and return
if not volatile:
atexit.register(self._session_save_atexit)
self.update(sessiondb)
return
# If no session was found, create a new one with first available filename
index = 0
while True:
dbpath = os.path.join(
hostfolder, '%s_%i%s' %
(dbname, index, sessions_ext))
if not os.path.isdir(hostfolder):
os.makedirs(hostfolder)
if not os.path.exists(dbpath):
sessiondb = {}
sessiondb.update(
{ 'path': dbpath,
'url': url,
'password': password,
'debug': False,
'channel' : None,
'default_shell' : None
}
)
# Register dump at exit and return
if not volatile:
atexit.register(self._session_save_atexit)
self.update(sessiondb)
return
else:
index += 1
raise FatalException(messages.sessions.error_loading_sessions)
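# A minimal, self-contained illustration (not part of the original weevely
# source) of the ast.literal_eval coercion used in Session.set above:
# Python literals are parsed into their real types, anything else stays
# a string.
if __name__ == '__main__':
    for raw in ('True', '42', "['a', 'b']", 'not-a-literal'):
        try:
            parsed = ast.literal_eval(raw)
        except (ValueError, SyntaxError):
            parsed = raw
        print('%r -> %r (%s)' % (raw, parsed, type(parsed).__name__))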
|
rusty1s/embedded_gcnn
|
lib/layer/chebyshev_gcnn.py
|
Python
|
mit
| 1,356
| 0
|
from six.moves import xrange
import tensorflow as tf
from .var_layer import VarLayer
from ..tf import rescaled_laplacian
def conv(features, adj, weights):
K = weights.get_shape()[0].value - 1
    # Create and rescale the normalized Laplacian.
lap = rescaled_laplacian(adj)
Tx_0 = features
output = tf.matmul(Tx_0, weights[0])
if K > 0:
Tx_1 = tf.sparse_tensor_dense_matmul(lap, features)
output += tf.matmul(Tx_1, weights[1])
for k in xrange(2, K + 1):
Tx_2 = 2 * tf.sparse_tensor_dense_matmul(lap, Tx_1) - Tx_0
output += tf.matmul(Tx_2, weights[k])
Tx_0, Tx_1 = Tx_1, Tx_2
return output
class ChebyshevGCNN(VarLayer):
def __init__(self, in_channels, out_channels, adjs, degree, **kwargs):
self.adjs = adjs
        super(ChebyshevGCNN, self).__init__(
weight_shape=[degree + 1, in_channels, out_channels],
bias_shape=[out_channels],
**kwargs)
def _call(self, inputs):
batch_size = len(inputs)
outputs = []
for i in xrange(batch_size):
            output = conv(inputs[i], self.adjs[i], self.vars['weights'])
if self.bias:
output = tf.nn.bias_add(output, self.vars['bias'])
output = self.act(output)
outputs.append(output)
return outputs
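# A dense-numpy sketch (an assumption: numpy stands in for the sparse
# TensorFlow ops used in conv above) of the same Chebyshev recurrence,
#   T_0 = X,  T_1 = L~ X,  T_k = 2 L~ T_{k-1} - T_{k-2},
# accumulating T_k @ W_k for k = 0..K.
if __name__ == '__main__':
    import numpy as np
    n, c_in, c_out, K = 4, 3, 2, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n, c_in)
    lap = rng.randn(n, n)        # stand-in for the rescaled laplacian
    W = rng.randn(K + 1, c_in, c_out)
    Tx_0, Tx_1 = X, lap.dot(X)
    out = Tx_0.dot(W[0]) + Tx_1.dot(W[1])
    for k in range(2, K + 1):
        Tx_0, Tx_1 = Tx_1, 2 * lap.dot(Tx_1) - Tx_0
        out += Tx_1.dot(W[k])
    print(out.shape)             # (4, 2)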
|