repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Stan1989/volatility | refs/heads/master | contrib/plugins/aspaces/__init__.py | 12133432 | |
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
# Module-level fixtures: load iris once and shuffle it with a fixed seed so
# every test below sees the same randomized ordering of (X, y).
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
# Sparse view of the same data; tests run on both representations.
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
    """Reference (pure NumPy) implementation of the passive-aggressive
    update rules, used below to check PassiveAggressiveClassifier and
    PassiveAggressiveRegressor.

    Supported losses: "hinge" / "squared_hinge" (classification) and
    "epsilon_insensitive" / "squared_epsilon_insensitive" (regression).
    """

    def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
                 fit_intercept=True, n_iter=1, random_state=None):
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        # BUG FIX: random_state was accepted but silently dropped; store it
        # so the constructor argument round-trips like the other parameters.
        self.random_state = random_state

    def fit(self, X, y):
        """Run n_iter sequential passive-aggressive epochs over (X, y).

        Learns self.w (weights) and self.b (intercept).  Returns self so
        calls can be chained, matching the scikit-learn convention.
        """
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        for t in range(self.n_iter):
            for i in range(n_samples):
                p = self.project(X[i])
                # Hinge-style losses are for classification targets in
                # {-1, +1}; the epsilon losses are for regression targets.
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[i] * p, 0)
                else:
                    loss = max(np.abs(p - y[i]) - self.epsilon, 0)
                sqnorm = np.dot(X[i], X[i])
                # PA-I step for the linear losses, PA-II for the squared ones.
                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge",
                                   "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))
                else:
                    # BUG FIX: previously an unknown loss surfaced as an
                    # UnboundLocalError on 'step'; fail with a clear message.
                    raise ValueError("unknown loss: %r" % self.loss)
                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[i]
                else:
                    step *= np.sign(y[i] - p)
                self.w += step * X[i]
                if self.fit_intercept:
                    self.b += step
        return self

    def project(self, X):
        """Decision value(s) <w, X> + b for one sample or a 2-d array."""
        return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
    """Classifier should score above 0.79 on shuffled iris, dense and sparse,
    with and without an intercept."""
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            clf = PassiveAggressiveClassifier(
                C=1.0, n_iter=30, fit_intercept=fit_intercept, random_state=0)
            clf.fit(data, y)
            assert_greater(clf.score(data, y), 0.79)
def test_classifier_partial_fit():
    """Thirty partial_fit passes should match the accuracy of a full fit."""
    classes = np.unique(y)
    for data in (X, X_csr):
        clf = PassiveAggressiveClassifier(C=1.0, fit_intercept=True,
                                          random_state=0)
        for _ in range(30):
            clf.partial_fit(data, y, classes)
        assert_greater(clf.score(data, y), 0.79)
def test_classifier_refit():
    """Classifier can be retrained on different labels and features."""
    clf = PassiveAggressiveClassifier().fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
    # Refit with one fewer feature and string labels; classes_ must follow.
    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
    """Classifier weights must agree with the reference implementation."""
    # Binarize: class 1 vs. the rest.
    y_bin = np.where(y == 1, 1, -1)
    for loss in ("hinge", "squared_hinge"):
        ref = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True,
                                  n_iter=2)
        ref.fit(X, y_bin)
        for data in (X, X_csr):
            clf = PassiveAggressiveClassifier(C=1.0, loss=loss,
                                              fit_intercept=True,
                                              n_iter=2, shuffle=False)
            clf.fit(data, y_bin)
            assert_array_almost_equal(ref.w, clf.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
    """Probability and transform accessors must raise AttributeError."""
    clf = PassiveAggressiveClassifier()
    for attr in ("predict_proba", "predict_log_proba", "transform"):
        assert_raises(AttributeError, getattr, clf, attr)
def test_regressor_mse():
    """Regressor should fit the binarized iris target with low MSE."""
    y_bin = np.where(y == 1, 1, -1)
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                             fit_intercept=fit_intercept,
                                             random_state=0)
            reg.fit(data, y_bin)
            mse = np.mean((reg.predict(data) - y_bin) ** 2)
            assert_less(mse, 1.7)
def test_regressor_partial_fit():
    """Fifty partial_fit passes should reach the same MSE target as fit."""
    y_bin = np.where(y == 1, 1, -1)
    for data in (X, X_csr):
        reg = PassiveAggressiveRegressor(C=1.0, fit_intercept=True,
                                         random_state=0)
        for _ in range(50):
            reg.partial_fit(data, y_bin)
        mse = np.mean((reg.predict(data) - y_bin) ** 2)
        assert_less(mse, 1.7)
def test_regressor_correctness():
    """Regressor weights must agree with the reference implementation."""
    y_bin = np.where(y == 1, 1, -1)
    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
        ref = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True,
                                  n_iter=2)
        ref.fit(X, y_bin)
        for data in (X, X_csr):
            reg = PassiveAggressiveRegressor(C=1.0, loss=loss,
                                             fit_intercept=True,
                                             n_iter=2, shuffle=False)
            reg.fit(data, y_bin)
            assert_array_almost_equal(ref.w, reg.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
    """transform must not be exposed on the regressor."""
    reg = PassiveAggressiveRegressor()
    for attr in ("transform",):
        assert_raises(AttributeError, getattr, reg, attr)
|
"""
This script provides access to a set of utilities for extracting content
persistence-based measurements from MediaWiki XML dumps.
* dump2diffs -- (1) Converts XML dumps to diff information (XML --> JSON)
* diffs2persistence -- (2) Converts diff information to token persistence
information (JSON --> JSON)
* persistence2stats -- (3) Converts token persistence information to
revision-level stats (JSON --> JSON)
* dump2stats -- (1,2,3) Full pipeline. From XML dumps to revision-level
stats (XML --> JSON)
Usage:
mwpersistence -h | --help
mwpersistence <utility> [-h | --help]
Options:
-h | --help Prints this documentation
<utility> The name of the utility to run
"""
import sys
import traceback
from importlib import import_module
USAGE = """Usage:
mwpersistence -h | --help
mwpersistence <utility> [-h | --help]\n"""
def main():
    """Dispatch to the requested mwpersistence utility submodule.

    Exits with status 1 (after printing usage or help) when no utility,
    a help flag, or an unknown option is given.
    """
    args = sys.argv[1:]
    if not args:
        sys.stderr.write(USAGE)
        sys.exit(1)
    utility = args[0]
    if utility in ("-h", "--help"):
        # Full module docstring doubles as the help text.
        sys.stderr.write(__doc__ + "\n")
        sys.exit(1)
    if utility.startswith("-"):
        # Any other flag-looking first argument is an error.
        sys.stderr.write(USAGE)
        sys.exit(1)
    module_name = utility
    try:
        module = import_module(".utilities." + module_name,
                               package="mwpersistence")
    except ImportError:
        sys.stderr.write(traceback.format_exc())
        sys.stderr.write("Could not load utility {0}.\n".format(module_name))
        sys.exit(1)
    # Hand the remaining argv to the utility's own main().
    module.main(sys.argv[2:])
|
# -*- coding: utf-8 -*-
# -- Dual Licence ----------------------------------------------------------
############################################################################
# GPL License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
# --------------------------------------------------------------------------
############################################################################
# BSD 3-Clause License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are #
# met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
############################################################################
# the URLDownload-Builder can be download any data from an URL into a target file
# and can replace the target file name with the URL filename (the setting variable
# within the environment object is a boolean type with the name "URLDOWNLOAD_USEURLFILENAM",
# default setting replaces the target name with the URL filename)
import urllib2, urlparse
import SCons.Builder, SCons.Node, SCons.Errors
# define an own node, for checking the data behind the URL,
# we must download only than, if the data is changed, the
# node derivates from the Python.Value node
class URLNode(SCons.Node.Python.Value) :
    # SCons dependency node representing a URL.  The content signature
    # (csig) is derived from the HTTP response headers, so the download
    # builder re-runs only when the remote data appears to have changed.
    # (Python 2 code: urllib2 and the old except syntax.)

    # overload the get_csig (copy the source from the
    # Python.Value node and append the data of the URL header
    def get_csig(self, calc=None):
        # Return the cached signature if it has already been computed.
        try:
            return self.ninfo.csig
        except AttributeError:
            pass
        # Fetch only the response headers for the URL; any network error
        # aborts the build with a StopError.
        try :
            response = urllib2.urlopen( str(self.value) ).info()
        except Exception, e :
            raise SCons.Errors.StopError( "%s [%s]" % (e, self.value) )
        # Prefer Last-Modified / Content-Length as a cheap change signature.
        contents = ""
        if "Last-Modified" in response :
            contents = contents + response["Last-Modified"]
        if "Content-Length" in response :
            contents = contents + response["Content-Length"]
        # Fall back to the node's own value if the server sent neither header.
        if not contents :
            contents = self.get_contents()
        self.get_ninfo().csig = contents
        return contents
# creates the downloading output message
# @param s original message
# @param target target name
# @param source source name
# @param env environment object
def __message( s, target, source, env ) :
    # PRINT_CMD_LINE_FUNC hook: show a friendly progress line instead of
    # the raw command (Python 2 print statement).
    print "downloading [%s] to [%s] ..." % (source[0], target[0])
# the download function, which reads the data from the URL
# and writes it down to the file
# @param target target file on the local drive
# @param source URL for download
# @@param env environment object
def __action( target, source, env ) :
    # Builder action: download source[0] (a URL) into target[0] (a file).
    # The whole payload is read into memory, then written in binary mode;
    # any network or I/O failure aborts the build with a StopError.
    try :
        stream = urllib2.urlopen( str(source[0]) )
        file = open( str(target[0]), "wb" )
        file.write(stream.read())
        file.close()
        stream.close()
    except Exception, e :
        raise SCons.Errors.StopError( "%s [%s]" % (e, source[0]) )
# defines the emitter of the builder
# @param target target file on the local drive
# @param source URL for download
# @param env environment object
def __emitter( target, source, env ) :
    # Builder emitter: optionally rename the target to the URL's basename.
    # we need a temporary file, because the dependency graph
    # of Scons need a physical existing file - so we prepare it
    target[0].prepare()
    # Keep the caller-supplied target name unless the environment asks
    # for the URL's own filename (URLDOWNLOAD_USEURLFILENAME).
    if not env.get("URLDOWNLOAD_USEURLFILENAME", False) :
        return target, source
    # Resolve redirects first (geturl) so the final URL's basename is used.
    try :
        url = urlparse.urlparse( urllib2.urlopen( str(source[0]) ).geturl() )
    except Exception, e :
        raise SCons.Errors.StopError( "%s [%s]" % (e, source[0]) )
    return url.path.split("/")[-1], source
# generate function, that adds the builder to the environment,
# the value "DOWNLOAD_USEFILENAME" replaces the target name with
# the filename of the URL
# @param env environment object
def generate(env):
    """Register the URLDownload builder on the given SCons environment.

    Also enables URLDOWNLOAD_USEURLFILENAME by default, so targets are
    renamed to the downloaded URL's basename.
    """
    builder = SCons.Builder.Builder(
        action=__action,
        emitter=__emitter,
        target_factory=SCons.Node.FS.File,
        source_factory=URLNode,
        single_source=True,
        PRINT_CMD_LINE_FUNC=__message,
    )
    env["BUILDERS"]["URLDownload"] = builder
    env.Replace(URLDOWNLOAD_USEURLFILENAME=True)
# existing function of the builder
# @param env environment object
# @return true
def exists(env):
    """SCons availability hook: this builder can always be used."""
    return 1
"""Test the condition helper."""
from unittest.mock import patch
from homeassistant.helpers import condition
from homeassistant.util import dt
from tests.common import get_test_home_assistant
class TestConditionHelper:
    """Test condition helpers."""

    def setup_method(self, method):
        """Create a fresh test Home Assistant instance before each test."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Stop the test instance after each test."""
        self.hass.stop()

    def _assert_temperature_results(self, test, cases):
        """Drive sensor.temperature through (value, expected) pairs."""
        for value, expected in cases:
            self.hass.states.set('sensor.temperature', value)
            assert bool(test(self.hass)) == expected

    def test_and_condition(self):
        """Test the 'and' condition."""
        test = condition.from_config({
            'condition': 'and',
            'conditions': [
                {
                    'condition': 'state',
                    'entity_id': 'sensor.temperature',
                    'state': '100',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        # Both sub-conditions must hold: state == '100' AND value < 110.
        self._assert_temperature_results(
            test, [(120, False), (105, False), (100, True)])

    def test_and_condition_with_template(self):
        """Test the 'and' condition."""
        test = condition.from_config({
            'condition': 'and',
            'conditions': [
                {
                    'condition': 'template',
                    'value_template':
                        '{{ states.sensor.temperature.state == "100" }}',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        self._assert_temperature_results(
            test, [(120, False), (105, False), (100, True)])

    def test_or_condition(self):
        """Test the 'or' condition."""
        test = condition.from_config({
            'condition': 'or',
            'conditions': [
                {
                    'condition': 'state',
                    'entity_id': 'sensor.temperature',
                    'state': '100',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        # Either sub-condition suffices: state == '100' OR value < 110.
        self._assert_temperature_results(
            test, [(120, False), (105, True), (100, True)])

    def test_or_condition_with_template(self):
        """Test the 'or' condition."""
        test = condition.from_config({
            'condition': 'or',
            'conditions': [
                {
                    'condition': 'template',
                    'value_template':
                        '{{ states.sensor.temperature.state == "100" }}',
                }, {
                    'condition': 'numeric_state',
                    'entity_id': 'sensor.temperature',
                    'below': 110,
                }
            ]
        })
        self._assert_temperature_results(
            test, [(120, False), (105, True), (100, True)])

    def test_time_window(self):
        """Test time condition windows."""
        sixam = dt.parse_time("06:00:00")
        sixpm = dt.parse_time("18:00:00")

        # The 6am-6pm window and the 6pm-6am window are complements, so a
        # single expectation per hour covers both asserts.
        for hour, in_day_window in ((3, False), (9, True),
                                    (15, True), (21, False)):
            with patch('homeassistant.helpers.condition.dt_util.now',
                       return_value=dt.now().replace(hour=hour)):
                assert bool(condition.time(after=sixam,
                                           before=sixpm)) == in_day_window
                assert bool(condition.time(after=sixpm,
                                           before=sixam)) != in_day_window
|
#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
# Names exported by "from re import *"; "finditer" is appended further
# below once the interpreter version supports it.
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
    "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]
__version__ = "2.2.1"
# flags
# Each flag is exposed under both its one-letter and long name; the values
# come straight from sre_compile so they can be OR-ed together.
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
    """Try to apply the pattern at the start of the string, returning
    a match object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
def search(pattern, string, flags=0):
    """Scan through string looking for a match to the pattern, returning
    a match object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
def sub(pattern, repl, string, count=0, flags=0):
    """Return the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in string by the
    replacement repl.  repl can be either a string or a callable;
    if a string, backslash escapes in it are processed; if a callable,
    it's passed the match object and must return a replacement string."""
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
    """Return a 2-tuple containing (new_string, number).

    new_string is the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in the source string by
    the replacement repl; number is how many substitutions were made.
    repl can be either a string (backslash escapes processed) or a
    callable receiving the match object."""
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
    """Split the source string by the occurrences of the pattern,
    returning a list containing the resulting substrings."""
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If one or more groups are present in the pattern, return a list of
    groups; this will be a list of tuples if the pattern has more than
    one group.  Empty matches are included in the result."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
# finditer relies on scanner support that appeared in Python 2.2
# (hexversion 0x02020000), so it is only defined and exported there.
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.

        Empty matches are included in the result."""
        compiled = _compile(pattern, flags)
        return compiled.finditer(string)
def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a pattern object."
    # Thin public wrapper over the caching _compile helper.
    compiled = _compile(pattern, flags)
    return compiled
def purge():
    "Clear the regular expression cache"
    # Wipe both the compiled-pattern and replacement-template caches.
    for cache in (_cache, _cache_repl):
        cache.clear()
def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object"
    # Identical to compile() but with the TEMPLATE flag forced on.
    return _compile(pattern, flags | T)
_alphanum = {}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890':
_alphanum[c] = 1
del c
def escape(pattern):
"Escape all non-alphanumeric characters in pattern."
s = list(pattern)
alphanum = _alphanum
for i in range(len(pattern)):
c = pattern[i]
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return pattern[:0].join(s)
# --------------------------------------------------------------------
# internals
# Caches used by _compile and _compile_repl below; both are wiped wholesale
# once they reach _MAXCACHE entries (see also purge()).
_cache = {}
_cache_repl = {}
# Concrete type of a compiled pattern object, used for isinstance checks
# and registered for pickling further below.
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 100
def _compile(*key):
    # internal: compile pattern
    # 'key' is (pattern, flags); the cache key also records the pattern's
    # type so that str and unicode patterns never collide in the cache.
    cachekey = (type(key[0]),) + key
    p = _cache.get(cachekey)
    if p is not None:
        return p
    pattern, flags = key
    if isinstance(pattern, _pattern_type):
        # Already compiled: flags may not be combined with it.
        if flags:
            raise ValueError('Cannot process flags argument with a compiled pattern')
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError, "first argument must be string or compiled pattern"
    try:
        p = sre_compile.compile(pattern, flags)
    except error, v:
        raise error, v # invalid expression
    # Crude bound on cache growth: wipe everything once the limit is hit.
    if len(_cache) >= _MAXCACHE:
        _cache.clear()
    _cache[cachekey] = p
    return p
def _compile_repl(*key):
    # internal: compile replacement pattern
    # 'key' is (repl, pattern); parsed replacement templates are cached
    # just like compiled patterns.
    p = _cache_repl.get(key)
    if p is not None:
        return p
    repl, pattern = key
    try:
        p = sre_parse.parse_template(repl, pattern)
    except error, v:
        raise error, v # invalid expression
    # Same crude eviction policy as _cache: clear everything when full.
    if len(_cache_repl) >= _MAXCACHE:
        _cache_repl.clear()
    _cache_repl[key] = p
    return p
def _expand(pattern, match, template):
    # internal: match.expand implementation hook
    # Parse the replacement template against the pattern, then fill it in
    # with the groups of the given match.
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
def _subx(pattern, template):
    # internal: pattern.sub/subn implementation helper
    compiled = _compile_repl(template, pattern)
    groups, literals = compiled
    if not groups and len(literals) == 1:
        # No group references and a single literal piece: the replacement
        # is a constant string, so return it directly.
        return literals[0]
    def filter(match, template=compiled):
        # Otherwise expand the template per match.
        return sre_parse.expand_template(template, match)
    return filter
# register myself for pickling
import copy_reg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
    # Simple lexical scanner: the (phrase pattern, action) pairs of the
    # lexicon are combined into one big alternation so the sre engine
    # picks the first matching phrase at each position.
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # Each phrase gets its own numbered subpattern so that
            # m.lastindex identifies which lexicon entry matched.
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
            ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        # Tokenize 'string'; returns (results, remainder) where remainder
        # is the unmatched tail of the input.
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # Zero-width match: stop to avoid looping forever.
                break
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, '__call__'):
                # Callable actions receive the scanner and the matched text;
                # the current match object is exposed as self.match.
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
|
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/products/compute-engine) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance
required: false
default: "debian-7"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
required: false
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
required: false
default: null
service_account_email:
version_added: "1.5.1"
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
default: null
required: false
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
required: false
default: null
name:
description:
- identifier when working with a single instance
required: false
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
required: false
default: null
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
required: false
default: null
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
zone:
description:
- the GCE zone to use
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for
gateways)
required: false
default: "false"
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a list of fixed gce ips or ip names can be given (if there is not enough specified ip, 'ephemeral' will be used)
required: false
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
preemptible:
version_added: "2.1"
description:
- if set to true, instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
required: false
default: "false"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(name) or I(instance_names) is required.
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 7 instance in the
# us-central1-a Zone of n1-standard-1 machine type.
- local_action:
module: gce
name: test-instance
zone: us-central1-a
machine_type: n1-standard-1
image: debian-7
# Example using defaults and with metadata to create a single 'foo' instance
- local_action:
module: gce
name: foo
metadata: '{"db":"postgres", "group":"qa", "id":500}'
# Launch instances from a control node, runs some tasks on the new instances,
# and then terminate them
# This example uses JSON credentials with the credentials_file parameter
# rather than the deprecated pem_file option with PEM formatted credentials.
- name: Create a sandbox instance
hosts: localhost
vars:
names: foo,bar
machine_type: n1-standard-1
image: debian-6
zone: us-central1-a
service_account_email: unique-email@developer.gserviceaccount.com
credentials_file: /path/to/json_file
project_id: project-id
tasks:
- name: Launch instances
local_action: gce instance_names={{names}} machine_type={{machine_type}}
image={{image}} zone={{zone}}
service_account_email={{ service_account_email }}
credentials_file={{ credentials_file }}
project_id={{ project_id }}
register: gce
- name: Wait for SSH to come up
local_action: wait_for host={{item.public_ip}} port=22 delay=10
timeout=60 state=started
with_items: {{gce.instance_data}}
- name: Configure instance(s)
hosts: launched
become: True
roles:
- my_awesome_role
- my_awesome_tasks
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: gce
state: 'absent'
instance_names: {{gce.instance_names}}
# The deprecated PEM file credentials can be used as follows
- name: Create a sandbox instance with PEM credentials
hosts: localhost
vars:
names: foo,bar
machine_type: n1-standard-1
image: debian-6
zone: us-central1-a
service_account_email: unique-email@developer.gserviceaccount.com
pem_file: /path/to/pem_file
project_id: project-id
tasks:
- name: Launch instances
local_action: gce instance_names={{names}} machine_type={{machine_type}}
image={{image}} zone={{zone}}
service_account_email={{ service_account_email }}
pem_file={{ pem_file }}
project_id={{ project_id }}
register: gce
- name: Wait for SSH to come up
local_action: wait_for host={{item.public_ip}} port=22 delay=10
timeout=60 state=started
with_items: {{gce.instance_data}}
'''
import socket
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
def get_instance_info(inst):
    """Build a plain-dict summary of a libcloud GCE node.

    inst: a libcloud Node object returned by the GCE driver.

    Returns a dict with the keys image, disks, machine_type, metadata,
    name, network, subnetwork, private_ip, public_ip, status, tags and
    zone, suitable for returning as Ansible module output.
    """
    metadata = {}
    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
        for md in inst.extra['metadata']['items']:
            metadata[md['key']] = md['value']
    # Network / subnetwork names are the last component of their URLs.
    # Catch only the lookup errors that mean "not present" -- the original
    # bare except also swallowed KeyboardInterrupt/SystemExit.
    try:
        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
    except (KeyError, IndexError, TypeError):
        netname = None
    try:
        subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    except (KeyError, IndexError, TypeError):
        subnetname = None
    if 'disks' in inst.extra:
        # Report disk names ordered by their attachment index.
        disk_names = [disk_info['source'].split('/')[-1]
                      for disk_info
                      in sorted(inst.extra['disks'],
                                key=lambda disk_info: disk_info['index'])]
    else:
        disk_names = []
    public_ip = inst.public_ips[0] if inst.public_ips else None
    return ({
        'image': inst.image.split('/')[-1] if inst.image else None,
        'disks': disk_names,
        'machine_type': inst.size,
        'metadata': metadata,
        'name': inst.name,
        'network': netname,
        'subnetwork': subnetname,
        'private_ip': inst.private_ips[0],
        'public_ip': public_ip,
        'status': inst.extra.get('status') or None,
        'tags': inst.extra.get('tags') or [],
        'zone': inst.extra['zone'].name if 'zone' in inst.extra else None,
    })
def create_instances(module, gce, instance_names):
    """Creates new instances. Attributes other than instance_names are picked
    up from 'module'
    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    instance_names: python list of instance names to create
    Returns:
        A tuple (changed, instance_json_data, instance_names):
        'changed' is True if any instance was actually created,
        'instance_json_data' is a list of per-instance info dicts
        (see get_instance_info) and 'instance_names' the matching names.
    """
    # All remaining instance attributes come from the module parameters.
    image = module.params.get('image')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    disks = module.params.get('disks')
    state = module.params.get('state')
    tags = module.params.get('tags')
    zone = module.params.get('zone')
    ip_forward = module.params.get('ip_forward')
    external_ip = module.params.get('external_ip')
    disk_auto_delete = module.params.get('disk_auto_delete')
    preemptible = module.params.get('preemptible')
    service_account_permissions = module.params.get('service_account_permissions')
    service_account_email = module.params.get('service_account_email')
    # Resolve the requested external IP: "none" disables it, a list is a
    # pool of static addresses consumed one per created instance, any other
    # string (e.g. 'ephemeral') is passed through to libcloud unchanged.
    # NOTE(review): 'basestring' makes this branch Python-2-only.
    if external_ip == "none":
        instance_external_ip = None
    elif not isinstance(external_ip, basestring):
        try:
            if len(external_ip) != 0:
                instance_external_ip = external_ip.pop(0)
                # check if instance_external_ip is an ip or a name
                try:
                    socket.inet_aton(instance_external_ip)
                    instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
                except socket.error:
                    instance_external_ip = gce.ex_get_address(instance_external_ip)
            else:
                instance_external_ip = 'ephemeral'
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
    else:
        instance_external_ip = external_ip
    new_instances = []
    changed = False
    # Look up each requested disk; the first one (index 0) is the boot disk.
    lc_disks = []
    disk_modes = []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            lc_disks.append(gce.ex_get_volume(disk['name']))
            disk_modes.append(disk['mode'])
        else:
            lc_disks.append(gce.ex_get_volume(disk))
            # boot disk is implicitly READ_WRITE
            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    lc_network = gce.ex_get_network(network)
    lc_machine_type = gce.ex_get_size(machine_type)
    lc_zone = gce.ex_get_zone(zone)
    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')
        # Older libcloud (< 0.15) expects the {'items': [...]} wire format;
        # newer versions take the plain dict.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
    # Defer the (expensive) image lookup until actually needed.
    lc_image = LazyDiskImage(module, gce, image, lc_disks)
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP.keys():
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
    # These variables all have default values but check just in case
    if not lc_network or not lc_machine_type or not lc_zone:
        module.fail_json(msg='Missing required create instance variable',
                         changed=False)
    for name in instance_names:
        # Pick the boot disk: an explicitly listed disk wins, otherwise a
        # per-instance persistent disk is reused or created on demand.
        pd = None
        if lc_disks:
            pd = lc_disks[0]
        elif persistent_boot_disk:
            try:
                pd = gce.ex_get_volume("%s" % name, lc_zone)
            except ResourceNotFoundError:
                pd = gce.create_volume(None, "%s" % name, image=lc_image())
        gce_args = dict(
            location=lc_zone,
            ex_network=network, ex_tags=tags, ex_metadata=metadata,
            ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
            external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
            ex_service_accounts=ex_sa_perms
        )
        if preemptible is not None:
            gce_args['ex_preemptible'] = preemptible
        if subnetwork is not None:
            gce_args['ex_subnetwork'] = subnetwork
        inst = None
        # Idempotency: only create the node if it does not already exist.
        try:
            inst = gce.ex_get_node(name, lc_zone)
        except ResourceNotFoundError:
            inst = gce.create_node(
                name, lc_machine_type, lc_image(), **gce_args
            )
            changed = True
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to create ' +
                             'instance %s, error: %s' % (name, e.value))
        for i, lc_disk in enumerate(lc_disks):
            # Check whether the disk is already attached
            if (len(inst.extra['disks']) > i):
                attached_disk = inst.extra['disks'][i]
                if attached_disk['source'] != lc_disk.extra['selfLink']:
                    module.fail_json(
                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
                elif attached_disk['mode'] != disk_modes[i]:
                    module.fail_json(
                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
                            i, disk_modes[i], attached_disk['mode'])))
                else:
                    continue
            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
            # Work around libcloud bug: attached volumes don't get added
            # to the instance metadata. get_instance_info() only cares about
            # source and index.
            if len(inst.extra['disks']) != i+1:
                inst.extra['disks'].append(
                    {'source': lc_disk.extra['selfLink'], 'index': i})
        if inst:
            new_instances.append(inst)
    # Rebuild the returned name list from the nodes actually present.
    instance_names = []
    instance_json_data = []
    for inst in new_instances:
        d = get_instance_info(inst)
        instance_names.append(d['name'])
        instance_json_data.append(d)
    return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, zone_name, state):
    """Transition a list of instances to the requested state.

    module: Ansible module object
    gce: authenticated GCE connection object
    instance_names: names of the instances to act on
    zone_name: zone in which the instances currently reside
    state: target 'state' value passed into the module

    Returns a tuple (changed, changed_instance_names) where the second
    element lists only the instances that were actually modified.
    """
    changed_names = []
    for instance_name in instance_names:
        node = None
        try:
            node = gce.ex_get_node(instance_name, zone_name)
        except ResourceNotFoundError:
            # Already gone -- nothing to change for this name.
            pass
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        if not node:
            continue
        if state in ['absent', 'deleted']:
            gce.destroy_node(node)
            changed_names.append(node.name)
        elif (state == 'started' and
              node.state == libcloud.compute.types.NodeState.STOPPED):
            gce.ex_start_node(node)
            changed_names.append(node.name)
        elif (state in ['stopped', 'terminated'] and
              node.state == libcloud.compute.types.NodeState.RUNNING):
            gce.ex_stop_node(node)
            changed_names.append(node.name)
    return (len(changed_names) > 0, changed_names)
def main():
    """Module entry point: parse arguments, then create, start, stop or
    delete GCE instances as requested by the 'state' parameter."""
    module = AnsibleModule(
        argument_spec = dict(
            image = dict(default='debian-7'),
            instance_names = dict(),
            machine_type = dict(default='n1-standard-1'),
            metadata = dict(),
            name = dict(),
            network = dict(default='default'),
            subnetwork = dict(),
            persistent_boot_disk = dict(type='bool', default=False),
            disks = dict(type='list'),
            state = dict(choices=['active', 'present', 'absent', 'deleted',
                                  'started', 'stopped', 'terminated'],
                         default='present'),
            tags = dict(type='list'),
            zone = dict(default='us-central1-a'),
            service_account_email = dict(),
            service_account_permissions = dict(type='list'),
            pem_file = dict(),
            credentials_file = dict(),
            project_id = dict(),
            ip_forward = dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            disk_auto_delete = dict(type='bool', default=True),
            preemptible = dict(type='bool', default=None),
        )
    )
    if not HAS_PYTHON26:
        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
    # Authenticated libcloud GCE driver (helper from module_utils.gce).
    gce = gce_connect(module)
    image = module.params.get('image')
    instance_names = module.params.get('instance_names')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    name = module.params.get('name')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    state = module.params.get('state')
    tags = module.params.get('tags')
    zone = module.params.get('zone')
    ip_forward = module.params.get('ip_forward')
    preemptible = module.params.get('preemptible')
    changed = False
    # Build the working list of instance names from either 'instance_names'
    # (list or comma-separated string) or the singular 'name'.
    inames = []
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames.append(name)
    if not inames:
        module.fail_json(msg='Must specify a "name" or "instance_names"',
                         changed=False)
    if not zone:
        module.fail_json(msg='Must specify a "zone"', changed=False)
    # Feature gates tied to the installed libcloud version.
    if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
        module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
                         changed=False)
    if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
                         changed=False)
    json_output = {'zone': zone}
    if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
        json_output['state'] = state
        (changed, changed_instance_names) = change_instance_state(
            module, gce, inames, zone, state)
        # based on what user specified, return the same variable, although
        # value could be different if an instance could not be destroyed
        if instance_names:
            json_output['instance_names'] = changed_instance_names
        elif name:
            json_output['name'] = name
    elif state in ['active', 'present']:
        json_output['state'] = 'present'
        (changed, instance_data, instance_name_list) = create_instances(
            module, gce, inames)
        json_output['instance_data'] = instance_data
        if instance_names:
            json_output['instance_names'] = instance_name_list
        elif name:
            json_output['name'] = name
    json_output['changed'] = changed
    module.exit_json(**json_output)
class LazyDiskImage:
    """Memoised, deferred lookup of a disk image.

    gce.ex_get_image is a very expensive call, so the lookup is postponed
    until the image is first needed and the result is cached; subsequent
    calls return the cached value without touching the API.
    """
    def __init__(self, module, gce, name, has_pd):
        self.module = module
        self.gce = gce
        self.name = name
        self.has_pd = has_pd
        self.image = None
        self.was_called = False
    def __call__(self):
        # Fast path: reuse whatever the first invocation produced.
        if self.was_called:
            return self.image
        self.was_called = True
        if not self.has_pd:
            self.image = self.gce.ex_get_image(self.name)
        if not self.image:
            self.module.fail_json(msg='image or disks missing for create instance', changed=False)
        return self.image
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
|
alexandrucoman/vbox-neutron-agent | refs/heads/master | neutron/tests/retargetable/__init__.py | 12133432 | |
HM2MC/Webfront | refs/heads/master | m2m/request/templatetags/__init__.py | 12133432 | |
mjtamlyn/django | refs/heads/master | tests/i18n/sampleproject/sampleproject/settings.py | 12133432 | |
t794104/ansible | refs/heads/devel | test/units/executor/__init__.py | 12133432 | |
krez13/scikit-learn | refs/heads/master | sklearn/cluster/tests/__init__.py | 12133432 | |
alextruberg/custom_django | refs/heads/master | tests/dispatch/tests/__init__.py | 116 | """
Unit-tests for the dispatch project
"""
from __future__ import absolute_import
from .test_dispatcher import DispatcherTests, ReceiverTestCase
from .test_saferef import SaferefTests
|
felliott/osf.io | refs/heads/develop | api/waffle/__init__.py | 12133432 | |
ningchi/scikit-learn | refs/heads/master | sklearn/tests/__init__.py | 12133432 | |
renyi533/tensorflow | refs/heads/master | tensorflow/python/training/proximal_gradient_descent_test.py | 22 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Proximal Gradient Descent operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import proximal_gradient_descent
class ProximalGradientDescentOptimizerTest(test.TestCase):
  """Functional tests for ProximalGradientDescentOptimizer.

  With l1/l2 regularization strengths of 0.0 the optimizer is expected
  to match plain GradientDescentOptimizer exactly (dense and sparse),
  which the testEquiv* cases verify.
  """
  def doTestProximalGradientDescentwithoutRegularization(
      self, use_resource=False):
    # Shared body for the dense no-regularization tests; use_resource
    # switches between ResourceVariable and the legacy Variable class.
    with self.cached_session() as sess:
      # NOTE(review): 'sess' is unused; kept only for the session context.
      if use_resource:
        var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
        var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
      else:
        var0 = variables.Variable([0.0, 0.0])
        var1 = variables.Variable([0.0, 0.0])
      grads0 = constant_op.constant([0.1, 0.2])
      grads1 = constant_op.constant([0.01, 0.02])
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose([0.0, 0.0], v0_val)
      self.assertAllClose([0.0, 0.0], v1_val)
      # Run 3 steps Proximal Gradient Descent.
      for _ in range(3):
        update.run()
      v0_val, v1_val = self.evaluate([var0, var1])
      # lr=3.0 and no regularization: each step moves vars by -3*grad.
      self.assertAllClose(np.array([-0.9, -1.8]), v0_val)
      self.assertAllClose(np.array([-0.09, -0.18]), v1_val)
  @test_util.run_deprecated_v1
  def testProximalGradientDescentwithoutRegularization(self):
    self.doTestProximalGradientDescentwithoutRegularization(use_resource=False)
  @test_util.run_deprecated_v1
  def testResourceProximalGradientDescentwithoutRegularization(self):
    self.doTestProximalGradientDescentwithoutRegularization(use_resource=True)
  @test_util.run_deprecated_v1
  def testProximalGradientDescentwithoutRegularization2(self):
    # Same as the helper above but with non-zero initial variable values.
    with self.cached_session() as sess:
      var0 = variables.Variable([1.0, 2.0])
      var1 = variables.Variable([4.0, 3.0])
      grads0 = constant_op.constant([0.1, 0.2])
      grads1 = constant_op.constant([0.01, 0.02])
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)
      # Run 3 steps Proximal Gradient Descent
      for _ in range(3):
        update.run()
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose(np.array([0.1, 0.2]), v0_val)
      self.assertAllClose(np.array([3.91, 2.82]), v1_val)
  @test_util.run_deprecated_v1
  def testMinimizeSparseResourceVariable(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        loss = pred * pred
        sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer(
            1.0).minimize(loss)
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([[-111, -138]],
                                           self.evaluate(var0),
                                           atol=0.01)
  @test_util.run_deprecated_v1
  def testProximalGradientDescentWithL1_L2(self):
    with self.cached_session() as sess:
      var0 = variables.Variable([1.0, 2.0])
      var1 = variables.Variable([4.0, 3.0])
      grads0 = constant_op.constant([0.1, 0.2])
      grads1 = constant_op.constant([0.01, 0.02])
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)
      # Run 10 steps Proximal Gradient Descent
      for _ in range(10):
        update.run()
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose(np.array([-0.0495, -0.0995]), v0_val)
      self.assertAllClose(np.array([-0.0045, -0.0095]), v1_val)
  def applyOptimizer(self, opt, steps=5, is_sparse=False):
    # Helper: run 'opt' for 'steps' updates over two fixed variables and
    # return their final values; is_sparse feeds IndexedSlices gradients.
    if is_sparse:
      var0 = variables.Variable([[1.0], [2.0]])
      var1 = variables.Variable([[3.0], [4.0]])
      grads0 = ops.IndexedSlices(
          constant_op.constant(
              [0.1], shape=[1, 1]),
          constant_op.constant([0]),
          constant_op.constant([2, 1]))
      grads1 = ops.IndexedSlices(
          constant_op.constant(
              [0.02], shape=[1, 1]),
          constant_op.constant([1]),
          constant_op.constant([2, 1]))
    else:
      var0 = variables.Variable([1.0, 2.0])
      var1 = variables.Variable([3.0, 4.0])
      grads0 = constant_op.constant([0.1, 0.2])
      grads1 = constant_op.constant([0.01, 0.02])
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    variables.global_variables_initializer().run()
    sess = ops.get_default_session()
    # NOTE(review): 'sess' above is unused -- looks vestigial; confirm.
    v0_val, v1_val = self.evaluate([var0, var1])
    if is_sparse:
      self.assertAllClose([[1.0], [2.0]], v0_val)
      self.assertAllClose([[3.0], [4.0]], v1_val)
    else:
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([3.0, 4.0], v1_val)
    # Run ProximalAdagrad for a few steps
    for _ in range(steps):
      update.run()
    v0_val, v1_val = self.evaluate([var0, var1])
    return v0_val, v1_val
  @test_util.run_deprecated_v1
  def testEquivSparseGradientDescentwithoutRegularization(self):
    with self.cached_session():
      val0, val1 = self.applyOptimizer(
          proximal_gradient_descent.ProximalGradientDescentOptimizer(
              3.0,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0),
          is_sparse=True)
    with self.cached_session():
      val2, val3 = self.applyOptimizer(
          gradient_descent.GradientDescentOptimizer(3.0), is_sparse=True)
    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)
  @test_util.run_deprecated_v1
  def testEquivGradientDescentwithoutRegularization(self):
    with self.cached_session():
      val0, val1 = self.applyOptimizer(
          proximal_gradient_descent.ProximalGradientDescentOptimizer(
              3.0,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0))
    with self.cached_session():
      val2, val3 = self.applyOptimizer(
          gradient_descent.GradientDescentOptimizer(3.0))
    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
|
anudeepsharma/autorest | refs/heads/master | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/operations/http_client_failure_operations.py | 14 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpClientFailureOperations(object):
"""HttpClientFailureOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer for request bodies.
        self._serialize = serializer
        # Deserializer for response/error payloads.
        self._deserialize = deserializer
        # Service client configuration.
        self.config = config
def head400(
self, custom_headers=None, raw=False, **operation_config):
"""Return 400 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/400'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get400(
self, custom_headers=None, raw=False, **operation_config):
"""Return 400 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/400'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put400(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 400 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/400'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch400(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 400 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/400'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post400(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 400 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/400'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete400(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 400 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/400'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head401(
self, custom_headers=None, raw=False, **operation_config):
"""Return 401 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/401'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get402(
self, custom_headers=None, raw=False, **operation_config):
"""Return 402 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/402'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get403(
self, custom_headers=None, raw=False, **operation_config):
"""Return 403 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/403'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put404(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 404 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/404'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch405(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 405 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/405'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post406(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 406 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/406'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete407(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 407 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/407'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put409(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 409 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/409'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head410(
self, custom_headers=None, raw=False, **operation_config):
"""Return 410 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/410'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get411(
self, custom_headers=None, raw=False, **operation_config):
"""Return 411 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/411'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get412(
self, custom_headers=None, raw=False, **operation_config):
"""Return 412 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/412'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put413(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 413 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/413'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch414(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 414 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/414'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post415(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 415 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/415'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get416(
self, custom_headers=None, raw=False, **operation_config):
"""Return 416 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/416'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete417(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 417 status code - should be represented in the client as an
error.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/417'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head429(
self, custom_headers=None, raw=False, **operation_config):
"""Return 429 status code - should be represented in the client as an
error.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/failure/client/429'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
|
hassaanm/stock-trading | refs/heads/master | src/pybrain/supervised/evolino/gfilter.py | 5 | __author__ = 'Michael Isik'
from variate import UniformVariate, GaussianVariate
class Filter(object):
    """Abstract operator applied to a population during the evolutionary
    process, e.g. mutation, selection or evaluation.
    """
    def __init__(self):
        pass

    def apply(self, population):
        """Apply this operator to *population*; subclasses must override."""
        raise NotImplementedError()
def isiter(obj):
    """Return True if *obj* supports iteration, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
class SimpleGenomeManipulation(Filter):
    """Abstract filter that walks a nested genome structure and rewrites
    every leaf value through a manipulation function.
    """
    def __init__(self):
        Filter.__init__(self)

    def _manipulateGenome(self, genome, manfunc=None):
        """Rewrite *genome* in place by applying *manfunc* to each leaf.

        :key genome: arbitrarily nested iterable container whose leaf
            elements may be floats or empty containers,
            e.g. [ [1.] , [1. , 2. , 2 , [3. , 4.] ] , [] ]
        :key manfunc: callable mapping an old leaf value to a new one.
            Defaults to self._manipulateValue().
        """
        assert isiter(genome)
        if manfunc is None:
            manfunc = self._manipulateValue
        for idx, element in enumerate(genome):
            if isiter(element):
                # Nested container: descend with the same manipulation.
                self._manipulateGenome(element, manfunc)
            else:
                genome[idx] = manfunc(element)

    def _manipulateValue(self, value):
        """Abstract hook: return the manipulated version of *value*."""
        raise NotImplementedError()
class SimpleMutation(SimpleGenomeManipulation):
    """ A simple mutation filter, which uses a gaussian variate per default
        for mutation.
    """
    # NOTE(fix): this docstring was previously placed after the
    # mutationVariate attribute, so it was not picked up as the class
    # __doc__; it now sits first, where Python recognizes it.

    # Variate used to perturb genome values; replaced with a
    # GaussianVariate instance in __init__.
    mutationVariate = None

    def __init__(self):
        """ Initialize the mutation operator with a GaussianVariate of
            strength alpha = 0.1.
        """
        SimpleGenomeManipulation.__init__(self)
        self.mutationVariate = GaussianVariate()
        self.mutationVariate.alpha = 0.1
        self.verbosity = 0

    def apply(self, population):
        """ Apply the mutation to the population

            :key population: must implement the getIndividuals() method
        """
        for individual in population.getIndividuals():
            self._mutateIndividual(individual)

    def _mutateIndividual(self, individual):
        """ Mutate a single individual

            :key individual: must implement the getGenome() method
        """
        genome = individual.getGenome()
        self._manipulateGenome(genome)

    def _manipulateValue(self, value):
        """ Implementation of the abstract method of class SimpleGenomeManipulation.
            Sets the x0 value of the variate to value, takes a new sample
            and returns it.
        """
        self.mutationVariate.x0 = value
        newval = self.mutationVariate.getSample()
        return newval
class Randomization(SimpleGenomeManipulation):
    """Filter that overwrites every genome value of all individuals of a
    population with a uniform random sample from [minval, maxval].
    """
    def __init__(self, minval=0., maxval=1.):
        SimpleGenomeManipulation.__init__(self)
        self._minval = minval
        self._maxval = maxval

    def apply(self, population):
        """Randomize the genomes of every individual in *population*."""
        self._uniform_variate = UniformVariate(self._minval, self._maxval)
        for ind in population.getIndividuals():
            self._manipulateGenome(ind.getGenome())

    def _manipulateValue(self, value):
        """Ignore *value*; draw a fresh uniform sample instead."""
        return self._uniform_variate.getSample()
|
poojavade/Genomics_Docker | refs/heads/master | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/gemini-0.10.0-py2.7.egg/gemini/scripts/gemini_install.py | 1 | #!/usr/bin/env python
"""Installer for gemini: a lightweight db framework for disease and population genetics.
https://github.com/arq5x/gemini
Handles installation of:
- Required third party software
- Required Python libraries
- Gemini application
- Associated data files
Requires: Python 2.7, git, and compilers (gcc, g++)
Run gemini_install.py -h for usage.
"""
import argparse
import platform
import os
import shutil
import subprocess
import sys
import urllib2
# Remote resources used by the installer: pip/conda requirement files,
# the CloudBioLinux and gemini git repositories, and the Miniconda
# installer (the %s placeholder is filled with "Linux" or "MacOSX"
# at download time).  "requirements_*" entries are rewritten by main()
# when a specific --gemini-version is requested.
remotes = {"requirements_pip":
           "https://raw.github.com/arq5x/gemini/master/requirements.txt",
           "requirements_conda":
           "",
           "versioned_installations":
           "https://raw.githubusercontent.com/arq5x/gemini/master/versioning/",
           "cloudbiolinux":
           "https://github.com/chapmanb/cloudbiolinux.git",
           "gemini":
           "https://github.com/arq5x/gemini.git",
           "anaconda":
           "http://repo.continuum.io/miniconda/Miniconda-3.5.5-%s-x86_64.sh"}
def main(args):
    """Drive the full gemini installation.

    Steps: verify build dependencies, create a throwaway work directory,
    install an isolated Anaconda python, install conda/pip packages plus
    gemini itself, optionally install third-party tools and data files,
    then clone the test suite and clean up.
    """
    check_dependencies()
    # All downloads and builds happen inside a temporary directory that is
    # removed once installation finishes.
    work_dir = os.path.join(os.getcwd(), "tmpgemini_install")
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    os.chdir(work_dir)
    # For a pinned version, point at the versioned requirement files and
    # verify the version exists (HTTP fetch) before doing any real work.
    if args.gemini_version != 'latest':
        requirements_pip = os.path.join( remotes['versioned_installations'], args.gemini_version, 'requirements_pip.txt' )
        requirements_conda = os.path.join( remotes['versioned_installations'], args.gemini_version, 'requirements_conda.txt' )
        try:
            urllib2.urlopen( requirements_pip )
        except:
            sys.exit('Gemini version %s could not be found. Try the latest version.' % args.gemini_version)
        remotes.update( {'requirements_pip': requirements_pip, 'requirements_conda': requirements_conda} )
    print "Installing isolated base python installation"
    make_dirs(args)
    anaconda = install_anaconda_python(args, remotes)
    print "Installing gemini..."
    install_conda_pkgs(anaconda, remotes, args)
    gemini = install_gemini(anaconda, remotes, args.datadir, args.tooldir, args.sudo)
    # Third-party tools (tabix, samtools, ...) are installed via a custom
    # CloudBioLinux flavor driven by fabric.
    if args.install_tools:
        cbl = get_cloudbiolinux(remotes["cloudbiolinux"])
        fabricrc = write_fabricrc(cbl["fabricrc"], args.tooldir, args.datadir,
                                  "ubuntu", args.sudo)
        print "Installing associated tools..."
        install_tools(gemini["fab"], cbl["tool_fabfile"], fabricrc)
    os.chdir(work_dir)
    install_data(gemini["python"], gemini["data_script"], args)
    os.chdir(work_dir)
    test_script = install_testbase(args.datadir, remotes["gemini"], gemini)
    print "Finished: gemini, tools and data installed"
    print " Tools installed in:\n  %s" % args.tooldir
    print " Data installed in:\n  %s" % args.datadir
    print " Run tests with:\n  cd %s && bash %s" % (os.path.dirname(test_script),
                                                    os.path.basename(test_script))
    print " NOTE: be sure to add %s/bin to your PATH." % args.tooldir
    print " NOTE: Install data files for GERP_bp & CADD_scores (not installed by default).\n "
    shutil.rmtree(work_dir)
def install_gemini(anaconda, remotes, datadir, tooldir, use_sudo):
    """Install gemini plus python dependencies inside isolated Anaconda environment.

    Installs pip requirements into the Anaconda env, repairs known
    packaging problems, symlinks gemini entry points into tooldir/bin,
    and returns paths to the fab, data-install, python and gemini
    executables for later steps.
    """
    # Work around issue with distribute where asks for 'distribute==0.0'
    # try:
    #     subprocess.check_call([anaconda["easy_install"], "--upgrade", "distribute"])
    # except subprocess.CalledProcessError:
    #     try:
    #         subprocess.check_call([anaconda["pip"], "install", "--upgrade", "distribute"])
    #     except subprocess.CalledProcessError:
    #         pass
    # Ensure latest version of fabric for running CloudBioLinux
    subprocess.check_call([anaconda["pip"], "install", "fabric>=1.7.0"])
    # allow downloads excluded in recent pip (1.5 or greater) versions
    try:
        p = subprocess.Popen([anaconda["pip"], "--version"], stdout=subprocess.PIPE)
        pip_version = p.communicate()[0].split()[1]
    except:
        pip_version = ""
    pip_compat = []
    # NOTE(review): lexicographic comparison of version strings — works for
    # the versions in play here but would misorder e.g. "10.0" vs "1.5".
    if pip_version >= "1.5":
        for req in ["python-graph-core", "python-graph-dot"]:
            pip_compat += ["--allow-external", req, "--allow-unverified", req]
    subprocess.check_call([anaconda["pip"], "install"] + pip_compat + ["-r", remotes["requirements_pip"]])
    python_bin = os.path.join(anaconda["dir"], "bin", "python")
    _cleanup_problem_files(anaconda["dir"])
    _add_missing_inits(python_bin)
    # Symlink the gemini entry points into tooldir/bin so they are on PATH.
    for final_name, ve_name in [("gemini", "gemini"), ("gemini_python", "python"),
                                ("gemini_pip", "pip")]:
        final_script = os.path.join(tooldir, "bin", final_name)
        ve_script = os.path.join(anaconda["dir"], "bin", ve_name)
        sudo_cmd = ["sudo"] if use_sudo else []
        if os.path.lexists(final_script):
            subprocess.check_call(sudo_cmd + ["rm", "-f", final_script])
        else:
            subprocess.check_call(sudo_cmd + ["mkdir", "-p", os.path.dirname(final_script)])
        cmd = ["ln", "-s", ve_script, final_script]
        subprocess.check_call(sudo_cmd + cmd)
    # Locate the installed gemini package to find its data install script.
    library_loc = subprocess.check_output("%s -c 'import gemini; print gemini.__file__'" % python_bin,
                                          shell=True)
    return {"fab": os.path.join(anaconda["dir"], "bin", "fab"),
            "data_script": os.path.join(os.path.dirname(library_loc.strip()), "install-data.py"),
            "python": python_bin,
            "cmd": os.path.join(anaconda["dir"], "bin", "gemini")}
def install_conda_pkgs(anaconda, remotes, args):
    """Install gemini's binary python dependencies with conda.

    A pinned gemini version uses its versioned requirements file;
    'latest' installs a fixed list of current packages. Packages come
    from the bcbio binstar channel.
    """
    if args.gemini_version != 'latest':
        pkgs = ["--file", remotes['requirements_conda']]
    else:
        pkgs = ["bx-python", "conda", "cython", "ipython", "jinja2", "nose",
                "numpy", "pip", "pycrypto", "pyparsing", "pysam", "pyyaml",
                "pyzmq", "pandas", "scipy"]
    channels = ["-c", "https://conda.binstar.org/bcbio"]
    cmd = [anaconda["conda"], "install", "--yes"] + channels + pkgs
    subprocess.check_call(cmd)
def install_anaconda_python(args, remotes):
    """Provide isolated installation of Anaconda python.
    http://docs.continuum.io/anaconda/index.html

    Downloads and runs the Miniconda installer into datadir/anaconda
    unless a working install is already present. Returns the paths to
    conda, pip, easy_install and the install directory.
    """
    anaconda_dir = os.path.join(args.datadir, "anaconda")
    bindir = os.path.join(anaconda_dir, "bin")
    conda = os.path.join(bindir, "conda")
    # platform.mac_ver() returns a non-empty version string only on OS X.
    if platform.mac_ver()[0]:
        distribution = "macosx"
    else:
        distribution = "linux"
    if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
        # A directory without a conda binary is a broken install: wipe it
        # and reinstall from scratch.
        if os.path.exists(anaconda_dir):
            shutil.rmtree(anaconda_dir)
        url = remotes["anaconda"] % ("MacOSX" if distribution == "macosx" else "Linux")
        # Reuse a previously downloaded installer when present.
        if not os.path.exists(os.path.basename(url)):
            subprocess.check_call(["wget", url])
        # -b: batch (no prompts); -p: install prefix.
        subprocess.check_call("bash %s -b -p %s" %
                              (os.path.basename(url), anaconda_dir), shell=True)
    return {"conda": conda,
            "pip": os.path.join(bindir, "pip"),
            "easy_install": os.path.join(bindir, "easy_install"),
            "dir": anaconda_dir}
def _add_missing_inits(python_bin):
    """pip/setuptools strips __init__.py files with namespace declarations.
    I have no idea why, but this adds them back.
    """
    cmd = ("%s -c 'import pygraph.classes.graph; "
           "print pygraph.classes.graph.__file__'" % python_bin)
    library_loc = subprocess.check_output(cmd, shell=True)
    pkg_dir = os.path.dirname(library_loc.strip())
    init_path = os.path.normpath(os.path.join(pkg_dir, os.pardir, "__init__.py"))
    if not os.path.exists(init_path):
        with open(init_path, "w") as out_handle:
            out_handle.write("__import__('pkg_resources').declare_namespace(__name__)\n")
def _cleanup_problem_files(venv_dir):
"""Remove problem bottle items in PATH which conflict with site-packages
"""
for cmd in ["bottle.py", "bottle.pyc"]:
bin_cmd = os.path.join(venv_dir, "bin", cmd)
if os.path.exists(bin_cmd):
os.remove(bin_cmd)
def install_tools(fab_cmd, fabfile, fabricrc):
    """Install 3rd party tools used by Gemini using a custom CloudBioLinux flavor.
    """
    flavor_dir = os.path.join(os.getcwd(), "gemini-flavor")
    if not os.path.exists(flavor_dir):
        os.makedirs(flavor_dir)
    # Flavor definition: which CloudBioLinux package groups to pull in.
    with open(os.path.join(flavor_dir, "main.yaml"), "w") as out_handle:
        out_handle.write("packages:\n")
        out_handle.write(" - bio_nextgen\n")
        out_handle.write("libraries:\n")
    # Custom file narrows bio_nextgen down to just the tools gemini needs.
    with open(os.path.join(flavor_dir, "custom.yaml"), "w") as out_handle:
        out_handle.write("bio_nextgen:\n")
        for tool in ("tabix", "grabix", "samtools", "bedtools"):
            out_handle.write(" - %s\n" % tool)
    subprocess.check_call([fab_cmd, "-f", fabfile, "-H", "localhost", "-c", fabricrc,
                           "install_biolinux:target=custom,flavor=%s" % flavor_dir])
def install_data(python_cmd, data_script, args):
"""Install biological data used by gemini.
"""
data_dir = os.path.join(args.datadir, "gemini_data") if args.sharedpy else args.datadir
cmd = [python_cmd, data_script, data_dir]
if args.install_data:
print "Installing gemini data..."
else:
cmd.append("--nodata")
subprocess.check_call(cmd)
def install_testbase(datadir, repo, gemini):
    """Clone or update gemini code so we have the latest test suite.

    :param datadir: base data directory; the checkout lives in datadir/gemini
    :param repo: git URL to clone from when no usable checkout exists
    :param gemini: dict with the installed ``cmd`` path, used to sync tags
    :return: path to the master-test.sh script inside the checkout
    """
    gemini_dir = os.path.join(datadir, "gemini")
    cur_dir = os.getcwd()
    needs_git = True
    if os.path.exists(gemini_dir):
        os.chdir(gemini_dir)
        try:
            subprocess.check_call(["git", "pull", "origin", "master", "--tags"])
            needs_git = False
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. A failed pull (corrupt checkout,
        # missing git) falls back to a fresh clone.
        except (subprocess.CalledProcessError, OSError):
            os.chdir(cur_dir)
            shutil.rmtree(gemini_dir)
    if needs_git:
        os.chdir(os.path.split(gemini_dir)[0])
        subprocess.check_call(["git", "clone", repo])
    os.chdir(gemini_dir)
    _update_testdir_revision(gemini["cmd"])
    os.chdir(cur_dir)
    return os.path.join(gemini_dir, "master-test.sh")
def _update_testdir_revision(gemini_cmd):
    """Update test directory to be in sync with a tagged installed version or development.

    Asks the installed gemini for its version; if a matching git tag
    exists, checks it out, otherwise resets the checkout to HEAD.
    """
    try:
        p = subprocess.Popen([gemini_cmd, "--version"],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # Output looks like "gemini <version>"; IndexError covers
        # unexpected output shapes (narrowed from a bare except).
        gversion = p.communicate()[0].split()[1]
    except (OSError, IndexError):
        gversion = ""
    tag = ""
    if gversion:
        try:
            p = subprocess.Popen("git tag -l | grep %s" % gversion,
                                 stdout=subprocess.PIPE, shell=True)
            tag = p.communicate()[0].strip()
        except OSError:
            tag = ""
    if tag:
        # (Removed a dead `pass` statement that followed this call.)
        subprocess.check_call(["git", "checkout", "tags/%s" % tag])
    else:
        subprocess.check_call(["git", "reset", "--hard", "HEAD"])
def write_fabricrc(base_file, tooldir, datadir, distribution, use_sudo):
    """Copy the base fabricrc into the working directory, rewriting the
    install locations, distribution and sudo settings for this run.

    Returns the path of the rewritten file.
    """
    # First matching prefix wins, mirroring the original elif chain.
    replacements = [
        ("system_install", "system_install = %s\n" % tooldir),
        ("local_install", "local_install = %s/install\n" % tooldir),
        ("data_files", "data_files = %s\n" % datadir),
        ("distribution", "distribution = %s\n" % distribution),
        ("use_sudo", "use_sudo = %s\n" % use_sudo),
        ("edition", "edition = minimal\n"),
        ("#galaxy_home", "galaxy_home = %s\n" % os.path.join(datadir, "galaxy")),
    ]
    out_file = os.path.join(os.getcwd(), os.path.basename(base_file))
    with open(base_file) as in_handle:
        with open(out_file, "w") as out_handle:
            for line in in_handle:
                for prefix, replacement in replacements:
                    if line.startswith(prefix):
                        line = replacement
                        break
                out_handle.write(line)
    return out_file
def make_dirs(args):
    """Create the data and tool directories, chowned to the invoking user."""
    sudo_cmd = ["sudo"] if args.sudo else []
    for dname in (args.datadir, args.tooldir):
        if os.path.exists(dname):
            continue
        subprocess.check_call(sudo_cmd + ["mkdir", "-p", dname])
        username = subprocess.check_output("echo $USER", shell=True).strip()
        subprocess.check_call(sudo_cmd + ["chown", username, dname])
def get_cloudbiolinux(repo):
    """Fetch a cloudbiolinux checkout into the working directory if needed.

    Returns paths to the fabric config and fabfile inside the checkout.
    """
    checkout = os.path.join(os.getcwd(), "cloudbiolinux")
    if not os.path.exists(checkout):
        subprocess.check_call(["git", "clone", repo])
    paths = {}
    paths["fabricrc"] = os.path.join(checkout, "config", "fabricrc.txt")
    paths["tool_fabfile"] = os.path.join(checkout, "fabfile.py")
    return paths
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print "Checking required dependencies..."
for cmd, url in [("git", "http://git-scm.com/"),
("wget", "http://www.gnu.org/software/wget/"),
("curl", "http://curl.haxx.se/")]:
try:
retcode = subprocess.call([cmd, "--version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
retcode = 127
if retcode == 127:
raise OSError("gemini requires %s (%s)" % (cmd, url))
else:
print " %s found" % cmd
if __name__ == "__main__":
    # Command line entry point; main() (defined earlier in this file) drives
    # the install using the parsed arguments.
    parser = argparse.ArgumentParser(description="Automated installer for gemini framework.")
    parser.add_argument("tooldir", help="Directory to install 3rd party software tools",
                        type=os.path.abspath)
    parser.add_argument("datadir", help="Directory to install gemini data files",
                        type=os.path.abspath)
    parser.add_argument("--gemini-version", dest="gemini_version", default="latest",
                        help="Install one specific gemini version with a fixed dependency chain.")
    parser.add_argument("--nosudo", help="Specify we cannot use sudo for commands",
                        dest="sudo", action="store_false", default=True)
    parser.add_argument("--notools", help="Do not install tool dependencies",
                        dest="install_tools", action="store_false", default=True)
    parser.add_argument("--nodata", help="Do not install data dependencies",
                        dest="install_data", action="store_false", default=True)
    parser.add_argument("--sharedpy", help=("Indicate we share an Anaconda Python directory with "
                                            "another project. Creates unique gemini data directory."),
                        action="store_true", default=False)
    # With no arguments at all, show usage instead of failing on the two
    # required positional arguments.
    if len(sys.argv) == 1:
        parser.print_help()
    else:
        main(parser.parse_args())
|
jjkester/django-auditlog | refs/heads/master | auditlog/models.py | 1 | import ast
import json
from dateutil import parser
from dateutil.tz import gettz
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist
from django.db import models, DEFAULT_DB_ALIAS
from django.db.models import QuerySet, Q, Field
from django.utils import formats, timezone
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from jsonfield.fields import JSONField
class LogEntryManager(models.Manager):
    """
    Custom manager for the :py:class:`LogEntry` model.
    """
    def log_create(self, instance, **kwargs):
        """
        Helper method to create a new log entry. This method automatically populates some fields when no explicit value
        is given.
        :param instance: The model instance to log a change for.
        :type instance: Model
        :param kwargs: Field overrides for the :py:class:`LogEntry` object.
        :return: The new log entry or `None` if there were no changes.
        :rtype: LogEntry
        """
        changes = kwargs.get('changes', None)
        pk = self._get_pk_value(instance)
        if changes is not None:
            # Fill in identity fields unless the caller supplied overrides.
            kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
            kwargs.setdefault('object_pk', pk)
            kwargs.setdefault('object_repr', smart_str(instance))
            # object_id is the indexed integer column; only usable when the
            # model's pk is an int.
            if isinstance(pk, int):
                kwargs.setdefault('object_id', pk)
            get_additional_data = getattr(instance, 'get_additional_data', None)
            if callable(get_additional_data):
                kwargs.setdefault('additional_data', get_additional_data())
            # Delete log entries with the same pk as a newly created model. This should only be necessary when an pk is
            # used twice.
            # NOTE(review): `is` relies on small-int caching for the 0 action
            # constant; `==` would be the safer comparison -- confirm intent.
            if kwargs.get('action', None) is LogEntry.Action.CREATE:
                if kwargs.get('object_id', None) is not None and self.filter(content_type=kwargs.get('content_type'),
                                                                             object_id=kwargs.get(
                                                                                 'object_id')).exists():
                    self.filter(content_type=kwargs.get('content_type'), object_id=kwargs.get('object_id')).delete()
                else:
                    self.filter(content_type=kwargs.get('content_type'), object_pk=kwargs.get('object_pk', '')).delete()
            # save LogEntry to same database instance is using
            db = instance._state.db
            return self.create(**kwargs) if db is None or db == '' else self.using(db).create(**kwargs)
        return None
    def get_for_object(self, instance):
        """
        Get log entries for the specified model instance.
        :param instance: The model instance to get log entries for.
        :type instance: Model
        :return: QuerySet of log entries for the given model instance.
        :rtype: QuerySet
        """
        # Return empty queryset if the given model instance is not a model instance.
        if not isinstance(instance, models.Model):
            return self.none()
        content_type = ContentType.objects.get_for_model(instance.__class__)
        pk = self._get_pk_value(instance)
        # Integer pks use the indexed object_id column; everything else
        # falls back to the textual object_pk column.
        if isinstance(pk, int):
            return self.filter(content_type=content_type, object_id=pk)
        else:
            return self.filter(content_type=content_type, object_pk=smart_str(pk))
    def get_for_objects(self, queryset):
        """
        Get log entries for the objects in the specified queryset.
        :param queryset: The queryset to get the log entries for.
        :type queryset: QuerySet
        :return: The LogEntry objects for the objects in the given queryset.
        :rtype: QuerySet
        """
        if not isinstance(queryset, QuerySet) or queryset.count() == 0:
            return self.none()
        content_type = ContentType.objects.get_for_model(queryset.model)
        primary_keys = list(queryset.values_list(queryset.model._meta.pk.name, flat=True))
        # NOTE(review): the type of the first pk chooses the lookup column
        # for the whole queryset -- assumes homogeneous pk types; confirm.
        if isinstance(primary_keys[0], int):
            return self.filter(content_type=content_type).filter(Q(object_id__in=primary_keys)).distinct()
        elif isinstance(queryset.model._meta.pk, models.UUIDField):
            # UUID pks are stored in object_pk as their string form.
            primary_keys = [smart_str(pk) for pk in primary_keys]
            return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()
        else:
            return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()
    def get_for_model(self, model):
        """
        Get log entries for all objects of a specified type.
        :param model: The model to get log entries for.
        :type model: class
        :return: QuerySet of log entries for the given model.
        :rtype: QuerySet
        """
        # Return empty queryset if the given object is not valid.
        if not issubclass(model, models.Model):
            return self.none()
        content_type = ContentType.objects.get_for_model(model)
        return self.filter(content_type=content_type)
    def _get_pk_value(self, instance):
        """
        Get the primary key field value for a model instance.
        :param instance: The model instance to get the primary key for.
        :type instance: Model
        :return: The primary key value of the given model instance.
        """
        pk_field = instance._meta.pk.name
        pk = getattr(instance, pk_field, None)
        # Check to make sure that we got an pk not a model object.
        # (Happens when the pk is itself a OneToOne/FK field; recurse.)
        if isinstance(pk, models.Model):
            pk = self._get_pk_value(pk)
        return pk
class LogEntry(models.Model):
    """
    Represents an entry in the audit log. The content type is saved along with the textual and numeric (if available)
    primary key, as well as the textual representation of the object when it was saved. It holds the action performed
    and the fields that were changed in the transaction.
    If AuditlogMiddleware is used, the actor will be set automatically. Keep in mind that editing / re-saving LogEntry
    instances may set the actor to a wrong value - editing LogEntry instances is not recommended (and it should not be
    necessary).
    """
    class Action:
        """
        The actions that Auditlog distinguishes: creating, updating and deleting objects. Viewing objects is not logged.
        The values of the actions are numeric, a higher integer value means a more intrusive action. This may be useful
        in some cases when comparing actions because the ``__lt``, ``__lte``, ``__gt``, ``__gte`` lookup filters can be
        used in queries.
        The valid actions are :py:attr:`Action.CREATE`, :py:attr:`Action.UPDATE` and :py:attr:`Action.DELETE`.
        """
        CREATE = 0
        UPDATE = 1
        DELETE = 2
        choices = (
            (CREATE, _("create")),
            (UPDATE, _("update")),
            (DELETE, _("delete")),
        )
    # Identity of the audited object: its content type plus a textual pk
    # (always populated) and an indexed integer pk (integer pks only).
    content_type = models.ForeignKey(to='contenttypes.ContentType', on_delete=models.CASCADE, related_name='+',
                                     verbose_name=_("content type"))
    object_pk = models.CharField(db_index=True, max_length=255, verbose_name=_("object pk"))
    object_id = models.BigIntegerField(blank=True, db_index=True, null=True, verbose_name=_("object id"))
    object_repr = models.TextField(verbose_name=_("object representation"))
    action = models.PositiveSmallIntegerField(choices=Action.choices, verbose_name=_("action"))
    # JSON-encoded {field: [old, new]} mapping; see changes_dict below.
    changes = models.TextField(blank=True, verbose_name=_("change message"))
    actor = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,
                              related_name='+', verbose_name=_("actor"))
    remote_addr = models.GenericIPAddressField(blank=True, null=True, verbose_name=_("remote address"))
    timestamp = models.DateTimeField(auto_now_add=True, verbose_name=_("timestamp"))
    additional_data = JSONField(blank=True, null=True, verbose_name=_("additional data"))
    objects = LogEntryManager()
    class Meta:
        get_latest_by = 'timestamp'
        ordering = ['-timestamp']
        verbose_name = _("log entry")
        verbose_name_plural = _("log entries")
    def __str__(self):
        # Human-readable summary keyed off the stored action.
        if self.action == self.Action.CREATE:
            fstring = _("Created {repr:s}")
        elif self.action == self.Action.UPDATE:
            fstring = _("Updated {repr:s}")
        elif self.action == self.Action.DELETE:
            fstring = _("Deleted {repr:s}")
        else:
            fstring = _("Logged {repr:s}")
        return fstring.format(repr=self.object_repr)
    @property
    def changes_dict(self):
        """
        :return: The changes recorded in this log entry as a dictionary object.
        """
        # Malformed/empty JSON degrades to an empty dict rather than raising.
        try:
            return json.loads(self.changes)
        except ValueError:
            return {}
    # NOTE(review): as a property this can only ever be invoked with the
    # default arguments -- the colon/arrow/separator parameters are
    # unreachable from callers; confirm whether this was meant to be a
    # plain method before relying on customization.
    @property
    def changes_str(self, colon=': ', arrow=' \u2192 ', separator='; '):
        """
        Return the changes recorded in this log entry as a string. The formatting of the string can be customized by
        setting alternate values for colon, arrow and separator. If the formatting is still not satisfying, please use
        :py:func:`LogEntry.changes_dict` and format the string yourself.
        :param colon: The string to place between the field name and the values.
        :param arrow: The string to place between each old and new value.
        :param separator: The string to place between each field.
        :return: A readable string of the changes in this log entry.
        """
        substrings = []
        for field, values in self.changes_dict.items():
            substring = '{field_name:s}{colon:s}{old:s}{arrow:s}{new:s}'.format(
                field_name=field,
                colon=colon,
                old=values[0],
                arrow=arrow,
                new=values[1],
            )
            substrings.append(substring)
        return separator.join(substrings)
    @property
    def changes_display_dict(self):
        """
        :return: The changes recorded in this log entry intended for display to users as a dictionary object.
        """
        # Get the model and model_fields
        from auditlog.registry import auditlog
        model = self.content_type.model_class()
        model_fields = auditlog.get_model_fields(model._meta.model)
        changes_display_dict = {}
        # grab the changes_dict and iterate through
        for field_name, values in self.changes_dict.items():
            # try to get the field attribute on the model
            try:
                field = model._meta.get_field(field_name)
            except FieldDoesNotExist:
                # Field no longer exists on the model; show raw values.
                changes_display_dict[field_name] = values
                continue
            values_display = []
            # handle choices fields and Postgres ArrayField to get human readable version
            choices_dict = None
            if getattr(field, 'choices') and len(field.choices) > 0:
                choices_dict = dict(field.choices)
            if hasattr(field, 'base_field') and isinstance(field.base_field, Field) and getattr(field.base_field, 'choices') and len(field.base_field.choices) > 0:
                choices_dict = dict(field.base_field.choices)
            if choices_dict:
                for value in values:
                    try:
                        # Stored value may be a repr of a list (ArrayField);
                        # literal_eval recovers the Python object.
                        value = ast.literal_eval(value)
                        if type(value) is [].__class__:
                            values_display.append(', '.join([choices_dict.get(val, 'None') for val in value]))
                        else:
                            values_display.append(choices_dict.get(value, 'None'))
                    except ValueError:
                        values_display.append(choices_dict.get(value, 'None'))
                    # NOTE(review): bare except also hides SyntaxError etc.
                    # from literal_eval -- consider narrowing.
                    except:
                        values_display.append(choices_dict.get(value, 'None'))
            else:
                try:
                    field_type = field.get_internal_type()
                except AttributeError:
                    # if the field is a relationship it has no internal type and exclude it
                    continue
                for value in values:
                    # handle case where field is a datetime, date, or time type
                    if field_type in ["DateTimeField", "DateField", "TimeField"]:
                        try:
                            value = parser.parse(value)
                            if field_type == "DateField":
                                value = value.date()
                            elif field_type == "TimeField":
                                value = value.time()
                            elif field_type == "DateTimeField":
                                # Stored naive; interpreted as UTC then shown
                                # in the configured TIME_ZONE.
                                value = value.replace(tzinfo=timezone.utc)
                                value = value.astimezone(gettz(settings.TIME_ZONE))
                            value = formats.localize(value)
                        except ValueError:
                            pass
                    # check if length is longer than 140 and truncate with ellipsis
                    if len(value) > 140:
                        value = "{}...".format(value[:140])
                    values_display.append(value)
            verbose_name = model_fields['mapping_fields'].get(field.name, getattr(field, 'verbose_name', field.name))
            changes_display_dict[verbose_name] = values_display
        return changes_display_dict
class AuditlogHistoryField(GenericRelation):
    """
    A subclass of py:class:`django.contrib.contenttypes.fields.GenericRelation` that sets some default variables. This
    makes it easier to access Auditlog's log entries, for example in templates.
    By default this field will assume that your primary keys are numeric, simply because this is the most common case.
    However, if you have a non-integer primary key, you can simply pass ``pk_indexable=False`` to the constructor, and
    Auditlog will fall back to using a non-indexed text based field for this model.
    Using this field will not automatically register the model for automatic logging. This is done so you can be more
    flexible with how you use this field.
    :param pk_indexable: Whether the primary key for this model is not an :py:class:`int` or :py:class:`long`.
    :type pk_indexable: bool
    :param delete_related: By default, including a generic relation into a model will cause all related objects to be
        cascade-deleted when the parent object is deleted. Passing False to this overrides this behavior, retaining
        the full auditlog history for the object. Defaults to True, because that's Django's default behavior.
    :type delete_related: bool
    """
    def __init__(self, pk_indexable=True, delete_related=True, **kwargs):
        # Force the relation to point at LogEntry and pick the pk column
        # matching the model's pk type.
        kwargs['to'] = LogEntry
        kwargs['object_id_field'] = 'object_id' if pk_indexable else 'object_pk'
        kwargs['content_type_field'] = 'content_type'
        self.delete_related = delete_related
        super(AuditlogHistoryField, self).__init__(**kwargs)
    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        # Collector.collect() uses this during deletes; reporting no related
        # objects preserves the audit history when delete_related is False.
        if not self.delete_related:
            return []
        return super(AuditlogHistoryField, self).bulk_related_objects(objs, using)
# South compatibility for AuditlogHistoryField
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], [r"^auditlog\.models\.AuditlogHistoryField"])
    # `raise DeprecationWarning(...)` here would crash module import for any
    # installation that still has South available (it is not an ImportError,
    # so the except clause below never catches it); emit a real warning.
    import warnings
    warnings.warn("South support will be dropped in django-auditlog 0.4.0 or later.",
                  DeprecationWarning)
except ImportError:
    pass
|
rschiang/shedskin | refs/heads/master | tests/141.py | 6 |
class renderobject:
    """Base class for renderable scene objects; stores the shader to use."""
    def __init__(self, shader):
        self.shader = shader
class plane(renderobject):
    """An infinite plane: orientation plus signed distance from the origin."""
    def __init__(self, plane, dist, shader):
        self.plane = plane
        self.dist = dist
        renderobject.__init__(self, shader)
class sphere(renderobject):
    """A sphere described by its center position and radius."""
    def __init__(self, pos, radius, shader):
        self.pos = pos
        self.radius = radius
        renderobject.__init__(self, shader)
class world:
    """Scene container; objects are appended externally after construction."""
    def __init__(self):
        self.objects = []
# Build a tiny scene to exercise the class hierarchy (shedskin test driver).
w = world()
w.objects.append(plane(6,7,8))
w.objects.append(sphere(6,7,8))
|
nowismytime/laughing-waffle | refs/heads/master | copy1.py | 1 | #!/usr/bin/env python
import nltk
def fileWrite(disjointSet, hm, gmatrix, hm1):
    """Emit the clustering result as an undirected Graphviz graph (test2.dot).

    Each multi-member disjoint set becomes a cluster subgraph; its internal
    edges are written and zeroed out of ``gmatrix`` so the trailing loop only
    writes the remaining (non-cluster) edges.

    Fixes over the original: the original passed an ever-growing counter as
    the ``whence`` argument of ``seek`` (invalid once it exceeded 2), closed
    each cluster with "{" instead of "}", and never closed the file.

    :param disjointSet: list of {representative: list-of-member-iterables}
    :param hm: word -> matrix index
    :param gmatrix: adjacency matrix, mutated in place (cluster edges -> 0)
    :param hm1: matrix index -> word
    """
    n = len(gmatrix[0])
    with open("test2.dot", "w") as fo:
        fo.write("Graph {")
        fo.write("subgraph[style=invis];")
        cluster_id = 0
        for mapping in disjointSet:
            for key in mapping:
                if len(mapping[key]) > 1:
                    fo.write("subgraph cluster_" + str(cluster_id) + "{")
                    cluster_id += 1
                    members = []
                    for element in mapping[key]:
                        members.extend(element)
                    # Fully connect the cluster members and drop those edges
                    # from the adjacency matrix.
                    for a in range(len(members) - 1):
                        for b in range(a + 1, len(members)):
                            gmatrix[hm[members[a]]][hm[members[b]]] = 0
                            fo.write(" " + members[a] + " -- " + members[b] + ";")
                    fo.write("}")  # close this cluster subgraph
        # Remaining edges outside any cluster.
        for i in range(n):
            for j in range(n):
                if gmatrix[i][j] == 1:
                    fo.write(" " + hm1[i] + " -- " + hm1[j] + ";")
        fo.write("}")  # close the top-level Graph
# floyd warshall algorithm
def flwa(gmatrix):
    """All-pairs shortest paths with a 9999 'infinity' sentinel.

    After the Floyd-Warshall pass, unreachable pairs are rewritten to twice
    the largest finite distance so downstream clustering treats them as
    merely 'far' rather than infinitely far.
    """
    n = len(gmatrix[0])
    dist = [[0] * n for _ in range(n)]
    # Seed: missing off-diagonal edges become the sentinel.
    for i in range(n):
        for j in range(n):
            if i != j and gmatrix[i][j] == 0:
                dist[i][j] = 9999
            else:
                dist[i][j] = gmatrix[i][j]
    # Relax through every intermediate vertex.
    for k in range(n):
        for i in range(n):
            for j in range(n):
                through_k = dist[i][k] + dist[k][j]
                if through_k < dist[i][j]:
                    dist[i][j] = through_k
    # Replace the sentinel with 2 * the largest finite distance.
    farthest = 0
    for i in range(n):
        for j in range(n):
            if farthest < dist[i][j] < 9999:
                farthest = dist[i][j]
    farthest *= 2
    for i in range(n):
        for j in range(n):
            if i != j and dist[i][j] == 9999:
                dist[i][j] = farthest
    return dist
# disjoint sets functions
# create sets for each element
def setCreate(disets, element):
    """Append a new singleton set containing element, keyed by itself."""
    disets.append({element: {element}})
# union two sets containing elements 1 and 2
def union (disets, element1, element2):
    # Merge the sets containing element1 and element2 in place:
    # absorb the second set into the first, then remove the dict
    # that held the second set.
    first_rep = setFind(disets, element1)
    second_rep = setFind(disets, element2)
    first_set = set()
    second_set = set()
    # Locate both member sets via their representatives.
    for index in range(len(disets)):
        if first_rep in disets[index]:
            first_set = disets[index][first_rep]
        elif second_rep in disets[index]:
            second_set = disets[index][second_rep]
    if (len(first_set) != 0) & (len(second_set) != 0):
        first_set=first_set.union(second_set)
        for index in range(len(disets)):
            if first_rep in disets[index]:
                disets[index][first_rep] = first_set
        # NOTE(review): the key is deleted first, then disets.remove()
        # removes the first dict *equal to* the (now possibly empty)
        # remainder -- this relies on each dict holding exactly one
        # representative; confirm before refactoring.
        for index in range(len(disets)):
            if second_rep in disets[index]:
                del disets[index][second_rep]
                disets.remove(disets[index])
                break
# find the representative for the set containing the element
def setFind(disets, element):
    """Return the representative (dict key) of the set holding element.

    Returns None implicitly when the element belongs to no set.
    """
    for mapping in disets:
        for rep, members in mapping.items():
            if element in members:
                return rep
# clustering function
def agcluster(disets, geomatrix, words, num_clusters=10):
    """Agglomerative clustering over the geodesic distance matrix.

    Repeatedly merges the two closest words until at most ``num_clusters``
    disjoint sets remain or no mergeable pair is left. ``geomatrix`` is
    consumed in place: each used distance is overwritten with the 9999
    sentinel so the pair is never picked again.

    Fixes over the original: the local ``min`` no longer shadows the
    builtin, dead commented-out debug prints were removed, and the
    hard-coded cluster count 10 is now a backward-compatible parameter.

    :param disets: list that will be populated with disjoint-set dicts
    :param geomatrix: mutable NxN distance matrix (9999 == unreachable)
    :param words: mapping from matrix index to word
    :param num_clusters: stop once this many clusters remain (default 10)
    :return: the ``disets`` list, mutated in place
    """
    for i in range(len(words)):
        setCreate(disets, words[i])
    while len(disets) > num_clusters:
        n = len(geomatrix[0])
        best_i = -1
        best_j = -1
        best_dist = 9999
        # Find the closest still-unused pair.
        for i in range(n):
            for j in range(n):
                if geomatrix[i][j] != 0 and geomatrix[i][j] < best_dist:
                    best_i = i
                    best_j = j
                    best_dist = geomatrix[i][j]
        if best_i == -1:
            break
        first = words[best_i]
        second = words[best_j]
        # Mark this pair as consumed.
        geomatrix[best_i][best_j] = 9999
        if setFind(disets, first) != setFind(disets, second):
            union(disets, first, second)
    return disets
# main function
# hm1 maps word -> matrix index; hm2 maps matrix index -> word.
hm1 = {}
hm2 = {}
finalwords = []
finalwords1 = []
# getting stopwords
# NOTE(review): the `with` handle f is unused; the file is opened a second
# time inside the comprehension -- confirm before cleaning up.
with open("stopwords.txt") as f:
    stopwords = [line.rstrip('\n') for line in open("stopwords.txt")]
# print(stopwords)
# getting input file: tokenize and POS-tag every line.
with open("ii.txt") as f:
    for line in f:
        words = nltk.word_tokenize(line)
        finalwords.extend(nltk.pos_tag(words))
# print(finalwords)
# print (len(finalwords))
# removing stopwords (and a few punctuation tokens)
for index in range(len(finalwords)):
    words = finalwords[index]
    temp = words[0].lower()
    if temp not in stopwords:
        if (temp != ".") & (temp != ",") & (temp != "–") & (temp != ":"):
            finalwords1.append(words)
# print (finalwords1)
# print (len(finalwords1))
# getting hashmaps: assign each distinct lowercased word an index.
i = 0
for index in range(len(finalwords1)):
    words = finalwords1[index]
    temp = words[0].lower()
    if temp not in hm1.keys():
        hm1[temp] = i
        hm2[i] = temp
        i += 1
# generating adjacency matrix (len(hm1) x len(hm1), zero-filled)
gmatrix = []
for a in range(len(hm1)):
    temp = []
    for b in range(len(hm1)):
        temp.extend([0])
    gmatrix.append(temp)
# Connect words that share a POS tag within a +/-10 token window.
for a in range(len(finalwords1)):
    for b in range(a-10,a+10):
        if(b >= 0) & (b < len(finalwords1)) & (b != a):
            a1 = finalwords1[a][1]
            a2 = finalwords1[b][1]
            if a1 == a2:
                row = hm1[finalwords1[a][0].lower()]
                col = hm1[finalwords1[b][0].lower()]
                gmatrix[row][col]=1
#print(gmatrix)
# getting geodesic matrix
dmatrix = flwa(gmatrix)
#print(dmatrix)
# Cluster, write the Graphviz output, and dump the resulting sets.
disets = []
dlist = agcluster(disets,dmatrix,hm2)
fileWrite(disets,hm1,gmatrix,hm2)
for index in range(len(dlist)):
    print(dlist[index])
    print("\n")
|
# Transliterations for the 256 Hangul syllables at code points U+B500-U+B5FF.
# Every Hangul syllable decomposes arithmetically into a lead consonant, a
# vowel and an optional tail consonant jamo (Unicode ch. 3.12), so the table
# is generated from the standard romanization lists instead of being spelled
# out entry by entry.
_LEADS = ('g', 'gg', 'n', 'd', 'dd', 'r', 'm', 'b', 'bb', 's', 'ss', '',
          'j', 'jj', 'c', 'k', 't', 'p', 'h')
_VOWELS = ('a', 'ae', 'ya', 'yae', 'eo', 'e', 'yeo', 'ye', 'o', 'wa',
           'wae', 'oe', 'yo', 'u', 'weo', 'we', 'wi', 'yu', 'eu', 'yi', 'i')
_TAILS = ('', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm', 'lb',
          'ls', 'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng', 'j', 'c',
          'k', 't', 'p', 'h')
# U+B500 is syllable index 0x900 relative to the Hangul base U+AC00;
# 588 = 21 vowels * 28 tails per lead consonant, 28 tails per vowel.
data = tuple(
    _LEADS[s // 588] + _VOWELS[s % 588 // 28] + _TAILS[s % 28]
    for s in range(0x900, 0xA00)
)
|
Zac-HD/home-assistant | refs/heads/dev | homeassistant/helpers/entity_values.py | 28 | """A class to hold entity values."""
from collections import OrderedDict
import fnmatch
import re
from homeassistant.core import split_entity_id
class EntityValues(object):
    """Class to store entity id based values."""
    def __init__(self, exact=None, domain=None, glob=None):
        """Initialize an EntityConfigDict."""
        self._cache = {}
        self._exact = exact
        self._domain = domain
        if glob is None:
            self._glob = None
        else:
            # Pre-compile glob patterns once, preserving declaration order.
            patterns = OrderedDict()
            for key, value in glob.items():
                patterns[re.compile(fnmatch.translate(key))] = value
            self._glob = patterns
    def get(self, entity_id):
        """Get config for an entity id."""
        try:
            return self._cache[entity_id]
        except KeyError:
            pass
        domain, _ = split_entity_id(entity_id)
        # Cache the result dict up front; it is filled in below with
        # increasing precedence: domain < glob < exact.
        result = self._cache[entity_id] = {}
        if self._domain is not None and domain in self._domain:
            result.update(self._domain[domain])
        if self._glob is not None:
            for pattern, values in self._glob.items():
                if pattern.match(entity_id):
                    result.update(values)
        if self._exact is not None and entity_id in self._exact:
            result.update(self._exact[entity_id])
        return result
|
moonlet/fuli | refs/heads/master | src/server/fuli_server.py | 1 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import sys
import json
import math
import pymongo
from flask import Flask, request
from flask import render_template
from flask.ext.bootstrap import Bootstrap
def add_python_path(path):
    """Prepend path (relative to this file's directory) to sys.path, unless
    it is already at the front."""
    lib_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
    if sys.path[0] != lib_path:
        sys.path.insert(0, lib_path)
# Make the bundled libraries importable before pulling in project modules.
add_python_path('../libs')
import db
import config
# Flask application serving the timeline, with Bootstrap templates.
app = Flask(__name__)
bootstrap = Bootstrap(app)
def get_pagination(page, total, range_num=3):
    """Build pagination state for the timeline template.

    Uses range() instead of the Python-2-only xrange(); behavior is
    identical here and the pure function stays importable on Python 3.

    :param page: current 1-based page number
    :param total: total number of pages
    :param range_num: how many page links to include on each side
    :return: dict with first/last/prev/next/current pages and the list of
             nearby page numbers to render
    """
    page_list = []
    # Pages to the left of the current page, clamped at 1.
    for offset in range(range_num, 0, -1):
        candidate = page - offset
        if candidate > 0:
            page_list.append(candidate)
    page_list.append(page)
    # Pages to the right, clamped at the last page.
    for offset in range(1, range_num + 1):
        candidate = page + offset
        if page < candidate <= total:
            page_list.append(candidate)
    prev_page = max(1, page - 1)
    next_page = min(total, page + 1)
    return {
        'first_page': 1, 'last_page': total, 'page_list': page_list,
        'prev_page': prev_page, 'next_page': next_page, 'cur_page': page,
    }
@app.route('/page/<page>')
def page(page):
    # Render one timeline page: `ln` newest-first items plus pagination.
    ln = 10
    conf = config.get_config()
    cdn_domain = conf.get('cdn', 'domain')
    # Fall back to the first page on a malformed page number.
    try:
        page = int(page)
    except ValueError:
        page = 1
    timeline = db.get_collection('timeline')
    db_items = timeline.find(
        skip=(page - 1) * ln,
        limit=ln,
        sort=[('date', pymongo.DESCENDING)],
    )
    items = []
    for item in db_items:
        # Render stored datetimes as YYYY-MM-DD when possible.
        # NOTE(review): bare except silently keeps non-datetime values
        # as-is; consider narrowing to AttributeError.
        try:
            item['date'] = str(item['date'].strftime('%Y-%m-%d'))
        except:
            pass
        item['_id'] = str(item['_id'])
        # Prefix relative CDN paths with the configured CDN domain.
        item['cdn_path'] = os.path.join(cdn_domain, item['cdn_path'])
        items.append(item)
    # pagination
    total = int(math.ceil(float(timeline.find().count()) / ln))
    pagination = get_pagination(page, total)
    return render_template(
        'timeline.html',
        items=items,
        pagination=pagination,
    )
@app.route('/')
def index():
    """Front page: alias for the first timeline page."""
    return page(1)
if __name__ == '__main__':
    # Development server entry point: debug disabled, served on port 8001.
    app.run(debug=False, port=8001)
|
drawks/ansible | refs/heads/devel | lib/ansible/plugins/action/uri.py | 70 | # -*- coding: utf-8 -*-
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin for the ``uri`` module.

    When the task supplies a local ``src`` file, upload it to the remote
    temp dir first and point the module at the uploaded copy; otherwise
    just execute the module remotely unchanged.
    """

    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        """Handle the optional local ``src`` upload, then execute ``uri``.

        Returns the module result dict.
        """
        # Declare async support so the executor passes async_val through.
        self._supports_async = True
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)

        try:
            if (src and remote_src) or not src:
                # everything is remote, so we just execute the module
                # without changing any of the module arguments
                # (_AnsibleActionDone is control flow: caught below to
                # short-circuit straight to the result handling)
                raise _AnsibleActionDone(result=self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val))

            try:
                # Resolve src against the role/play 'files' search path.
                src = self._find_needle('files', src)
            except AnsibleError as e:
                raise AnsibleActionFail(to_native(e))

            # Upload the local file into the remote temp dir and make sure
            # the remote user can read it.
            tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
            self._transfer_file(src, tmp_src)
            self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))

            # Re-point 'src' at the uploaded copy before running the module.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                )
            )

            result.update(self._execute_module('uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
        except AnsibleAction as e:
            # Both the "done" and "fail" control-flow exceptions carry the
            # final result payload.
            result.update(e.result)
        finally:
            if not self._task.async_val:
                # Async tasks clean up their temp dir themselves later.
                self._remove_tmp_path(self._connection._shell.tmpdir)
        return result
|
gridsmash/gridsmash.github.io | refs/heads/master | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py | 806 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
    """Print *project* after all of its dependencies, depth-first (Python 2)."""
    # if all dependencies are done, we can build it, otherwise we try to build the
    # dependency.
    # This is not infinite-recursion proof.
    # NOTE(review): a dependency cycle between projects would recurse forever.
    for dep in deps[project]:
        if dep not in built:
            BuildProject(dep, built, projects, deps)
    print project
    built.append(project)
def ParseSolution(solution_file):
    """Parse a Visual Studio .sln file.

    Returns a tuple ``(projects, dependencies)`` where ``projects`` maps a
    project name (with any ``_gyp`` suffix stripped) to
    ``[path, clsid, original_path]`` and ``dependencies`` maps a project
    name to the sorted list of project names it depends on.
    """
    # All projects, their clsid and paths.
    projects = dict()
    # A list of dependencies associated with a project.
    dependencies = dict()

    # Regular expressions that match the SLN format. Raw strings so the
    # backslash escapes reach the regex engine verbatim (non-raw '\(' is a
    # deprecated invalid escape sequence in modern Python).
    # The first line of a project definition.
    begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                               r'}"\) = "(.*)", "(.*)", "(.*)"$')
    # The last line of a project definition.
    end_project = re.compile('^EndProject$')
    # The first line of a dependency list.
    begin_dep = re.compile(r'ProjectSection\(ProjectDependencies\) = postProject$')
    # The last line of a dependency list.
    end_dep = re.compile('EndProjectSection$')
    # A line describing a dependency.
    dep_line = re.compile(' *({.*}) = ({.*})$')

    in_deps = False
    # Initialized up front so a stray dependency line before any project
    # definition cannot raise NameError in the dep_line branch below.
    current_project = None
    # 'with' guarantees the handle is closed (the old code leaked it).
    with open(solution_file) as solution:
        for line in solution:
            results = begin_project.search(line)
            if results:
                # Hack to remove icu because the diff is too different.
                if results.group(1).find('icu') != -1:
                    continue
                # We remove "_gyp" from the names because it helps to diff them.
                current_project = results.group(1).replace('_gyp', '')
                projects[current_project] = [results.group(2).replace('_gyp', ''),
                                             results.group(3),
                                             results.group(2)]
                dependencies[current_project] = []
                continue

            results = end_project.search(line)
            if results:
                current_project = None
                continue

            results = begin_dep.search(line)
            if results:
                in_deps = True
                continue

            results = end_dep.search(line)
            if results:
                in_deps = False
                continue

            results = dep_line.search(line)
            if results and in_deps and current_project:
                dependencies[current_project].append(results.group(1))
                continue

    # Change all dependencies clsid to name instead.
    for project in dependencies:
        # For each dependency in this project
        new_dep_array = []
        for dep in dependencies[project]:
            # Look for the project name matching this clsid
            for project_info in projects:
                if projects[project_info][1] == dep:
                    new_dep_array.append(project_info)
        dependencies[project] = sorted(new_dep_array)
    return (projects, dependencies)
def PrintDependencies(projects, deps):
    """Print every project, its path and its dependencies, sorted by name
    (Python 2 print statements)."""
    print "---------------------------------------"
    print "Dependencies for all projects"
    print "---------------------------------------"
    print "-- --"

    for (project, dep_list) in sorted(deps.items()):
        print "Project : %s" % project
        print "Path : %s" % projects[project][0]
        if dep_list:
            for dep in dep_list:
                print " - %s" % dep
        print ""

    print "-- --"
def PrintBuildOrder(projects, deps):
    """Print a dependency-respecting build order via BuildProject (Python 2)."""
    print "---------------------------------------"
    print "Build order "
    print "---------------------------------------"
    print "-- --"

    built = []
    # Alphabetical outer order; BuildProject recurses into unbuilt deps first.
    for (project, _) in sorted(deps.items()):
        if project not in built:
            BuildProject(project, built, projects, deps)

    print "-- --"
def PrintVCProj(projects):
    """Pretty-print every project's .vcproj through pretty_vcproj (Python 2)."""
    for project in projects:
        print "-------------------------------------"
        print "-------------------------------------"
        print project
        print project
        print project
        print "-------------------------------------"
        print "-------------------------------------"

        # The .vcproj path (projects[project][2]) is relative to the .sln
        # given as argv[1].
        project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                    projects[project][2]))

        # Build an argv for pretty_vcproj.main: dummy argv[0], the project
        # path, a $(SolutionDir) substitution, then any extra replacements
        # the caller passed on our own command line.
        pretty = pretty_vcproj
        argv = [ '',
                 project_path,
                 '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
               ]
        argv.extend(sys.argv[3:])
        pretty.main(argv)
def main():
    """Entry point: parse the .sln named on the command line and print it."""
    # check if we have exactly 1 parameter.
    if len(sys.argv) < 2:
        print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
        return 1

    (projects, deps) = ParseSolution(sys.argv[1])
    PrintDependencies(projects, deps)
    PrintBuildOrder(projects, deps)

    # Optionally pretty-print every referenced .vcproj as well.
    if '--recursive' in sys.argv:
        PrintVCProj(projects)
    return 0
if __name__ == '__main__':
sys.exit(main())
|
arokem/scipy | refs/heads/master | scipy/special/tests/test_hypergeometric.py | 11 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_equal
import scipy.special as sc
class TestHyperu(object):
    """Tests for scipy.special.hyperu, the confluent hypergeometric
    function of the second kind."""

    def test_negative_x(self):
        # hyperu returns nan for every negative x, regardless of a and b.
        a, b, x = np.meshgrid(
            [-1, -0.5, 0, 0.5, 1],
            [-1, -0.5, 0, 0.5, 1],
            np.linspace(-100, -1, 10),
        )
        assert np.all(np.isnan(sc.hyperu(a, b, x)))

    def test_special_cases(self):
        # U(0, b, x) == 1 for any b and x.
        assert sc.hyperu(0, 1, 1) == 1.0

    @pytest.mark.parametrize('a', [0.5, 1, np.nan])
    @pytest.mark.parametrize('b', [1, 2, np.nan])
    @pytest.mark.parametrize('x', [0.25, 3, np.nan])
    def test_nan_inputs(self, a, b, x):
        # nan in any argument propagates to a nan result — and only then.
        assert np.isnan(sc.hyperu(a, b, x)) == np.any(np.isnan([a, b, x]))
class TestHyp1f1(object):
    """Tests for scipy.special.hyp1f1, the confluent hypergeometric
    function of the first kind."""

    @pytest.mark.parametrize('a, b, x', [
        (np.nan, 1, 1),
        (1, np.nan, 1),
        (1, 1, np.nan)
    ])
    def test_nan_inputs(self, a, b, x):
        # nan in any argument propagates to the result.
        assert np.isnan(sc.hyp1f1(a, b, x))

    def test_poles(self):
        # 1F1 has poles at non-positive integer b.
        # np.inf instead of np.infty: the infty alias was removed in
        # NumPy 2.0 (same object on older NumPy).
        assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.inf)

    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 0.5, 0.5),
        (1, 1, 0.5, 1.6487212707001281468),
        (2, 1, 0.5, 2.4730819060501922203),
        (1, 2, 0.5, 1.2974425414002562937),
        (-10, 1, 0.5, -0.38937441413785204475)
    ])
    def test_special_cases(self, a, b, x, result):
        # Hit all the special case branches at the beginning of the
        # function. Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)

    @pytest.mark.parametrize('a, b, x, result', [
        (1, 1, 0.44, 1.5527072185113360455),
        (-1, 1, 0.44, 0.55999999999999999778),
        (100, 100, 0.89, 2.4351296512898745592),
        (-100, 100, 0.89, 0.40739062490768104667),
        (1.5, 100, 59.99, 3.8073513625965598107),
        (-1.5, 100, 59.99, 0.25099240047125826943)
    ])
    def test_geometric_convergence(self, a, b, x, result):
        # Test the region where we are relying on the ratio of
        #
        # (|a| + 1) * |x| / |b|
        #
        # being small. Desired answers computed using Mpmath
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)

    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 1.5, -0.5),
        (-10, 1, 1.5, 0.41801777430943080357),
        (-25, 1, 1.5, 0.25114491646037839809),
        (-50, 1, 1.5, -0.25683643975194756115),
        (-51, 1, 1.5, -0.19843162753845452972)
    ])
    def test_a_negative_integer(self, a, b, x, result):
        # Desired answers computed using Mpmath. After -51 the
        # relative error becomes unsatisfactory and we start returning
        # NaN.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-9)

    def test_gh_3492(self):
        desired = 0.99973683897677527773  # Computed using Mpmath
        assert_allclose(
            sc.hyp1f1(0.01, 150, -4),
            desired,
            atol=0,
            rtol=1e-15
        )

    def test_gh_3593(self):
        desired = 1.0020033381011970966  # Computed using Mpmath
        assert_allclose(
            sc.hyp1f1(1, 5, 0.01),
            desired,
            atol=0,
            rtol=1e-15
        )

    @pytest.mark.parametrize('a, b, x, desired', [
        (-1, -2, 2, 2),
        (-1, -4, 10, 3.5),
        (-2, -2, 1, 2.5)
    ])
    def test_gh_11099(self, a, b, x, desired):
        # All desired results computed using Mpmath
        assert sc.hyp1f1(a, b, x) == desired
|
CamelBackNotation/CarnotKE | refs/heads/master | jyhton/lib-python/2.7/lib2to3/tests/data/different_encoding.py | 295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
print u'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ'
def f(x):
    # NOTE(review): this file is lib2to3 test *fixture* data — the Python 2
    # print statement deliberately mixes ASCII and Greek characters; confirm
    # the test suite tolerates added comments before keeping this note.
    print '%s\t-> α(%2i):%s β(%s)'
|
fedora-infra/bodhi | refs/heads/develop | bodhi/tests/server/test_logging.py | 2 | # Copyright © 2019 Red Hat, Inc.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Test bodhi.server.logging."""
import logging
from unittest import mock
from bodhi.server import logging as bodhi_logging
test_log = logging.Logger(__name__)
class TestSetup:
    """Test the setup() function."""

    @mock.patch(
        'bodhi.server.logging.config.config',
        {'pyramid.includes': 'some_plugin\npyramid_sawing\nsome_other_plugin',
         'pyramid_sawing.file': '/some/file'})
    @mock.patch('bodhi.server.logging.logging.config.dictConfig')
    def test_with_sawing(self, dictConfig):
        """Test for when the user is using pyramid_sawing."""
        # The YAML file named by pyramid_sawing.file is read and handed to
        # logging.config.dictConfig verbatim (open() is mocked so no real
        # file is touched).
        with mock.patch('builtins.open',
                        mock.mock_open(read_data='some: data')) as mock_open:
            bodhi_logging.setup()
        mock_open.assert_called_once_with('/some/file')
        dictConfig.assert_called_once_with({'some': 'data'})

    @mock.patch.dict('bodhi.server.logging.config.config',
                     {'pyramid.includes': 'some_plugin\nsome_other_plugin'})
    @mock.patch('bodhi.server.logging.config.get_configfile',
                mock.MagicMock(return_value='/test/file'))
    @mock.patch('bodhi.server.logging.paster.setup_logging')
    def test_without_sawing(self, setup_logging):
        """Test for when the user is not using pyramid_sawing."""
        # Without pyramid_sawing in pyramid.includes, setup() falls back to
        # paster-style logging configuration from the main config file.
        bodhi_logging.setup()
        setup_logging.assert_called_once_with('/test/file')
class TestRateLimiter:
    """
    Test the RateLimiter class.

    These tests were stolen from
    https://github.com/fedora-infra/fedmsg-migration-tools/blob/0cafc8f5/fedmsg_migration_tools/tests/test_filters.py
    """

    @staticmethod
    def _record(lineno=3):
        """Build a log record pointing at /my/file.py:<lineno>."""
        return test_log.makeRecord(
            "test_name", logging.INFO, "/my/file.py", lineno, "beep boop", tuple(), None)

    def test_filter_new_record(self):
        """Assert a new record is not limited."""
        rate_filter = bodhi_logging.RateLimiter()
        assert rate_filter.filter(self._record())

    def test_filter_false(self):
        """Assert if the filename:lineno entry exists and is new, it's filtered out."""
        record = self._record()
        rate_filter = bodhi_logging.RateLimiter(rate=2)
        # Last emission one second ago, rate is two seconds: still limited.
        rate_filter._sent["/my/file.py:3"] = record.created - 1
        assert not rate_filter.filter(record)

    def test_rate_is_used(self):
        """Assert custom rates are respected."""
        record = self._record()
        rate_filter = bodhi_logging.RateLimiter(rate=2)
        # Last emission exactly the rate interval ago: allowed again.
        rate_filter._sent["/my/file.py:3"] = record.created - 2
        assert rate_filter.filter(record)

    def test_rate_limited(self):
        """Assert the first call is allowed and the subsequent one is not."""
        record = self._record()
        rate_filter = bodhi_logging.RateLimiter(rate=60)
        assert rate_filter.filter(record)
        assert not rate_filter.filter(record)

    def test_different_lines(self):
        """Assert rate limiting is line-dependent."""
        rate_filter = bodhi_logging.RateLimiter()
        assert rate_filter.filter(self._record(lineno=3))
        assert rate_filter.filter(self._record(lineno=4))
|
ossdemura/django-miniblog | refs/heads/dev | Lib/encodings/cp1252.py | 272 | """ Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1252 codec backed by the charmap tables below."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1252 encoder; charmap codecs carry no state, so
    'final' is ignored."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1252 decoder; charmap codecs carry no state, so
    'final' is ignored."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream API reuses the stateless Codec.encode unchanged.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream API reuses the stateless Codec.decode unchanged.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec under 'cp1252'."""
    return codecs.CodecInfo(
        name='cp1252',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
KarelJakubec/pip | refs/heads/develop | pip/_vendor/requests/packages/chardet/eucjpprober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines a byte-level coding state machine (validity of byte
    sequences) with character-distribution and context analyses to score
    how likely the input is EUC-JP.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        # Context analysis is EUC-JP specific (not provided by the base class).
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes to the prober; returns the prober state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # An impossible byte sequence for EUC-JP: give up.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character just ended at byte i; feed it (plus
                # its preceding byte for context) to both analyzers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk pairs with the last byte of
                    # the previous chunk, carried over in self._mLastChar.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the chunk's last byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: enough evidence and high confidence — stop probing.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Confidence is the better of the two independent analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
|
vlegoff/tsunami | refs/heads/master | src/secondaires/rapport/commandes/rapport/assigner.py | 1 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'assigner' de la commande 'rapport'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmAssigner(Parametre):
    """Parameter 'assigner' of the 'rapport' command.

    Assigns a bug/feature report to a staff (immortal) player. The
    player-facing strings below are intentionally kept in French.
    """

    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "assigner", "assign")
        self.schema = "<nom_joueur> <nombre>"
        self.groupe = "administrateur"
        self.aide_courte = "assigne un rapport"
        self.aide_longue = \
            "Cette commande assigne le rapport à la personne précisée."

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter."""
        # Report id comes from the <nombre> mask.
        id = dic_masques["nombre"].nombre
        try:
            rapport = importeur.rapport.rapports[id]
        except KeyError:
            personnage << "|err|Ce rapport n'existe pas.|ff|"
        else:
            joueur = dic_masques["nom_joueur"].joueur
            # Reports may only be assigned to immortal (staff) players.
            if not joueur.est_immortel():
                personnage << "|err|Vous ne pouvez assigner à un mortel.|ff|"
                return

            rapport.assigne_a = joueur
            personnage << "|att|Le rapport #{} a été assigné à {}.|ff|".format(
                    id, joueur.nom)
|
mikf/gallery-dl | refs/heads/master | gallery_dl/extractor/hentaicosplays.py | 1 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://hentai-cosplays.com/
(also works for hentai-img.com and porn-images-xxx.com)"""
from .common import GalleryExtractor
from .. import text
class HentaicosplaysGalleryExtractor(GalleryExtractor):
    """Extractor for image galleries from
    hentai-cosplays.com, hentai-img.com, and porn-images-xxx.com"""
    category = "hentaicosplays"
    # One directory per site, one per gallery title.
    directory_fmt = ("{site}", "{title}")
    filename_fmt = "(unknown).{extension}"
    archive_fmt = "{title}_(unknown)"
    # Optional scheme, optional 2-letter language subdomain, one of the
    # three mirror domains, then an /image/ or /story/ gallery slug.
    pattern = r"((?:https?://)?(?:\w{2}\.)?" \
              r"(hentai-cosplays|hentai-img|porn-images-xxx)\.com)/" \
              r"(?:image|story)/([\w-]+)"
    test = (
        ("https://hentai-cosplays.com/image/---devilism--tide-kurihara-/", {
            "pattern": r"https://static\d?.hentai-cosplays.com/upload/"
                       r"\d+/\d+/\d+/\d+.jpg$",
            "keyword": {
                "count": 18,
                "site": "hentai-cosplays",
                "slug": "---devilism--tide-kurihara-",
                "title": "艦 こ れ-devilism の tide Kurihara 憂",
            },
        }),
        ("https://fr.porn-images-xxx.com/image/enako-enako-24/", {
            "pattern": r"https://static\d?.porn-images-xxx.com/upload/"
                       r"\d+/\d+/\d+/\d+.jpg$",
            "keyword": {
                "count": 11,
                "site": "porn-images-xxx",
                "title": str,
            },
        }),
        ("https://ja.hentai-img.com/image/hollow-cora-502/", {
            "pattern": r"https://static\d?.hentai-img.com/upload/"
                       r"\d+/\d+/\d+/\d+.jpg$",
            "keyword": {
                "count": 2,
                "site": "hentai-img",
                "title": str,
            },
        }),
    )

    def __init__(self, match):
        # Groups: site root (possibly schemeless), bare site name, slug.
        root, self.site, self.slug = match.groups()
        self.root = text.ensure_http_scheme(root)
        # Galleries are always fetched through their /story/ viewer page.
        url = "{}/story/{}/".format(self.root, self.slug)
        GalleryExtractor.__init__(self, match, url)

    def metadata(self, page):
        """Return gallery metadata extracted from the story viewer page."""
        title = text.extract(page, "<title>", "</title>")[0]
        return {
            # Drop the site's " Story Viewer - ..." suffix from <title>.
            "title": text.unescape(title.rpartition(" Story Viewer - ")[0]),
            "slug" : self.slug,
            "site" : self.site,
        }

    def images(self, page):
        """Return (image URL, metadata) pairs for every <amp-img> on the page."""
        return [
            (url, None)
            for url in text.extract_iter(
                page, '<amp-img class="auto-style" src="', '"')
        ]
|
esperantoph/informsistemo | refs/heads/master | informsistemo/users/tests/__init__.py | 12133432 | |
hunch/hunch-gift-app | refs/heads/master | django/contrib/localflavor/sk/__init__.py | 12133432 | |
moises1234/PlayPeliculas-Django-Backbone | refs/heads/master | play_peliculasv01/__init__.py | 12133432 | |
NelisVerhoef/scikit-learn | refs/heads/master | sklearn/neural_network/__init__.py | 257 | """
The :mod:`sklearn.neural_network` module includes models based on neural
networks.
"""
from .rbm import BernoulliRBM
__all__ = ['BernoulliRBM']
|
WQuanfeng/wagtail | refs/heads/master | wagtail/wagtailembeds/migrations/0001_initial.py | 34 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration for wagtailembeds: creates the Embed cache model.

    Embed rows cache oEmbed responses, keyed by (url, max_width), so the
    same external resource is not re-fetched for every page render.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Embed',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('url', models.URLField()),
                # max_width is part of the cache key: the same URL can be
                # embedded at different widths.
                ('max_width', models.SmallIntegerField(null=True, blank=True)),
                ('type', models.CharField(max_length=10, choices=[('video', 'Video'), ('photo', 'Photo'), ('link', 'Link'), ('rich', 'Rich')])),
                ('html', models.TextField(blank=True)),
                ('title', models.TextField(blank=True)),
                ('author_name', models.TextField(blank=True)),
                ('provider_name', models.TextField(blank=True)),
                ('thumbnail_url', models.URLField(null=True, blank=True)),
                ('width', models.IntegerField(null=True, blank=True)),
                ('height', models.IntegerField(null=True, blank=True)),
                # Used to decide when a cached embed is stale.
                ('last_updated', models.DateTimeField(auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One cache entry per (url, requested width) combination.
        migrations.AlterUniqueTogether(
            name='embed',
            unique_together=set([('url', 'max_width')]),
        ),
    ]
|
aptana/Pydev | refs/heads/development | tests/org.python.pydev.tests/src/pysrc/extendable/calltips/mod1/__init__.py | 11 | from sub1 import method1 |
dudymas/python-openstacksdk | refs/heads/master | openstack/tests/unit/message/test_version.py | 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.message import version
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'id': IDENTIFIER,
'links': '2',
'status': '3',
}
class TestVersion(testtools.TestCase):
    """Unit tests for the message-service Version resource."""

    def test_basic(self):
        """Resource metadata: keys, path, service, allowed operations."""
        sot = version.Version()
        self.assertEqual('version', sot.resource_key)
        self.assertEqual('versions', sot.resources_key)
        self.assertEqual('/', sot.base_path)
        self.assertEqual('messaging', sot.service.service_type)
        # Versions are read-only: listing is the only permitted operation.
        for flag in (sot.allow_create, sot.allow_retrieve,
                     sot.allow_update, sot.allow_delete):
            self.assertFalse(flag)
        self.assertTrue(sot.allow_list)

    def test_make_it(self):
        """Attributes are populated straight from the input mapping."""
        sot = version.Version(EXAMPLE)
        for attr in ('id', 'links', 'status'):
            self.assertEqual(EXAMPLE[attr], getattr(sot, attr))
|
bobwalker99/Pydev | refs/heads/master | plugins/org.python.pydev.jython/Lib/email/Charset.py | 180 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'iso-8859-16': (QP, QP, None),
'windows-1252':(QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10':'iso-8859-16',
'latin-10':'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
    """Register the email properties of a character set in the global map.

    charset must be the canonical name of the input character set.

    header_enc and body_enc are each one of Charset.QP (quoted-printable),
    Charset.BASE64, Charset.SHORTEST (headers only: the shorter of QP and
    base64), or None for no encoding.  They describe how message headers
    and bodies in the input charset are to be encoded.

    output_charset, when given, is the character set that output should be
    converted to (via Unicode) when Charset.convert() is called; by default
    output stays in the input character set.

    Both input and output charsets need Unicode codec entries in this
    module's charset-to-codec mapping; use add_codec() to register codecs
    the module does not know about.
    """
    # Building the entry first is side-effect free, so the registry is
    # only touched after validation succeeds.
    properties = (header_enc, body_enc, output_charset)
    # SHORTEST only makes sense for headers, where both encodings are tried.
    if body_enc == SHORTEST:
        raise ValueError('SHORTEST not allowed for body_enc')
    CHARSETS[charset] = properties
def add_alias(alias, canonical):
    """Register *alias* as another name for the charset *canonical*.

    Example: add_alias('latin-1', 'iso-8859-1') maps the common name
    latin-1 onto the official email name iso-8859-1.
    """
    ALIASES.update({alias: canonical})
def add_codec(charset, codecname):
    """Register the Python codec used to convert *charset* to/from Unicode.

    charset is the canonical name of a character set; codecname is a
    Python codec name suitable for unicode()/str.encode().
    """
    CODEC_MAP.update({charset: codecname})
class Charset:
    """Map character sets to their email properties.

    This class provides information about the requirements imposed on email
    for a specific character set.  It also provides convenience routines for
    converting between character sets, given the availability of the
    applicable codecs.  Given a character set, it will do its best to provide
    information on how to use that character set in an email in an
    RFC-compliant way.

    Certain character sets must be encoded with quoted-printable or base64
    when used in email headers or bodies.  Certain character sets must be
    converted outright, and are not allowed in email.  Instances of this
    module expose the following information about a character set:

    input_charset:   The initial character set specified.  Common aliases
                     are converted to their `official' email names (e.g.
                     latin_1 is converted to iso-8859-1).  Defaults to
                     7-bit us-ascii.

    header_encoding: If the character set must be encoded before it can be
                     used in an email header, this attribute will be set to
                     Charset.QP (for quoted-printable), Charset.BASE64 (for
                     base64 encoding), or Charset.SHORTEST for the shortest
                     of QP or BASE64 encoding.  Otherwise, it will be None.

    body_encoding:   Same as header_encoding, but describes the encoding for
                     the mail message's body, which indeed may be different
                     than the header encoding.  Charset.SHORTEST is not
                     allowed for body_encoding.

    output_charset:  Some character sets must be converted before they can
                     be used in email headers or bodies.  If the
                     input_charset is one of them, this attribute will
                     contain the name of the charset output will be
                     converted to.  Otherwise, it will be None.

    input_codec:     The name of the Python codec used to convert the
                     input_charset to Unicode.  If no conversion codec is
                     necessary, this attribute will be None.

    output_codec:    The name of the Python codec used to convert Unicode
                     to the output_charset.  If no conversion codec is
                     necessary, this attribute will have the same value as
                     the input_codec.

    NOTE(review): this is Python 2 code -- it relies on the ``unicode``
    builtin and byte/str semantics of Python 2.
    """
    def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
        # unicode because its .lower() is locale insensitive. If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            # Unknown name: ask the codecs module for its canonical spelling.
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
    def __str__(self):
        return self.input_charset.lower()
    __repr__ = __str__
    # NOTE(review): __eq__ is defined without a matching __hash__, so the
    # default identity hash remains -- equal Charset instances may hash
    # differently.  Left as-is for compatibility.
    def __eq__(self, other):
        return str(self) == str(other).lower()
    def __ne__(self, other):
        return not self.__eq__(other)
    def get_body_encoding(self):
        """Return the content-transfer-encoding used for body encoding.

        This is either the string `quoted-printable' or `base64' depending
        on the encoding used, or it is a function in which case you should
        call the function with a single argument, the Message object being
        encoded.  The function should then set the Content-Transfer-Encoding
        header itself to whatever is appropriate.

        Returns "quoted-printable" if self.body_encoding is QP.
        Returns "base64" if self.body_encoding is BASE64.
        Returns "7bit" otherwise.
        """
        assert self.body_encoding != SHORTEST
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            # Callers must invoke this function with the Message object.
            return encode_7or8bit
    def convert(self, s):
        """Convert a string from the input_codec to the output_codec."""
        if self.input_codec != self.output_codec:
            return unicode(s, self.input_codec).encode(self.output_codec)
        else:
            return s
    def to_splittable(self, s):
        """Convert a possibly multibyte string to a safely splittable format.

        Uses the input_codec to try and convert the string to Unicode, so it
        can be safely split on character boundaries (even for multibyte
        characters).

        Returns the string as-is if it isn't known how to convert it to
        Unicode with the input_charset.

        Characters that could not be converted to Unicode will be replaced
        with the Unicode replacement character U+FFFD.
        """
        if isinstance(s, unicode) or self.input_codec is None:
            return s
        try:
            return unicode(s, self.input_codec, 'replace')
        except LookupError:
            # Input codec not installed on system, so return the original
            # string unchanged.
            return s
    def from_splittable(self, ustr, to_output=True):
        """Convert a splittable string back into an encoded string.

        Uses the proper codec to try and convert the string from Unicode
        back into an encoded format.  Return the string as-is if it is not
        Unicode, or if it could not be converted from Unicode.

        Characters that could not be converted from Unicode will be replaced
        with an appropriate character (usually '?').

        If to_output is True (the default), uses output_codec to convert to
        an encoded format.  If to_output is False, uses input_codec.
        """
        if to_output:
            codec = self.output_codec
        else:
            codec = self.input_codec
        if not isinstance(ustr, unicode) or codec is None:
            return ustr
        try:
            return ustr.encode(codec, 'replace')
        except LookupError:
            # Output codec not installed
            return ustr
    def get_output_charset(self):
        """Return the output character set.

        This is self.output_charset if that is not None, otherwise it is
        self.input_charset.
        """
        return self.output_charset or self.input_charset
    def encoded_header_len(self, s):
        """Return the length of the encoded header string."""
        cset = self.get_output_charset()
        # The len(s) of a 7bit encoding is len(s)
        if self.header_encoding == BASE64:
            return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == QP:
            return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == SHORTEST:
            # SHORTEST: whichever of base64/QP would be smaller.
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            return min(lenb64, lenqp) + len(cset) + MISC_LEN
        else:
            return len(s)
    def header_encode(self, s, convert=False):
        """Header-encode a string, optionally converting it to output_charset.

        If convert is True, the string will be converted from the input
        charset to the output charset automatically.  This is not useful for
        multibyte character sets, which have line length issues (multibyte
        characters must be split on a character, not a byte boundary); use
        the high-level Header class to deal with these issues.  convert
        defaults to False.

        The type of encoding (base64 or quoted-printable) will be based on
        self.header_encoding.
        """
        cset = self.get_output_charset()
        if convert:
            s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        if self.header_encoding == BASE64:
            return email.base64mime.header_encode(s, cset)
        elif self.header_encoding == QP:
            return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        elif self.header_encoding == SHORTEST:
            # Compute both lengths and pick the smaller encoding.
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            if lenb64 < lenqp:
                return email.base64mime.header_encode(s, cset)
            else:
                return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        else:
            return s
    def body_encode(self, s, convert=True):
        """Body-encode a string and convert it to output_charset.

        If convert is True (the default), the string will be converted from
        the input charset to output charset automatically.  Unlike
        header_encode(), there are no issues with byte boundaries and
        multibyte charsets in email bodies, so this is usually pretty safe.

        The type of encoding (base64 or quoted-printable) will be based on
        self.body_encoding.
        """
        if convert:
            s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (module conversions)
        if self.body_encoding is BASE64:
            return email.base64mime.body_encode(s)
        elif self.body_encoding is QP:
            return email.quoprimime.body_encode(s)
        else:
            return s
|
tkf/comparatist | refs/heads/master | python/comparatist/utils/importer.py | 2 | def import_object(name, current_module=None):
"""
Import an object at "dotted path".
>>> from importlib import import_module
>>> import_object('importlib.import_module') is import_module
True
Any object such as package, module, sub-module, function and class
can be imported:
>>> import_object('json') # doctest: +ELLIPSIS
<module 'json' from '...'>
>>> import_object('json.load') # doctest: +ELLIPSIS
<function load at ...>
>>> import_object('os.path') # doctest: +ELLIPSIS
<module ...>
>>> import_object('os.path.join') # doctest: +ELLIPSIS
<function ...join at ...>
To support relative import, `current_module` has to be provided.
Typically, it's ``__name__``.::
import_object('spam.egg', __name__)
"""
if '.' not in name:
return __import__(name)
else:
if name.startswith('.'):
name = current_module.rsplit('.', 1)[0] + name
(modpath, objname) = name.rsplit('.', 1)
module = __import__(modpath, fromlist=[objname])
return getattr(module, objname)
|
ZhaoCJ/django | refs/heads/master | django/contrib/gis/db/backends/spatialite/adapter.py | 624 | from django.db.backends.sqlite3.base import Database
from django.contrib.gis.db.backends.adapter import WKTAdapter
class SpatiaLiteAdapter(WKTAdapter):
    "SQLite adaptor for geometry objects."

    def __conform__(self, protocol):
        # sqlite3 asks the adapter to conform to its prepare protocol
        # before binding; hand back the WKT text in that case, and let
        # every other protocol fall through to None.
        if protocol is not Database.PrepareProtocol:
            return None
        return str(self)
|
Ebag333/Pyfa | refs/heads/master | eos/effects/rigdrawbackreductionprojectile.py | 2 | # rigDrawbackReductionProjectile
#
# Used by:
# Skill: Projectile Weapon Rigging
type = "passive"
def handler(fit, src, context):
    """Reduce the drawback attribute of fitted projectile-weapon rigs.

    The boost scales linearly with the level of the skill (*src*)
    providing this passive effect.
    """
    skill_level = src.level
    reduction = src.getModifiedItemAttr("rigDrawbackBonus") * skill_level
    fit.modules.filteredItemBoost(
        lambda mod: mod.item.group.name == "Rig Projectile Weapon",
        "drawback", reduction)
|
Be-ing/lorax | refs/heads/master | src/pylorax/buildstamp.py | 6 | #
# buildstamp.py
#
# Copyright (C) 2010-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
#
import logging
logger = logging.getLogger("pylorax.buildstamp")
import datetime
class BuildStamp(object):
    """Collects build metadata and writes it out as a .buildstamp file."""

    def __init__(self, product, version, bugurl, isfinal, buildarch):
        self.product = product
        self.version = version
        self.bugurl = bugurl
        self.isfinal = isfinal
        # uuid has the form "<YYYYmmddHHMM>.<arch>", e.g. "202401311200.x86_64"
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
        self.uuid = "{0}.{1}".format(timestamp, buildarch)

    def write(self, outfile):
        """Write the buildstamp INI data to *outfile*."""
        # Determine the lorax version; fall back to "devel" when the
        # generated version module is unavailable (e.g. running from git).
        try:
            import pylorax.version
        except ImportError:
            vernum = "devel"
        else:
            vernum = pylorax.version.num
        logger.info("writing .buildstamp file")
        lines = ["[Main]\n",
                 "Product={0.product}\n".format(self),
                 "Version={0.version}\n".format(self),
                 "BugURL={0.bugurl}\n".format(self),
                 "IsFinal={0.isfinal}\n".format(self),
                 "UUID={0.uuid}\n".format(self),
                 "[Compose]\n",
                 "Lorax={0}\n".format(vernum)]
        with open(outfile, "w") as fobj:
            fobj.writelines(lines)
|
SergiuTudos/Selenium2Library | refs/heads/master | src/Selenium2Library/utils/events/event.py | 2 | import abc
class Event(object):
    """Abstract base class for events.

    Subclasses must implement :meth:`trigger`.
    """
    # Bug fix: @abc.abstractmethod is inert unless the class's metaclass is
    # abc.ABCMeta -- without it, Event (and subclasses that forget to
    # implement trigger) could still be instantiated.  Python 2 metaclass
    # syntax, matching the rest of this code base; harmless no-op on
    # Python 3.  Code that (incorrectly) instantiated Event directly will
    # now raise TypeError on Python 2, which is the intended contract.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def trigger(self, *args, **kwargs):
        """Fire the event with the given arguments."""
        pass
|
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/object/tangible/wearables/ring/item_ring_set_smuggler_utility_b_01_01.py | 2 | import sys
def setup(core, object):
    # Static-item initialization script for the "Smuggler utility B" ring,
    # piece 01.  Invoked by the scripting engine with the engine instance
    # (core) and the tangible object being initialized.
    object.setAttachment('radial_filename', 'ring/unity')
    object.setAttachment('objType', 'ring')
    # Client-side localization keys (name and detail/description).
    object.setStfFilename('static_item_n')
    object.setStfName('item_ring_set_smuggler_utility_b_01_01')
    object.setDetailFilename('static_item_d')
    object.setDetailName('item_ring_set_smuggler_utility_b_01_01')
    # Usage requirements shown as item attributes.
    object.setStringAttribute('class_required', 'Smuggler')
    object.setIntAttribute('required_combat_level', 85)
    # Stat bonuses granted by this piece on its own.
    object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_cooldown_line_sm_false_hope', 2)
    object.setIntAttribute('cat_stat_mod_bonus.@stat_n:luck_modified', 30)
    # Set bonuses unlocked when 3/4/5 pieces of the set are worn.
    object.setStringAttribute('@set_bonus:piece_bonus_count_3', '@set_bonus:set_bonus_smuggler_utility_b_1')
    object.setStringAttribute('@set_bonus:piece_bonus_count_4', '@set_bonus:set_bonus_smuggler_utility_b_2')
    object.setStringAttribute('@set_bonus:piece_bonus_count_5', '@set_bonus:set_bonus_smuggler_utility_b_3')
    object.setAttachment('setBonus', 'set_smuggler_utility_b')
    return
|
amir-qayyum-khan/edx-platform | refs/heads/master | common/test/acceptance/__init__.py | 12133432 | |
qma/pants | refs/heads/master | contrib/node/tests/python/pants_test/contrib/node/__init__.py | 12133432 | |
cheynepierce/Force.com-Metadata-Client | refs/heads/master | api/__init__.py | 12133432 | |
dogukantufekci/workplace_saas | refs/heads/master | workplace_saas/workplace_saas/views.py | 2 | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(('GET',))
def workplace(request, format=None):
    """API root: hyperlinks to the top-level resource collections."""
    endpoints = {
        'users': reverse('users:users', request=request, format=format),
        'people': reverse('people:people', request=request, format=format),
    }
    return Response(endpoints)
rubenIzquierdo/opinion_miner_deluxePP | refs/heads/master | tag_file.py | 1 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
from extract_features_expression import main as expression_feature_extractor
from extract_features_target import main as target_feature_extractor
from extract_features_holder import main as holder_feature_extractor
from extract_sequences import extract_sequences
import match_entities_by_distance as entity_matcher
from subprocess import Popen, PIPE
from path_crf import PATH_TO_CRF_TEST
from KafNafParserPy import *
from polarity_classifier import PolarityClassifier
__desc = 'Opinion Miner Deluxe'
__last_edited = '7jan2016'
__version = '3.0'
__here__ = os.path.realpath(os.path.dirname(__file__))
def add_opinions(opinion_triples,kaf_naf_obj):
    # Write one <opinion> element into the KAF/NAF opinion layer for every
    # (expression E, target T, holder H) triple.  T and H may be None; E is
    # always present.  Token-level CRF spans are mapped to term-level spans.
    #
    # Map each token id to the id of the term covering it.
    term_id_for_token_id = {}
    for term in kaf_naf_obj.get_terms():
        for token_id in term.get_span().get_span_ids():
            term_id_for_token_id[token_id] = term.get_id()
    # Collect existing opinion ids so newly generated ids ("o1", "o2", ...)
    # never collide with them.
    opinion_ids_used = set()
    for opinion in kaf_naf_obj.get_opinions():
        opinion_ids_used.add(opinion.get_id())
    #Adding linguistic processor
    my_lp = Clp()
    my_lp.set_name(__desc)
    my_lp.set_version(__last_edited+'_'+__version)
    my_lp.set_timestamp()   ##Set to the current date and time
    kaf_naf_obj.add_linguistic_processor('opinions',my_lp)
    num_opinion = 0
    for E, T, H in opinion_triples:
        # Convert token ids to term ids; tokens with no covering term are
        # silently dropped.
        E_term_ids = [term_id_for_token_id[tokenid] for tokenid in E.token_id_list if tokenid in term_id_for_token_id]
        if T is None:
            T_term_ids = []
        else:
            T_term_ids = [term_id_for_token_id[tokenid] for tokenid in T.token_id_list if tokenid in term_id_for_token_id]
        if H is None:
            H_term_ids =[]
        else:
            H_term_ids = [term_id_for_token_id[tokenid] for tokenid in H.token_id_list if tokenid in term_id_for_token_id]
        # Generate the next free opinion id: keep bumping the counter until
        # 'o<n+1>' is unused, then reserve it.
        new_id = None
        while True:
            new_id = 'o'+str(num_opinion+1)
            if new_id not in opinion_ids_used:
                opinion_ids_used.add(new_id)
                break
            else:
                num_opinion += 1
        new_opinion = Copinion(type=kaf_naf_obj.get_type())
        new_opinion.set_id(new_id)
        #Create the holder (only when a holder span was found)
        if len(H_term_ids) != 0:
            span_hol = Cspan()
            span_hol.create_from_ids(H_term_ids)
            my_hol = Cholder()
            my_hol.set_span(span_hol)
            # The surface text goes into an XML comment for readability.
            hol_text = ' '.join(H.word_list)
            my_hol.set_comment(hol_text)
            new_opinion.set_holder(my_hol)
        #Creating target (only when a target span was found)
        if len(T_term_ids) != 0:
            span_tar = Cspan()
            span_tar.create_from_ids(T_term_ids)
            my_tar = opinion_data.Ctarget()
            my_tar.set_span(span_tar)
            tar_text = ' '.join(T.word_list)
            my_tar.set_comment(tar_text)
            new_opinion.set_target(my_tar)
        #########################
        ##Creating expression (always present)
        span_exp = Cspan()
        span_exp.create_from_ids(E_term_ids)
        my_exp = Cexpression()
        my_exp.set_span(span_exp)
        # Polarity here is just the entity label; the positive/negative
        # classification is done later by the polarity classifier.
        my_exp.set_polarity('DSE')
        #if include_polarity_strength:
        my_exp.set_strength("1")
        exp_text = ' '.join(E.word_list)
        my_exp.set_comment(exp_text)
        new_opinion.set_expression(my_exp)
        kaf_naf_obj.add_opinion(new_opinion)
#########################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Detects opinions in KAF/NAF files', epilog='Example of use: cat example.naf | %(prog)s -d hotel')
parser.add_argument('--version', action='version', version='%(prog)s __version')
#parser.add_argument('-f',dest='input_file', required=True,help='Input KAF/NAF file')
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('-d', dest='domain', help='Domain for the model (hotel,news)')
input_group.add_argument('-f', dest='path_to_folder', help='Path to a folder containing the model')
parser.add_argument('-log',dest='log',action='store_true',help='Show log information')
parser.add_argument('-polarity', dest='polarity', action='store_true', help='Run the polarity (positive/negative) classifier too')
parser.add_argument('-keep-opinions',dest='keep_opinions',action='store_true',help='Keep the opinions from the input (by default will be deleted)')
if len(sys.argv) == 1:
#To print by default the help, in case
sys.argv.append('-h')
args = parser.parse_args(sys.argv[1:])
if sys.stdin.isatty():
print('Input stream required', file=sys.stderr)
print('Example usage: cat my_file.naf | %s' % sys.argv[0], file=sys.stderr)
parser.print_help(sys.stderr)
sys.exit(-1)
if args.log:
print('Path to CRF TEST: %s' % PATH_TO_CRF_TEST, file=sys.stderr)
kaf_naf_obj = KafNafParser(sys.stdin)
if not args.keep_opinions:
kaf_naf_obj.remove_opinion_layer()
# We need to set this manually because the identifier for CRF will be the concatenation
# of the filename and the token id, and it's a <File> object if we create the kaf_naf_obj
# from a open stream
kaf_naf_obj.filename = 'stdin'
language = kaf_naf_obj.get_language()
if args.log:
print('Language in the file: %s' % language, file=sys.stderr)
model_folder=None
if args.domain:
model_folder='models/models_%s_%s' % (args.domain,language)
else:
model_folder=args.path_to_folder
if args.log:
print('Model folder: %s' % model_folder, file=sys.stderr)
if not os.path.exists(model_folder):
print('There are no models for the domain %s' % args.domain, file=sys.stderr)
print(' Model folder should be: %s' % model_folder, file=sys.stderr)
sys.exit(-1)
#########################################
######## BEGIN EXPRESSION PART ####
#########################################
# 1) CALL TO THE FEATURE EXTRACTOR FOR EXPRESSIONS
#feature_file = expression_feature_extractor(inputfile,'tag', model_folder)
feature_file = expression_feature_extractor(kaf_naf_obj,'tag', model_folder, log=args.log)
# 2) CALL TO THE MODEL
expression_tagger_cmd = []
expression_tagger_cmd.append(PATH_TO_CRF_TEST)
expression_tagger_cmd.append('-m')
expression_tagger_cmd.append(model_folder+'/model.expression')
expression_tagger_cmd.append(feature_file)
expression_tagger = Popen(' '.join(expression_tagger_cmd), shell=True, stdout=PIPE, stderr=PIPE)
expression_out, expression_error = expression_tagger.communicate()
#This variable stores a list of lines with the CRF output
expression_out_lines = expression_out.splitlines()
######
# The expression sequences detected:
#[(['example_en.naf#w4'], ['nice']),
# (['example_en.naf#w9', 'example_en.naf#w10', 'example_en.naf#w11'], ['the', 'best', '!!'])]
expression_sequences = extract_sequences(expression_out_lines,'DSE')
#########################################
######## END EXPRESSION PART ####
#########################################
#########################################
######## BEGIN TARGET PART ####
#########################################
target_features_file = target_feature_extractor(kaf_naf_obj,'tag',model_folder,detected_dse=expression_sequences, log=args.log)
target_tagger_cmd = []
target_tagger_cmd.append(PATH_TO_CRF_TEST)
target_tagger_cmd.append('-m')
target_tagger_cmd.append(model_folder+'/model.target')
target_tagger_cmd.append(target_features_file)
target_tagger = Popen(' '.join(target_tagger_cmd), shell=True, stdout=PIPE, stderr=PIPE)
target_out, target_error = target_tagger.communicate()
#This variable stores a list of lines with the CRF output
target_out_lines = target_out.splitlines()
target_sequences = extract_sequences(target_out_lines,'TARGET')
#########################################
######## END TARGET PART ####
#########################################
#########################################
######## BEGIN HOLDER PART ####
#########################################
# Extract CRF features for opinion holders and label them with the trained
# holder model (external CRF++ binary invoked through a pipe).
holder_features_file = holder_feature_extractor(kaf_naf_obj, 'tag', model_folder, detected_dse=expression_sequences, log=args.log)
holder_tagger_cmd = []
holder_tagger_cmd.append(PATH_TO_CRF_TEST)
holder_tagger_cmd.append('-m')
holder_tagger_cmd.append(model_folder + '/model.holder')
holder_tagger_cmd.append(holder_features_file)
# NOTE(review): shell=True with a whitespace-joined command line breaks on
# paths containing spaces; consider Popen(holder_tagger_cmd, shell=False).
holder_tagger = Popen(' '.join(holder_tagger_cmd), shell=True, stdout=PIPE, stderr=PIPE)
holder_out, holder_error = holder_tagger.communicate()
# This variable stores a list of lines with the CRF output
holder_out_lines = holder_out.splitlines()
holder_sequences = extract_sequences(holder_out_lines, 'HOLDER')
#########################################
######## END HOLDER PART ####
#########################################

#################################################
######## EXPRESSION/TARGET PART ####
#################################################
# Wrap every detected expression sequence in a Centity object.
expression_entities = []
num_exp = 0
for list_ids, list_words in expression_sequences:
    exp_entity = entity_matcher.Centity()
    # ids contain filename: strip the "filename#" prefix from every token id.
    ids_with_no_filename = []
    for this_id in list_ids:
        p = this_id.rfind('#')
        ids_with_no_filename.append(this_id[p + 1:])
        filename = this_id[:p]
    exp_entity.create('exp#%d' % num_exp, 'DSE', filename, ids_with_no_filename, list_words)
    expression_entities.append(exp_entity)
    num_exp += 1

# Same wrapping for the detected target sequences.
target_entities = []
num_tar = 0
for list_ids, list_words in target_sequences:
    tar_entity = entity_matcher.Centity()
    # ids contain filename
    ids_with_no_filename = []
    for this_id in list_ids:
        p = this_id.rfind('#')
        ids_with_no_filename.append(this_id[p + 1:])
        filename = this_id[:p]
    tar_entity.create('tar#%d' % num_tar, 'TARGET', filename, ids_with_no_filename, list_words)
    target_entities.append(tar_entity)
    num_tar += 1

# We set target fixed and one expression is selected for every target
# matched_exp_tar = entity_matcher.match_entities(expression_entities,target_entities)
# We set expressions fixed
matched_tar_exp = entity_matcher.match_entities(target_entities, expression_entities, kaf_naf_obj)

# Same wrapping for the detected holder sequences.
holder_entities = []
num_hol = 0
for list_ids, list_words in holder_sequences:
    hol_entity = entity_matcher.Centity()
    # ids contain filename
    ids_with_no_filename = []
    for this_id in list_ids:
        p = this_id.rfind('#')
        ids_with_no_filename.append(this_id[p + 1:])
        filename = this_id[:p]
    # BUG FIX: the id counter used here was num_tar (the target counter,
    # frozen at its final value), so every holder got the same wrong id;
    # use the holder counter num_hol instead.
    hol_entity.create('hol#%d' % num_hol, 'HOLDER', filename, ids_with_no_filename, list_words)
    holder_entities.append(hol_entity)
    num_hol += 1

matched_hol_exp = entity_matcher.match_entities(holder_entities, expression_entities, kaf_naf_obj)

### CREATE THE FINAL TRIPLES
# For every expression pick (at most) the first matched target and holder.
final_triples = []
for expression in expression_entities:
    selected_target = None
    selected_holder = None
    for this_target, this_exp in matched_tar_exp:
        if expression.id == this_exp.id:
            selected_target = this_target
            break
    for this_holder, this_exp in matched_hol_exp:
        if expression.id == this_exp.id:
            selected_holder = this_holder
            break
    final_triples.append((expression, selected_target, selected_holder))

if args.log:
    print('FOUND ENTITIES', file=sys.stderr)
    print(' Expressions', file=sys.stderr)
    for list_ids, list_words in expression_sequences:
        print(' ==>', ' '.join(list_words), str(list_ids), file=sys.stderr)
    print(' Targets', file=sys.stderr)
    for list_ids, list_words in target_sequences:
        print(' ==>', ' '.join(list_words), str(list_ids), file=sys.stderr)
    print(' Holders', file=sys.stderr)
    for list_ids, list_words in holder_sequences:
        print(' ==>', ' '.join(list_words), str(list_ids), file=sys.stderr)
    print(file=sys.stderr)
    print(file=sys.stderr)
    print(' Complete opinions', file=sys.stderr)
    for e, t, h in final_triples:
        print(' ==>', file=sys.stderr)
        print(' Expression:', e.to_line(), file=sys.stderr)
        if t is None:
            print(' Target: NONE', file=sys.stderr)
        else:
            print(' Target:', t.to_line(), file=sys.stderr)
        if h is None:
            print(' Holder: NONE', file=sys.stderr)
        else:
            print(' Holder:', h.to_line(), file=sys.stderr)

# Remove feature_file feature_file
# Remove also the target file target_features_file
os.remove(feature_file)
os.remove(target_features_file)
os.remove(holder_features_file)

## CREATE THE KAF/NAF OPINIONS
add_opinions(final_triples, kaf_naf_obj)

if args.polarity:
    my_polarity_classifier = PolarityClassifier(language)
    my_polarity_classifier.load_models(os.path.join(__here__, 'polarity_models', language))
    my_polarity_classifier.classify_kaf_naf_object(kaf_naf_obj)

kaf_naf_obj.dump()
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/treemap/marker/colorbar/_xanchor.py | 1 | import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``treemap.marker.colorbar.xanchor``."""

    def __init__(
        self, plotly_name="xanchor", parent_name="treemap.marker.colorbar", **kwargs
    ):
        # Pop the defaults out of kwargs first so caller overrides win.
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["left", "center", "right"])
        super(XanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
dkiefner/skat-buddy | refs/heads/master | skat-buddy/game/state/game_state_bid.py | 1 | from exceptions import InvalidCardSize, InvalidPlayerMove
from game.game_state_machine import GameState, PlayerAction
from game.state.game_state_play import GameStatePlay
from model.player import Player
# ------------------------------------------------------------
# Concrete game state class for bidding
# ------------------------------------------------------------
# TODO actions to make additional declarations like Schneider, Ouvert, etc.
class GameStateBid(GameState):
    """Game state covering the bidding phase of Skat, including picking
    up / putting down the Skat and declaring the game variant."""

    # All bid values that are legal in Skat.
    available_bid_values = [18, 20, 22, 23, 24, 27, 30, 33, 35, 36, 40, 44, 45, 46, 48, 50, 54, 55, 59, 60, 63, 66, 70,
                            72, 77, 80, 84, 88, 90, 96, 99, 100, 108, 110, 117, 120, 121, 126, 130, 132, 135, 140, 143,
                            144, 150, 153, 154, 156, 160, 162, 165, 168, 170, 176, 180, 187, 192, 198, 204, 216, 240,
                            264]

    def __init__(self, game):
        super().__init__(game)
        # Bidding starts with a call turn; sub-states advance through
        # bid_state_finished_handler.
        self.current_bid_state = BidStateCallTurn(game, self.bid_state_finished_handler)

    @staticmethod
    def bid_pass(game, player):
        """Let *player* pass; he becomes a defender for this game."""
        GameStateBid.check_player_has_passed(game, player)
        player.type = Player.Type.DEFENDER
        game.passed_bid_players.append(player)

    @staticmethod
    def check_player_has_passed(game, player):
        """Raise InvalidPlayerMove if *player* already passed earlier."""
        if player in game.passed_bid_players:
            raise InvalidPlayerMove("Player " + player.name + " has already passed")

    def handle_action(self, action):
        # While the bidding sub-state machine is still running, delegate
        # everything to the active sub-state; once it reached BidStateEnd,
        # Skat handling and game declaration become legal moves.
        if not isinstance(self.current_bid_state, BidStateEnd):
            self.current_bid_state.handle_action(action)
        elif isinstance(action, PickUpSkatAction):
            self.pick_up_skat(action.player)
        elif isinstance(action, PutDownSkatAction):
            self.put_down_skat(action.player, action.cards_to_put)
        elif isinstance(action, DeclareGameVariantAction):
            self.declare_game(action.player, action.game_variant)
        else:
            super().handle_action(action)

    def bid_state_finished_handler(self):
        # Advance to the next bidding sub-state and keep the handler wired up.
        self.current_bid_state = self.current_bid_state.get_next_state()
        self.current_bid_state.state_finished_handler = self.bid_state_finished_handler

    def pick_up_skat(self, player):
        """Move the two Skat cards into the declarer's hand."""
        if player.type is not Player.Type.DECLARER:
            raise InvalidPlayerMove("Player " + player.name + " is not declarer, so he cannot pick up the Skat")
        player.cards.extend(self.game.skat)
        self.game.skat.clear()

    def put_down_skat(self, player, cards_to_put):
        """Return exactly two cards from the declarer's hand to the Skat."""
        if player.type is not Player.Type.DECLARER:
            raise InvalidPlayerMove("Player " + player.name + " is not declarer, so he cannot put something into Skat")
        # BUG FIX: the original used "len(cards_to_put) is not 2" -- an
        # object-identity comparison that only works by accident via
        # CPython's small-int cache (and warns on Python >= 3.8).  Use a
        # value comparison.
        elif len(cards_to_put) != 2:
            raise InvalidCardSize("Player has to put down exactly 2 cards not " + str(len(cards_to_put)))

        self.game.skat.extend(cards_to_put)
        [player.cards.remove(card) for card in cards_to_put]

    def declare_game(self, player, game_variant):
        """Fix the declarer's chosen game variant and leave the bid phase."""
        if player.type is not Player.Type.DECLARER:
            raise InvalidPlayerMove("Player " + player.name + " is not declarer, so he cannot declare the game variant")

        # set game variant for this game
        self.game.game_variant = game_variant
        # start next state
        self.handle_state_finished()

    def get_next_state(self):
        return GameStatePlay(self.game)
# ------------------------------------------------------------
# Sub states for bidding
# ------------------------------------------------------------
class BidStateCallTurn(GameState):
    """Bidding sub-state in which a player announces (calls) a bid value."""

    def handle_action(self, action):
        if isinstance(action, BidCallAction):
            self.bid_call(action.player, action.value)
            self.handle_state_finished()
            return
        if isinstance(action, BidPassAction):
            GameStateBid.bid_pass(self.game, action.player)
            self.handle_state_finished()
            return
        super().handle_action(action)

    def bid_call(self, player, value):
        """Validate and store the called bid value on the game."""
        GameStateBid.check_player_has_passed(self.game, player)
        if value not in GameStateBid.available_bid_values:
            raise InvalidPlayerMove(
                "Player " + player.name + " cannot bid " + str(value) + ". This bid is not available")
        self.game.bid_value = value

    def get_next_state(self):
        # Two passed players end the bidding; otherwise the opponent answers.
        bidding_over = len(self.game.passed_bid_players) == 2
        return BidStateEnd(self.game) if bidding_over else BidStateResponseTurn(self.game)
class BidStateResponseTurn(GameState):
    """Bidding sub-state in which a player answers a bid: accept or pass."""

    def handle_action(self, action):
        if isinstance(action, BidAcceptAction):
            self.bid_accept(action.player, action.value)
            self.handle_state_finished()
        elif isinstance(action, BidPassAction):
            GameStateBid.bid_pass(self.game, action.player)
            self.handle_state_finished()
        else:
            super().handle_action(action)

    def bid_accept(self, player, value):
        """Check that *player* accepts exactly the current bid value."""
        GameStateBid.check_player_has_passed(self.game, player)
        # BUG FIX: the original compared with "is not" (object identity).
        # CPython only caches ints up to 256, and several legal bids exceed
        # that (e.g. 264), so a perfectly valid acceptance could raise.
        # Compare by value instead.
        if value != self.game.bid_value:
            raise InvalidPlayerMove("Player " + player.name + " cannot accept bid of "
                                    + str(value) + ". Current bid is " + str(self.game.bid_value))

    def get_next_state(self):
        if len(self.game.passed_bid_players) == 2:
            return BidStateEnd(self.game)
        else:
            return BidStateCallTurn(self.game)
class BidStateEnd(GameState):
    """Terminal bidding sub-state: determines and marks the declarer."""

    def __init__(self, game):
        super().__init__(game)
        self.set_declarer()

    def set_declarer(self):
        # The first seat (in seating order) that did not pass wins the bid.
        passed = self.game.passed_bid_players
        declarer = self.game.get_first_seat()
        if declarer in passed:
            declarer = self.game.get_second_seat()
            if declarer in passed:
                declarer = self.game.get_third_seat()
        declarer.type = Player.Type.DECLARER

    def handle_action(self, action):
        # No actions are legal here; defer to the base-class handling.
        super().handle_action(action)

    def get_next_state(self):
        return None
# ------------------------------------------------------------
# Concrete action classes
# ------------------------------------------------------------
class BidAction(PlayerAction):
    # Common base for bid moves; stores the bid value alongside the player.
    def __init__(self, player, value):
        super().__init__(player)
        self.value = value


class BidCallAction(BidAction):
    # A player announces (calls) a bid value.
    def __init__(self, player, value):
        super().__init__(player, value)


class BidAcceptAction(BidAction):
    # A player accepts the currently called bid value.
    def __init__(self, player, value):
        super().__init__(player, value)


class BidPassAction(BidAction):
    # A player passes (drops out of the bidding).
    def __init__(self, player, value):
        super().__init__(player, value)


class PickUpSkatAction(PlayerAction):
    # The declarer takes the two Skat cards into his hand.
    def __init__(self, player):
        super().__init__(player)


class PutDownSkatAction(PlayerAction):
    # The declarer returns two cards of his choice to the Skat.
    def __init__(self, player, cards_to_put):
        super().__init__(player)
        self.cards_to_put = cards_to_put


class DeclareGameVariantAction(PlayerAction):
    # The declarer announces which game variant will be played.
    def __init__(self, player, game_variant):
        super().__init__(player)
        self.game_variant = game_variant
|
cliqz/socorro | refs/heads/master | socorro/external/es/crashstorage.py | 8 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import elasticsearch
from socorro.external.crashstorage_base import CrashStorageBase
from socorro.external.es.index_creator import IndexCreator
from socorro.lib import datetimeutil
from configman import Namespace
from configman.converters import class_converter
#==============================================================================
class ESCrashStorage(CrashStorageBase):
    """This sends processed crash reports to Elasticsearch."""

    required_config = Namespace()
    required_config.add_option(
        'transaction_executor_class',
        default="socorro.database.transaction_executor."
                "TransactionExecutorWithLimitedBackoff",
        doc='a class that will manage transactions',
        from_string_converter=class_converter,
    )
    required_config.elasticsearch = Namespace()
    required_config.elasticsearch.add_option(
        'elasticsearch_class',
        default='socorro.external.es.connection_context.ConnectionContext',
        from_string_converter=class_converter,
        reference_value_from='resource.elasticsearch',
    )

    # This cache reduces attempts to create indices, thus lowering overhead
    # each time a document is indexed.  NOTE: class-level, shared by all
    # instances in the process.
    indices_cache = set()

    #--------------------------------------------------------------------------
    def __init__(self, config, quit_check_callback=None):
        """Init, you know.
        """
        super(ESCrashStorage, self).__init__(
            config,
            quit_check_callback
        )

        # Ok, it's sane, so let's continue.
        self.es_context = self.config.elasticsearch.elasticsearch_class(
            config=self.config.elasticsearch
        )

        self.transaction = config.transaction_executor_class(
            config,
            self.es_context,
            quit_check_callback
        )

    #--------------------------------------------------------------------------
    def get_index_for_crash(self, crash_date):
        """Return the submission URL for a crash; based on the submission URL
        from config and the date of the crash.
        If the index name contains a datetime pattern (ex. %Y%m%d) then the
        crash_date will be parsed and appended to the index name.
        """
        index = self.config.elasticsearch.elasticsearch_index

        if not index:
            return None
        elif '%' in index:
            # Note that crash_date must be a datetime object!
            index = crash_date.strftime(index)

        return index

    #--------------------------------------------------------------------------
    def save_raw_and_processed(self, raw_crash, dumps, processed_crash,
                               crash_id):
        """This is the only write mechanism that is actually employed in normal
        usage.
        """
        crash_document = {
            'crash_id': crash_id,
            'processed_crash': processed_crash,
            'raw_crash': raw_crash
        }

        self.transaction(
            self._submit_crash_to_elasticsearch,
            crash_document=crash_document
        )

    #--------------------------------------------------------------------------
    def _submit_crash_to_elasticsearch(self, connection, crash_document):
        """Submit a crash report to elasticsearch.
        """
        # Massage the crash such that the date_processed field is formatted
        # in the fashion of our established mapping.
        # First create a datetime object from the string in the crash report.
        crash_date = datetimeutil.string_to_datetime(
            crash_document['processed_crash']['date_processed']
        )
        # Then convert it back to a string with the expected formatting.
        crash_date_with_t = datetimeutil.date_to_string(crash_date)
        # Finally, re-insert that string back into the report for indexing.
        crash_document['processed_crash']['date_processed'] = crash_date_with_t

        # Obtain the index name.
        es_index = self.get_index_for_crash(crash_date)
        es_doctype = self.config.elasticsearch.elasticsearch_doctype
        crash_id = crash_document['crash_id']

        # Attempt to create the index; it's OK if it already exists.
        if es_index not in self.indices_cache:
            index_creator = IndexCreator(config=self.config)
            index_creator.create_socorro_index(es_index)
            # BUG FIX: the cache was never populated, so index creation was
            # re-attempted on every single submission.  Record successful
            # creations so subsequent documents skip this step.
            self.indices_cache.add(es_index)

        # Submit the crash for indexing.
        try:
            connection.index(
                index=es_index,
                doc_type=es_doctype,
                body=crash_document,
                id=crash_id
            )
        except elasticsearch.exceptions.ElasticsearchException as e:
            self.config.logger.critical(
                'Submission to Elasticsearch failed for %s (%s)',
                crash_id,
                e,
                exc_info=True
            )
            raise
from socorro.lib.converters import change_default
from socorro.lib.datetimeutil import string_to_datetime
from socorro.external.crashstorage_base import Redactor
#==============================================================================
class ESCrashStorageNoStackwalkerOutput(ESCrashStorage):
    # Variant of ESCrashStorage that strips the (large) stackwalker
    # json_dump blobs from the processed crash before indexing it.
    required_config = Namespace()
    required_config.namespace('es_redactor')
    required_config.es_redactor.add_option(
        name="redactor_class",
        doc="the name of the class that implements a 'redact' method",
        default='socorro.external.crashstorage_base.Redactor',
        from_string_converter=class_converter,
    )
    # Keys removed from the processed crash prior to submission.
    required_config.es_redactor.forbidden_keys = change_default(
        Redactor,
        "forbidden_keys",
        "json_dump, "
        "upload_file_minidump_flash1.json_dump, "
        "upload_file_minidump_flash2.json_dump, "
        "upload_file_minidump_browser.json_dump"
    )

    #--------------------------------------------------------------------------
    def __init__(self, config, quit_check_callback=None):
        """Init, you know.
        """
        super(ESCrashStorageNoStackwalkerOutput, self).__init__(
            config,
            quit_check_callback
        )
        self.redactor = config.es_redactor.redactor_class(config.es_redactor)
        # Redaction mutates the processed crash in place, which is visible
        # to any other crash store fed the same object -- hence the warning.
        self.config.logger.warning(
            "beware, this crashstorage class is destructive to the "
            "processed crash - if you're using a polycrashstore you may "
            "find the modified processed crash saved to the other crashstores"
        )

    #--------------------------------------------------------------------------
    @staticmethod
    def reconstitute_datetimes(processed_crash):
        # Convert the known timestamp fields from strings back to datetime
        # objects; fields absent from the crash are silently skipped.
        datetime_fields = [
            'submitted_timestamp',
            'date_processed',
            'client_crash_date',
            'started_datetime',
            'startedDateTime',
            'completed_datetime',
            'completeddatetime',
        ]
        for a_key in datetime_fields:
            try:
                processed_crash[a_key] = string_to_datetime(
                    processed_crash[a_key]
                )
            except KeyError:
                # not there? we don't care
                pass

    #--------------------------------------------------------------------------
    def save_raw_and_processed(self, raw_crash, dumps, processed_crash,
                               crash_id):
        """This is the only write mechanism that is actually employed in normal
        usage.
        """
        self.reconstitute_datetimes(processed_crash)
        self.redactor.redact(processed_crash)

        super(ESCrashStorageNoStackwalkerOutput, self).save_raw_and_processed(
            raw_crash,
            dumps,
            processed_crash,
            crash_id
        )
|
trivoldus28/pulsarch-verilog | refs/heads/master | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/statvfs.py | 23 | """Constants for interpreting the results of os.statvfs() and os.fstatvfs()."""
# Indices for statvfs struct members in the tuple returned by
# os.statvfs() and os.fstatvfs().
# These mirror the field order of the POSIX statvfs(3) structure, i.e. the
# os.statvfs_result sequence, so result[F_XXX] selects the named field.

F_BSIZE = 0        # Preferred file system block size
F_FRSIZE = 1        # Fundamental file system block size
F_BLOCKS = 2        # Total number of file system blocks (FRSIZE)
F_BFREE = 3        # Total number of free blocks
F_BAVAIL = 4        # Free blocks available to non-superuser
F_FILES = 5        # Total number of file nodes
F_FFREE = 6        # Total number of free file nodes
F_FAVAIL = 7        # Free nodes available to non-superuser
F_FLAG = 8        # Flags (see your local statvfs man page)
F_NAMEMAX = 9        # Maximum file name length
|
odoomrp/odoomrp-wip | refs/heads/8.0 | mrp_subcontracting/models/purchase_order.py | 24 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class PurchaseOrder(models.Model):
    # Extends purchase orders with links to the MRP operation/production
    # they subcontract (mrp_subcontracting module).
    _inherit = 'purchase.order'

    # Work-center line (operation) this purchase order subcontracts.
    mrp_operation = fields.Many2one(
        'mrp.production.workcenter.line', 'MPR Operation')
    # Production order, derived from the linked operation.
    mrp_production = fields.Many2one(
        'mrp.production', string='MRP Production', store=True,
        related="mrp_operation.production_id")

    @api.multi
    def wkf_confirm_order(self):
        """On confirmation, create the outgoing picking that sends the
        operation's material moves to the subcontractor."""
        self.ensure_one()
        picking_obj = self.env['stock.picking']
        result = super(PurchaseOrder, self).wkf_confirm_order()
        picking = False
        if self.mrp_operation:
            for move in self.mrp_operation.production_id.move_lines:
                if move.work_order.id == self.mrp_operation.id:
                    if not picking:
                        # Lazily create a single picking shared by all the
                        # moves belonging to this operation.
                        wc_line = self.mrp_operation.routing_wc_line
                        vals = {'origin': self.mrp_operation.name,
                                'picking_type_id': wc_line.picking_type_id.id,
                                'invoice_state': 'none',
                                'partner_id': self.partner_id.id,
                                'mrp_production':
                                self.mrp_operation.production_id.id}
                        picking = picking_obj.create(vals)
                        self.mrp_operation.out_picking = picking.id
                    move.picking_id = picking.id
        return result

    @api.one
    def action_picking_create(self):
        """After the incoming picking is created, link it back to the
        operation and stamp it with the production order."""
        picking_obj = self.env['stock.picking']
        result = super(PurchaseOrder, self).action_picking_create()
        if self.mrp_operation:
            cond = [('origin', '=', self.name)]
            picking = picking_obj.search(cond, limit=1)
            self.mrp_operation.in_picking = picking.id
            picking.mrp_production = self.mrp_operation.production_id.id
        return result
|
rabipanda/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py | 72 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import PowerTransform
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class PowerTransformBijectorTest(test.TestCase):
  """Tests correctness of the power transformation."""

  def testBijector(self):
    # Checks the forward/inverse round-trip and both log-det-Jacobians
    # against the closed form y = (1 + c*x)**(1/c).
    with self.test_session():
      c = 0.2
      bijector = PowerTransform(
          power=c, event_ndims=1, validate_args=True)
      self.assertEqual("power_transform", bijector.name)
      # x stays above the domain boundary -1/c = -5.
      x = np.array([[[-1.], [2.], [-5. + 1e-4]]])
      y = (1. + x * c)**(1. / c)
      self.assertAllClose(y, bijector.forward(x).eval())
      self.assertAllClose(x, bijector.inverse(y).eval())
      self.assertAllClose(
          (c - 1.) * np.sum(np.log(y), axis=-1),
          bijector.inverse_log_det_jacobian(y).eval())
      # Forward and inverse log-det-Jacobians must be negatives of each
      # other (up to float tolerance).
      self.assertAllClose(
          -bijector.inverse_log_det_jacobian(y).eval(),
          bijector.forward_log_det_jacobian(x).eval(),
          rtol=1e-4,
          atol=0.)

  def testScalarCongruency(self):
    # Statistical sanity check on a scalar interval.
    with self.test_session():
      bijector = PowerTransform(
          power=0.2, validate_args=True)
      assert_scalar_congruency(
          bijector, lower_x=-2., upper_x=1.5, rtol=0.05)

  def testBijectiveAndFinite(self):
    # x is kept strictly above -5 so the forward transform stays finite.
    with self.test_session():
      bijector = PowerTransform(
          power=0.2, event_ndims=0, validate_args=True)
      x = np.linspace(-4.999, 10, num=10).astype(np.float32)
      y = np.logspace(0.001, 10, num=10).astype(np.float32)
      assert_bijective_and_finite(bijector, x, y, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
mikanbako/ri_ar | refs/heads/master | ri_advertiser/ri_advertiser/__init__.py | 12133432 | |
hatwar/buyback-frappe | refs/heads/master | frappe/integrations/doctype/social_login_keys/__init__.py | 12133432 | |
jbiason/chesttimer | refs/heads/master | api/chesttimer/api/__init__.py | 12133432 | |
mathspace/django | refs/heads/master | tests/migrations/test_auto_now_add/__init__.py | 12133432 | |
fmyzjs/horizon-hacker | refs/heads/master | openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/__init__.py | 12133432 | |
MFoster/breeze | refs/heads/master | tests/regressiontests/many_to_one_regress/__init__.py | 12133432 | |
kailIII/emaresa | refs/heads/7.0 | aeroo/report_aeroo/wizard/add_print_button.py | 6 | ##############################################################################
#
# Copyright (c) 2008-2012 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <info@alistek.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import pooler
from tools.translate import _
from osv import osv
from osv import fields
# Reports that must never get a "Print" action attached (handled by the
# wizard's 'exception' state), e.g. the generic screen-print report.
special_reports = [
    'printscreen.list'
]
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
}
class aeroo_add_print_button(osv.osv_memory):
    '''
    Wizard that registers a "Print" action (client_print_multi) for an
    Aeroo report on the report's model, so the report appears in the
    print menu of that model's views.
    '''
    _name = 'aeroo.add_print_button'
    _description = 'Add print button'

    def _check(self, cr, uid, context):
        # Compute the wizard's initial state: 'add' when no print action
        # exists for the active report yet, 'exist' when one is already
        # registered, 'exception' for reports that must not get a button.
        ir_values_obj = self.pool.get('ir.values')
        report = self.pool.get(context['active_model']).browse(cr, uid, context['active_id'], context=context)
        if report.report_name in special_reports:
            return 'exception'
        if report.report_wizard:
            act_win_obj = self.pool.get('ir.actions.act_window')
            act_win_ids = act_win_obj.search(cr, uid, [('res_model','=','aeroo.print_actions')], context=context)
            for act_win in act_win_obj.browse(cr, uid, act_win_ids, context=context):
                # NOTE(review): eval() of a context string stored in the DB;
                # acceptable only for trusted database content.
                act_win_context = eval(act_win.context, {})
                if act_win_context.get('report_action_id')==report.id:
                    return 'exist'
            return 'add'
        else:
            ids = ir_values_obj.search(cr, uid, [('value','=',report.type+','+str(report.id))])
            if not ids:
                return 'add'
            else:
                return 'exist'

    def do_action(self, cr, uid, ids, context):
        # Register the print action for the report's model, then either
        # reopen the wizard or jump to the created ir.values record.
        this = self.browse(cr, uid, ids[0], context=context)
        report = self.pool.get(context['active_model']).browse(cr, uid, context['active_id'], context=context)
        event_id = self.pool.get('ir.values').set_action(cr, uid, report.report_name, 'client_print_multi', report.model, 'ir.actions.report.xml,%d' % context['active_id'])
        if report.report_wizard:
            report._set_report_wizard(report.id)
        this.write({'state':'done'}, context=context)
        if not this.open_action:
            return _reopen(self, this.id, this._model)

        mod_obj = pooler.get_pool(cr.dbname).get('ir.model.data')
        act_obj = pooler.get_pool(cr.dbname).get('ir.actions.act_window')
        mod_id = mod_obj.search(cr, uid, [('name', '=', 'act_values_form_action')])[0]
        res_id = mod_obj.read(cr, uid, mod_id, ['res_id'])['res_id']
        act_win = act_obj.read(cr, uid, res_id, [])
        # Restrict the generic ir.values action window to the event we made.
        act_win['domain'] = [('id','=',event_id)]
        act_win['name'] = _('Client Events')
        return act_win

    _columns = {
        'open_action':fields.boolean('Open added action'),
        'state':fields.selection([
            ('add','Add'),
            ('exist','Exist'),
            ('exception','Exception'),
            ('done','Done'),
            ],'State', select=True, readonly=True),
    }
    _defaults = {
        'state': _check,
    }

aeroo_add_print_button()
|
redhat-openstack/neutron | refs/heads/f22-patches | neutron/tests/unit/test_api_v2_extension.py | 14 | # Copyright 2014 Intel Corporation.
# Copyright 2014 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo.config import cfg
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron import quota
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
class ExtensionTestCase(testlib_api.WebTestCase,
                        testlib_plugin.PluginSetupHelper):
    # Base class for API-extension tests: mocks the plugin and serves the
    # extension's resources through a webtest application (self.api).

    def _resotre_attr_map(self):
        # Restore the global RESOURCE_ATTRIBUTE_MAP saved in _setUpExtension.
        # (The typo in the method name is pre-existing and kept since
        # subclasses may reference it.)
        attributes.RESOURCE_ATTRIBUTE_MAP = self._saved_attr_map

    def _setUpExtension(self, plugin, service_type,
                        resource_attribute_map, extension_class,
                        resource_prefix, plural_mappings=None,
                        translate_resource_name=False,
                        allow_pagination=False, allow_sorting=False,
                        supported_extension_aliases=None,
                        use_quota=False,
                        ):
        # plugin: dotted path of the plugin class to patch with a mock.

        self._resource_prefix = resource_prefix
        self._plural_mappings = plural_mappings or {}
        self._translate_resource_name = translate_resource_name

        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None

        # Save the global RESOURCE_ATTRIBUTE_MAP
        self._saved_attr_map = attributes.RESOURCE_ATTRIBUTE_MAP.copy()
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        self.addCleanup(self._resotre_attr_map)

        # Create the default configurations
        self.config_parse()

        #just stubbing core plugin with plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('core_plugin', plugin)
        if service_type:
            cfg.CONF.set_override('service_plugins', [plugin])

        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        if service_type:
            instance.get_plugin_type.return_value = service_type

        if supported_extension_aliases is not None:
            instance.supported_extension_aliases = supported_extension_aliases
        if allow_pagination:
            cfg.CONF.set_override('allow_pagination', True)
            # instance.__native_pagination_support = True
            # (name-mangled attribute set via setattr because the mock's
            # class name participates in the mangling)
            native_pagination_attr_name = ("_%s__native_pagination_support"
                                           % instance.__class__.__name__)
            setattr(instance, native_pagination_attr_name, True)
        if allow_sorting:
            cfg.CONF.set_override('allow_sorting', True)
            # instance.__native_sorting_support = True
            native_sorting_attr_name = ("_%s__native_sorting_support"
                                        % instance.__class__.__name__)
            setattr(instance, native_sorting_attr_name, True)
        if use_quota:
            quota.QUOTAS._driver = None
            cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                                  group='QUOTAS')

        class ExtensionTestExtensionManager(object):
            def get_resources(self):
                # Add the resources to the global attribute map
                # This is done here as the setup process won't
                # initialize the main API router which extends
                # the global attribute map
                attributes.RESOURCE_ATTRIBUTE_MAP.update(
                    resource_attribute_map)
                return extension_class.get_resources()

            def get_actions(self):
                return []

            def get_request_extensions(self):
                return []

        ext_mgr = ExtensionTestExtensionManager()
        self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
        self.api = webtest.TestApp(self.ext_mdw)

    def _test_entity_delete(self, entity):
        """Does the entity deletion based on naming convention."""
        entity_id = str(uuid.uuid4())
        path = self._resource_prefix + '/' if self._resource_prefix else ''
        path += self._plural_mappings.get(entity, entity + 's')
        if self._translate_resource_name:
            path = path.replace('_', '-')
        res = self.api.delete(
            test_api_v2._get_path(path, id=entity_id, fmt=self.fmt))

        delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
        delete_entity.assert_called_with(mock.ANY, entity_id)
        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
|
andymckay/zamboni | refs/heads/master | apps/amo/tests/test_models.py | 1 | from mock import Mock
from nose.tools import eq_
import amo.models
from amo.models import manual_order
from amo.tests import TestCase
from amo import models as context
from mkt.webapps.models import Addon
class ManualOrderTest(TestCase):
    # Fixtures provide three add-ons with the primary keys used below.
    fixtures = ('base/apps', 'base/addon_3615', 'base/addon_5299_gcal',
                'base/addon_40')

    def test_ordering(self):
        """Given a specific set of primary keys, assure that we return addons
        in that order."""
        semi_arbitrary_order = [40, 5299, 3615]
        addons = manual_order(Addon.objects.all(), semi_arbitrary_order)
        eq_(semi_arbitrary_order, [addon.id for addon in addons])
def test_skip_cache():
    # skip_cache() is a re-entrant context manager that toggles a
    # thread-local flag; nesting must keep it set until the outermost exit.
    eq_(getattr(context._locals, 'skip_cache', False), False)
    with context.skip_cache():
        eq_(context._locals.skip_cache, True)
        with context.skip_cache():
            eq_(context._locals.skip_cache, True)
        eq_(context._locals.skip_cache, True)
    eq_(context._locals.skip_cache, False)


def test_use_master():
    # Same re-entrancy semantics as skip_cache, but for multidb master
    # pinning.
    local = context.multidb.pinning._locals
    eq_(getattr(local, 'pinned', False), False)
    with context.use_master():
        eq_(local.pinned, True)
        with context.use_master():
            eq_(local.pinned, True)
        eq_(local.pinned, True)
    eq_(local.pinned, False)
class TestModelBase(TestCase):
    # Tests for the on-change callback machinery of amo's model base.
    fixtures = ['base/addon_3615']

    def setUp(self):
        # Isolate the global callback registry and install a mock callback.
        self.saved_cb = amo.models._on_change_callbacks.copy()
        amo.models._on_change_callbacks.clear()
        self.cb = Mock()
        self.cb.__name__ = 'testing_mock_callback'
        Addon.on_change(self.cb)

    def tearDown(self):
        amo.models._on_change_callbacks = self.saved_cb

    def test_multiple_ignored(self):
        # Registering the same callback twice must not duplicate it.
        cb = Mock()
        cb.__name__ = 'something'
        old = len(amo.models._on_change_callbacks[Addon])
        Addon.on_change(cb)
        eq_(len(amo.models._on_change_callbacks[Addon]), old + 1)
        Addon.on_change(cb)
        eq_(len(amo.models._on_change_callbacks[Addon]), old + 1)

    def test_change_called_on_new_instance_save(self):
        for create_addon in (Addon, Addon.objects.create):
            addon = create_addon(public_stats=False, type=amo.ADDON_EXTENSION)
            addon.public_stats = True
            addon.save()
            assert self.cb.called
            kw = self.cb.call_args[1]
            eq_(kw['old_attr']['public_stats'], False)
            eq_(kw['new_attr']['public_stats'], True)
            eq_(kw['instance'].id, addon.id)
            eq_(kw['sender'], Addon)

    def test_change_called_on_update(self):
        addon = Addon.objects.get(pk=3615)
        addon.update(public_stats=False)
        assert self.cb.called
        kw = self.cb.call_args[1]
        eq_(kw['old_attr']['public_stats'], True)
        eq_(kw['new_attr']['public_stats'], False)
        eq_(kw['instance'].id, addon.id)
        eq_(kw['sender'], Addon)

    def test_change_called_on_save(self):
        addon = Addon.objects.get(pk=3615)
        addon.public_stats = False
        addon.save()
        assert self.cb.called
        kw = self.cb.call_args[1]
        eq_(kw['old_attr']['public_stats'], True)
        eq_(kw['new_attr']['public_stats'], False)
        eq_(kw['instance'].id, addon.id)
        eq_(kw['sender'], Addon)

    def test_change_is_not_recursive(self):
        # A callback that itself saves/updates the instance must not
        # re-trigger the callbacks (guarded by the model base).

        class fn:
            called = False

        def callback(old_attr=None, new_attr=None, instance=None,
                     sender=None, **kw):
            fn.called = True
            # Both save and update should be protected:
            instance.update(public_stats=False)
            instance.save()

        Addon.on_change(callback)

        addon = Addon.objects.get(pk=3615)
        addon.save()
        assert fn.called
        # No exception = pass

    def test_safer_get_or_create(self):
        data = {'guid': '123', 'type': amo.ADDON_EXTENSION}
        a, c = Addon.objects.safer_get_or_create(**data)
        assert c
        b, c = Addon.objects.safer_get_or_create(**data)
        assert not c
        eq_(a, b)
def test_cache_key():
    """Cache keys must be identical regardless of which DB alias produced them."""
    # Test that we are not taking the db into account when building our
    # cache keys for django-cache-machine. See bug 928881.
    eq_(Addon._cache_key(1, 'default'), Addon._cache_key(1, 'slave'))
|
asedunov/intellij-community | refs/heads/master | python/helpers/py3only/docutils/languages/ja.py | 52 | # -*- coding: utf-8 -*-
# $Id: ja.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Hisashi Morita <hisashim@kt.rim.or.jp>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Japanese-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Canonical (English) docutils label name -> Japanese display text.
labels = {
    # fixed: language-dependent
    'author': '著者',
    'authors': '著者',
    'organization': '組織',
    'address': '住所',
    'contact': '連絡先',
    'version': 'バージョン',
    'revision': 'リビジョン',
    'status': 'ステータス',
    'date': '日付',
    'copyright': '著作権',
    'dedication': '献辞',
    'abstract': '概要',
    'attention': '注目!',
    'caution': '注意!',
    'danger': '!危険!',
    'error': 'エラー',
    'hint': 'ヒント',
    'important': '重要',
    'note': '備考',
    'tip': '通報',
    'warning': '警告',
    'contents': '目次'}
"""Mapping of node class name to label text."""

# Reverse direction: Japanese field name (as written in a document's
# bibliographic field list) -> canonical docutils field name.
bibliographic_fields = {
    # language-dependent: fixed
    '著者': 'author',
    # NOTE(review): ' n/a' appears to be a deliberate placeholder --
    # Japanese does not distinguish singular/plural, so 'author' and
    # 'authors' would otherwise need the same (duplicate) dict key.
    # Confirm against upstream docutils before changing.
    ' n/a': 'authors',
    '組織': 'organization',
    '住所': 'address',
    '連絡先': 'contact',
    'バージョン': 'version',
    'リビジョン': 'revision',
    'ステータス': 'status',
    '日付': 'date',
    '著作権': 'copyright',
    '献辞': 'dedication',
    '概要': 'abstract'}
"""Japanese (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
holmes-app/holmes-api | refs/heads/master | tests/unit/validators/test_meta_tags.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from mock import Mock
from preggy import expect
from holmes.config import Config
from holmes.reviewer import Reviewer
from holmes.validators.meta_tags import MetaTagsValidator
from tests.unit.base import ValidatorTestCase
from tests.fixtures import PageFactory
class TestMetaTagsValidator(ValidatorTestCase):
    """Unit tests for holmes.validators.meta_tags.MetaTagsValidator."""

    def test_can_validate_pages_with_metatags(self):
        """A page that has meta tags must not produce any violation."""
        page = PageFactory.create()

        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )

        validator = MetaTagsValidator(reviewer)
        validator.add_violation = Mock()
        # Simulate the facter having already collected these tags.
        validator.review.data['meta.tags'] = [
            {'content': 'utf-8', 'property': None, 'key': 'charset'},
            {'content': 'text/html;charset=UTF-8', 'property': 'http-equiv', 'key': 'Content-Type'},
        ]

        validator.validate()

        expect(validator.add_violation.called).to_be_false()

    def test_can_validate_page_without_meta_tags(self):
        """An empty tag list must raise the 'absent.metatags' violation."""
        page = PageFactory.create()

        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[]
        )

        validator = MetaTagsValidator(reviewer)
        validator.add_violation = Mock()
        validator.review.data['meta.tags'] = []

        validator.validate()

        validator.add_violation.assert_called_once_with(
            key='absent.metatags',
            value='No metatags.',
            points=100
        )

    def test_can_validate_page_with_metatag_description_too_long(self):
        """A description over the configured size violates; at the limit it doesn't."""
        page = PageFactory.create()

        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=Config(),
            validators=[],
            cache=self.sync_cache
        )
        # Limit is looked up from the violation definitions (default 300).
        reviewer.violation_definitions = {
            'page.metatags.description_too_big': {'default_value': 300},
        }

        validator = MetaTagsValidator(reviewer)
        validator.add_violation = Mock()
        # One character over the limit -> violation expected.
        validator.review.data['meta.tags'] = [
            {'content': 'X' * 301, 'property': 'name', 'key': 'description'},
        ]

        validator.validate()

        validator.add_violation.assert_called_once_with(
            key='page.metatags.description_too_big',
            value={'max_size': 300},
            points=20
        )

        # Exactly at the limit -> no violation.
        validator.add_violation = Mock()
        validator.review.data['meta.tags'] = [
            {'content': 'X' * 300, 'property': 'name', 'key': 'description'},
        ]

        validator.validate()

        expect(validator.add_violation.called).to_be_false()

    def test_can_get_violation_definitions(self):
        """The validator must declare both of its violation keys."""
        reviewer = Mock()
        validator = MetaTagsValidator(reviewer)

        definitions = validator.get_violation_definitions()

        expect(definitions).to_length(2)
        expect('absent.metatags' in definitions).to_be_true()
        expect('page.metatags.description_too_big' in definitions).to_be_true()

    def test_can_get_default_violations_values(self):
        """Default violation values must mirror the config's max-size setting."""
        config = Config()
        config.METATAG_DESCRIPTION_MAX_SIZE = 300

        page = PageFactory.create()

        reviewer = Reviewer(
            api_url='http://localhost:2368',
            page_uuid=page.uuid,
            page_url=page.url,
            page_score=0.0,
            config=config,
            validators=[]
        )

        validator = MetaTagsValidator(reviewer)

        violations_values = validator.get_default_violations_values(config)

        expect(violations_values).to_include('page.metatags.description_too_big')
        expect(violations_values['page.metatags.description_too_big']).to_length(2)
        expect(violations_values['page.metatags.description_too_big']).to_be_like({
            'value': config.METATAG_DESCRIPTION_MAX_SIZE,
            'description': config.get_description('METATAG_DESCRIPTION_MAX_SIZE')
        })
|
neilLasrado/erpnext | refs/heads/develop | erpnext/projects/doctype/project_user/__init__.py | 12133432 | |
pquentin/django | refs/heads/stable/1.8.x | django/db/backends/__init__.py | 12133432 | |
oceanobservatories/mi-instrument | refs/heads/master | mi/instrument/teledyne/workhorse/__init__.py | 12133432 | |
yavuzovski/playground | refs/heads/master | python/django/RESTTest/.venv/lib/python3.4/site-packages/django/forms/renderers.py | 45 | import os
from django.conf import settings
from django.template.backends.django import DjangoTemplates
from django.template.loader import get_template
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
try:
from django.template.backends.jinja2 import Jinja2
except ImportError:
def Jinja2(params):
raise ImportError("jinja2 isn't installed")
ROOT = upath(os.path.dirname(__file__))
@lru_cache.lru_cache()
def get_default_renderer():
    """Return (and memoize) an instance of the class named by settings.FORM_RENDERER.

    Cached for the life of the process: changing FORM_RENDERER at runtime
    has no effect unless the cache is cleared.
    """
    renderer_class = import_string(settings.FORM_RENDERER)
    return renderer_class()
class BaseRenderer(object):
    """Abstract base for form renderers; subclasses provide get_template()."""

    def get_template(self, template_name):
        raise NotImplementedError('subclasses must implement get_template()')

    def render(self, template_name, context, request=None):
        """Render *template_name* with *context*, returning the stripped text."""
        rendered = self.get_template(template_name).render(context, request=request)
        return rendered.strip()
class EngineMixin(object):
    """Mixin that builds a template engine from the class attribute ``backend``."""

    def get_template(self, template_name):
        return self.engine.get_template(template_name)

    @cached_property
    def engine(self):
        # Instantiate the backend once per renderer instance.  Templates are
        # looked up in the renderer's own built-in directory (under ROOT)
        # plus each installed app's template directory (APP_DIRS).
        return self.backend({
            'APP_DIRS': True,
            'DIRS': [os.path.join(ROOT, self.backend.app_dirname)],
            'NAME': 'djangoforms',
            'OPTIONS': {},
        })
# NOTE: this class deliberately shadows the engine class imported above
# under the same name; the ``backend`` attribute below is evaluated while
# the class body executes and therefore still refers to the *imported*
# django.template.backends.django.DjangoTemplates.
class DjangoTemplates(EngineMixin, BaseRenderer):
    """
    Load Django templates from the built-in widget templates in
    django/forms/templates and from apps' 'templates' directory.
    """
    backend = DjangoTemplates
# Same shadowing pattern as above: ``backend`` binds to the imported Jinja2
# engine (or the ImportError-raising stand-in when jinja2 isn't installed).
class Jinja2(EngineMixin, BaseRenderer):
    """
    Load Jinja2 templates from the built-in widget templates in
    django/forms/jinja2 and from apps' 'jinja2' directory.
    """
    backend = Jinja2
class TemplatesSetting(BaseRenderer):
    """
    Load templates using template.loader.get_template() which is configured
    based on settings.TEMPLATES.
    """

    def get_template(self, template_name):
        # Delegates to the project-wide template machinery rather than a
        # renderer-private engine, so widget templates can be overridden in
        # settings.TEMPLATES like any other template.
        return get_template(template_name)
|
goibibo/django-mongo-sessions | refs/heads/master | setup.py | 2 | from setuptools import setup
# Trove classifiers, one per line; empty lines are filtered out when the
# string is split in the setup() call below.
classifiers = '''\
Framework :: Django
Environment :: Web Environment
Intended Audience :: Developers
Topic :: Internet :: WWW/HTTP
License :: OSI Approved :: Apache Software License
Development Status :: 5 - Production/Stable
Natural Language :: English
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
'''

description = 'mongodb as Django sessions backend'
packages = ['mongo_sessions']
# no codecs\with for python 2.5
def long_description():
    """Read and return README.rst (used as the PyPI long description).

    Explicit open/close instead of a ``with`` block is deliberate: the
    package advertises Python 2.5 support (see the note above).
    """
    f = open('README.rst')
    rst = f.read()
    f.close()
    return rst
setup(
    name='django-mongo-sessions',
    version='0.0.4',
    packages=packages,
    description=description,
    long_description=long_description(),
    author='hellysmile',
    author_email='hellysmile@gmail.com',
    url='https://github.com/hellysmile/django-mongo-sessions',
    zip_safe=False,
    install_requires=[
        'django >= 1.4',
        'pymongo >= 2.4.2'
    ],
    license='http://www.apache.org/licenses/LICENSE-2.0',
    # list(...) so this is a real list on Python 3 as well: there, a bare
    # ``filter`` object is a one-shot lazy iterator, and distutils/setuptools
    # expect a sequence of classifier strings.
    classifiers=list(filter(None, classifiers.split('\n'))),
    keywords=[
        "django", "mongo", "sessions"
    ]
)
|
nrjcoin-project/p2pool | refs/heads/master | p2pool/util/jsonrpc.py | 261 | from __future__ import division
import json
import weakref
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import failure, log
from twisted.web import client, error
from p2pool.util import deferral, deferred_resource, memoize
class Error(Exception):
    """Abstract JSON-RPC 2.0 error; instantiate via Error_for_code(code)."""

    def __init__(self, code, message, data=None):
        # Guard against direct use: concrete errors carry their code via a
        # memoized subclass from Error_for_code().
        if type(self) is Error:
            raise TypeError("can't directly instantiate Error class; use Error_for_code")
        if not isinstance(code, int):
            raise TypeError('code must be an int')
        #if not isinstance(message, unicode):
        #    raise TypeError('message must be a unicode')
        self.code, self.message, self.data = code, message, data

    def __str__(self):
        return '%i %s' % (self.code, self.message) + (' %r' % (self.data, ) if self.data is not None else '')

    def _to_obj(self):
        # Shape of the JSON-RPC 2.0 "error" member of a response object.
        return {
            'code': self.code,
            'message': self.message,
            'data': self.data,
        }
# Memoized (weakly, per code) so `except Error_for_code(-32601)` style
# comparisons always see the same class object for the same code.
@memoize.memoize_with_backing(weakref.WeakValueDictionary())
def Error_for_code(code):
    """Return an Error subclass whose instances are bound to ``code``."""
    class NarrowError(Error):
        def __init__(self, *args, **kwargs):
            Error.__init__(self, code, *args, **kwargs)
    return NarrowError
class Proxy(object):
    """Attribute-based JSON-RPC caller.

    ``p.rpc_foo(x)`` invokes remote method "foo"; ``p.svc_bar.rpc_foo(x)``
    invokes the dotted method "bar.foo".
    """

    def __init__(self, func, services=[]):
        self._func = func
        self._services = services

    def __getattr__(self, attr):
        if attr.startswith('rpc_'):
            # Join the accumulated service path with the method name.
            return lambda *params: self._func('.'.join(self._services + [attr[len('rpc_'):]]), params)
        elif attr.startswith('svc_'):
            # Descend one service level; returns a new proxy, stateless.
            return Proxy(self._func, self._services + [attr[len('svc_'):]])
        else:
            raise AttributeError('%r object has no attribute %r' % (self.__class__.__name__, attr))
@defer.inlineCallbacks
def _handle(data, provider, preargs=(), response_handler=None):
    """Dispatch one raw JSON-RPC 2.0 message.

    A *request* is dispatched against ``provider`` (rpc_/svc_ attributes)
    and the serialized response string is returned; a *response* is routed
    to ``response_handler(id, result-or-Failure)`` and None is returned.
    ``preargs`` are prepended to every method call's arguments.
    """
    id_ = None
    try:
        try:
            try:
                req = json.loads(data)
            except Exception:
                raise Error_for_code(-32700)(u'Parse error')
            if 'result' in req or 'error' in req:
                # This is a response to one of our own requests, not a call.
                response_handler(req['id'], req['result'] if 'error' not in req or req['error'] is None else
                    failure.Failure(Error_for_code(req['error']['code'])(req['error']['message'], req['error'].get('data', None))))
                defer.returnValue(None)
            id_ = req.get('id', None)
            method = req.get('method', None)
            if not isinstance(method, basestring):
                raise Error_for_code(-32600)(u'Invalid Request')
            params = req.get('params', [])
            if not isinstance(params, list):
                raise Error_for_code(-32600)(u'Invalid Request')
            # Walk the dotted service path: "a.b.c" -> svc_a -> svc_b -> rpc_c.
            for service_name in method.split('.')[:-1]:
                provider = getattr(provider, 'svc_' + service_name, None)
                if provider is None:
                    raise Error_for_code(-32601)(u'Service not found')
            method_meth = getattr(provider, 'rpc_' + method.split('.')[-1], None)
            if method_meth is None:
                raise Error_for_code(-32601)(u'Method not found')
            result = yield method_meth(*list(preargs) + list(params))
            error = None
        except Error:
            raise
        except Exception:
            # Unexpected server-side failure: log it and report an opaque
            # implementation-defined error to the peer.
            log.err(None, 'Squelched JSON error:')
            raise Error_for_code(-32099)(u'Unknown error')
    except Error, e:
        result = None
        error = e._to_obj()
    defer.returnValue(json.dumps(dict(
        jsonrpc='2.0',
        id=id_,
        result=result,
        error=error,
    )))
# HTTP
@defer.inlineCallbacks
def _http_do(url, headers, timeout, method, params):
    """POST a single JSON-RPC call to ``url`` and return the decoded result.

    Raises the matching Error_for_code subclass when the server returns a
    JSON-RPC error, ValueError on an id mismatch.
    """
    id_ = 0
    try:
        data = yield client.getPage(
            url=url,
            method='POST',
            headers=dict(headers, **{'Content-Type': 'application/json'}),
            postdata=json.dumps({
                'jsonrpc': '2.0',
                'method': method,
                'params': params,
                'id': id_,
            }),
            timeout=timeout,
        )
    except error.Error, e:
        # Some servers return JSON-RPC error bodies with an HTTP error
        # status; try to decode the body before giving up.
        try:
            resp = json.loads(e.response)
        except:
            raise e
    else:
        resp = json.loads(data)
    if resp['id'] != id_:
        raise ValueError('invalid id')
    if 'error' in resp and resp['error'] is not None:
        raise Error_for_code(resp['error']['code'])(resp['error']['message'], resp['error'].get('data', None))
    defer.returnValue(resp['result'])
# Convenience constructor: a Proxy whose calls go out as HTTP POSTs to ``url``.
HTTPProxy = lambda url, headers={}, timeout=5: Proxy(lambda method, params: _http_do(url, headers, timeout, method, params))
class HTTPServer(deferred_resource.DeferredResource):
    """Twisted web resource serving JSON-RPC over HTTP POST."""

    def __init__(self, provider):
        deferred_resource.DeferredResource.__init__(self)
        self._provider = provider

    @defer.inlineCallbacks
    def render_POST(self, request):
        # The request object is prepended to every handler's arguments.
        data = yield _handle(request.content.read(), self._provider, preargs=[request])
        # A POST body must be a request, so _handle always yields a response.
        assert data is not None
        request.setHeader('Content-Type', 'application/json')
        request.setHeader('Content-Length', len(data))
        request.write(data)
class LineBasedPeer(basic.LineOnlyReceiver):
    """Bidirectional JSON-RPC peer over newline-delimited messages.

    Outgoing calls are made through ``self.other`` (a Proxy); incoming
    lines may be requests (answered) or responses (matched by id).
    """

    delimiter = '\n'

    def __init__(self):
        #basic.LineOnlyReceiver.__init__(self)
        # GenericDeferrer assigns request ids and pairs responses with the
        # Deferreds of the requests that produced them.
        self._matcher = deferral.GenericDeferrer(max_id=2**30, func=lambda id, method, params: self.sendLine(json.dumps({
            'jsonrpc': '2.0',
            'method': method,
            'params': params,
            'id': id,
        })))
        self.other = Proxy(self._matcher)

    def lineReceived(self, line):
        # _handle returns a response string for requests, None for responses;
        # only the former is written back to the peer.
        _handle(line, self, response_handler=self._matcher.got_response).addCallback(lambda line2: self.sendLine(line2) if line2 is not None else None)
|
v-iam/azure-sdk-for-python | refs/heads/master | azure-graphrbac/azure/graphrbac/models/service_principal_paged.py | 3 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
# AutoRest-generated code (see file header): edit upstream, not here.
class ServicePrincipalPaged(Paged):
    """
    A paging container for iterating over a list of ServicePrincipal object
    """
    # The OData continuation key contains a literal '.', which must be
    # escaped because '.' is a path separator in msrest attribute maps.
    _attribute_map = {
        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[ServicePrincipal]'}
    }

    def __init__(self, *args, **kwargs):
        super(ServicePrincipalPaged, self).__init__(*args, **kwargs)
|
jraedler/DyMat | refs/heads/master | DyMat/Export/MATLAB.py | 2 | # Copyright (c) 2011, Joerg Raedler (Berlin, Germany)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import scipy.io
def export(dm, varList, fileName=None, formatOptions=None):
    """Export DyMat data to a simple MATLAB file.

    :param dm: DyMatFile instance to read variables from
    :param varList: iterable of variable names to export
    :param fileName: output file name; defaults to ``dm.fileName + '.mat'``
    :param formatOptions: accepted for API symmetry with the other export
        backends; currently unused.  (Changed from a mutable ``{}`` default
        to ``None`` -- mutable defaults are shared between calls.)
    """
    if not fileName:
        fileName = dm.fileName + '.mat'
    # Group the requested variables by the abscissa block they share.
    vList = dm.sortByBlocks(varList)
    md = {}
    for block in vList:
        for name in vList[block]:
            md[name] = dm.data(name)
        # One abscissa (e.g. time) vector per block, named like "Time_00".
        # (The '%'-formatted key is already a str; the old str() wrapper
        # was redundant.)
        md['%s_%02i' % (dm._absc[0], block)] = dm.abscissa(block, True)
    scipy.io.savemat(fileName, md, oned_as='row')
|
eckucukoglu/arm-linux-gnueabihf | refs/heads/master | lib/python2.7/test/test_anydbm.py | 94 | #! /usr/bin/env python
"""Test script for the anydbm module
based on testdumbdbm.py
"""
import os
import unittest
import glob
from test import test_support
_fname = test_support.TESTFN
# Silence Py3k warning
anydbm = test_support.import_module('anydbm', deprecated=True)
def _delete_files():
    # we don't know the precise name the underlying database uses
    # so we use glob to locate all names
    for leftover in glob.glob(_fname + "*"):
        try:
            os.unlink(leftover)
        except OSError:
            pass
class AnyDBMTestCase(unittest.TestCase):
    """Exercises the generic anydbm interface against whichever dbm backend
    anydbm selects on this platform."""

    # Reference key/value content used by every test.
    _dict = {'0': '',
             'a': 'Python:',
             'b': 'Programming',
             'c': 'the',
             'd': 'way',
             'f': 'Guido',
             'g': 'intended'
             }

    def __init__(self, *args):
        unittest.TestCase.__init__(self, *args)

    def test_anydbm_creation(self):
        """'c' mode creates an empty database that accepts writes."""
        f = anydbm.open(_fname, 'c')
        self.assertEqual(f.keys(), [])
        for key in self._dict:
            f[key] = self._dict[key]
        self.read_helper(f)
        f.close()

    def test_anydbm_modification(self):
        """'c' mode on an existing database allows updating a value."""
        self.init_db()
        f = anydbm.open(_fname, 'c')
        # Keep the reference dict in sync so read_helper still matches.
        self._dict['g'] = f['g'] = "indented"
        self.read_helper(f)
        f.close()

    def test_anydbm_read(self):
        """'r' mode reads back everything init_db wrote."""
        self.init_db()
        f = anydbm.open(_fname, 'r')
        self.read_helper(f)
        f.close()

    def test_anydbm_keys(self):
        """The stored key set matches the reference dict's keys."""
        self.init_db()
        f = anydbm.open(_fname, 'r')
        # keys_helper performs the assertion; the return value is unused.
        keys = self.keys_helper(f)
        f.close()

    def read_helper(self, f):
        """Assert every reference key/value pair is present in ``f``."""
        keys = self.keys_helper(f)
        for key in self._dict:
            self.assertEqual(self._dict[key], f[key])

    def init_db(self):
        """Create a fresh database ('n' mode) holding the reference dict."""
        f = anydbm.open(_fname, 'n')
        for k in self._dict:
            f[k] = self._dict[k]
        f.close()

    def keys_helper(self, f):
        """Assert ``f``'s key set equals the reference keys; return them sorted."""
        keys = f.keys()
        keys.sort()
        dkeys = self._dict.keys()
        dkeys.sort()
        self.assertEqual(keys, dkeys)
        return keys

    def tearDown(self):
        _delete_files()

    def setUp(self):
        _delete_files()
def test_main():
    """Run the test case, always removing database files afterwards."""
    try:
        test_support.run_unittest(AnyDBMTestCase)
    finally:
        _delete_files()

if __name__ == "__main__":
    test_main()
|
joshua-cogliati-inl/moose | refs/heads/devel | framework/contrib/nsiqcppstyle/nsiqunittest/__init__.py | 12133432 | |
mechatroner/rainbow_csv | refs/heads/master | rbql_core/test/__init__.py | 12133432 | |
darjeeling/django | refs/heads/master | tests/sites_tests/__init__.py | 12133432 | |
kosz85/django | refs/heads/master | tests/migrations/test_migrations_conflict/__init__.py | 12133432 | |
MountainWei/nova | refs/heads/master | nova/api/openstack/compute/images.py | 33 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import common
from nova.api.openstack.compute.views import images as views_images
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
import nova.image
import nova.utils
ALIAS = 'images'
SUPPORTED_FILTERS = {
'name': 'name',
'status': 'status',
'changes-since': 'changes-since',
'server': 'property-instance_uuid',
'type': 'property-image_type',
'minRam': 'min_ram',
'minDisk': 'min_disk',
}
class ImagesController(wsgi.Controller):
    """Base controller for retrieving/displaying images."""

    _view_builder_class = views_images.ViewBuilder

    def __init__(self, **kwargs):
        super(ImagesController, self).__init__(**kwargs)
        self._image_api = nova.image.API()

    def _get_filters(self, req):
        """Return a dictionary of query param filters from the request.

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS or param.startswith('property-'):
                # map filter name or carry through if property-*
                filter_name = SUPPORTED_FILTERS.get(param, param)
                filters[filter_name] = req.params.get(param)

        # ensure server filter is the instance uuid
        filter_name = 'property-instance_uuid'
        try:
            # The 'server' filter may arrive as a full URL; keep only the
            # trailing path segment (the instance uuid).
            filters[filter_name] = filters[filter_name].rsplit('/', 1)[1]
        except (AttributeError, IndexError, KeyError):
            # Filter absent, not a string, or contains no '/': leave as-is.
            pass

        filter_name = 'status'
        if filter_name in filters:
            # The Image API expects us to use lowercase strings for status
            filters[filter_name] = filters[filter_name].lower()
        return filters

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return detailed information about a specific image.

        :param req: `wsgi.Request` object
        :param id: Image identifier
        """
        context = req.environ['nova.context']

        try:
            image = self._image_api.get(context, id)
        except (exception.ImageNotFound, exception.InvalidImageRef):
            explanation = _("Image not found.")
            raise webob.exc.HTTPNotFound(explanation=explanation)

        # Prime the per-request cache so the view builder can reuse the row.
        req.cache_db_items('images', [image], 'id')
        return self._view_builder.show(req, image)

    @extensions.expected_errors((403, 404))
    @wsgi.response(204)
    def delete(self, req, id):
        """Delete an image, if allowed.

        :param req: `wsgi.Request` object
        :param id: Image identifier (integer)
        """
        context = req.environ['nova.context']
        try:
            self._image_api.delete(context, id)
        except exception.ImageNotFound:
            explanation = _("Image not found.")
            raise webob.exc.HTTPNotFound(explanation=explanation)
        except exception.ImageNotAuthorized:
            # The image service raises this exception on delete if glanceclient
            # raises HTTPForbidden.
            explanation = _("You are not allowed to delete the image.")
            raise webob.exc.HTTPForbidden(explanation=explanation)

    @extensions.expected_errors(400)
    def index(self, req):
        """Return an index listing of images available to the request.

        :param req: `wsgi.Request` object
        """
        context = req.environ['nova.context']
        filters = self._get_filters(req)
        page_params = common.get_pagination_params(req)

        try:
            images = self._image_api.get_all(context, filters=filters,
                                             **page_params)
        except exception.Invalid as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        return self._view_builder.index(req, images)

    @extensions.expected_errors(400)
    def detail(self, req):
        """Return a detailed index listing of images available to the request.

        :param req: `wsgi.Request` object.
        """
        context = req.environ['nova.context']
        filters = self._get_filters(req)
        page_params = common.get_pagination_params(req)
        try:
            images = self._image_api.get_all(context, filters=filters,
                                             **page_params)
        except exception.Invalid as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())

        req.cache_db_items('images', images, 'id')
        return self._view_builder.detail(req, images)
class Images(extensions.V21APIExtensionBase):
    """Proxying API for Images."""

    name = "Images"
    alias = ALIAS
    version = 1

    def get_resources(self):
        # Expose "detail" as a collection-level GET action alongside the
        # standard index/show/delete routes.
        coll_actions = {'detail': 'GET'}
        resource = extensions.ResourceExtension(ALIAS,
                                                ImagesController(),
                                                collection_actions=coll_actions)
        return [resource]

    def get_controller_extensions(self):
        # This extension does not extend any other controllers.
        return []
|
TinajaLabs/tinajagate | refs/heads/master | downloads/python-suds-0.4/suds/sax/parser.py | 181 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuative interface for managing XML
documents. Although, the term, DOM, is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
from logging import getLogger
import suds.metrics
from suds import *
from suds.sax import *
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from suds.sax.attribute import Attribute
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
from cStringIO import StringIO
log = getLogger(__name__)
class Handler(ContentHandler):
    """SAX content handler that builds a suds Document/Element tree."""

    def __init__(self):
        # Stack of open nodes; index 0 is the Document root.
        self.nodes = [Document()]

    def startElement(self, name, attrs):
        top = self.top()
        node = Element(unicode(name), parent=top)
        for a in attrs.getNames():
            n = unicode(a)
            v = unicode(attrs.getValue(a))
            attribute = Attribute(n,v)
            # xmlns / xmlns:* attributes become namespace mappings,
            # not ordinary attributes.
            if self.mapPrefix(node, attribute):
                continue
            node.append(attribute)
        # Character data is accumulated here until endElement flushes it.
        node.charbuffer = []
        top.append(node)
        self.push(node)

    def mapPrefix(self, node, attribute):
        """Record a namespace declaration on ``node``; return True if consumed."""
        skip = False
        if attribute.name == 'xmlns':
            # Default namespace declaration (only when non-empty).
            if len(attribute.value):
                node.expns = unicode(attribute.value)
            skip = True
        elif attribute.prefix == 'xmlns':
            # Prefixed declaration: xmlns:<prefix>="uri".
            prefix = attribute.name
            node.nsprefixes[prefix] = unicode(attribute.value)
            skip = True
        return skip

    def endElement(self, name):
        name = unicode(name)
        current = self.top()
        # Flush buffered character data into the node's text.
        if len(current.charbuffer):
            current.text = Text(u''.join(current.charbuffer))
        del current.charbuffer
        if len(current):
            current.trim()
        currentqname = current.qname()
        if name == currentqname:
            self.pop()
        else:
            raise Exception('malformed document')

    def characters(self, content):
        text = unicode(content)
        node = self.top()
        node.charbuffer.append(text)

    def push(self, node):
        self.nodes.append(node)
        return node

    def pop(self):
        return self.nodes.pop()

    def top(self):
        # The innermost currently-open node.
        return self.nodes[len(self.nodes)-1]
class Parser:
    """SAX parser front-end producing a suds Document tree."""

    @classmethod
    def saxparser(cls):
        """Return a fresh (sax parser, Handler) pair."""
        p = make_parser()
        # External general entities disabled (avoids XXE-style fetches).
        p.setFeature(feature_external_ges, 0)
        h = Handler()
        p.setContentHandler(h)
        return (p, h)

    def parse(self, file=None, string=None):
        """
        SAX parse XML text.
        @param file: Parse a python I{file-like} object.
        @type file: I{file-like} object.
        @param string: Parse string XML.
        @type string: str
        @return: The parsed Document root; None when neither source is given.
        """
        timer = metrics.Timer()
        timer.start()
        sax, handler = self.saxparser()
        if file is not None:
            sax.parse(file)
            timer.stop()
            metrics.log.debug('sax (%s) duration: %s', file, timer)
            return handler.nodes[0]
        if string is not None:
            source = InputSource(None)
            source.setByteStream(StringIO(string))
            sax.parse(source)
            timer.stop()
            metrics.log.debug('%s\nsax duration: %s', string, timer)
            return handler.nodes[0]
mcmenaminadrian/vmufat | refs/heads/tmpsquash | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals

# Runtime flags, set while parsing the command line below.
quiet = 0      # -q: suppress progress output
test = 0       # -t: syntax-check only; print sysfs paths instead of using them
comments = 0   # -c: echo comment lines after the first command

# The rttest kernel driver exposes one directory per test thread in sysfs.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Symbolic command name -> opcode string understood by the rttest driver.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test opcode -> [status field letter, comparison, fixed argument].
# The fixed argument is None when the value comes from the test line instead
# (see analyse() below).
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    """Print command line usage to stdout."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Print ``str`` unless -q (quiet) was given."""
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status value against a test_opcodes entry.

    ``top`` is [field-letter, comparison, fixed-arg]; ``arg`` is the
    argument column from the test line.  Returns 1 when the comparison
    holds, else 0.
    """
    intval = int(val)

    if top[0] == "M":
        # Mutex state: extract digit number ``arg`` (counting from the
        # right) of the packed state value.  NB: relies on Python 2 ``/``
        # being floor division for ints.
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: accept a symbolic command name or a raw number.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: a test file argument, or stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns.  Each line is "cmd:opcode:threadid:data".
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line; echoed only once -c has kicked in (comments > 1).
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
        progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value:
        # "t" checks once; "w" polls until the condition holds.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
|
ageron/tensorflow | refs/heads/master | tensorflow/contrib/timeseries/python/timeseries/state_space_models/test_utils.py | 19 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def transition_power_test_template(test_case, model, num_steps):
  """Tests the transition_to_powers function of a state space model.

  Verifies that, for each power p in [0, num_steps), the model's
  transition_to_powers(p) matches the transition matrix multiplied by
  itself p times (computed iteratively as the ground truth).

  Args:
    test_case: A TensorFlow test case (provides assertions and the session
      scope used by `.eval()`).
    model: The state space model under test; must expose
      `get_state_transition()` and `transition_to_powers()`.
    num_steps: Number of successive powers to check.
  """
  transition_matrix = ops.convert_to_tensor(
      model.get_state_transition(), dtype=model.dtype)
  step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
  state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
  previous_matrix = array_ops.placeholder(
      shape=[state_dimension, state_dimension], dtype=transition_matrix.dtype)
  # Ground-truth recurrence: one extra multiplication by the transition
  # matrix per step.
  true_single_step_update = math_ops.matmul(previous_matrix,
                                            transition_matrix)
  # The same power is requested twice so the batched-powers path is
  # exercised as well; only element [0] is compared below.
  model_output_tensor = model.transition_to_powers(powers=array_ops.stack(
      [step_number, step_number]))
  with test_case.test_session():
    starting_matrix = linalg_ops.eye(
        state_dimension, batch_shape=array_ops.shape(num_steps)).eval()
    evaled_current_matrix = starting_matrix
    for iteration_number in range(num_steps):
      model_output = model_output_tensor.eval(
          feed_dict={step_number: iteration_number})
      # Looser tolerance for float32 models.
      test_case.assertAllClose(
          evaled_current_matrix,
          model_output[0],
          rtol=1e-8 if evaled_current_matrix.dtype == numpy.float64 else 1e-4)
      evaled_current_matrix = true_single_step_update.eval(
          feed_dict={previous_matrix: evaled_current_matrix})
def noise_accumulator_test_template(test_case, model, num_steps):
  """Tests `model`'s transition_power_noise_accumulator.

  Compares the model's accumulated transition noise against a step-by-step
  reference recurrence over `num_steps` steps.

  Args:
    test_case: A TensorFlow test case (provides assertions and the session
      scope used by `.eval()`).
    model: State space model under test; must expose
      `get_state_transition()`, `get_noise_transform()` and
      `transition_power_noise_accumulator()`.
    num_steps: Number of steps over which to compare accumulated noise.
  """
  transition_matrix = ops.convert_to_tensor(
      model.get_state_transition(), dtype=model.dtype)
  noise_transform = ops.convert_to_tensor(
      model.get_noise_transform(), dtype=model.dtype)
  state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
  state_noise_dimension = tensor_shape.dimension_value(noise_transform.shape[1])
  # Random positive-definite matrices: the per-step additive noise
  # covariance and the starting noise covariance.
  gen_noise_addition = math_utils.sign_magnitude_positive_definite(
      raw=random_ops.random_normal(
          shape=[state_noise_dimension, state_noise_dimension],
          dtype=model.dtype))
  gen_starting_noise = math_utils.sign_magnitude_positive_definite(
      random_ops.random_normal(
          shape=[state_dimension, state_dimension], dtype=model.dtype))
  starting_noise = array_ops.placeholder(
      shape=[state_dimension, state_dimension], dtype=model.dtype)
  step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
  # Reference single-step propagation of a covariance: A * S * A^T.
  starting_transitioned = math_ops.matmul(
      math_ops.matmul(transition_matrix, starting_noise),
      transition_matrix,
      adjoint_b=True)
  with test_case.test_session():
    evaled_starting_noise = gen_starting_noise.eval()
    current_starting_noise_transitioned = evaled_starting_noise
    current_noise = evaled_starting_noise
    evaled_noise_addition = gen_noise_addition.eval()
    # Per-step noise mapped into state space: Q * N * Q^T.
    evaled_noise_addition_transformed = math_ops.matmul(
        math_ops.matmul(noise_transform, evaled_noise_addition),
        noise_transform,
        adjoint_b=True).eval()
    model.state_transition_noise_covariance = evaled_noise_addition
    model._window_initializer(  # pylint: disable=protected-access
        times=math_ops.range(num_steps + 1)[..., None], state=(None, None, 0))
    model_update = model.transition_power_noise_accumulator(
        num_steps=step_number)
    for iteration_number in range(num_steps):
      model_new_noise = model_update.eval(
          feed_dict={step_number: iteration_number})
      # The model's accumulated noise plus the transitioned starting noise
      # must reproduce the reference recurrence's total covariance.
      test_case.assertAllClose(
          current_noise,
          model_new_noise + current_starting_noise_transitioned,
          rtol=1e-8 if current_noise.dtype == numpy.float64 else 1e-3)
      current_starting_noise_transitioned = starting_transitioned.eval(
          feed_dict={starting_noise: current_starting_noise_transitioned})
      current_noise = (
          starting_transitioned.eval(
              feed_dict={starting_noise: current_noise})
          + evaled_noise_addition_transformed)
|
zacps/zulip | refs/heads/master | contrib_bots/run.py | 3 | #!/usr/bin/env python
from __future__ import print_function
import importlib
import logging
import optparse
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '../api/zulip')):
sys.path.append('../api')
from zulip import Client
class RestrictedClient(object):
    """Expose only the subset of Client functionality that bots may use."""

    def __init__(self, client):
        # Bots get message sending plus their own identity -- nothing else.
        self.send_message = client.send_message
        profile = client.get_profile()
        try:
            self.full_name = profile['full_name']
            self.email = profile['email']
        except KeyError:
            logging.error('Cannot fetch user profile, make sure you have set'
                          ' up the zuliprc file correctly.')
            sys.exit(1)
def get_lib_module(lib_fn):
    """Import and return the bot module located at path lib_fn.

    Refuses anything outside contrib_bots/lib or without a .py extension,
    printing an error and exiting in either case.
    """
    abs_path = os.path.abspath(lib_fn)
    inside_lib_dir = os.path.dirname(abs_path).startswith(
        os.path.join(our_dir, 'lib'))
    if not inside_lib_dir:
        print('Sorry, we will only import code from contrib_bots/lib.')
        sys.exit(1)
    if not abs_path.endswith('.py'):
        print('Please use a .py extension for library files.')
        sys.exit(1)
    # Make "lib.<name>" importable, then load the bot module by name.
    sys.path.append('lib')
    stem = os.path.splitext(os.path.basename(abs_path))[0]
    return importlib.import_module('lib.' + stem)
def run_message_handler_for_bot(lib_module, quiet, config_file):
    """Run the bot's message loop forever.

    Builds a Client from config_file (make sure ~/.zuliprc is set up if no
    alternate file is given), wraps it in a RestrictedClient, and feeds every
    incoming message the bot's triage_message accepts into handle_message.
    """
    client = Client(config_file=config_file)
    restricted_client = RestrictedClient(client)
    message_handler = lib_module.handler_class()

    class StateHandler(object):
        """Trivial get/set container letting a bot keep state across messages."""
        def __init__(self):
            self.state = None

        def set_state(self, state):
            self.state = state

        def get_state(self):
            return self.state

    state_handler = StateHandler()

    if not quiet:
        print(message_handler.usage())

    def handle_message(message):
        logging.info('waiting for next message')
        wants_message = message_handler.triage_message(
            message=message,
            client=restricted_client)
        if wants_message:
            message_handler.handle_message(
                message=message,
                client=restricted_client,
                state_handler=state_handler
            )

    logging.info('starting message handling...')
    client.call_on_each_message(handle_message)
def run():
    """Command-line entry point: parse options and launch the requested bot."""
    usage = '''
        ./run.py <lib file>
        Example: ./run.py lib/followup.py
        (This program loads bot-related code from the
        library code and then runs a message loop,
        feeding messages to the library code to handle.)
        Please make sure you have a current ~/.zuliprc
        file with the credentials you want to use for
        this bot.
        See lib/readme.md for more context.
        '''
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--quiet', '-q',
                      action='store_true',
                      help='Turn off logging output.')
    parser.add_option('--config-file',
                      action='store',
                      help='(alternate config file to ~/.zuliprc)')
    options, args = parser.parse_args()

    if not args:
        print('You must specify a library!')
        sys.exit(1)

    lib_module = get_lib_module(lib_fn=args[0])

    # Logging is enabled by default; --quiet suppresses it.
    if not options.quiet:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    run_message_handler_for_bot(
        lib_module=lib_module,
        config_file=options.config_file,
        quiet=options.quiet
    )
# Script entry point: delegate to the CLI runner.
if __name__ == '__main__':
    run()
|
SB-BISS/RLACOSarsaLambda | refs/heads/master | model/__init__.py | 12133432 | |
zouyapeng/horizon_change | refs/heads/juno | horizon/management/commands/__init__.py | 12133432 | |
flisoltacna/flisoltacna2014 | refs/heads/master | flisol/apps/home/__init__.py | 12133432 | |
pwong-mapr/private-hue | refs/heads/HUE-1096-abe | apps/jobsub/src/jobsub/migrations/__init__.py | 12133432 | |
Uberlearner/uberlearner | refs/heads/master | uberlearner/main/management/commands/__init__.py | 12133432 | |
samanehsan/osf.io | refs/heads/develop | website/search/__init__.py | 12133432 | |
Critical-Impact/ffrpg-gen | refs/heads/master | django/settings/__init__.py | 12133432 | |
bikong2/django | refs/heads/master | tests/get_object_or_404/__init__.py | 12133432 | |
michelts/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/contrib/gis/tests/geoapp/__init__.py | 12133432 | |
sugryo/wget | refs/heads/master | testenv/exc/__init__.py | 12133432 | |
developerinlondon/ansible-modules-extras | refs/heads/devel | messaging/__init__.py | 12133432 | |
Nettacker/Nettacker | refs/heads/master | lib/scan/drupal_version/engine.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Pradeep Jairamani , github.com/pradeepjairamani
import socket
import socks
import time
import json
import threading
import string
import random
import sys
import re
import os
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
import requests
def extra_requirements_dict():
    """Return this module's default extra requirements.

    Currently just the TCP ports probed for a Drupal install.
    """
    requirements = {"drupal_version_ports": [80, 443]}
    return requirements
def conn(targ, port, timeout_sec, socks_proxy):
    """Open a TCP connection to targ:port, optionally through a SOCKS proxy.

    Args:
        targ: hostname or IP address to connect to.
        port: TCP port number.
        timeout_sec: socket timeout in seconds.
        socks_proxy: None for a direct connection, or a proxy URL such as
            "socks5://user:pass@host:port" or "socks4://host:port".

    Returns:
        A connected socket object, or None on any failure (resolution,
        refusal, timeout, bad proxy spec, ...).
    """
    try:
        if socks_proxy is not None:
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                # Proxy URL carries credentials: user:pass@host:port
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket
                # BUG FIX: assign the resolver function itself; the original
                # called getaddrinfo() with no arguments here (compare the
                # credentialed branch above, which assigns it correctly).
                socket.getaddrinfo = getaddrinfo
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sys.stdout.flush()
        s.settimeout(timeout_sec)
        s.connect((targ, port))
        return s
    except Exception:
        # Any resolution/connection error is treated as "host unreachable".
        return None
def drupal_version(target, port, timeout_sec, log_in_file, language, time_sleep,
                   thread_tmp_filename, socks_proxy, scan_id, scan_cmd):
    """Probe target:port for a Drupal install and return its version.

    First checks reachability with a raw TCP connect, then fetches
    /CHANGELOG.txt over HTTP(S) and scrapes the newest version number from it.

    Returns:
        The version string (e.g. "7.54") on success, False otherwise.
    """
    try:
        s = conn(target, port, timeout_sec, socks_proxy)
        if not s:
            return False
        # FIX: the probe socket was leaked before; it is only a reachability
        # check, so close it (requests opens its own connection below).
        s.close()
        if target_type(target) != "HTTP" and port == 443:
            target = 'https://' + target
        if target_type(target) != "HTTP" and port == 80:
            target = 'http://' + target
        req = requests.get(target + '/CHANGELOG.txt')
        try:
            # CHANGELOG.txt starts each release entry with "Drupal x.y,";
            # FIX: raw string so \d is a regex escape, not a string escape.
            pattern = re.compile(r'Drupal (\d+\.\d+),')
            version = re.findall(pattern, req.text)
            if version:
                # First match is the newest release listed.
                return version[0]
            else:
                return False
        except Exception:
            return False
    except Exception:
        # some error warning
        return False
def __drupal_version(target, port, timeout_sec, log_in_file, language, time_sleep,
                     thread_tmp_filename, socks_proxy, scan_id, scan_cmd):
    """Thread worker: detect the Drupal version on one port and log the result.

    Returns True when a version was found (and logged), False otherwise.
    """
    version = drupal_version(target, port, timeout_sec, log_in_file, language, time_sleep,
                             thread_tmp_filename, socks_proxy, scan_id, scan_cmd)
    if not version:
        return False
    info(messages(language, "found").format(
        target, "drupal Version", version))
    # Flip the shared "nothing found" flag so start() skips its not-found log.
    __log_into_file(thread_tmp_filename, 'w', '0', language)
    record = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': port, 'TYPE':'drupal_version_scan',
                         'DESCRIPTION': messages(language, "found").format(target, "drupal Version", version), 'TIME': now(),
                         'CATEGORY': "vuln",
                         'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
    __log_into_file(log_in_file, 'a', record, language)
    return True
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
          verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd):  # Main function
    """Entry point for the drupal_version scan module.

    Spawns one detection thread per port (bounded by thread_number), waits for
    them to finish, and writes a "not found" record when no worker reported a
    Drupal version. Unsupported target types are warned about and skipped.
    """
    # BUG FIX: the original guard chained `!=` comparisons with `or`
    # (`a != x or a != y or a != z`), which is always true for distinct
    # x/y/z, so the unsupported-target warning below could never fire.
    # The intent is clearly a membership test on the supported types.
    if target_type(target) in ('SINGLE_IPv4', 'DOMAIN', 'HTTP'):
        # requirements check: allow method_args to override default ports
        new_extra_requirements = extra_requirements_dict()
        if methods_args is not None:
            for extra_requirement in extra_requirements_dict():
                if extra_requirement in methods_args:
                    new_extra_requirements[
                        extra_requirement] = methods_args[extra_requirement]
        extra_requirements = new_extra_requirements
        if ports is None:
            ports = extra_requirements["drupal_version_ports"]
        if target_type(target) == 'HTTP':
            target = target_to_host(target)
        threads = []
        total_req = len(ports)
        # Shared flag file: workers write '0' when they find a version.
        thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
            random.choice(string.ascii_letters + string.digits) for _ in range(20))
        __log_into_file(thread_tmp_filename, 'w', '1', language)
        trying = 0
        keyboard_interrupt_flag = False
        for port in ports:
            port = int(port)
            t = threading.Thread(target=__drupal_version,
                                 args=(target, int(port), timeout_sec, log_in_file, language, time_sleep,
                                       thread_tmp_filename, socks_proxy, scan_id, scan_cmd))
            threads.append(t)
            t.start()
            trying += 1
            if verbose_level > 3:
                info(
                    messages(language, "trying_message").format(trying, total_req, num, total, target, port, 'drupal_version_scan'))
            # Throttle: wait until a worker slot frees up.
            while 1:
                try:
                    if threading.activeCount() >= thread_number:
                        time.sleep(0.01)
                    else:
                        break
                except KeyboardInterrupt:
                    keyboard_interrupt_flag = True
                    break
            if keyboard_interrupt_flag:
                break
        # wait for threads (bounded by roughly timeout_sec overall)
        kill_switch = 0
        kill_time = int(
            timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1
        while 1:
            time.sleep(0.1)
            kill_switch += 1
            try:
                if threading.activeCount() == 1 or kill_switch == kill_time:
                    break
            except KeyboardInterrupt:
                break
        thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
        if thread_write == 1 and verbose_level != 0:
            # No worker flipped the flag: record a "not found" scan event.
            info(messages(language, "not_found"))
            data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'drupal_version_scan',
                               'DESCRIPTION': messages(language, "not_found"), 'TIME': now(),
                               'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
            __log_into_file(log_in_file, 'a', data, language)
        os.remove(thread_tmp_filename)
    else:
        warn(messages(language, "input_target_error").format(
            'drupal_version_scan', target))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.